1 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40 --- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53 +++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86 --- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277 @@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
282 +
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286 @@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294 +
295 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296 +{
297 + if (!__builtin_constant_p(n))
298 + check_object_size(to, n, false);
299 + return ___copy_from_user(to, from, n);
300 +}
301 +
302 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303 +{
304 + if (!__builtin_constant_p(n))
305 + check_object_size(from, n, true);
306 + return ___copy_to_user(to, from, n);
307 +}
308 +
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316 + if ((long)n < 0)
317 + return n;
318 +
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326 + if ((long)n < 0)
327 + return n;
328 +
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332 diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333 --- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334 +++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339 -EXPORT_SYMBOL(__copy_from_user);
340 -EXPORT_SYMBOL(__copy_to_user);
341 +EXPORT_SYMBOL(___copy_from_user);
342 +EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346 diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347 --- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348 +++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353 -struct kgdb_arch arch_kgdb_ops = {
354 +const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358 diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359 --- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360 +++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365 +extern void gr_handle_kernel_exploit(void);
366 +
367 /*
368 * This function is protected against re-entrancy.
369 */
370 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374 + gr_handle_kernel_exploit();
375 +
376 do_exit(SIGSEGV);
377 }
378
379 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380 --- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381 +++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382 @@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386 - * size_t __copy_from_user(void *to, const void *from, size_t n)
387 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391 @@ -84,11 +84,11 @@
392
393 .text
394
395 -ENTRY(__copy_from_user)
396 +ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400 -ENDPROC(__copy_from_user)
401 +ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406 --- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408 @@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412 - * size_t __copy_to_user(void *to, const void *from, size_t n)
413 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417 @@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421 -WEAK(__copy_to_user)
422 +WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426 -ENDPROC(__copy_to_user)
427 +ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432 --- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434 @@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447 -ENTRY(__copy_to_user)
448 +ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456 -ENDPROC(__copy_to_user)
457 +ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473 -ENTRY(__copy_from_user)
474 +ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482 -ENDPROC(__copy_from_user)
483 +ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488 --- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489 +++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490 @@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494 -__copy_to_user(void __user *to, const void *from, unsigned long n)
495 +___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499 diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500 --- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501 +++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506 -static struct platform_suspend_ops at91_pm_ops ={
507 +static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511 diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512 --- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513 +++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518 -static struct platform_suspend_ops omap_pm_ops ={
519 +static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524 --- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530 -static struct platform_suspend_ops omap_pm_ops = {
531 +static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536 --- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542 -static struct platform_suspend_ops omap_pm_ops = {
543 +static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547 diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548 --- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549 +++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554 -static struct platform_suspend_ops pnx4008_pm_ops = {
555 +static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560 --- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561 +++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566 -static struct platform_suspend_ops pxa_pm_ops = {
567 +static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572 --- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573 +++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578 -static struct platform_suspend_ops sharpsl_pm_ops = {
579 +static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583 diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584 --- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585 +++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590 -static struct platform_suspend_ops sa11x0_pm_ops = {
591 +static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595 diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596 --- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597 +++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602 +#ifdef CONFIG_PAX_PAGEEXEC
603 + if (fsr & FSR_LNX_PF) {
604 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605 + do_group_exit(SIGKILL);
606 + }
607 +#endif
608 +
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616 +#ifdef CONFIG_PAX_PAGEEXEC
617 +void pax_report_insns(void *pc, void *sp)
618 +{
619 + long i;
620 +
621 + printk(KERN_ERR "PAX: bytes at PC: ");
622 + for (i = 0; i < 20; i++) {
623 + unsigned char c;
624 + if (get_user(c, (__force unsigned char __user *)pc+i))
625 + printk(KERN_CONT "?? ");
626 + else
627 + printk(KERN_CONT "%02x ", c);
628 + }
629 + printk("\n");
630 +
631 + printk(KERN_ERR "PAX: bytes at SP-4: ");
632 + for (i = -1; i < 20; i++) {
633 + unsigned long c;
634 + if (get_user(c, (__force unsigned long __user *)sp+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08lx ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642 +
643 /*
644 * First Level Translation Fault Handler
645 *
646 diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647 --- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648 +++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653 +#ifdef CONFIG_PAX_RANDMMAP
654 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655 +#endif
656 +
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664 - if (TASK_SIZE - len >= addr &&
665 - (!vma || addr + len <= vma->vm_start))
666 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670 - start_addr = addr = mm->free_area_cache;
671 + start_addr = addr = mm->free_area_cache;
672 } else {
673 - start_addr = addr = TASK_UNMAPPED_BASE;
674 - mm->cached_hole_size = 0;
675 + start_addr = addr = mm->mmap_base;
676 + mm->cached_hole_size = 0;
677 }
678
679 full_search:
680 @@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684 - if (start_addr != TASK_UNMAPPED_BASE) {
685 - start_addr = addr = TASK_UNMAPPED_BASE;
686 + if (start_addr != mm->mmap_base) {
687 + start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693 - if (!vma || addr + len <= vma->vm_start) {
694 + if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698 diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699 --- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705 -static struct platform_suspend_ops s3c_pm_ops = {
706 +static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711 --- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712 +++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN 15
724 +#define PAX_DELTA_STACK_LEN 15
725 +#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730 --- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731 +++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736 -D(14) KM_TYPE_NR
737 +D(14) KM_CLEARPAGE,
738 +D(15) KM_TYPE_NR
739 };
740
741 #undef D
742 diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743 --- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744 +++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745 @@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749 -static struct platform_suspend_ops avr32_pm_ops = {
750 +static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754 diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755 --- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756 +++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761 +#ifdef CONFIG_PAX_PAGEEXEC
762 +void pax_report_insns(void *pc, void *sp)
763 +{
764 + unsigned long i;
765 +
766 + printk(KERN_ERR "PAX: bytes at PC: ");
767 + for (i = 0; i < 20; i++) {
768 + unsigned char c;
769 + if (get_user(c, (unsigned char *)pc+i))
770 + printk(KERN_CONT "???????? ");
771 + else
772 + printk(KERN_CONT "%02x ", c);
773 + }
774 + printk("\n");
775 +}
776 +#endif
777 +
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781 @@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785 +
786 +#ifdef CONFIG_PAX_PAGEEXEC
787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790 + do_group_exit(SIGKILL);
791 + }
792 + }
793 +#endif
794 +
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798 diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799 --- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805 -struct kgdb_arch arch_kgdb_ops = {
806 +const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810 diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811 --- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812 +++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817 -struct platform_suspend_ops bfin_pm_ops = {
818 +const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822 diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823 --- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824 +++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825 @@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829 + KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833 diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834 --- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835 +++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840 - if (TASK_SIZE - len >= addr &&
841 - (!vma || addr + len <= vma->vm_start))
842 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850 - if (addr + len <= vma->vm_start)
851 + if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859 - if (addr + len <= vma->vm_start)
860 + if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865 --- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866 +++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867 @@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886 --- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892 -extern struct dma_map_ops swiotlb_dma_ops;
893 +extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901 -struct dma_map_ops sba_dma_ops = {
902 +const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906 diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907 --- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908 +++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913 +#ifdef CONFIG_PAX_ASLR
914 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915 +
916 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918 +#endif
919 +
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923 diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924 --- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
931 +#ifdef CONFIG_PAX_RANDUSTACK
932 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
933 +#else
934 +#define __IA32_DELTA_STACK 0UL
935 +#endif
936 +
937 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938 +
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943 --- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944 +++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945 @@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949 -extern struct dma_map_ops *dma_ops;
950 +extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
959 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
968 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
977 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
984 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989 --- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990 +++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991 @@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995 +#ifdef CONFIG_PAX_ASLR
996 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997 +
998 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000 +#endif
1001 +
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006 --- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007 +++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021 -extern struct dma_map_ops *dma_get_ops(struct device *);
1022 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027 --- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033 -
1034 +#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038 @@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042 +
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047 +#else
1048 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050 +# define PAGE_COPY_NOEXEC PAGE_COPY
1051 +#endif
1052 +
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057 --- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058 +++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069 --- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070 +++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089 diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090 --- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091 +++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092 @@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096 -struct dma_map_ops *dma_ops;
1097 +const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105 -struct dma_map_ops *dma_get_ops(struct device *dev)
1106 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110 diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111 --- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112 +++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117 - if (mod && mod->arch.init_unw_table &&
1118 - module_region == mod->module_init) {
1119 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127 +in_init_rx (const struct module *mod, uint64_t addr)
1128 +{
1129 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130 +}
1131 +
1132 +static inline int
1133 +in_init_rw (const struct module *mod, uint64_t addr)
1134 +{
1135 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136 +}
1137 +
1138 +static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141 - return addr - (uint64_t) mod->module_init < mod->init_size;
1142 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143 +}
1144 +
1145 +static inline int
1146 +in_core_rx (const struct module *mod, uint64_t addr)
1147 +{
1148 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149 +}
1150 +
1151 +static inline int
1152 +in_core_rw (const struct module *mod, uint64_t addr)
1153 +{
1154 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160 - return addr - (uint64_t) mod->module_core < mod->core_size;
1161 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170 + if (in_init_rx(mod, val))
1171 + val -= (uint64_t) mod->module_init_rx;
1172 + else if (in_init_rw(mod, val))
1173 + val -= (uint64_t) mod->module_init_rw;
1174 + else if (in_core_rx(mod, val))
1175 + val -= (uint64_t) mod->module_core_rx;
1176 + else if (in_core_rw(mod, val))
1177 + val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185 - if (mod->core_size > MAX_LTOFF)
1186 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191 - gp = mod->core_size - MAX_LTOFF / 2;
1192 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194 - gp = mod->core_size / 2;
1195 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202 --- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203 +++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208 -extern struct dma_map_ops intel_dma_ops;
1209 +extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224 +
1225 +static const struct dma_map_ops intel_iommu_dma_ops = {
1226 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227 + .alloc_coherent = intel_alloc_coherent,
1228 + .free_coherent = intel_free_coherent,
1229 + .map_sg = intel_map_sg,
1230 + .unmap_sg = intel_unmap_sg,
1231 + .map_page = intel_map_page,
1232 + .unmap_page = intel_unmap_page,
1233 + .mapping_error = intel_mapping_error,
1234 +
1235 + .sync_single_for_cpu = machvec_dma_sync_single,
1236 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1237 + .sync_single_for_device = machvec_dma_sync_single,
1238 + .sync_sg_for_device = machvec_dma_sync_sg,
1239 + .dma_supported = iommu_dma_supported,
1240 +};
1241 +
1242 void __init pci_iommu_alloc(void)
1243 {
1244 - dma_ops = &intel_dma_ops;
1245 -
1246 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250 - dma_ops->dma_supported = iommu_dma_supported;
1251 + dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256 --- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257 +++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262 -struct dma_map_ops swiotlb_dma_ops = {
1263 +const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267 diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268 --- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269 +++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274 +
1275 +#ifdef CONFIG_PAX_RANDMMAP
1276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1277 + addr = mm->free_area_cache;
1278 + else
1279 +#endif
1280 +
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288 - if (start_addr != TASK_UNMAPPED_BASE) {
1289 + if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291 - addr = TASK_UNMAPPED_BASE;
1292 + addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297 - if (!vma || addr + len <= vma->vm_start) {
1298 + if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302 diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303 --- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304 +++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309 -static struct sysfs_ops cache_sysfs_ops = {
1310 +static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314 diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315 --- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316 +++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317 @@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321 - __phys_per_cpu_start = __per_cpu_load;
1322 + __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326 diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327 --- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328 +++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333 +#ifdef CONFIG_PAX_PAGEEXEC
1334 +void pax_report_insns(void *pc, void *sp)
1335 +{
1336 + unsigned long i;
1337 +
1338 + printk(KERN_ERR "PAX: bytes at PC: ");
1339 + for (i = 0; i < 8; i++) {
1340 + unsigned int c;
1341 + if (get_user(c, (unsigned int *)pc+i))
1342 + printk(KERN_CONT "???????? ");
1343 + else
1344 + printk(KERN_CONT "%08x ", c);
1345 + }
1346 + printk("\n");
1347 +}
1348 +#endif
1349 +
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357 - if ((vma->vm_flags & mask) != mask)
1358 + if ((vma->vm_flags & mask) != mask) {
1359 +
1360 +#ifdef CONFIG_PAX_PAGEEXEC
1361 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363 + goto bad_area;
1364 +
1365 + up_read(&mm->mmap_sem);
1366 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367 + do_group_exit(SIGKILL);
1368 + }
1369 +#endif
1370 +
1371 goto bad_area;
1372
1373 + }
1374 +
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378 diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379 --- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385 - if (!vmm || (addr + len) <= vmm->vm_start)
1386 + if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390 diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391 --- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392 +++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400 + vma->vm_flags &= ~VM_EXEC;
1401 +
1402 +#ifdef CONFIG_PAX_MPROTECT
1403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404 + vma->vm_flags &= ~VM_MAYEXEC;
1405 +#endif
1406 +
1407 + }
1408 +#endif
1409 +
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413 diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414 --- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415 +++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420 -static struct dma_map_ops sn_dma_ops = {
1421 +static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
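Many hunks in this patch only add const to operations tables built from function pointers (sn_dma_ops here; dma_map_ops, platform_suspend_ops, sysfs_ops and kgdb_arch instances below): a const-qualified table can live in read-only memory, so its callbacks cannot be silently rewritten at run time. In miniature:

#include <stdio.h>

struct ops {                            /* a tiny function-pointer table */
        int  (*map)(int);
        void (*unmap)(int);
};

static int  demo_map(int x)   { return x + 1; }
static void demo_unmap(int x) { (void)x; }

/* const lets the table be placed in a read-only section, so the pointers
 * inside it cannot be overwritten at run time to hijack the callbacks. */
static const struct ops demo_ops = {
        .map   = demo_map,
        .unmap = demo_unmap,
};

int main(void)
{
        printf("%d\n", demo_ops.map(41));       /* 42 */
        /* demo_ops.map = evil_map;     -- rejected at compile time */
        return 0;
}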
1425 diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426 --- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427 +++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428 @@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432 + if ((long)n < 0)
1433 + return n;
1434 +
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442 + if ((long)n < 0)
1443 + return n;
1444 +
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
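The `(long)n < 0` guard added to the m32r copy helpers (and to the powerpc ones further down) treats an unsigned length with the top bit set -- usually a negative signed size converted to unsigned -- as an error and bails out before touching userspace. A compact illustration of the lengths it rejects:

#include <stdio.h>

/* Nonzero when an unsigned copy length looks like a negative signed value
 * that was converted to unsigned -- the case the added guard rejects before
 * access_ok()/__copy_user() ever run. */
static int bogus_len(unsigned long n)
{
        return (long)n < 0;
}

int main(void)
{
        int requested = -4;                         /* e.g. a miscomputed size */
        unsigned long n = (unsigned long)requested; /* wraps to a huge value */

        printf("n=%#lx rejected=%d\n", n, bogus_len(n));        /* rejected=1 */
        printf("n=%#lx rejected=%d\n", 64UL, bogus_len(64UL));  /* rejected=0 */
        return 0;
}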
1448 diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449 --- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450 +++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455 -static struct platform_suspend_ops db1x_pm_ops = {
1456 +static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460 diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461 --- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462 +++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467 +#ifdef CONFIG_PAX_ASLR
1468 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469 +
1470 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472 +#endif
1473 +
1474 #endif /* _ASM_ELF_H */
1475 diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476 --- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477 +++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487 diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1488 --- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1489 +++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1490 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1491 */
1492 #define __ARCH_WANT_UNLOCKED_CTXSW
1493
1494 -extern unsigned long arch_align_stack(unsigned long sp);
1495 +#define arch_align_stack(x) ((x) & ~0xfUL)
1496
1497 #endif /* _ASM_SYSTEM_H */
1498 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1499 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1500 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1501 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1502 #undef ELF_ET_DYN_BASE
1503 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1504
1505 +#ifdef CONFIG_PAX_ASLR
1506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507 +
1508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510 +#endif
1511 +
1512 #include <asm/processor.h>
1513 #include <linux/module.h>
1514 #include <linux/elfcore.h>
1515 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1516 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1517 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1518 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1519 #undef ELF_ET_DYN_BASE
1520 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1521
1522 +#ifdef CONFIG_PAX_ASLR
1523 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1524 +
1525 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1526 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1527 +#endif
1528 +
1529 #include <asm/processor.h>
1530
1531 /*
1532 diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1533 --- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1534 +++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1535 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1536 return -1;
1537 }
1538
1539 +/* cannot be const */
1540 struct kgdb_arch arch_kgdb_ops;
1541
1542 /*
1543 diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1544 --- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1545 +++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1546 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1547 out:
1548 return pc;
1549 }
1550 -
1551 -/*
1552 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1553 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1554 - */
1555 -unsigned long arch_align_stack(unsigned long sp)
1556 -{
1557 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1558 - sp -= get_random_int() & ~PAGE_MASK;
1559 -
1560 - return sp & ALMASK;
1561 -}
1562 diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1563 --- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1564 +++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1565 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1566 do_color_align = 0;
1567 if (filp || (flags & MAP_SHARED))
1568 do_color_align = 1;
1569 +
1570 +#ifdef CONFIG_PAX_RANDMMAP
1571 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1572 +#endif
1573 +
1574 if (addr) {
1575 if (do_color_align)
1576 addr = COLOUR_ALIGN(addr, pgoff);
1577 else
1578 addr = PAGE_ALIGN(addr);
1579 vmm = find_vma(current->mm, addr);
1580 - if (task_size - len >= addr &&
1581 - (!vmm || addr + len <= vmm->vm_start))
1582 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1583 return addr;
1584 }
1585 - addr = TASK_UNMAPPED_BASE;
1586 + addr = current->mm->mmap_base;
1587 if (do_color_align)
1588 addr = COLOUR_ALIGN(addr, pgoff);
1589 else
1590 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1591 /* At this point: (!vmm || addr < vmm->vm_end). */
1592 if (task_size - len < addr)
1593 return -ENOMEM;
1594 - if (!vmm || addr + len <= vmm->vm_start)
1595 + if (check_heap_stack_gap(vmm, addr, len))
1596 return addr;
1597 addr = vmm->vm_end;
1598 if (do_color_align)
1599 diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1600 --- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1601 +++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1602 @@ -26,6 +26,23 @@
1603 #include <asm/ptrace.h>
1604 #include <asm/highmem.h> /* For VMALLOC_END */
1605
1606 +#ifdef CONFIG_PAX_PAGEEXEC
1607 +void pax_report_insns(void *pc, void *sp)
1608 +{
1609 + unsigned long i;
1610 +
1611 + printk(KERN_ERR "PAX: bytes at PC: ");
1612 + for (i = 0; i < 5; i++) {
1613 + unsigned int c;
1614 + if (get_user(c, (unsigned int *)pc+i))
1615 + printk(KERN_CONT "???????? ");
1616 + else
1617 + printk(KERN_CONT "%08x ", c);
1618 + }
1619 + printk("\n");
1620 +}
1621 +#endif
1622 +
1623 /*
1624 * This routine handles page faults. It determines the address,
1625 * and the problem, and then passes it off to one of the appropriate
1626 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1627 --- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1628 +++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1629 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1630
1631 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1632
1633 +#ifdef CONFIG_PAX_ASLR
1634 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1635 +
1636 +#define PAX_DELTA_MMAP_LEN 16
1637 +#define PAX_DELTA_STACK_LEN 16
1638 +#endif
1639 +
1640 /* This yields a mask that user programs can use to figure out what
1641 instruction set this CPU supports. This could be done in user space,
1642 but it's not easy, and we've already done it here. */
1643 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1644 --- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1645 +++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1646 @@ -207,6 +207,17 @@
1647 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1648 #define PAGE_COPY PAGE_EXECREAD
1649 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1650 +
1651 +#ifdef CONFIG_PAX_PAGEEXEC
1652 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1653 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1654 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1655 +#else
1656 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1657 +# define PAGE_COPY_NOEXEC PAGE_COPY
1658 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1659 +#endif
1660 +
1661 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1662 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1663 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1664 diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1665 --- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1666 +++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1667 @@ -95,16 +95,38 @@
1668
1669 /* three functions to determine where in the module core
1670 * or init pieces the location is */
1671 +static inline int in_init_rx(struct module *me, void *loc)
1672 +{
1673 + return (loc >= me->module_init_rx &&
1674 + loc < (me->module_init_rx + me->init_size_rx));
1675 +}
1676 +
1677 +static inline int in_init_rw(struct module *me, void *loc)
1678 +{
1679 + return (loc >= me->module_init_rw &&
1680 + loc < (me->module_init_rw + me->init_size_rw));
1681 +}
1682 +
1683 static inline int in_init(struct module *me, void *loc)
1684 {
1685 - return (loc >= me->module_init &&
1686 - loc <= (me->module_init + me->init_size));
1687 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1688 +}
1689 +
1690 +static inline int in_core_rx(struct module *me, void *loc)
1691 +{
1692 + return (loc >= me->module_core_rx &&
1693 + loc < (me->module_core_rx + me->core_size_rx));
1694 +}
1695 +
1696 +static inline int in_core_rw(struct module *me, void *loc)
1697 +{
1698 + return (loc >= me->module_core_rw &&
1699 + loc < (me->module_core_rw + me->core_size_rw));
1700 }
1701
1702 static inline int in_core(struct module *me, void *loc)
1703 {
1704 - return (loc >= me->module_core &&
1705 - loc <= (me->module_core + me->core_size));
1706 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1707 }
1708
1709 static inline int in_local(struct module *me, void *loc)
1710 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1711 }
1712
1713 /* align things a bit */
1714 - me->core_size = ALIGN(me->core_size, 16);
1715 - me->arch.got_offset = me->core_size;
1716 - me->core_size += gots * sizeof(struct got_entry);
1717 -
1718 - me->core_size = ALIGN(me->core_size, 16);
1719 - me->arch.fdesc_offset = me->core_size;
1720 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1721 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1722 + me->arch.got_offset = me->core_size_rw;
1723 + me->core_size_rw += gots * sizeof(struct got_entry);
1724 +
1725 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1726 + me->arch.fdesc_offset = me->core_size_rw;
1727 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1728
1729 me->arch.got_max = gots;
1730 me->arch.fdesc_max = fdescs;
1731 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1732
1733 BUG_ON(value == 0);
1734
1735 - got = me->module_core + me->arch.got_offset;
1736 + got = me->module_core_rw + me->arch.got_offset;
1737 for (i = 0; got[i].addr; i++)
1738 if (got[i].addr == value)
1739 goto out;
1740 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1741 #ifdef CONFIG_64BIT
1742 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1743 {
1744 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1745 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1746
1747 if (!value) {
1748 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1749 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1750
1751 /* Create new one */
1752 fdesc->addr = value;
1753 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1754 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1755 return (Elf_Addr)fdesc;
1756 }
1757 #endif /* CONFIG_64BIT */
1758 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1759
1760 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1761 end = table + sechdrs[me->arch.unwind_section].sh_size;
1762 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1763 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1764
1765 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1766 me->arch.unwind_section, table, end, gp);
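The in_init_rx()/in_init_rw()/in_core_rx()/in_core_rw() helpers added above follow from the patch-wide split of each module's memory into a read-execute and a read-write region; "is this address inside the module?" becomes the union of two half-open range checks (note the original `<=` also becomes `<`). A stripped-down model of that membership test, with shortened field names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mod_layout {                     /* models module_core_rx/_rw + sizes */
        char *core_rx; unsigned long size_rx;
        char *core_rw; unsigned long size_rw;
};

static bool in_range(const char *base, unsigned long size, const void *loc)
{
        uintptr_t p = (uintptr_t)loc, b = (uintptr_t)base;
        return p >= b && p < b + size;  /* half-open, like the patched helpers */
}

static bool in_core(const struct mod_layout *m, const void *loc)
{
        return in_range(m->core_rx, m->size_rx, loc) ||
               in_range(m->core_rw, m->size_rw, loc);
}

int main(void)
{
        static char rx[128], rw[256];
        struct mod_layout m = { rx, sizeof(rx), rw, sizeof(rw) };

        printf("%d %d %d\n",
               in_core(&m, rx + 16),    /* 1: inside the RX half */
               in_core(&m, rw + 200),   /* 1: inside the RW half */
               in_core(&m, rx + 128));  /* 0: one past the RX end */
        return 0;
}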
1767 diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1768 --- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1769 +++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1770 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1771 /* At this point: (!vma || addr < vma->vm_end). */
1772 if (TASK_SIZE - len < addr)
1773 return -ENOMEM;
1774 - if (!vma || addr + len <= vma->vm_start)
1775 + if (check_heap_stack_gap(vma, addr, len))
1776 return addr;
1777 addr = vma->vm_end;
1778 }
1779 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1780 /* At this point: (!vma || addr < vma->vm_end). */
1781 if (TASK_SIZE - len < addr)
1782 return -ENOMEM;
1783 - if (!vma || addr + len <= vma->vm_start)
1784 + if (check_heap_stack_gap(vma, addr, len))
1785 return addr;
1786 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1787 if (addr < vma->vm_end) /* handle wraparound */
1788 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1789 if (flags & MAP_FIXED)
1790 return addr;
1791 if (!addr)
1792 - addr = TASK_UNMAPPED_BASE;
1793 + addr = current->mm->mmap_base;
1794
1795 if (filp) {
1796 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1797 diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1798 --- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1799 +++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1800 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1801
1802 down_read(&current->mm->mmap_sem);
1803 vma = find_vma(current->mm,regs->iaoq[0]);
1804 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1805 - && (vma->vm_flags & VM_EXEC)) {
1806 -
1807 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1808 fault_address = regs->iaoq[0];
1809 fault_space = regs->iasq[0];
1810
1811 diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1812 --- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1813 +++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1814 @@ -15,6 +15,7 @@
1815 #include <linux/sched.h>
1816 #include <linux/interrupt.h>
1817 #include <linux/module.h>
1818 +#include <linux/unistd.h>
1819
1820 #include <asm/uaccess.h>
1821 #include <asm/traps.h>
1822 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1823 static unsigned long
1824 parisc_acctyp(unsigned long code, unsigned int inst)
1825 {
1826 - if (code == 6 || code == 16)
1827 + if (code == 6 || code == 7 || code == 16)
1828 return VM_EXEC;
1829
1830 switch (inst & 0xf0000000) {
1831 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1832 }
1833 #endif
1834
1835 +#ifdef CONFIG_PAX_PAGEEXEC
1836 +/*
1837 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1838 + *
1839 + * returns 1 when task should be killed
1840 + * 2 when rt_sigreturn trampoline was detected
1841 + * 3 when unpatched PLT trampoline was detected
1842 + */
1843 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1844 +{
1845 +
1846 +#ifdef CONFIG_PAX_EMUPLT
1847 + int err;
1848 +
1849 + do { /* PaX: unpatched PLT emulation */
1850 + unsigned int bl, depwi;
1851 +
1852 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1853 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1854 +
1855 + if (err)
1856 + break;
1857 +
1858 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1859 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1860 +
1861 + err = get_user(ldw, (unsigned int *)addr);
1862 + err |= get_user(bv, (unsigned int *)(addr+4));
1863 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1864 +
1865 + if (err)
1866 + break;
1867 +
1868 + if (ldw == 0x0E801096U &&
1869 + bv == 0xEAC0C000U &&
1870 + ldw2 == 0x0E881095U)
1871 + {
1872 + unsigned int resolver, map;
1873 +
1874 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1875 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1876 + if (err)
1877 + break;
1878 +
1879 + regs->gr[20] = instruction_pointer(regs)+8;
1880 + regs->gr[21] = map;
1881 + regs->gr[22] = resolver;
1882 + regs->iaoq[0] = resolver | 3UL;
1883 + regs->iaoq[1] = regs->iaoq[0] + 4;
1884 + return 3;
1885 + }
1886 + }
1887 + } while (0);
1888 +#endif
1889 +
1890 +#ifdef CONFIG_PAX_EMUTRAMP
1891 +
1892 +#ifndef CONFIG_PAX_EMUSIGRT
1893 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1894 + return 1;
1895 +#endif
1896 +
1897 + do { /* PaX: rt_sigreturn emulation */
1898 + unsigned int ldi1, ldi2, bel, nop;
1899 +
1900 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1901 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1902 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1903 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1904 +
1905 + if (err)
1906 + break;
1907 +
1908 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1909 + ldi2 == 0x3414015AU &&
1910 + bel == 0xE4008200U &&
1911 + nop == 0x08000240U)
1912 + {
1913 + regs->gr[25] = (ldi1 & 2) >> 1;
1914 + regs->gr[20] = __NR_rt_sigreturn;
1915 + regs->gr[31] = regs->iaoq[1] + 16;
1916 + regs->sr[0] = regs->iasq[1];
1917 + regs->iaoq[0] = 0x100UL;
1918 + regs->iaoq[1] = regs->iaoq[0] + 4;
1919 + regs->iasq[0] = regs->sr[2];
1920 + regs->iasq[1] = regs->sr[2];
1921 + return 2;
1922 + }
1923 + } while (0);
1924 +#endif
1925 +
1926 + return 1;
1927 +}
1928 +
1929 +void pax_report_insns(void *pc, void *sp)
1930 +{
1931 + unsigned long i;
1932 +
1933 + printk(KERN_ERR "PAX: bytes at PC: ");
1934 + for (i = 0; i < 5; i++) {
1935 + unsigned int c;
1936 + if (get_user(c, (unsigned int *)pc+i))
1937 + printk(KERN_CONT "???????? ");
1938 + else
1939 + printk(KERN_CONT "%08x ", c);
1940 + }
1941 + printk("\n");
1942 +}
1943 +#endif
1944 +
1945 int fixup_exception(struct pt_regs *regs)
1946 {
1947 const struct exception_table_entry *fix;
1948 @@ -192,8 +303,33 @@ good_area:
1949
1950 acc_type = parisc_acctyp(code,regs->iir);
1951
1952 - if ((vma->vm_flags & acc_type) != acc_type)
1953 + if ((vma->vm_flags & acc_type) != acc_type) {
1954 +
1955 +#ifdef CONFIG_PAX_PAGEEXEC
1956 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1957 + (address & ~3UL) == instruction_pointer(regs))
1958 + {
1959 + up_read(&mm->mmap_sem);
1960 + switch (pax_handle_fetch_fault(regs)) {
1961 +
1962 +#ifdef CONFIG_PAX_EMUPLT
1963 + case 3:
1964 + return;
1965 +#endif
1966 +
1967 +#ifdef CONFIG_PAX_EMUTRAMP
1968 + case 2:
1969 + return;
1970 +#endif
1971 +
1972 + }
1973 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1974 + do_group_exit(SIGKILL);
1975 + }
1976 +#endif
1977 +
1978 goto bad_area;
1979 + }
1980
1981 /*
1982 * If for any reason at all we couldn't handle the fault, make
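pax_handle_fetch_fault() above decides what to do with an instruction fetch that PAGEEXEC refused: it fetches a few words at the faulting PC with get_user() and, when they match the known unpatched-PLT or rt_sigreturn trampoline sequences, emulates them instead of killing the task (return codes 3, 2 and 1 respectively). The control flow reduces to a pattern match over fetched opcodes; in the sketch below the opcode constants are placeholders, not the real parisc encodings:

#include <stdio.h>
#include <string.h>

enum fetch_action { KILL = 1, EMULATE_SIGRETURN = 2, EMULATE_PLT = 3 };

/* Placeholder opcode patterns -- the genuine parisc encodings are the hex
 * constants in the hunk above. */
static const unsigned int plt_pattern[2]    = { 0xAAAAAAAAu, 0xBBBBBBBBu };
static const unsigned int sigret_pattern[4] = { 0x11111111u, 0x22222222u,
                                                0x33333333u, 0x44444444u };

/* Fetch words at the faulting PC (plain reads here, get_user() in the
 * kernel), compare against known trampolines, kill the task otherwise. */
static enum fetch_action handle_fetch_fault(const unsigned int *pc)
{
        if (!memcmp(pc, plt_pattern, sizeof(plt_pattern)))
                return EMULATE_PLT;
        if (!memcmp(pc, sigret_pattern, sizeof(sigret_pattern)))
                return EMULATE_SIGRETURN;
        return KILL;
}

int main(void)
{
        unsigned int code[4] = { 0x11111111u, 0x22222222u,
                                 0x33333333u, 0x44444444u };

        printf("%d\n", handle_fetch_fault(code));       /* 2: sigreturn trampoline */
        code[0] = 0xDEADBEEFu;
        printf("%d\n", handle_fetch_fault(code));       /* 1: kill */
        return 0;
}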
1983 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
1984 --- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1985 +++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1986 @@ -14,7 +14,7 @@ struct dev_archdata {
1987 struct device_node *of_node;
1988
1989 /* DMA operations on that device */
1990 - struct dma_map_ops *dma_ops;
1991 + const struct dma_map_ops *dma_ops;
1992
1993 /*
1994 * When an iommu is in use, dma_data is used as a ptr to the base of the
1995 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
1996 --- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1997 +++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1998 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1999 #ifdef CONFIG_PPC64
2000 extern struct dma_map_ops dma_iommu_ops;
2001 #endif
2002 -extern struct dma_map_ops dma_direct_ops;
2003 +extern const struct dma_map_ops dma_direct_ops;
2004
2005 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2006 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2007 {
2008 /* We don't handle the NULL dev case for ISA for now. We could
2009 * do it via an out of line call but it is not needed for now. The
2010 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2011 return dev->archdata.dma_ops;
2012 }
2013
2014 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2015 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2016 {
2017 dev->archdata.dma_ops = ops;
2018 }
2019 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2020
2021 static inline int dma_supported(struct device *dev, u64 mask)
2022 {
2023 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2024 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2025
2026 if (unlikely(dma_ops == NULL))
2027 return 0;
2028 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2029
2030 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2031 {
2032 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2033 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2034
2035 if (unlikely(dma_ops == NULL))
2036 return -EIO;
2037 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2038 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2039 dma_addr_t *dma_handle, gfp_t flag)
2040 {
2041 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2042 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2043 void *cpu_addr;
2044
2045 BUG_ON(!dma_ops);
2046 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2047 static inline void dma_free_coherent(struct device *dev, size_t size,
2048 void *cpu_addr, dma_addr_t dma_handle)
2049 {
2050 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2051 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2052
2053 BUG_ON(!dma_ops);
2054
2055 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2056
2057 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2058 {
2059 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2060 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2061
2062 if (dma_ops->mapping_error)
2063 return dma_ops->mapping_error(dev, dma_addr);
2064 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2065 --- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2066 +++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2067 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2068 the loader. We need to make sure that it is out of the way of the program
2069 that it will "exec", and that there is sufficient room for the brk. */
2070
2071 -extern unsigned long randomize_et_dyn(unsigned long base);
2072 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2073 +#define ELF_ET_DYN_BASE (0x20000000)
2074 +
2075 +#ifdef CONFIG_PAX_ASLR
2076 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2077 +
2078 +#ifdef __powerpc64__
2079 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2080 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2081 +#else
2082 +#define PAX_DELTA_MMAP_LEN 15
2083 +#define PAX_DELTA_STACK_LEN 15
2084 +#endif
2085 +#endif
2086
2087 /*
2088 * Our registers are always unsigned longs, whether we're a 32 bit
2089 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2090 (0x7ff >> (PAGE_SHIFT - 12)) : \
2091 (0x3ffff >> (PAGE_SHIFT - 12)))
2092
2093 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2094 -#define arch_randomize_brk arch_randomize_brk
2095 -
2096 #endif /* __KERNEL__ */
2097
2098 /*
2099 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2100 --- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2101 +++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2102 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2103 extern void iommu_init_early_dart(void);
2104 extern void iommu_init_early_pasemi(void);
2105
2106 +/* dma-iommu.c */
2107 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2108 +
2109 #ifdef CONFIG_PCI
2110 extern void pci_iommu_init(void);
2111 extern void pci_direct_iommu_init(void);
2112 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2113 --- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2114 +++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2115 @@ -26,6 +26,7 @@ enum km_type {
2116 KM_SOFTIRQ1,
2117 KM_PPC_SYNC_PAGE,
2118 KM_PPC_SYNC_ICACHE,
2119 + KM_CLEARPAGE,
2120 KM_TYPE_NR
2121 };
2122
2123 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2124 --- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2125 +++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2126 @@ -180,15 +180,18 @@ do { \
2127 * stack by default, so in the absense of a PT_GNU_STACK program header
2128 * we turn execute permission off.
2129 */
2130 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2131 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2132 +#define VM_STACK_DEFAULT_FLAGS32 \
2133 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2134 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2135
2136 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2137 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138
2139 +#ifndef CONFIG_PAX_PAGEEXEC
2140 #define VM_STACK_DEFAULT_FLAGS \
2141 (test_thread_flag(TIF_32BIT) ? \
2142 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2143 +#endif
2144
2145 #include <asm-generic/getorder.h>
2146
2147 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2148 --- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2149 +++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2150 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 #ifndef __ASSEMBLY__
2170
2171 #undef STRICT_MM_TYPECHECKS
2172 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2173 --- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2174 +++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2175 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2176 }
2177
2178 #ifdef CONFIG_PCI
2179 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2180 -extern struct dma_map_ops *get_pci_dma_ops(void);
2181 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2182 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2183 #else /* CONFIG_PCI */
2184 #define set_pci_dma_ops(d)
2185 #define get_pci_dma_ops() NULL
2186 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2187 --- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2188 +++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2189 @@ -2,6 +2,7 @@
2190 #define _ASM_POWERPC_PGTABLE_H
2191 #ifdef __KERNEL__
2192
2193 +#include <linux/const.h>
2194 #ifndef __ASSEMBLY__
2195 #include <asm/processor.h> /* For TASK_SIZE */
2196 #include <asm/mmu.h>
2197 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2198 --- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2199 +++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2200 @@ -21,6 +21,7 @@
2201 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2202 #define _PAGE_USER 0x004 /* usermode access allowed */
2203 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2204 +#define _PAGE_EXEC _PAGE_GUARDED
2205 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2206 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2207 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2208 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2209 --- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2210 +++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2211 @@ -191,6 +191,7 @@
2212 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2213 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2214 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2215 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2216 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2217 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2218 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2219 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2220 --- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2221 +++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2222 @@ -13,7 +13,7 @@
2223
2224 #include <linux/swiotlb.h>
2225
2226 -extern struct dma_map_ops swiotlb_dma_ops;
2227 +extern const struct dma_map_ops swiotlb_dma_ops;
2228
2229 static inline void dma_mark_clean(void *addr, size_t size) {}
2230
2231 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2232 --- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2233 +++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2234 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238 -extern unsigned long arch_align_stack(unsigned long sp);
2239 +#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2244 --- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2245 +++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2246 @@ -13,6 +13,8 @@
2247 #define VERIFY_READ 0
2248 #define VERIFY_WRITE 1
2249
2250 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2251 +
2252 /*
2253 * The fs value determines whether argument validity checking should be
2254 * performed or not. If get_fs() == USER_DS, checking is performed, with
2255 @@ -327,52 +329,6 @@ do { \
2256 extern unsigned long __copy_tofrom_user(void __user *to,
2257 const void __user *from, unsigned long size);
2258
2259 -#ifndef __powerpc64__
2260 -
2261 -static inline unsigned long copy_from_user(void *to,
2262 - const void __user *from, unsigned long n)
2263 -{
2264 - unsigned long over;
2265 -
2266 - if (access_ok(VERIFY_READ, from, n))
2267 - return __copy_tofrom_user((__force void __user *)to, from, n);
2268 - if ((unsigned long)from < TASK_SIZE) {
2269 - over = (unsigned long)from + n - TASK_SIZE;
2270 - return __copy_tofrom_user((__force void __user *)to, from,
2271 - n - over) + over;
2272 - }
2273 - return n;
2274 -}
2275 -
2276 -static inline unsigned long copy_to_user(void __user *to,
2277 - const void *from, unsigned long n)
2278 -{
2279 - unsigned long over;
2280 -
2281 - if (access_ok(VERIFY_WRITE, to, n))
2282 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2283 - if ((unsigned long)to < TASK_SIZE) {
2284 - over = (unsigned long)to + n - TASK_SIZE;
2285 - return __copy_tofrom_user(to, (__force void __user *)from,
2286 - n - over) + over;
2287 - }
2288 - return n;
2289 -}
2290 -
2291 -#else /* __powerpc64__ */
2292 -
2293 -#define __copy_in_user(to, from, size) \
2294 - __copy_tofrom_user((to), (from), (size))
2295 -
2296 -extern unsigned long copy_from_user(void *to, const void __user *from,
2297 - unsigned long n);
2298 -extern unsigned long copy_to_user(void __user *to, const void *from,
2299 - unsigned long n);
2300 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2301 - unsigned long n);
2302 -
2303 -#endif /* __powerpc64__ */
2304 -
2305 static inline unsigned long __copy_from_user_inatomic(void *to,
2306 const void __user *from, unsigned long n)
2307 {
2308 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2309 if (ret == 0)
2310 return 0;
2311 }
2312 +
2313 + if (!__builtin_constant_p(n))
2314 + check_object_size(to, n, false);
2315 +
2316 return __copy_tofrom_user((__force void __user *)to, from, n);
2317 }
2318
2319 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2320 if (ret == 0)
2321 return 0;
2322 }
2323 +
2324 + if (!__builtin_constant_p(n))
2325 + check_object_size(from, n, true);
2326 +
2327 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2328 }
2329
2330 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2331 return __copy_to_user_inatomic(to, from, size);
2332 }
2333
2334 +#ifndef __powerpc64__
2335 +
2336 +static inline unsigned long __must_check copy_from_user(void *to,
2337 + const void __user *from, unsigned long n)
2338 +{
2339 + unsigned long over;
2340 +
2341 + if ((long)n < 0)
2342 + return n;
2343 +
2344 + if (access_ok(VERIFY_READ, from, n)) {
2345 + if (!__builtin_constant_p(n))
2346 + check_object_size(to, n, false);
2347 + return __copy_tofrom_user((__force void __user *)to, from, n);
2348 + }
2349 + if ((unsigned long)from < TASK_SIZE) {
2350 + over = (unsigned long)from + n - TASK_SIZE;
2351 + if (!__builtin_constant_p(n - over))
2352 + check_object_size(to, n - over, false);
2353 + return __copy_tofrom_user((__force void __user *)to, from,
2354 + n - over) + over;
2355 + }
2356 + return n;
2357 +}
2358 +
2359 +static inline unsigned long __must_check copy_to_user(void __user *to,
2360 + const void *from, unsigned long n)
2361 +{
2362 + unsigned long over;
2363 +
2364 + if ((long)n < 0)
2365 + return n;
2366 +
2367 + if (access_ok(VERIFY_WRITE, to, n)) {
2368 + if (!__builtin_constant_p(n))
2369 + check_object_size(from, n, true);
2370 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2371 + }
2372 + if ((unsigned long)to < TASK_SIZE) {
2373 + over = (unsigned long)to + n - TASK_SIZE;
2374 + if (!__builtin_constant_p(n))
2375 + check_object_size(from, n - over, true);
2376 + return __copy_tofrom_user(to, (__force void __user *)from,
2377 + n - over) + over;
2378 + }
2379 + return n;
2380 +}
2381 +
2382 +#else /* __powerpc64__ */
2383 +
2384 +#define __copy_in_user(to, from, size) \
2385 + __copy_tofrom_user((to), (from), (size))
2386 +
2387 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2388 +{
2389 + if ((long)n < 0 || n > INT_MAX)
2390 + return n;
2391 +
2392 + if (!__builtin_constant_p(n))
2393 + check_object_size(to, n, false);
2394 +
2395 + if (likely(access_ok(VERIFY_READ, from, n)))
2396 + n = __copy_from_user(to, from, n);
2397 + else
2398 + memset(to, 0, n);
2399 + return n;
2400 +}
2401 +
2402 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2403 +{
2404 + if ((long)n < 0 || n > INT_MAX)
2405 + return n;
2406 +
2407 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2408 + if (!__builtin_constant_p(n))
2409 + check_object_size(from, n, true);
2410 + n = __copy_to_user(to, from, n);
2411 + }
2412 + return n;
2413 +}
2414 +
2415 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2416 + unsigned long n);
2417 +
2418 +#endif /* __powerpc64__ */
2419 +
2420 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2421
2422 static inline unsigned long clear_user(void __user *addr, unsigned long size)
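The reworked powerpc copy_from_user()/copy_to_user() above share one shape: reject a length that looks negative or exceeds INT_MAX, let check_object_size() vet non-constant sizes, and on the copy-from side zero the destination when the user range is refused so callers never see stale data. A userspace skeleton of that wrapper shape, with stubs standing in for check_object_size() and access_ok():

#include <limits.h>
#include <stdio.h>
#include <string.h>

static int access_ok_stub(const void *p, unsigned long n)
{
        (void)n;
        return p != NULL;               /* the real access_ok() checks the user range */
}

/* Skeleton of the hardened copy-in path: length sanity check first, then an
 * object-size check, then the raw copy; zero the destination when access is
 * refused. Returns the number of bytes NOT copied, like the kernel helper. */
static unsigned long copy_in(void *to, unsigned long to_size,
                             const void *from, unsigned long n)
{
        if ((long)n < 0 || n > INT_MAX)         /* same length guard as the patch */
                return n;
        if (n > to_size)                        /* stands in for check_object_size() */
                return n;
        if (!access_ok_stub(from, n)) {
                memset(to, 0, n);               /* never expose stale data */
                return n;
        }
        memcpy(to, from, n);                    /* models __copy_tofrom_user() */
        return 0;
}

int main(void)
{
        char src[32] = "copy_from_user demo", dst[16];

        printf("left=%lu\n", copy_in(dst, sizeof(dst), src, 8));    /* 0: copied */
        printf("left=%lu\n", copy_in(dst, sizeof(dst), src, 64));   /* 64: too big */
        printf("left=%lu\n", copy_in(dst, sizeof(dst), NULL, 8));   /* 8: refused, dst zeroed */
        return 0;
}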
2423 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2424 --- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2425 +++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2426 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2427 &cache_assoc_attr,
2428 };
2429
2430 -static struct sysfs_ops cache_index_ops = {
2431 +static const struct sysfs_ops cache_index_ops = {
2432 .show = cache_index_show,
2433 };
2434
2435 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2436 --- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2437 +++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2438 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2439 }
2440 #endif
2441
2442 -struct dma_map_ops dma_direct_ops = {
2443 +const struct dma_map_ops dma_direct_ops = {
2444 .alloc_coherent = dma_direct_alloc_coherent,
2445 .free_coherent = dma_direct_free_coherent,
2446 .map_sg = dma_direct_map_sg,
2447 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2448 --- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2449 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2450 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2451 }
2452
2453 /* We support DMA to/from any memory page via the iommu */
2454 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2455 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2456 {
2457 struct iommu_table *tbl = get_iommu_table_base(dev);
2458
2459 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2460 --- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2461 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2462 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2463 * map_page, and unmap_page on highmem, use normal dma_ops
2464 * for everything else.
2465 */
2466 -struct dma_map_ops swiotlb_dma_ops = {
2467 +const struct dma_map_ops swiotlb_dma_ops = {
2468 .alloc_coherent = dma_direct_alloc_coherent,
2469 .free_coherent = dma_direct_free_coherent,
2470 .map_sg = swiotlb_map_sg_attrs,
2471 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2472 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2473 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2474 @@ -455,6 +455,7 @@ storage_fault_common:
2475 std r14,_DAR(r1)
2476 std r15,_DSISR(r1)
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478 + bl .save_nvgprs
2479 mr r4,r14
2480 mr r5,r15
2481 ld r14,PACA_EXGEN+EX_R14(r13)
2482 @@ -464,8 +465,7 @@ storage_fault_common:
2483 cmpdi r3,0
2484 bne- 1f
2485 b .ret_from_except_lite
2486 -1: bl .save_nvgprs
2487 - mr r5,r3
2488 +1: mr r5,r3
2489 addi r3,r1,STACK_FRAME_OVERHEAD
2490 ld r4,_DAR(r1)
2491 bl .bad_page_fault
2492 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2493 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2494 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2495 @@ -818,10 +818,10 @@ handle_page_fault:
2496 11: ld r4,_DAR(r1)
2497 ld r5,_DSISR(r1)
2498 addi r3,r1,STACK_FRAME_OVERHEAD
2499 + bl .save_nvgprs
2500 bl .do_page_fault
2501 cmpdi r3,0
2502 beq+ 13f
2503 - bl .save_nvgprs
2504 mr r5,r3
2505 addi r3,r1,STACK_FRAME_OVERHEAD
2506 lwz r4,_DAR(r1)
2507 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2508 --- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2509 +++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2510 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2511 return 1;
2512 }
2513
2514 -static struct dma_map_ops ibmebus_dma_ops = {
2515 +static const struct dma_map_ops ibmebus_dma_ops = {
2516 .alloc_coherent = ibmebus_alloc_coherent,
2517 .free_coherent = ibmebus_free_coherent,
2518 .map_sg = ibmebus_map_sg,
2519 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2520 --- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2521 +++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2522 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2523 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2524 return 0;
2525
2526 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2527 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2528 regs->nip += 4;
2529
2530 return 1;
2531 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2532 /*
2533 * Global data
2534 */
2535 -struct kgdb_arch arch_kgdb_ops = {
2536 +const struct kgdb_arch arch_kgdb_ops = {
2537 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2538 };
2539
2540 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2541 --- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2542 +++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2543 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2544 me->arch.core_plt_section = i;
2545 }
2546 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2547 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2548 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2549 return -ENOEXEC;
2550 }
2551
2552 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2553
2554 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2555 /* Init, or core PLT? */
2556 - if (location >= mod->module_core
2557 - && location < mod->module_core + mod->core_size)
2558 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2559 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2560 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2561 - else
2562 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2563 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2564 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2565 + else {
2566 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2567 + return ~0UL;
2568 + }
2569
2570 /* Find this entry, or if that fails, the next avail. entry */
2571 while (entry->jump[0]) {
2572 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2573 --- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2574 +++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2575 @@ -31,11 +31,24 @@
2576
2577 LIST_HEAD(module_bug_list);
2578
2579 +#ifdef CONFIG_PAX_KERNEXEC
2580 void *module_alloc(unsigned long size)
2581 {
2582 if (size == 0)
2583 return NULL;
2584
2585 + return vmalloc(size);
2586 +}
2587 +
2588 +void *module_alloc_exec(unsigned long size)
2589 +#else
2590 +void *module_alloc(unsigned long size)
2591 +#endif
2592 +
2593 +{
2594 + if (size == 0)
2595 + return NULL;
2596 +
2597 return vmalloc_exec(size);
2598 }
2599
2600 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2601 vfree(module_region);
2602 }
2603
2604 +#ifdef CONFIG_PAX_KERNEXEC
2605 +void module_free_exec(struct module *mod, void *module_region)
2606 +{
2607 + module_free(mod, module_region);
2608 +}
2609 +#endif
2610 +
2611 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2612 const Elf_Shdr *sechdrs,
2613 const char *name)
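Under CONFIG_PAX_KERNEXEC the hunk above gives modules two allocators: module_alloc() returns ordinary writable memory (vmalloc()) while module_alloc_exec() returns executable memory (vmalloc_exec()), so no single allocation ends up both writable and executable. A userspace analogue of that W^X split using mmap(), for illustration only:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

/* Writable, never executable -- the module_alloc()/vmalloc() side. */
static void *alloc_rw(size_t size)
{
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}

/* Executable, never writable -- the module_alloc_exec()/vmalloc_exec() side.
 * (A real loader would fill the region before flipping it to RX, or write it
 * through a separate RW alias.) */
static void *alloc_rx(size_t size)
{
        void *p = mmap(NULL, size, PROT_READ | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
        void *rw = alloc_rw(4096), *rx = alloc_rx(4096);

        printf("rw=%p rx=%p\n", rw, rx);        /* two regions, neither is RWX */
        munmap(rw, 4096);
        munmap(rx, 4096);
        return 0;
}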
2614 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2615 --- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2616 +++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2617 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2618 unsigned int ppc_pci_flags = 0;
2619
2620
2621 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2622 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2623
2624 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2625 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2626 {
2627 pci_dma_ops = dma_ops;
2628 }
2629
2630 -struct dma_map_ops *get_pci_dma_ops(void)
2631 +const struct dma_map_ops *get_pci_dma_ops(void)
2632 {
2633 return pci_dma_ops;
2634 }
2635 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2636 --- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2637 +++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2638 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2639 * Lookup NIP late so we have the best change of getting the
2640 * above info out without failing
2641 */
2642 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2643 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2644 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2645 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2646 #endif
2647 show_stack(current, (unsigned long *) regs->gpr[1]);
2648 if (!user_mode(regs))
2649 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2650 newsp = stack[0];
2651 ip = stack[STACK_FRAME_LR_SAVE];
2652 if (!firstframe || ip != lr) {
2653 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2654 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2656 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2657 - printk(" (%pS)",
2658 + printk(" (%pA)",
2659 (void *)current->ret_stack[curr_frame].ret);
2660 curr_frame--;
2661 }
2662 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2663 struct pt_regs *regs = (struct pt_regs *)
2664 (sp + STACK_FRAME_OVERHEAD);
2665 lr = regs->link;
2666 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2667 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2668 regs->trap, (void *)regs->nip, (void *)lr);
2669 firstframe = 1;
2670 }
2671 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2672 }
2673
2674 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2675 -
2676 -unsigned long arch_align_stack(unsigned long sp)
2677 -{
2678 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2679 - sp -= get_random_int() & ~PAGE_MASK;
2680 - return sp & ~0xf;
2681 -}
2682 -
2683 -static inline unsigned long brk_rnd(void)
2684 -{
2685 - unsigned long rnd = 0;
2686 -
2687 - /* 8MB for 32bit, 1GB for 64bit */
2688 - if (is_32bit_task())
2689 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2690 - else
2691 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2692 -
2693 - return rnd << PAGE_SHIFT;
2694 -}
2695 -
2696 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2697 -{
2698 - unsigned long base = mm->brk;
2699 - unsigned long ret;
2700 -
2701 -#ifdef CONFIG_PPC_STD_MMU_64
2702 - /*
2703 - * If we are using 1TB segments and we are allowed to randomise
2704 - * the heap, we can put it above 1TB so it is backed by a 1TB
2705 - * segment. Otherwise the heap will be in the bottom 1TB
2706 - * which always uses 256MB segments and this may result in a
2707 - * performance penalty.
2708 - */
2709 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2710 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2711 -#endif
2712 -
2713 - ret = PAGE_ALIGN(base + brk_rnd());
2714 -
2715 - if (ret < mm->brk)
2716 - return mm->brk;
2717 -
2718 - return ret;
2719 -}
2720 -
2721 -unsigned long randomize_et_dyn(unsigned long base)
2722 -{
2723 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2724 -
2725 - if (ret < base)
2726 - return base;
2727 -
2728 - return ret;
2729 -}
2730 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2731 --- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2732 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2733 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2734 /* Save user registers on the stack */
2735 frame = &rt_sf->uc.uc_mcontext;
2736 addr = frame;
2737 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2738 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2739 if (save_user_regs(regs, frame, 0, 1))
2740 goto badframe;
2741 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2742 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2743 --- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2744 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2745 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2746 current->thread.fpscr.val = 0;
2747
2748 /* Set up to return from userspace. */
2749 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2750 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2751 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2752 } else {
2753 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2754 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2755 --- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2756 +++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2757 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2758 if (oldlenp) {
2759 if (!error) {
2760 if (get_user(oldlen, oldlenp) ||
2761 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2762 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2763 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2764 error = -EFAULT;
2765 }
2766 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2767 }
2768 return error;
2769 }
2770 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2771 --- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2772 +++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2773 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2774 static inline void pmac_backlight_unblank(void) { }
2775 #endif
2776
2777 +extern void gr_handle_kernel_exploit(void);
2778 +
2779 int die(const char *str, struct pt_regs *regs, long err)
2780 {
2781 static struct {
2782 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2783 if (panic_on_oops)
2784 panic("Fatal exception");
2785
2786 + gr_handle_kernel_exploit();
2787 +
2788 oops_exit();
2789 do_exit(err);
2790
2791 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2792 --- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2793 +++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2794 @@ -36,6 +36,7 @@
2795 #include <asm/firmware.h>
2796 #include <asm/vdso.h>
2797 #include <asm/vdso_datapage.h>
2798 +#include <asm/mman.h>
2799
2800 #include "setup.h"
2801
2802 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2803 vdso_base = VDSO32_MBASE;
2804 #endif
2805
2806 - current->mm->context.vdso_base = 0;
2807 + current->mm->context.vdso_base = ~0UL;
2808
2809 /* vDSO has a problem and was disabled, just don't "enable" it for the
2810 * process
2811 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2812 vdso_base = get_unmapped_area(NULL, vdso_base,
2813 (vdso_pages << PAGE_SHIFT) +
2814 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2815 - 0, 0);
2816 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2817 if (IS_ERR_VALUE(vdso_base)) {
2818 rc = vdso_base;
2819 goto fail_mmapsem;
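Here and in the two signal paths above, the "no vDSO mapped" sentinel moves from 0 to ~0UL; presumably 0 stops being a reliably impossible value once the vDSO base is no longer a fixed constant, while ~0UL can never be a valid page-aligned mapping base. A tiny sketch of the convention the call sites now follow (the macro and helper names are invented for illustration; the patch open-codes the comparison):

	/* Illustrative sketch only. */
	#define VDSO_UNMAPPED	(~0UL)

	static inline int vdso_is_mapped(const struct mm_struct *mm)
	{
		return mm->context.vdso_base != VDSO_UNMAPPED;
	}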
2820 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2821 --- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2822 +++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2823 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2824 vio_cmo_dealloc(viodev, alloc_size);
2825 }
2826
2827 -struct dma_map_ops vio_dma_mapping_ops = {
2828 +static const struct dma_map_ops vio_dma_mapping_ops = {
2829 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2830 .free_coherent = vio_dma_iommu_free_coherent,
2831 .map_sg = vio_dma_iommu_map_sg,
2832 .unmap_sg = vio_dma_iommu_unmap_sg,
2833 + .dma_supported = dma_iommu_dma_supported,
2834 .map_page = vio_dma_iommu_map_page,
2835 .unmap_page = vio_dma_iommu_unmap_page,
2836
2837 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2838
2839 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2840 {
2841 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2842 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2843 }
2844
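Constifying vio_dma_mapping_ops only works because the one member that used to be patched at runtime in vio_cmo_set_dma_ops() is now filled in statically in the initializer. A short sketch of the before/after shape (struct names shortened for illustration):

	/* Before: the ops table had to stay writable so one field could be
	 * copied over at runtime. */
	struct dma_map_ops ops = { /* ... */ };
	ops.dma_supported = dma_iommu_ops.dma_supported;

	/* After (as in the hunk): every member is known at build time, so the
	 * table can be const and live in read-only memory. */
	static const struct dma_map_ops ops_ro = {
		.dma_supported = dma_iommu_dma_supported,
		/* ... */
	};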
2845 diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2846 --- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2847 +++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2848 @@ -9,22 +9,6 @@
2849 #include <linux/module.h>
2850 #include <asm/uaccess.h>
2851
2852 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2853 -{
2854 - if (likely(access_ok(VERIFY_READ, from, n)))
2855 - n = __copy_from_user(to, from, n);
2856 - else
2857 - memset(to, 0, n);
2858 - return n;
2859 -}
2860 -
2861 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2862 -{
2863 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2864 - n = __copy_to_user(to, from, n);
2865 - return n;
2866 -}
2867 -
2868 unsigned long copy_in_user(void __user *to, const void __user *from,
2869 unsigned long n)
2870 {
2871 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2872 return n;
2873 }
2874
2875 -EXPORT_SYMBOL(copy_from_user);
2876 -EXPORT_SYMBOL(copy_to_user);
2877 EXPORT_SYMBOL(copy_in_user);
2878
2879 diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2880 --- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2881 +++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2882 @@ -30,6 +30,10 @@
2883 #include <linux/kprobes.h>
2884 #include <linux/kdebug.h>
2885 #include <linux/perf_event.h>
2886 +#include <linux/slab.h>
2887 +#include <linux/pagemap.h>
2888 +#include <linux/compiler.h>
2889 +#include <linux/unistd.h>
2890
2891 #include <asm/firmware.h>
2892 #include <asm/page.h>
2893 @@ -40,6 +44,7 @@
2894 #include <asm/uaccess.h>
2895 #include <asm/tlbflush.h>
2896 #include <asm/siginfo.h>
2897 +#include <asm/ptrace.h>
2898
2899
2900 #ifdef CONFIG_KPROBES
2901 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2902 }
2903 #endif
2904
2905 +#ifdef CONFIG_PAX_PAGEEXEC
2906 +/*
2907 + * PaX: decide what to do with offenders (regs->nip = fault address)
2908 + *
2909 + * returns 1 when task should be killed
2910 + */
2911 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2912 +{
2913 + return 1;
2914 +}
2915 +
2916 +void pax_report_insns(void *pc, void *sp)
2917 +{
2918 + unsigned long i;
2919 +
2920 + printk(KERN_ERR "PAX: bytes at PC: ");
2921 + for (i = 0; i < 5; i++) {
2922 + unsigned int c;
2923 + if (get_user(c, (unsigned int __user *)pc+i))
2924 + printk(KERN_CONT "???????? ");
2925 + else
2926 + printk(KERN_CONT "%08x ", c);
2927 + }
2928 + printk("\n");
2929 +}
2930 +#endif
2931 +
2932 /*
2933 * Check whether the instruction at regs->nip is a store using
2934 * an update addressing form which will update r1.
2935 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2936 * indicate errors in DSISR but can validly be set in SRR1.
2937 */
2938 if (trap == 0x400)
2939 - error_code &= 0x48200000;
2940 + error_code &= 0x58200000;
2941 else
2942 is_write = error_code & DSISR_ISSTORE;
2943 #else
2944 @@ -250,7 +282,7 @@ good_area:
2945 * "undefined". Of those that can be set, this is the only
2946 * one which seems bad.
2947 */
2948 - if (error_code & 0x10000000)
2949 + if (error_code & DSISR_GUARDED)
2950 /* Guarded storage error. */
2951 goto bad_area;
2952 #endif /* CONFIG_8xx */
2953 @@ -265,7 +297,7 @@ good_area:
2954 * processors use the same I/D cache coherency mechanism
2955 * as embedded.
2956 */
2957 - if (error_code & DSISR_PROTFAULT)
2958 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2959 goto bad_area;
2960 #endif /* CONFIG_PPC_STD_MMU */
2961
2962 @@ -335,6 +367,23 @@ bad_area:
2963 bad_area_nosemaphore:
2964 /* User mode accesses cause a SIGSEGV */
2965 if (user_mode(regs)) {
2966 +
2967 +#ifdef CONFIG_PAX_PAGEEXEC
2968 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2969 +#ifdef CONFIG_PPC_STD_MMU
2970 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2971 +#else
2972 + if (is_exec && regs->nip == address) {
2973 +#endif
2974 + switch (pax_handle_fetch_fault(regs)) {
2975 + }
2976 +
2977 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2978 + do_group_exit(SIGKILL);
2979 + }
2980 + }
2981 +#endif
2982 +
2983 _exception(SIGSEGV, regs, code, address);
2984 return 0;
2985 }
2986 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
2987 --- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2988 +++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2989 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2990 */
2991 if (mmap_is_legacy()) {
2992 mm->mmap_base = TASK_UNMAPPED_BASE;
2993 +
2994 +#ifdef CONFIG_PAX_RANDMMAP
2995 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2996 + mm->mmap_base += mm->delta_mmap;
2997 +#endif
2998 +
2999 mm->get_unmapped_area = arch_get_unmapped_area;
3000 mm->unmap_area = arch_unmap_area;
3001 } else {
3002 mm->mmap_base = mmap_base();
3003 +
3004 +#ifdef CONFIG_PAX_RANDMMAP
3005 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3006 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3007 +#endif
3008 +
3009 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3010 mm->unmap_area = arch_unmap_area_topdown;
3011 }
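The delta_mmap and delta_stack values added to (or subtracted from) mmap_base are per-process random offsets whose bit widths come from the PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN definitions seen in the arch elf.h hunks. A sketch of how such a delta is typically derived at exec time; the helper below and its use of get_random_int() are illustrative assumptions, not code from this patch:

	/* Illustrative sketch only. */
	static unsigned long pax_delta(unsigned int len_bits)
	{
		/* len_bits of randomness, expressed in page-sized units */
		return ((unsigned long)get_random_int() &
			((1UL << len_bits) - 1)) << PAGE_SHIFT;
	}

	/* roughly, when the binary is loaded: */
	mm->delta_mmap  = pax_delta(PAX_DELTA_MMAP_LEN);
	mm->delta_stack = pax_delta(PAX_DELTA_STACK_LEN);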
3012 diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3013 --- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3014 +++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3015 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3016 if ((mm->task_size - len) < addr)
3017 return 0;
3018 vma = find_vma(mm, addr);
3019 - return (!vma || (addr + len) <= vma->vm_start);
3020 + return check_heap_stack_gap(vma, addr, len);
3021 }
3022
3023 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3024 @@ -256,7 +256,7 @@ full_search:
3025 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3026 continue;
3027 }
3028 - if (!vma || addr + len <= vma->vm_start) {
3029 + if (check_heap_stack_gap(vma, addr, len)) {
3030 /*
3031 * Remember the place where we stopped the search:
3032 */
3033 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3034 }
3035 }
3036
3037 - addr = mm->mmap_base;
3038 - while (addr > len) {
3039 + if (mm->mmap_base < len)
3040 + addr = -ENOMEM;
3041 + else
3042 + addr = mm->mmap_base - len;
3043 +
3044 + while (!IS_ERR_VALUE(addr)) {
3045 /* Go down by chunk size */
3046 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3047 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3048
3049 /* Check for hit with different page size */
3050 mask = slice_range_to_mask(addr, len);
3051 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3052 * return with success:
3053 */
3054 vma = find_vma(mm, addr);
3055 - if (!vma || (addr + len) <= vma->vm_start) {
3056 + if (check_heap_stack_gap(vma, addr, len)) {
3057 /* remember the address as a hint for next time */
3058 if (use_cache)
3059 mm->free_area_cache = addr;
3060 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3061 mm->cached_hole_size = vma->vm_start - addr;
3062
3063 /* try just below the current vma->vm_start */
3064 - addr = vma->vm_start;
3065 + addr = skip_heap_stack_gap(vma, len);
3066 }
3067
3068 /*
3069 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3070 if (fixed && addr > (mm->task_size - len))
3071 return -EINVAL;
3072
3073 +#ifdef CONFIG_PAX_RANDMMAP
3074 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3075 + addr = 0;
3076 +#endif
3077 +
3078 /* If hint, make sure it matches our alignment restrictions */
3079 if (!fixed && addr) {
3080 addr = _ALIGN_UP(addr, 1ul << pshift);
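check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test throughout these allocators. Its definition is not part of this section; the sketch below reconstructs the semantics the call sites rely on: the plain no-overlap test, plus an extra cushion when the next VMA is a grows-down stack. The sysctl_heap_stack_gap name and the exact gap handling are assumptions based on grsecurity's heap/stack gap tunable, not quoted from these hunks (the companion skip_heap_stack_gap() is sketched after the sh/mm/mmap.c hunks further down):

	/* Illustrative sketch only. */
	static inline int check_heap_stack_gap(const struct vm_area_struct *vma,
					       unsigned long addr, unsigned long len)
	{
		unsigned long gap;

		if (!vma)
			return 1;			/* nothing above: always fits    */
		if (addr + len > vma->vm_start)
			return 0;			/* overlaps the next mapping     */
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return 1;			/* plain mapping: touching is ok */
		gap = vma->vm_start - (addr + len);	/* distance to the stack VMA     */
		return gap >= sysctl_heap_stack_gap;	/* keep the configured cushion   */
	}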
3081 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3082 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3083 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3084 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3085 lite5200_pm_target_state = PM_SUSPEND_ON;
3086 }
3087
3088 -static struct platform_suspend_ops lite5200_pm_ops = {
3089 +static const struct platform_suspend_ops lite5200_pm_ops = {
3090 .valid = lite5200_pm_valid,
3091 .begin = lite5200_pm_begin,
3092 .prepare = lite5200_pm_prepare,
3093 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3094 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3095 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3096 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3097 iounmap(mbar);
3098 }
3099
3100 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3101 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3102 .valid = mpc52xx_pm_valid,
3103 .prepare = mpc52xx_pm_prepare,
3104 .enter = mpc52xx_pm_enter,
3105 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3106 --- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3107 +++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3108 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3109 return ret;
3110 }
3111
3112 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3113 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3114 .valid = mpc83xx_suspend_valid,
3115 .begin = mpc83xx_suspend_begin,
3116 .enter = mpc83xx_suspend_enter,
3117 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3118 --- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3119 +++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3120 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3121
3122 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3123
3124 -struct dma_map_ops dma_iommu_fixed_ops = {
3125 +const struct dma_map_ops dma_iommu_fixed_ops = {
3126 .alloc_coherent = dma_fixed_alloc_coherent,
3127 .free_coherent = dma_fixed_free_coherent,
3128 .map_sg = dma_fixed_map_sg,
3129 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3130 --- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3131 +++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3132 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3133 return mask >= DMA_BIT_MASK(32);
3134 }
3135
3136 -static struct dma_map_ops ps3_sb_dma_ops = {
3137 +static const struct dma_map_ops ps3_sb_dma_ops = {
3138 .alloc_coherent = ps3_alloc_coherent,
3139 .free_coherent = ps3_free_coherent,
3140 .map_sg = ps3_sb_map_sg,
3141 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3142 .unmap_page = ps3_unmap_page,
3143 };
3144
3145 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3146 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3147 .alloc_coherent = ps3_alloc_coherent,
3148 .free_coherent = ps3_free_coherent,
3149 .map_sg = ps3_ioc0_map_sg,
3150 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3151 --- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3152 +++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3153 @@ -2,6 +2,8 @@ config PPC_PSERIES
3154 depends on PPC64 && PPC_BOOK3S
3155 bool "IBM pSeries & new (POWER5-based) iSeries"
3156 select MPIC
3157 + select PCI_MSI
3158 + select XICS
3159 select PPC_I8259
3160 select PPC_RTAS
3161 select RTAS_ERROR_LOGGING
3162 diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3163 --- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3164 +++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3165 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3166 that it will "exec", and that there is sufficient room for the brk. */
3167 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3168
3169 +#ifdef CONFIG_PAX_ASLR
3170 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3171 +
3172 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3173 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3174 +#endif
3175 +
3176 /* This yields a mask that user programs can use to figure out what
3177 instruction set this CPU supports. */
3178
3179 diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3180 --- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3181 +++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3182 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3183 void detect_memory_layout(struct mem_chunk chunk[]);
3184
3185 #ifdef CONFIG_S390_SWITCH_AMODE
3186 -extern unsigned int switch_amode;
3187 +#define switch_amode (1)
3188 #else
3189 #define switch_amode (0)
3190 #endif
3191
3192 #ifdef CONFIG_S390_EXEC_PROTECT
3193 -extern unsigned int s390_noexec;
3194 +#define s390_noexec (1)
3195 #else
3196 #define s390_noexec (0)
3197 #endif
3198 diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3199 --- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3200 +++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3201 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3202 copy_to_user(void __user *to, const void *from, unsigned long n)
3203 {
3204 might_fault();
3205 +
3206 + if ((long)n < 0)
3207 + return n;
3208 +
3209 if (access_ok(VERIFY_WRITE, to, n))
3210 n = __copy_to_user(to, from, n);
3211 return n;
3212 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3213 static inline unsigned long __must_check
3214 __copy_from_user(void *to, const void __user *from, unsigned long n)
3215 {
3216 + if ((long)n < 0)
3217 + return n;
3218 +
3219 if (__builtin_constant_p(n) && (n <= 256))
3220 return uaccess.copy_from_user_small(n, from, to);
3221 else
3222 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3223 copy_from_user(void *to, const void __user *from, unsigned long n)
3224 {
3225 might_fault();
3226 +
3227 + if ((long)n < 0)
3228 + return n;
3229 +
3230 if (access_ok(VERIFY_READ, from, n))
3231 n = __copy_from_user(to, from, n);
3232 else
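The added "(long)n < 0" guards reject absurdly large lengths before access_ok() or the low-level copy routines ever see them; in practice such values come from unsigned underflow in a caller. A worked example of the failure mode these few lines catch (buf, kbuf and len are placeholder names):

	size_t len = 0;				/* e.g. a zero-length request      */
	/* ... */
	if (copy_to_user(buf, kbuf, len - 1))	/* len - 1 wraps to a huge value   */
		return -EFAULT;
	/* Without the guard the copy routine may start writing; with it,
	 * (long)(len - 1) < 0, the bogus length is returned unchanged, the
	 * caller sees "not everything copied" and fails cleanly with -EFAULT. */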
3233 diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3234 --- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3235 +++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3236 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3237
3238 config S390_SWITCH_AMODE
3239 bool "Switch kernel/user addressing modes"
3240 + default y
3241 help
3242 This option allows to switch the addressing modes of kernel and user
3243 - space. The kernel parameter switch_amode=on will enable this feature,
3244 - default is disabled. Enabling this (via kernel parameter) on machines
3245 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3246 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3247 + will reduce system performance.
3248
3249 Note that this option will also be selected by selecting the execute
3250 - protection option below. Enabling the execute protection via the
3251 - noexec kernel parameter will also switch the addressing modes,
3252 - independent of the switch_amode kernel parameter.
3253 + protection option below. Enabling the execute protection will also
3254 + switch the addressing modes, independent of this option.
3255
3256
3257 config S390_EXEC_PROTECT
3258 bool "Data execute protection"
3259 + default y
3260 select S390_SWITCH_AMODE
3261 help
3262 This option allows to enable a buffer overflow protection for user
3263 space programs and it also selects the addressing mode option above.
3264 - The kernel parameter noexec=on will enable this feature and also
3265 - switch the addressing modes, default is disabled. Enabling this (via
3266 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3267 - will reduce system performance.
3268 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3269 + reduce system performance.
3270
3271 comment "Code generation options"
3272
3273 diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3274 --- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3275 +++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3276 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3277
3278 /* Increase core size by size of got & plt and set start
3279 offsets for got and plt. */
3280 - me->core_size = ALIGN(me->core_size, 4);
3281 - me->arch.got_offset = me->core_size;
3282 - me->core_size += me->arch.got_size;
3283 - me->arch.plt_offset = me->core_size;
3284 - me->core_size += me->arch.plt_size;
3285 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3286 + me->arch.got_offset = me->core_size_rw;
3287 + me->core_size_rw += me->arch.got_size;
3288 + me->arch.plt_offset = me->core_size_rx;
3289 + me->core_size_rx += me->arch.plt_size;
3290 return 0;
3291 }
3292
3293 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3294 if (info->got_initialized == 0) {
3295 Elf_Addr *gotent;
3296
3297 - gotent = me->module_core + me->arch.got_offset +
3298 + gotent = me->module_core_rw + me->arch.got_offset +
3299 info->got_offset;
3300 *gotent = val;
3301 info->got_initialized = 1;
3302 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3303 else if (r_type == R_390_GOTENT ||
3304 r_type == R_390_GOTPLTENT)
3305 *(unsigned int *) loc =
3306 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3307 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3308 else if (r_type == R_390_GOT64 ||
3309 r_type == R_390_GOTPLT64)
3310 *(unsigned long *) loc = val;
3311 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3312 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3313 if (info->plt_initialized == 0) {
3314 unsigned int *ip;
3315 - ip = me->module_core + me->arch.plt_offset +
3316 + ip = me->module_core_rx + me->arch.plt_offset +
3317 info->plt_offset;
3318 #ifndef CONFIG_64BIT
3319 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3320 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3321 val - loc + 0xffffUL < 0x1ffffeUL) ||
3322 (r_type == R_390_PLT32DBL &&
3323 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3324 - val = (Elf_Addr) me->module_core +
3325 + val = (Elf_Addr) me->module_core_rx +
3326 me->arch.plt_offset +
3327 info->plt_offset;
3328 val += rela->r_addend - loc;
3329 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3330 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3331 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3332 val = val + rela->r_addend -
3333 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3334 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3335 if (r_type == R_390_GOTOFF16)
3336 *(unsigned short *) loc = val;
3337 else if (r_type == R_390_GOTOFF32)
3338 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3339 break;
3340 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3341 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3342 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3343 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3344 rela->r_addend - loc;
3345 if (r_type == R_390_GOTPC)
3346 *(unsigned int *) loc = val;
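These relocation changes assume the module loader has been split so that each module has separate RX and RW core regions: the GOT (data) lives in the RW half and the PLT (code stubs) in the RX half, which is why got_offset is taken relative to core_size_rw/module_core_rw and plt_offset relative to core_size_rx/module_core_rx. A comment-only sketch of the assumed layout; the split itself is introduced elsewhere in the patch, not in this section:

	/*
	 *   module_core_rx:  [ .text ............ | PLT ]   executable, read-only
	 *   module_core_rw:  [ .data / .bss ...... | GOT ]   writable, non-executable
	 *
	 *   gotent = module_core_rw + got_offset + entry      (data slot, written)
	 *   plt    = module_core_rx + plt_offset + entry      (branch target, executed)
	 */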
3347 diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3348 --- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3349 +++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3350 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3351 early_param("mem", early_parse_mem);
3352
3353 #ifdef CONFIG_S390_SWITCH_AMODE
3354 -unsigned int switch_amode = 0;
3355 -EXPORT_SYMBOL_GPL(switch_amode);
3356 -
3357 static int set_amode_and_uaccess(unsigned long user_amode,
3358 unsigned long user32_amode)
3359 {
3360 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3361 return 0;
3362 }
3363 }
3364 -
3365 -/*
3366 - * Switch kernel/user addressing modes?
3367 - */
3368 -static int __init early_parse_switch_amode(char *p)
3369 -{
3370 - switch_amode = 1;
3371 - return 0;
3372 -}
3373 -early_param("switch_amode", early_parse_switch_amode);
3374 -
3375 #else /* CONFIG_S390_SWITCH_AMODE */
3376 static inline int set_amode_and_uaccess(unsigned long user_amode,
3377 unsigned long user32_amode)
3378 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3379 }
3380 #endif /* CONFIG_S390_SWITCH_AMODE */
3381
3382 -#ifdef CONFIG_S390_EXEC_PROTECT
3383 -unsigned int s390_noexec = 0;
3384 -EXPORT_SYMBOL_GPL(s390_noexec);
3385 -
3386 -/*
3387 - * Enable execute protection?
3388 - */
3389 -static int __init early_parse_noexec(char *p)
3390 -{
3391 - if (!strncmp(p, "off", 3))
3392 - return 0;
3393 - switch_amode = 1;
3394 - s390_noexec = 1;
3395 - return 0;
3396 -}
3397 -early_param("noexec", early_parse_noexec);
3398 -#endif /* CONFIG_S390_EXEC_PROTECT */
3399 -
3400 static void setup_addressing_mode(void)
3401 {
3402 if (s390_noexec) {
3403 diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3404 --- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3405 +++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3406 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3407 */
3408 if (mmap_is_legacy()) {
3409 mm->mmap_base = TASK_UNMAPPED_BASE;
3410 +
3411 +#ifdef CONFIG_PAX_RANDMMAP
3412 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3413 + mm->mmap_base += mm->delta_mmap;
3414 +#endif
3415 +
3416 mm->get_unmapped_area = arch_get_unmapped_area;
3417 mm->unmap_area = arch_unmap_area;
3418 } else {
3419 mm->mmap_base = mmap_base();
3420 +
3421 +#ifdef CONFIG_PAX_RANDMMAP
3422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3423 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3424 +#endif
3425 +
3426 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3427 mm->unmap_area = arch_unmap_area_topdown;
3428 }
3429 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3430 */
3431 if (mmap_is_legacy()) {
3432 mm->mmap_base = TASK_UNMAPPED_BASE;
3433 +
3434 +#ifdef CONFIG_PAX_RANDMMAP
3435 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3436 + mm->mmap_base += mm->delta_mmap;
3437 +#endif
3438 +
3439 mm->get_unmapped_area = s390_get_unmapped_area;
3440 mm->unmap_area = arch_unmap_area;
3441 } else {
3442 mm->mmap_base = mmap_base();
3443 +
3444 +#ifdef CONFIG_PAX_RANDMMAP
3445 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3446 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3447 +#endif
3448 +
3449 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3450 mm->unmap_area = arch_unmap_area_topdown;
3451 }
3452 diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3453 --- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3454 +++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3455 @@ -17,7 +17,7 @@ do { \
3456 #define finish_arch_switch(prev) do {} while (0)
3457
3458 typedef void (*vi_handler_t)(void);
3459 -extern unsigned long arch_align_stack(unsigned long sp);
3460 +#define arch_align_stack(x) (x)
3461
3462 #define mb() barrier()
3463 #define rmb() barrier()
3464 diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3465 --- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3466 +++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3467 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3468
3469 return task_pt_regs(task)->cp0_epc;
3470 }
3471 -
3472 -unsigned long arch_align_stack(unsigned long sp)
3473 -{
3474 - return sp;
3475 -}
3476 diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3477 --- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3478 +++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3479 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3480 return 0;
3481 }
3482
3483 -static struct platform_suspend_ops hp6x0_pm_ops = {
3484 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3485 .enter = hp6x0_pm_enter,
3486 .valid = suspend_valid_only_mem,
3487 };
3488 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3489 --- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3490 +++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3491 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3492 NULL,
3493 };
3494
3495 -static struct sysfs_ops sq_sysfs_ops = {
3496 +static const struct sysfs_ops sq_sysfs_ops = {
3497 .show = sq_sysfs_show,
3498 .store = sq_sysfs_store,
3499 };
3500 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3501 --- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3502 +++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3503 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3504 return 0;
3505 }
3506
3507 -static struct platform_suspend_ops sh_pm_ops = {
3508 +static const struct platform_suspend_ops sh_pm_ops = {
3509 .enter = sh_pm_enter,
3510 .valid = suspend_valid_only_mem,
3511 };
3512 diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3513 --- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3514 +++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3515 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3516 {
3517 }
3518
3519 -struct kgdb_arch arch_kgdb_ops = {
3520 +const struct kgdb_arch arch_kgdb_ops = {
3521 /* Breakpoint instruction: trapa #0x3c */
3522 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3523 .gdb_bpt_instr = { 0x3c, 0xc3 },
3524 diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3525 --- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3526 +++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3527 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3528 addr = PAGE_ALIGN(addr);
3529
3530 vma = find_vma(mm, addr);
3531 - if (TASK_SIZE - len >= addr &&
3532 - (!vma || addr + len <= vma->vm_start))
3533 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3534 return addr;
3535 }
3536
3537 @@ -106,7 +105,7 @@ full_search:
3538 }
3539 return -ENOMEM;
3540 }
3541 - if (likely(!vma || addr + len <= vma->vm_start)) {
3542 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3543 /*
3544 * Remember the place where we stopped the search:
3545 */
3546 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3547 addr = PAGE_ALIGN(addr);
3548
3549 vma = find_vma(mm, addr);
3550 - if (TASK_SIZE - len >= addr &&
3551 - (!vma || addr + len <= vma->vm_start))
3552 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3553 return addr;
3554 }
3555
3556 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3557 /* make sure it can fit in the remaining address space */
3558 if (likely(addr > len)) {
3559 vma = find_vma(mm, addr-len);
3560 - if (!vma || addr <= vma->vm_start) {
3561 + if (check_heap_stack_gap(vma, addr - len, len)) {
3562 /* remember the address as a hint for next time */
3563 return (mm->free_area_cache = addr-len);
3564 }
3565 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3566 if (unlikely(mm->mmap_base < len))
3567 goto bottomup;
3568
3569 - addr = mm->mmap_base-len;
3570 - if (do_colour_align)
3571 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3572 + addr = mm->mmap_base - len;
3573
3574 do {
3575 + if (do_colour_align)
3576 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3577 /*
3578 * Lookup failure means no vma is above this address,
3579 * else if new region fits below vma->vm_start,
3580 * return with success:
3581 */
3582 vma = find_vma(mm, addr);
3583 - if (likely(!vma || addr+len <= vma->vm_start)) {
3584 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3585 /* remember the address as a hint for next time */
3586 return (mm->free_area_cache = addr);
3587 }
3588 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3589 mm->cached_hole_size = vma->vm_start - addr;
3590
3591 /* try just below the current vma->vm_start */
3592 - addr = vma->vm_start-len;
3593 - if (do_colour_align)
3594 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3595 - } while (likely(len < vma->vm_start));
3596 + addr = skip_heap_stack_gap(vma, len);
3597 + } while (!IS_ERR_VALUE(addr));
3598
3599 bottomup:
3600 /*
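The restructured top-down loop above now asks skip_heap_stack_gap() for the next candidate address and stops when it returns an error value, instead of stepping straight to vma->vm_start - len. Like check_heap_stack_gap() earlier, the helper's body is not in this section; the sketch below is reconstructed from how the call sites use it and from the IS_ERR_VALUE() termination test, with sysctl_heap_stack_gap again assumed:

	/* Illustrative sketch only. */
	static inline unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma,
							unsigned long len)
	{
		unsigned long want = len;

		if (vma->vm_flags & VM_GROWSDOWN)	/* leave the cushion below a stack */
			want += sysctl_heap_stack_gap;
		if (vma->vm_start < want)
			return -ENOMEM;			/* caught by IS_ERR_VALUE() above  */
		return vma->vm_start - want;		/* next candidate, gap included    */
	}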
3601 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3602 --- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3603 +++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3604 @@ -14,18 +14,40 @@
3605 #define ATOMIC64_INIT(i) { (i) }
3606
3607 #define atomic_read(v) ((v)->counter)
3608 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3609 +{
3610 + return v->counter;
3611 +}
3612 #define atomic64_read(v) ((v)->counter)
3613 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3614 +{
3615 + return v->counter;
3616 +}
3617
3618 #define atomic_set(v, i) (((v)->counter) = i)
3619 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3620 +{
3621 + v->counter = i;
3622 +}
3623 #define atomic64_set(v, i) (((v)->counter) = i)
3624 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3625 +{
3626 + v->counter = i;
3627 +}
3628
3629 extern void atomic_add(int, atomic_t *);
3630 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3631 extern void atomic64_add(long, atomic64_t *);
3632 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3633 extern void atomic_sub(int, atomic_t *);
3634 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3635 extern void atomic64_sub(long, atomic64_t *);
3636 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3637
3638 extern int atomic_add_ret(int, atomic_t *);
3639 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3640 extern long atomic64_add_ret(long, atomic64_t *);
3641 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3642 extern int atomic_sub_ret(int, atomic_t *);
3643 extern long atomic64_sub_ret(long, atomic64_t *);
3644
3645 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3646 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3647
3648 #define atomic_inc_return(v) atomic_add_ret(1, v)
3649 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3650 +{
3651 + return atomic_add_ret_unchecked(1, v);
3652 +}
3653 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3654 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3655 +{
3656 + return atomic64_add_ret_unchecked(1, v);
3657 +}
3658
3659 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3660 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3661
3662 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3663 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3664 +{
3665 + return atomic_add_ret_unchecked(i, v);
3666 +}
3667 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3668 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3669 +{
3670 + return atomic64_add_ret_unchecked(i, v);
3671 +}
3672
3673 /*
3674 * atomic_inc_and_test - increment and test
3675 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3676 * other cases.
3677 */
3678 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3679 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3680 +{
3681 + return atomic_inc_return_unchecked(v) == 0;
3682 +}
3683 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3684
3685 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3686 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3687 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3688
3689 #define atomic_inc(v) atomic_add(1, v)
3690 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3691 +{
3692 + atomic_add_unchecked(1, v);
3693 +}
3694 #define atomic64_inc(v) atomic64_add(1, v)
3695 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3696 +{
3697 + atomic64_add_unchecked(1, v);
3698 +}
3699
3700 #define atomic_dec(v) atomic_sub(1, v)
3701 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3702 +{
3703 + atomic_sub_unchecked(1, v);
3704 +}
3705 #define atomic64_dec(v) atomic64_sub(1, v)
3706 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3707 +{
3708 + atomic64_sub_unchecked(1, v);
3709 +}
3710
3711 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3712 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3713
3714 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3715 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3716 +{
3717 + return cmpxchg(&v->counter, old, new);
3718 +}
3719 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3720 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3721 +{
3722 + return xchg(&v->counter, new);
3723 +}
3724
3725 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3726 {
3727 - int c, old;
3728 + int c, old, new;
3729 c = atomic_read(v);
3730 for (;;) {
3731 - if (unlikely(c == (u)))
3732 + if (unlikely(c == u))
3733 break;
3734 - old = atomic_cmpxchg((v), c, c + (a));
3735 +
3736 + asm volatile("addcc %2, %0, %0\n"
3737 +
3738 +#ifdef CONFIG_PAX_REFCOUNT
3739 + "tvs %%icc, 6\n"
3740 +#endif
3741 +
3742 + : "=r" (new)
3743 + : "0" (c), "ir" (a)
3744 + : "cc");
3745 +
3746 + old = atomic_cmpxchg(v, c, new);
3747 if (likely(old == c))
3748 break;
3749 c = old;
3750 }
3751 - return c != (u);
3752 + return c != u;
3753 }
3754
3755 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3756 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3757 #define atomic64_cmpxchg(v, o, n) \
3758 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3759 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3760 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3761 +{
3762 + return xchg(&v->counter, new);
3763 +}
3764
3765 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3766 {
3767 - long c, old;
3768 + long c, old, new;
3769 c = atomic64_read(v);
3770 for (;;) {
3771 - if (unlikely(c == (u)))
3772 + if (unlikely(c == u))
3773 break;
3774 - old = atomic64_cmpxchg((v), c, c + (a));
3775 +
3776 + asm volatile("addcc %2, %0, %0\n"
3777 +
3778 +#ifdef CONFIG_PAX_REFCOUNT
3779 + "tvs %%xcc, 6\n"
3780 +#endif
3781 +
3782 + : "=r" (new)
3783 + : "0" (c), "ir" (a)
3784 + : "cc");
3785 +
3786 + old = atomic64_cmpxchg(v, c, new);
3787 if (likely(old == c))
3788 break;
3789 c = old;
3790 }
3791 - return c != (u);
3792 + return c != u;
3793 }
3794
3795 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
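Two things happen in this header: every overflow-checked operation gains an _unchecked twin that opts out of the check, and the open-coded adds in atomic_add_unless()/atomic64_add_unless() switch to addcc followed by tvs (trap if the overflow flag is set) under PAX_REFCOUNT, so a signed wrap raises a trap instead of silently corrupting a reference count. The checked add/sub primitives themselves are out-of-line assembly, not part of this hunk. A usage sketch of the intended split; the variable and function names below are invented:

	/* Illustrative sketch only. */
	static atomic_t obj_refcount = ATOMIC_INIT(1);		/* overflow must trap      */
	static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);	/* wrapping is harmless    */

	static void obj_get(void)
	{
		atomic_inc(&obj_refcount);		/* overflow-checked under PAX_REFCOUNT */
	}

	static void note_rx(void)
	{
		atomic_inc_unchecked(&rx_packets);	/* plain add, never traps  */
	}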
3796 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3797 --- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3798 +++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3799 @@ -8,7 +8,7 @@
3800 #define _SPARC_CACHE_H
3801
3802 #define L1_CACHE_SHIFT 5
3803 -#define L1_CACHE_BYTES 32
3804 +#define L1_CACHE_BYTES 32UL
3805 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3806
3807 #ifdef CONFIG_SPARC32
3808 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3809 --- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3810 +++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3811 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3812 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3813 #define dma_is_consistent(d, h) (1)
3814
3815 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3816 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3817 extern struct bus_type pci_bus_type;
3818
3819 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3820 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3821 {
3822 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3823 if (dev->bus == &pci_bus_type)
3824 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3825 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3826 dma_addr_t *dma_handle, gfp_t flag)
3827 {
3828 - struct dma_map_ops *ops = get_dma_ops(dev);
3829 + const struct dma_map_ops *ops = get_dma_ops(dev);
3830 void *cpu_addr;
3831
3832 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3833 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3834 static inline void dma_free_coherent(struct device *dev, size_t size,
3835 void *cpu_addr, dma_addr_t dma_handle)
3836 {
3837 - struct dma_map_ops *ops = get_dma_ops(dev);
3838 + const struct dma_map_ops *ops = get_dma_ops(dev);
3839
3840 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3841 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3842 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3843 --- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3844 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3845 @@ -116,6 +116,13 @@ typedef struct {
3846
3847 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3848
3849 +#ifdef CONFIG_PAX_ASLR
3850 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3851 +
3852 +#define PAX_DELTA_MMAP_LEN 16
3853 +#define PAX_DELTA_STACK_LEN 16
3854 +#endif
3855 +
3856 /* This yields a mask that user programs can use to figure out what
3857 instruction set this cpu supports. This can NOT be done in userspace
3858 on Sparc. */
3859 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3860 --- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3861 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3862 @@ -163,6 +163,12 @@ typedef struct {
3863 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3864 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3865
3866 +#ifdef CONFIG_PAX_ASLR
3867 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3868 +
3869 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3870 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3871 +#endif
3872
3873 /* This yields a mask that user programs can use to figure out what
3874 instruction set this cpu supports. */
3875 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3876 --- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3877 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3878 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3879 BTFIXUPDEF_INT(page_none)
3880 BTFIXUPDEF_INT(page_copy)
3881 BTFIXUPDEF_INT(page_readonly)
3882 +
3883 +#ifdef CONFIG_PAX_PAGEEXEC
3884 +BTFIXUPDEF_INT(page_shared_noexec)
3885 +BTFIXUPDEF_INT(page_copy_noexec)
3886 +BTFIXUPDEF_INT(page_readonly_noexec)
3887 +#endif
3888 +
3889 BTFIXUPDEF_INT(page_kernel)
3890
3891 #define PMD_SHIFT SUN4C_PMD_SHIFT
3892 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3893 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3894 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3895
3896 +#ifdef CONFIG_PAX_PAGEEXEC
3897 +extern pgprot_t PAGE_SHARED_NOEXEC;
3898 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3899 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3900 +#else
3901 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3902 +# define PAGE_COPY_NOEXEC PAGE_COPY
3903 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3904 +#endif
3905 +
3906 extern unsigned long page_kernel;
3907
3908 #ifdef MODULE
3909 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
3910 --- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3911 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3912 @@ -115,6 +115,13 @@
3913 SRMMU_EXEC | SRMMU_REF)
3914 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3915 SRMMU_EXEC | SRMMU_REF)
3916 +
3917 +#ifdef CONFIG_PAX_PAGEEXEC
3918 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3919 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3920 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3921 +#endif
3922 +
3923 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3924 SRMMU_DIRTY | SRMMU_REF)
3925
3926 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
3927 --- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3928 +++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
3929 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3930
3931 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3932
3933 -static void inline arch_read_lock(raw_rwlock_t *lock)
3934 +static inline void arch_read_lock(raw_rwlock_t *lock)
3935 {
3936 unsigned long tmp1, tmp2;
3937
3938 __asm__ __volatile__ (
3939 "1: ldsw [%2], %0\n"
3940 " brlz,pn %0, 2f\n"
3941 -"4: add %0, 1, %1\n"
3942 +"4: addcc %0, 1, %1\n"
3943 +
3944 +#ifdef CONFIG_PAX_REFCOUNT
3945 +" tvs %%icc, 6\n"
3946 +#endif
3947 +
3948 " cas [%2], %0, %1\n"
3949 " cmp %0, %1\n"
3950 " bne,pn %%icc, 1b\n"
3951 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
3952 " .previous"
3953 : "=&r" (tmp1), "=&r" (tmp2)
3954 : "r" (lock)
3955 - : "memory");
3956 + : "memory", "cc");
3957 }
3958
3959 -static int inline arch_read_trylock(raw_rwlock_t *lock)
3960 +static inline int arch_read_trylock(raw_rwlock_t *lock)
3961 {
3962 int tmp1, tmp2;
3963
3964 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3965 "1: ldsw [%2], %0\n"
3966 " brlz,a,pn %0, 2f\n"
3967 " mov 0, %0\n"
3968 -" add %0, 1, %1\n"
3969 +" addcc %0, 1, %1\n"
3970 +
3971 +#ifdef CONFIG_PAX_REFCOUNT
3972 +" tvs %%icc, 6\n"
3973 +#endif
3974 +
3975 " cas [%2], %0, %1\n"
3976 " cmp %0, %1\n"
3977 " bne,pn %%icc, 1b\n"
3978 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3979 return tmp1;
3980 }
3981
3982 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3983 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3984 {
3985 unsigned long tmp1, tmp2;
3986
3987 __asm__ __volatile__(
3988 "1: lduw [%2], %0\n"
3989 -" sub %0, 1, %1\n"
3990 +" subcc %0, 1, %1\n"
3991 +
3992 +#ifdef CONFIG_PAX_REFCOUNT
3993 +" tvs %%icc, 6\n"
3994 +#endif
3995 +
3996 " cas [%2], %0, %1\n"
3997 " cmp %0, %1\n"
3998 " bne,pn %%xcc, 1b\n"
3999 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4000 : "memory");
4001 }
4002
4003 -static void inline arch_write_lock(raw_rwlock_t *lock)
4004 +static inline void arch_write_lock(raw_rwlock_t *lock)
4005 {
4006 unsigned long mask, tmp1, tmp2;
4007
4008 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4009 : "memory");
4010 }
4011
4012 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4013 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4014 {
4015 __asm__ __volatile__(
4016 " stw %%g0, [%0]"
4017 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4018 : "memory");
4019 }
4020
4021 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4022 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4023 {
4024 unsigned long mask, tmp1, tmp2, result;
4025
4026 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4027 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4028 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4029 @@ -50,6 +50,8 @@ struct thread_info {
4030 unsigned long w_saved;
4031
4032 struct restart_block restart_block;
4033 +
4034 + unsigned long lowest_stack;
4035 };
4036
4037 /*
4038 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4039 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4040 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4041 @@ -68,6 +68,8 @@ struct thread_info {
4042 struct pt_regs *kern_una_regs;
4043 unsigned int kern_una_insn;
4044
4045 + unsigned long lowest_stack;
4046 +
4047 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4048 };
4049
4050 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4051 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4052 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4053 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4054
4055 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4056 {
4057 - if (n && __access_ok((unsigned long) to, n))
4058 + if ((long)n < 0)
4059 + return n;
4060 +
4061 + if (n && __access_ok((unsigned long) to, n)) {
4062 + if (!__builtin_constant_p(n))
4063 + check_object_size(from, n, true);
4064 return __copy_user(to, (__force void __user *) from, n);
4065 - else
4066 + } else
4067 return n;
4068 }
4069
4070 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 + if ((long)n < 0)
4073 + return n;
4074 +
4075 + if (!__builtin_constant_p(n))
4076 + check_object_size(from, n, true);
4077 +
4078 return __copy_user(to, (__force void __user *) from, n);
4079 }
4080
4081 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4082 {
4083 - if (n && __access_ok((unsigned long) from, n))
4084 + if ((long)n < 0)
4085 + return n;
4086 +
4087 + if (n && __access_ok((unsigned long) from, n)) {
4088 + if (!__builtin_constant_p(n))
4089 + check_object_size(to, n, false);
4090 return __copy_user((__force void __user *) to, from, n);
4091 - else
4092 + } else
4093 return n;
4094 }
4095
4096 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4097 {
4098 + if ((long)n < 0)
4099 + return n;
4100 +
4101 return __copy_user((__force void __user *) to, from, n);
4102 }
4103
4104 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4105 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4106 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4107 @@ -9,6 +9,7 @@
4108 #include <linux/compiler.h>
4109 #include <linux/string.h>
4110 #include <linux/thread_info.h>
4111 +#include <linux/kernel.h>
4112 #include <asm/asi.h>
4113 #include <asm/system.h>
4114 #include <asm/spitfire.h>
4115 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4116 static inline unsigned long __must_check
4117 copy_from_user(void *to, const void __user *from, unsigned long size)
4118 {
4119 - unsigned long ret = ___copy_from_user(to, from, size);
4120 + unsigned long ret;
4121
4122 + if ((long)size < 0 || size > INT_MAX)
4123 + return size;
4124 +
4125 + if (!__builtin_constant_p(size))
4126 + check_object_size(to, size, false);
4127 +
4128 + ret = ___copy_from_user(to, from, size);
4129 if (unlikely(ret))
4130 ret = copy_from_user_fixup(to, from, size);
4131 return ret;
4132 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4133 static inline unsigned long __must_check
4134 copy_to_user(void __user *to, const void *from, unsigned long size)
4135 {
4136 - unsigned long ret = ___copy_to_user(to, from, size);
4137 + unsigned long ret;
4138 +
4139 + if ((long)size < 0 || size > INT_MAX)
4140 + return size;
4141 +
4142 + if (!__builtin_constant_p(size))
4143 + check_object_size(from, size, true);
4144
4145 + ret = ___copy_to_user(to, from, size);
4146 if (unlikely(ret))
4147 ret = copy_to_user_fixup(to, from, size);
4148 return ret;
4149 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4150 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4151 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4152 @@ -1,5 +1,13 @@
4153 #ifndef ___ASM_SPARC_UACCESS_H
4154 #define ___ASM_SPARC_UACCESS_H
4155 +
4156 +#ifdef __KERNEL__
4157 +#ifndef __ASSEMBLY__
4158 +#include <linux/types.h>
4159 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4160 +#endif
4161 +#endif
4162 +
4163 #if defined(__sparc__) && defined(__arch64__)
4164 #include <asm/uaccess_64.h>
4165 #else
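check_object_size() is the hook for bounds-checking the kernel side of a user copy, and the !__builtin_constant_p(n) guards in the uaccess hunks above mean only runtime-sized copies pay for it, since a compile-time-constant size can be validated statically. A sketch of the distinction at a call site; the struct and variable names are invented:

	/* Illustrative sketch only. */
	struct request r;
	char *buf = kmalloc(len, GFP_KERNEL);

	copy_from_user(&r, ureq, sizeof(r));	/* constant size: check skipped            */
	copy_from_user(buf, ubuf, len);		/* runtime size: check_object_size(buf,
						 * len, false) validates the kernel object */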
4166 diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4167 --- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4168 +++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4169 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4170 spin_unlock_irqrestore(&iommu->lock, flags);
4171 }
4172
4173 -static struct dma_map_ops sun4u_dma_ops = {
4174 +static const struct dma_map_ops sun4u_dma_ops = {
4175 .alloc_coherent = dma_4u_alloc_coherent,
4176 .free_coherent = dma_4u_free_coherent,
4177 .map_page = dma_4u_map_page,
4178 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4179 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4180 };
4181
4182 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4183 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4184 EXPORT_SYMBOL(dma_ops);
4185
4186 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4187 diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4188 --- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4189 +++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4190 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4191 BUG();
4192 }
4193
4194 -struct dma_map_ops sbus_dma_ops = {
4195 +const struct dma_map_ops sbus_dma_ops = {
4196 .alloc_coherent = sbus_alloc_coherent,
4197 .free_coherent = sbus_free_coherent,
4198 .map_page = sbus_map_page,
4199 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4200 .sync_sg_for_device = sbus_sync_sg_for_device,
4201 };
4202
4203 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4204 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4205 EXPORT_SYMBOL(dma_ops);
4206
4207 static int __init sparc_register_ioport(void)
4208 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4209 }
4210 }
4211
4212 -struct dma_map_ops pci32_dma_ops = {
4213 +const struct dma_map_ops pci32_dma_ops = {
4214 .alloc_coherent = pci32_alloc_coherent,
4215 .free_coherent = pci32_free_coherent,
4216 .map_page = pci32_map_page,
4217 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4218 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4219 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4220 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4221 {
4222 }
4223
4224 -struct kgdb_arch arch_kgdb_ops = {
4225 +const struct kgdb_arch arch_kgdb_ops = {
4226 /* Breakpoint instruction: ta 0x7d */
4227 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4228 };
4229 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4230 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4231 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4232 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4233 {
4234 }
4235
4236 -struct kgdb_arch arch_kgdb_ops = {
4237 +const struct kgdb_arch arch_kgdb_ops = {
4238 /* Breakpoint instruction: ta 0x72 */
4239 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4240 };
4241 diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4242 --- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4243 +++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4244 @@ -3,7 +3,7 @@
4245 #
4246
4247 asflags-y := -ansi
4248 -ccflags-y := -Werror
4249 +#ccflags-y := -Werror
4250
4251 extra-y := head_$(BITS).o
4252 extra-y += init_task.o
4253 diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4254 --- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4255 +++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4256 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4257 spin_unlock_irqrestore(&iommu->lock, flags);
4258 }
4259
4260 -static struct dma_map_ops sun4v_dma_ops = {
4261 +static const struct dma_map_ops sun4v_dma_ops = {
4262 .alloc_coherent = dma_4v_alloc_coherent,
4263 .free_coherent = dma_4v_free_coherent,
4264 .map_page = dma_4v_map_page,
4265 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4266 --- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4267 +++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4268 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4269 rw->ins[4], rw->ins[5],
4270 rw->ins[6],
4271 rw->ins[7]);
4272 - printk("%pS\n", (void *) rw->ins[7]);
4273 + printk("%pA\n", (void *) rw->ins[7]);
4274 rw = (struct reg_window32 *) rw->ins[6];
4275 }
4276 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4277 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4278
4279 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4280 r->psr, r->pc, r->npc, r->y, print_tainted());
4281 - printk("PC: <%pS>\n", (void *) r->pc);
4282 + printk("PC: <%pA>\n", (void *) r->pc);
4283 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4284 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4285 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4286 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4287 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4288 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4289 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4290 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4291
4292 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4293 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4294 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4295 rw = (struct reg_window32 *) fp;
4296 pc = rw->ins[7];
4297 printk("[%08lx : ", pc);
4298 - printk("%pS ] ", (void *) pc);
4299 + printk("%pA ] ", (void *) pc);
4300 fp = rw->ins[6];
4301 } while (++count < 16);
4302 printk("\n");
4303 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4304 --- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4305 +++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4306 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4307 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4308 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4309 if (regs->tstate & TSTATE_PRIV)
4310 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4311 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4312 }
4313
4314 void show_regs(struct pt_regs *regs)
4315 {
4316 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4317 regs->tpc, regs->tnpc, regs->y, print_tainted());
4318 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4319 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4320 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4321 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4322 regs->u_regs[3]);
4323 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4324 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4325 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4326 regs->u_regs[15]);
4327 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4328 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4329 show_regwindow(regs);
4330 }
4331
4332 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4333 ((tp && tp->task) ? tp->task->pid : -1));
4334
4335 if (gp->tstate & TSTATE_PRIV) {
4336 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4337 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4338 (void *) gp->tpc,
4339 (void *) gp->o7,
4340 (void *) gp->i7,
4341 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4342 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4343 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4344 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4345 if (ARCH_SUN4C && len > 0x20000000)
4346 return -ENOMEM;
4347 if (!addr)
4348 - addr = TASK_UNMAPPED_BASE;
4349 + addr = current->mm->mmap_base;
4350
4351 if (flags & MAP_SHARED)
4352 addr = COLOUR_ALIGN(addr);
4353 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4354 }
4355 if (TASK_SIZE - PAGE_SIZE - len < addr)
4356 return -ENOMEM;
4357 - if (!vmm || addr + len <= vmm->vm_start)
4358 + if (check_heap_stack_gap(vmm, addr, len))
4359 return addr;
4360 addr = vmm->vm_end;
4361 if (flags & MAP_SHARED)
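
check_heap_stack_gap() itself is added elsewhere in the patch (its definition is not in this section); these hunks only convert the callers. The motivation is that the old test, !vma || addr + len <= vma->vm_start, lets a fresh mapping end flush against the next VMA, including a stack that grows down toward it. The same hunk also makes a NULL hint start the search at mm->mmap_base rather than TASK_UNMAPPED_BASE, so the randomized base chosen by arch_pick_mmap_layout() is honoured. A sketch of the contract the converted callers rely on, with sysctl_heap_stack_gap standing in for the patch's configurable gap (an assumption here, not the verbatim definition):

    /* Sketch only: accept [addr, addr+len) when it leaves a hole below the
     * next VMA if that VMA is a downward-growing stack. */
    static inline int check_heap_stack_gap(const struct vm_area_struct *vma,
                                           unsigned long addr, unsigned long len)
    {
            unsigned long gap = 0;

            if (!vma)
                    return 1;                       /* nothing above us */
            if (vma->vm_flags & VM_GROWSDOWN)
                    gap = sysctl_heap_stack_gap;    /* assumed patch-provided sysctl */
            if (vma->vm_start < gap)
                    return 0;
            return addr + len <= vma->vm_start - gap;
    }
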
4362 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4363 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4364 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4365 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4366 /* We do not accept a shared mapping if it would violate
4367 * cache aliasing constraints.
4368 */
4369 - if ((flags & MAP_SHARED) &&
4370 + if ((filp || (flags & MAP_SHARED)) &&
4371 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4372 return -EINVAL;
4373 return addr;
4374 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4375 if (filp || (flags & MAP_SHARED))
4376 do_color_align = 1;
4377
4378 +#ifdef CONFIG_PAX_RANDMMAP
4379 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4380 +#endif
4381 +
4382 if (addr) {
4383 if (do_color_align)
4384 addr = COLOUR_ALIGN(addr, pgoff);
4385 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4386 addr = PAGE_ALIGN(addr);
4387
4388 vma = find_vma(mm, addr);
4389 - if (task_size - len >= addr &&
4390 - (!vma || addr + len <= vma->vm_start))
4391 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4392 return addr;
4393 }
4394
4395 if (len > mm->cached_hole_size) {
4396 - start_addr = addr = mm->free_area_cache;
4397 + start_addr = addr = mm->free_area_cache;
4398 } else {
4399 - start_addr = addr = TASK_UNMAPPED_BASE;
4400 + start_addr = addr = mm->mmap_base;
4401 mm->cached_hole_size = 0;
4402 }
4403
4404 @@ -175,14 +178,14 @@ full_search:
4405 vma = find_vma(mm, VA_EXCLUDE_END);
4406 }
4407 if (unlikely(task_size < addr)) {
4408 - if (start_addr != TASK_UNMAPPED_BASE) {
4409 - start_addr = addr = TASK_UNMAPPED_BASE;
4410 + if (start_addr != mm->mmap_base) {
4411 + start_addr = addr = mm->mmap_base;
4412 mm->cached_hole_size = 0;
4413 goto full_search;
4414 }
4415 return -ENOMEM;
4416 }
4417 - if (likely(!vma || addr + len <= vma->vm_start)) {
4418 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4419 /*
4420 * Remember the place where we stopped the search:
4421 */
4422 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4423 /* We do not accept a shared mapping if it would violate
4424 * cache aliasing constraints.
4425 */
4426 - if ((flags & MAP_SHARED) &&
4427 + if ((filp || (flags & MAP_SHARED)) &&
4428 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4429 return -EINVAL;
4430 return addr;
4431 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4432 addr = PAGE_ALIGN(addr);
4433
4434 vma = find_vma(mm, addr);
4435 - if (task_size - len >= addr &&
4436 - (!vma || addr + len <= vma->vm_start))
4437 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4438 return addr;
4439 }
4440
4441 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4442 /* make sure it can fit in the remaining address space */
4443 if (likely(addr > len)) {
4444 vma = find_vma(mm, addr-len);
4445 - if (!vma || addr <= vma->vm_start) {
4446 + if (check_heap_stack_gap(vma, addr - len, len)) {
4447 /* remember the address as a hint for next time */
4448 return (mm->free_area_cache = addr-len);
4449 }
4450 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4451 if (unlikely(mm->mmap_base < len))
4452 goto bottomup;
4453
4454 - addr = mm->mmap_base-len;
4455 - if (do_color_align)
4456 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4457 + addr = mm->mmap_base - len;
4458
4459 do {
4460 + if (do_color_align)
4461 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4462 /*
4463 * Lookup failure means no vma is above this address,
4464 * else if new region fits below vma->vm_start,
4465 * return with success:
4466 */
4467 vma = find_vma(mm, addr);
4468 - if (likely(!vma || addr+len <= vma->vm_start)) {
4469 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4470 /* remember the address as a hint for next time */
4471 return (mm->free_area_cache = addr);
4472 }
4473 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4474 mm->cached_hole_size = vma->vm_start - addr;
4475
4476 /* try just below the current vma->vm_start */
4477 - addr = vma->vm_start-len;
4478 - if (do_color_align)
4479 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4480 - } while (likely(len < vma->vm_start));
4481 + addr = skip_heap_stack_gap(vma, len);
4482 + } while (!IS_ERR_VALUE(addr));
4483
4484 bottomup:
4485 /*
4486 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4487 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4488 sysctl_legacy_va_layout) {
4489 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4490 +
4491 +#ifdef CONFIG_PAX_RANDMMAP
4492 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4493 + mm->mmap_base += mm->delta_mmap;
4494 +#endif
4495 +
4496 mm->get_unmapped_area = arch_get_unmapped_area;
4497 mm->unmap_area = arch_unmap_area;
4498 } else {
4499 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4500 gap = (task_size / 6 * 5);
4501
4502 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4503 +
4504 +#ifdef CONFIG_PAX_RANDMMAP
4505 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4506 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4507 +#endif
4508 +
4509 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4510 mm->unmap_area = arch_unmap_area_topdown;
4511 }
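
Besides the cache-aliasing check now also firing for file mappings (filp || MAP_SHARED) and the MF_PAX_RANDMMAP guard that ignores a caller-supplied hint when the base is randomized, the top-down path is restructured: colour alignment moves inside the loop so every candidate is re-aligned, and instead of blindly retrying at vma->vm_start - len the loop asks a helper for the next candidate below the current VMA and stops when it signals exhaustion, hence the IS_ERR_VALUE() test. arch_pick_mmap_layout() then shifts the chosen base by the per-process delta_mmap/delta_stack values when MF_PAX_RANDMMAP is set. Like check_heap_stack_gap(), skip_heap_stack_gap() is defined outside this section; a sketch of the contract the loop relies on (sysctl_heap_stack_gap again assumed):

    /* Sketch only: next candidate start below @vma for a @len-byte mapping,
     * keeping a hole under a downward-growing stack; an error value (caught
     * by IS_ERR_VALUE()) ends the walk. */
    static inline unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma,
                                                    unsigned long len)
    {
            unsigned long gap = (vma->vm_flags & VM_GROWSDOWN) ? sysctl_heap_stack_gap : 0;

            if (vma->vm_start < len + gap)
                    return -ENOMEM;         /* would wrap past zero: give up */
            return vma->vm_start - len - gap;
    }
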
4512 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4513 --- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4514 +++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4515 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4516 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4517 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4518
4519 +extern void gr_handle_kernel_exploit(void);
4520 +
4521 void die_if_kernel(char *str, struct pt_regs *regs)
4522 {
4523 static int die_counter;
4524 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4525 count++ < 30 &&
4526 (((unsigned long) rw) >= PAGE_OFFSET) &&
4527 !(((unsigned long) rw) & 0x7)) {
4528 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4529 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4530 (void *) rw->ins[7]);
4531 rw = (struct reg_window32 *)rw->ins[6];
4532 }
4533 }
4534 printk("Instruction DUMP:");
4535 instruction_dump ((unsigned long *) regs->pc);
4536 - if(regs->psr & PSR_PS)
4537 + if(regs->psr & PSR_PS) {
4538 + gr_handle_kernel_exploit();
4539 do_exit(SIGKILL);
4540 + }
4541 do_exit(SIGSEGV);
4542 }
4543
4544 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4545 --- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4546 +++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4547 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4548 i + 1,
4549 p->trapstack[i].tstate, p->trapstack[i].tpc,
4550 p->trapstack[i].tnpc, p->trapstack[i].tt);
4551 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4552 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4553 }
4554 }
4555
4556 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4557
4558 lvl -= 0x100;
4559 if (regs->tstate & TSTATE_PRIV) {
4560 +
4561 +#ifdef CONFIG_PAX_REFCOUNT
4562 + if (lvl == 6)
4563 + pax_report_refcount_overflow(regs);
4564 +#endif
4565 +
4566 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4567 die_if_kernel(buffer, regs);
4568 }
4569 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4570 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4571 {
4572 char buffer[32];
4573 -
4574 +
4575 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4576 0, lvl, SIGTRAP) == NOTIFY_STOP)
4577 return;
4578
4579 +#ifdef CONFIG_PAX_REFCOUNT
4580 + if (lvl == 6)
4581 + pax_report_refcount_overflow(regs);
4582 +#endif
4583 +
4584 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4585
4586 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4587 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4588 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4589 printk("%s" "ERROR(%d): ",
4590 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4591 - printk("TPC<%pS>\n", (void *) regs->tpc);
4592 + printk("TPC<%pA>\n", (void *) regs->tpc);
4593 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4594 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4595 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4596 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4597 smp_processor_id(),
4598 (type & 0x1) ? 'I' : 'D',
4599 regs->tpc);
4600 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4601 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4602 panic("Irrecoverable Cheetah+ parity error.");
4603 }
4604
4605 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4606 smp_processor_id(),
4607 (type & 0x1) ? 'I' : 'D',
4608 regs->tpc);
4609 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4610 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4611 }
4612
4613 struct sun4v_error_entry {
4614 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4615
4616 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4617 regs->tpc, tl);
4618 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4619 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4620 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4621 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4622 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4623 (void *) regs->u_regs[UREG_I7]);
4624 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4625 "pte[%lx] error[%lx]\n",
4626 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4627
4628 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4629 regs->tpc, tl);
4630 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4631 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4632 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4633 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4634 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4635 (void *) regs->u_regs[UREG_I7]);
4636 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4637 "pte[%lx] error[%lx]\n",
4638 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4639 fp = (unsigned long)sf->fp + STACK_BIAS;
4640 }
4641
4642 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4643 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4644 } while (++count < 16);
4645 }
4646
4647 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4648 return (struct reg_window *) (fp + STACK_BIAS);
4649 }
4650
4651 +extern void gr_handle_kernel_exploit(void);
4652 +
4653 void die_if_kernel(char *str, struct pt_regs *regs)
4654 {
4655 static int die_counter;
4656 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4657 while (rw &&
4658 count++ < 30&&
4659 is_kernel_stack(current, rw)) {
4660 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4661 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4662 (void *) rw->ins[7]);
4663
4664 rw = kernel_stack_up(rw);
4665 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4666 }
4667 user_instruction_dump ((unsigned int __user *) regs->tpc);
4668 }
4669 - if (regs->tstate & TSTATE_PRIV)
4670 + if (regs->tstate & TSTATE_PRIV) {
4671 + gr_handle_kernel_exploit();
4672 do_exit(SIGKILL);
4673 + }
4674 +
4675 do_exit(SIGSEGV);
4676 }
4677 EXPORT_SYMBOL(die_if_kernel);
4678 diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4679 --- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4680 +++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4681 @@ -127,7 +127,7 @@ do_int_load:
4682 wr %o5, 0x0, %asi
4683 retl
4684 mov 0, %o0
4685 - .size __do_int_load, .-__do_int_load
4686 + .size do_int_load, .-do_int_load
4687
4688 .section __ex_table,"a"
4689 .word 4b, __retl_efault
4690 diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4691 --- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4692 +++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4693 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4694 if (count < 5) {
4695 last_time = jiffies;
4696 count++;
4697 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4698 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4699 regs->tpc, (void *) regs->tpc);
4700 }
4701 }
4702 diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4703 --- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4704 +++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4705 @@ -18,7 +18,12 @@
4706 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4707 BACKOFF_SETUP(%o2)
4708 1: lduw [%o1], %g1
4709 - add %g1, %o0, %g7
4710 + addcc %g1, %o0, %g7
4711 +
4712 +#ifdef CONFIG_PAX_REFCOUNT
4713 + tvs %icc, 6
4714 +#endif
4715 +
4716 cas [%o1], %g1, %g7
4717 cmp %g1, %g7
4718 bne,pn %icc, 2f
4719 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4720 2: BACKOFF_SPIN(%o2, %o3, 1b)
4721 .size atomic_add, .-atomic_add
4722
4723 + .globl atomic_add_unchecked
4724 + .type atomic_add_unchecked,#function
4725 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4726 + BACKOFF_SETUP(%o2)
4727 +1: lduw [%o1], %g1
4728 + add %g1, %o0, %g7
4729 + cas [%o1], %g1, %g7
4730 + cmp %g1, %g7
4731 + bne,pn %icc, 2f
4732 + nop
4733 + retl
4734 + nop
4735 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4736 + .size atomic_add_unchecked, .-atomic_add_unchecked
4737 +
4738 .globl atomic_sub
4739 .type atomic_sub,#function
4740 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4741 BACKOFF_SETUP(%o2)
4742 1: lduw [%o1], %g1
4743 - sub %g1, %o0, %g7
4744 + subcc %g1, %o0, %g7
4745 +
4746 +#ifdef CONFIG_PAX_REFCOUNT
4747 + tvs %icc, 6
4748 +#endif
4749 +
4750 cas [%o1], %g1, %g7
4751 cmp %g1, %g7
4752 bne,pn %icc, 2f
4753 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4754 2: BACKOFF_SPIN(%o2, %o3, 1b)
4755 .size atomic_sub, .-atomic_sub
4756
4757 + .globl atomic_sub_unchecked
4758 + .type atomic_sub_unchecked,#function
4759 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4760 + BACKOFF_SETUP(%o2)
4761 +1: lduw [%o1], %g1
4762 + sub %g1, %o0, %g7
4763 + cas [%o1], %g1, %g7
4764 + cmp %g1, %g7
4765 + bne,pn %icc, 2f
4766 + nop
4767 + retl
4768 + nop
4769 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4770 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4771 +
4772 .globl atomic_add_ret
4773 .type atomic_add_ret,#function
4774 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4775 BACKOFF_SETUP(%o2)
4776 1: lduw [%o1], %g1
4777 - add %g1, %o0, %g7
4778 + addcc %g1, %o0, %g7
4779 +
4780 +#ifdef CONFIG_PAX_REFCOUNT
4781 + tvs %icc, 6
4782 +#endif
4783 +
4784 cas [%o1], %g1, %g7
4785 cmp %g1, %g7
4786 bne,pn %icc, 2f
4787 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4788 2: BACKOFF_SPIN(%o2, %o3, 1b)
4789 .size atomic_add_ret, .-atomic_add_ret
4790
4791 + .globl atomic_add_ret_unchecked
4792 + .type atomic_add_ret_unchecked,#function
4793 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4794 + BACKOFF_SETUP(%o2)
4795 +1: lduw [%o1], %g1
4796 + addcc %g1, %o0, %g7
4797 + cas [%o1], %g1, %g7
4798 + cmp %g1, %g7
4799 + bne,pn %icc, 2f
4800 + add %g7, %o0, %g7
4801 + sra %g7, 0, %o0
4802 + retl
4803 + nop
4804 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4805 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4806 +
4807 .globl atomic_sub_ret
4808 .type atomic_sub_ret,#function
4809 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4810 BACKOFF_SETUP(%o2)
4811 1: lduw [%o1], %g1
4812 - sub %g1, %o0, %g7
4813 + subcc %g1, %o0, %g7
4814 +
4815 +#ifdef CONFIG_PAX_REFCOUNT
4816 + tvs %icc, 6
4817 +#endif
4818 +
4819 cas [%o1], %g1, %g7
4820 cmp %g1, %g7
4821 bne,pn %icc, 2f
4822 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4823 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4824 BACKOFF_SETUP(%o2)
4825 1: ldx [%o1], %g1
4826 - add %g1, %o0, %g7
4827 + addcc %g1, %o0, %g7
4828 +
4829 +#ifdef CONFIG_PAX_REFCOUNT
4830 + tvs %xcc, 6
4831 +#endif
4832 +
4833 casx [%o1], %g1, %g7
4834 cmp %g1, %g7
4835 bne,pn %xcc, 2f
4836 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4837 2: BACKOFF_SPIN(%o2, %o3, 1b)
4838 .size atomic64_add, .-atomic64_add
4839
4840 + .globl atomic64_add_unchecked
4841 + .type atomic64_add_unchecked,#function
4842 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4843 + BACKOFF_SETUP(%o2)
4844 +1: ldx [%o1], %g1
4845 + addcc %g1, %o0, %g7
4846 + casx [%o1], %g1, %g7
4847 + cmp %g1, %g7
4848 + bne,pn %xcc, 2f
4849 + nop
4850 + retl
4851 + nop
4852 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4853 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4854 +
4855 .globl atomic64_sub
4856 .type atomic64_sub,#function
4857 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4858 BACKOFF_SETUP(%o2)
4859 1: ldx [%o1], %g1
4860 - sub %g1, %o0, %g7
4861 + subcc %g1, %o0, %g7
4862 +
4863 +#ifdef CONFIG_PAX_REFCOUNT
4864 + tvs %xcc, 6
4865 +#endif
4866 +
4867 casx [%o1], %g1, %g7
4868 cmp %g1, %g7
4869 bne,pn %xcc, 2f
4870 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4871 2: BACKOFF_SPIN(%o2, %o3, 1b)
4872 .size atomic64_sub, .-atomic64_sub
4873
4874 + .globl atomic64_sub_unchecked
4875 + .type atomic64_sub_unchecked,#function
4876 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4877 + BACKOFF_SETUP(%o2)
4878 +1: ldx [%o1], %g1
4879 + subcc %g1, %o0, %g7
4880 + casx [%o1], %g1, %g7
4881 + cmp %g1, %g7
4882 + bne,pn %xcc, 2f
4883 + nop
4884 + retl
4885 + nop
4886 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4887 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4888 +
4889 .globl atomic64_add_ret
4890 .type atomic64_add_ret,#function
4891 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4892 BACKOFF_SETUP(%o2)
4893 1: ldx [%o1], %g1
4894 - add %g1, %o0, %g7
4895 + addcc %g1, %o0, %g7
4896 +
4897 +#ifdef CONFIG_PAX_REFCOUNT
4898 + tvs %xcc, 6
4899 +#endif
4900 +
4901 casx [%o1], %g1, %g7
4902 cmp %g1, %g7
4903 bne,pn %xcc, 2f
4904 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4905 2: BACKOFF_SPIN(%o2, %o3, 1b)
4906 .size atomic64_add_ret, .-atomic64_add_ret
4907
4908 + .globl atomic64_add_ret_unchecked
4909 + .type atomic64_add_ret_unchecked,#function
4910 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4911 + BACKOFF_SETUP(%o2)
4912 +1: ldx [%o1], %g1
4913 + addcc %g1, %o0, %g7
4914 + casx [%o1], %g1, %g7
4915 + cmp %g1, %g7
4916 + bne,pn %xcc, 2f
4917 + add %g7, %o0, %g7
4918 + mov %g7, %o0
4919 + retl
4920 + nop
4921 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4922 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4923 +
4924 .globl atomic64_sub_ret
4925 .type atomic64_sub_ret,#function
4926 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4927 BACKOFF_SETUP(%o2)
4928 1: ldx [%o1], %g1
4929 - sub %g1, %o0, %g7
4930 + subcc %g1, %o0, %g7
4931 +
4932 +#ifdef CONFIG_PAX_REFCOUNT
4933 + tvs %xcc, 6
4934 +#endif
4935 +
4936 casx [%o1], %g1, %g7
4937 cmp %g1, %g7
4938 bne,pn %xcc, 2f
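
Every hunk in atomic_64.S follows one pattern: add/sub becomes addcc/subcc so the condition codes are updated, and under CONFIG_PAX_REFCOUNT a tvs %icc, 6 (tvs %xcc, 6 for the 64-bit ops) traps only when the signed result overflowed. That software trap lands in bad_trap()/bad_trap_tl1(), where the lvl == 6 checks added in the traps_64.c hunks earlier hand it to pax_report_refcount_overflow(); the new *_unchecked entry points preserve the old wrapping behaviour for counters that are allowed to overflow. A conceptual C rendering of the checked add, as a sketch only (the real fast path pays a single conditional-trap instruction inside the cas retry loop):

    /* Sketch: what atomic_add amounts to with PAX_REFCOUNT, in C; the
     * explicit INT_MAX/INT_MIN tests stand in for the CPU's overflow flag. */
    static inline void atomic_add_checked(int i, atomic_t *v)
    {
            int old, sum;

            do {
                    old = atomic_read(v);
                    if ((i > 0 && old > INT_MAX - i) ||
                        (i < 0 && old < INT_MIN - i))
                            BUG();          /* plays the role of the tvs-raised trap */
                    sum = old + i;
            } while (atomic_cmpxchg(v, old, sum) != old);
    }
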
4939 diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
4940 --- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4941 +++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
4942 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
4943
4944 /* Atomic counter implementation. */
4945 EXPORT_SYMBOL(atomic_add);
4946 +EXPORT_SYMBOL(atomic_add_unchecked);
4947 EXPORT_SYMBOL(atomic_add_ret);
4948 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4949 EXPORT_SYMBOL(atomic_sub);
4950 +EXPORT_SYMBOL(atomic_sub_unchecked);
4951 EXPORT_SYMBOL(atomic_sub_ret);
4952 EXPORT_SYMBOL(atomic64_add);
4953 +EXPORT_SYMBOL(atomic64_add_unchecked);
4954 EXPORT_SYMBOL(atomic64_add_ret);
4955 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4956 EXPORT_SYMBOL(atomic64_sub);
4957 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4958 EXPORT_SYMBOL(atomic64_sub_ret);
4959
4960 /* Atomic bit operations. */
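
The extra exports exist because PAX_REFCOUNT needs an opt-out: counters that may legitimately wrap (statistics and the like) are converted elsewhere in the patch to an atomic_unchecked_t type whose helpers resolve to the non-trapping *_unchecked entry points added above, and modules must be able to reach them too. Illustrative use only; the type and helper names are assumed to follow the rest of the patch, which is outside this section:

    /* Sketch: a wrap-tolerant statistics counter opts out of the overflow trap. */
    static atomic_unchecked_t rx_frames = ATOMIC_INIT(0);

    static void note_rx_frame(void)
    {
            atomic_inc_unchecked(&rx_frames);   /* never raises the refcount trap */
    }

    static unsigned int rx_frame_count(void)
    {
            return (unsigned int)atomic_read_unchecked(&rx_frames);
    }
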
4961 diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
4962 --- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4963 +++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4964 @@ -2,7 +2,7 @@
4965 #
4966
4967 asflags-y := -ansi -DST_DIV0=0x02
4968 -ccflags-y := -Werror
4969 +#ccflags-y := -Werror
4970
4971 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4972 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4973 diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
4974 --- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4975 +++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4976 @@ -11,7 +11,12 @@
4977 .globl __down_read
4978 __down_read:
4979 1: lduw [%o0], %g1
4980 - add %g1, 1, %g7
4981 + addcc %g1, 1, %g7
4982 +
4983 +#ifdef CONFIG_PAX_REFCOUNT
4984 + tvs %icc, 6
4985 +#endif
4986 +
4987 cas [%o0], %g1, %g7
4988 cmp %g1, %g7
4989 bne,pn %icc, 1b
4990 @@ -33,7 +38,12 @@ __down_read:
4991 .globl __down_read_trylock
4992 __down_read_trylock:
4993 1: lduw [%o0], %g1
4994 - add %g1, 1, %g7
4995 + addcc %g1, 1, %g7
4996 +
4997 +#ifdef CONFIG_PAX_REFCOUNT
4998 + tvs %icc, 6
4999 +#endif
5000 +
5001 cmp %g7, 0
5002 bl,pn %icc, 2f
5003 mov 0, %o1
5004 @@ -51,7 +61,12 @@ __down_write:
5005 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5006 1:
5007 lduw [%o0], %g3
5008 - add %g3, %g1, %g7
5009 + addcc %g3, %g1, %g7
5010 +
5011 +#ifdef CONFIG_PAX_REFCOUNT
5012 + tvs %icc, 6
5013 +#endif
5014 +
5015 cas [%o0], %g3, %g7
5016 cmp %g3, %g7
5017 bne,pn %icc, 1b
5018 @@ -77,7 +92,12 @@ __down_write_trylock:
5019 cmp %g3, 0
5020 bne,pn %icc, 2f
5021 mov 0, %o1
5022 - add %g3, %g1, %g7
5023 + addcc %g3, %g1, %g7
5024 +
5025 +#ifdef CONFIG_PAX_REFCOUNT
5026 + tvs %icc, 6
5027 +#endif
5028 +
5029 cas [%o0], %g3, %g7
5030 cmp %g3, %g7
5031 bne,pn %icc, 1b
5032 @@ -90,7 +110,12 @@ __down_write_trylock:
5033 __up_read:
5034 1:
5035 lduw [%o0], %g1
5036 - sub %g1, 1, %g7
5037 + subcc %g1, 1, %g7
5038 +
5039 +#ifdef CONFIG_PAX_REFCOUNT
5040 + tvs %icc, 6
5041 +#endif
5042 +
5043 cas [%o0], %g1, %g7
5044 cmp %g1, %g7
5045 bne,pn %icc, 1b
5046 @@ -118,7 +143,12 @@ __up_write:
5047 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5048 1:
5049 lduw [%o0], %g3
5050 - sub %g3, %g1, %g7
5051 + subcc %g3, %g1, %g7
5052 +
5053 +#ifdef CONFIG_PAX_REFCOUNT
5054 + tvs %icc, 6
5055 +#endif
5056 +
5057 cas [%o0], %g3, %g7
5058 cmp %g3, %g7
5059 bne,pn %icc, 1b
5060 @@ -143,7 +173,12 @@ __downgrade_write:
5061 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5062 1:
5063 lduw [%o0], %g3
5064 - sub %g3, %g1, %g7
5065 + subcc %g3, %g1, %g7
5066 +
5067 +#ifdef CONFIG_PAX_REFCOUNT
5068 + tvs %icc, 6
5069 +#endif
5070 +
5071 cas [%o0], %g3, %g7
5072 cmp %g3, %g7
5073 bne,pn %icc, 1b
5074 diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5075 --- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5076 +++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5077 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5078 # Export what is needed by arch/sparc/boot/Makefile
5079 export VMLINUX_INIT VMLINUX_MAIN
5080 VMLINUX_INIT := $(head-y) $(init-y)
5081 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5082 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5083 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5084 VMLINUX_MAIN += $(drivers-y) $(net-y)
5085
5086 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5087 --- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5088 +++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5089 @@ -21,6 +21,9 @@
5090 #include <linux/interrupt.h>
5091 #include <linux/module.h>
5092 #include <linux/kdebug.h>
5093 +#include <linux/slab.h>
5094 +#include <linux/pagemap.h>
5095 +#include <linux/compiler.h>
5096
5097 #include <asm/system.h>
5098 #include <asm/page.h>
5099 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5100 return safe_compute_effective_address(regs, insn);
5101 }
5102
5103 +#ifdef CONFIG_PAX_PAGEEXEC
5104 +#ifdef CONFIG_PAX_DLRESOLVE
5105 +static void pax_emuplt_close(struct vm_area_struct *vma)
5106 +{
5107 + vma->vm_mm->call_dl_resolve = 0UL;
5108 +}
5109 +
5110 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5111 +{
5112 + unsigned int *kaddr;
5113 +
5114 + vmf->page = alloc_page(GFP_HIGHUSER);
5115 + if (!vmf->page)
5116 + return VM_FAULT_OOM;
5117 +
5118 + kaddr = kmap(vmf->page);
5119 + memset(kaddr, 0, PAGE_SIZE);
5120 + kaddr[0] = 0x9DE3BFA8U; /* save */
5121 + flush_dcache_page(vmf->page);
5122 + kunmap(vmf->page);
5123 + return VM_FAULT_MAJOR;
5124 +}
5125 +
5126 +static const struct vm_operations_struct pax_vm_ops = {
5127 + .close = pax_emuplt_close,
5128 + .fault = pax_emuplt_fault
5129 +};
5130 +
5131 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5132 +{
5133 + int ret;
5134 +
5135 + vma->vm_mm = current->mm;
5136 + vma->vm_start = addr;
5137 + vma->vm_end = addr + PAGE_SIZE;
5138 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5139 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5140 + vma->vm_ops = &pax_vm_ops;
5141 +
5142 + ret = insert_vm_struct(current->mm, vma);
5143 + if (ret)
5144 + return ret;
5145 +
5146 + ++current->mm->total_vm;
5147 + return 0;
5148 +}
5149 +#endif
5150 +
5151 +/*
5152 + * PaX: decide what to do with offenders (regs->pc = fault address)
5153 + *
5154 + * returns 1 when task should be killed
5155 + * 2 when patched PLT trampoline was detected
5156 + * 3 when unpatched PLT trampoline was detected
5157 + */
5158 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5159 +{
5160 +
5161 +#ifdef CONFIG_PAX_EMUPLT
5162 + int err;
5163 +
5164 + do { /* PaX: patched PLT emulation #1 */
5165 + unsigned int sethi1, sethi2, jmpl;
5166 +
5167 + err = get_user(sethi1, (unsigned int *)regs->pc);
5168 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5169 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5170 +
5171 + if (err)
5172 + break;
5173 +
5174 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5175 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5176 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5177 + {
5178 + unsigned int addr;
5179 +
5180 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5181 + addr = regs->u_regs[UREG_G1];
5182 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5183 + regs->pc = addr;
5184 + regs->npc = addr+4;
5185 + return 2;
5186 + }
5187 + } while (0);
5188 +
5189 + { /* PaX: patched PLT emulation #2 */
5190 + unsigned int ba;
5191 +
5192 + err = get_user(ba, (unsigned int *)regs->pc);
5193 +
5194 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5195 + unsigned int addr;
5196 +
5197 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5198 + regs->pc = addr;
5199 + regs->npc = addr+4;
5200 + return 2;
5201 + }
5202 + }
5203 +
5204 + do { /* PaX: patched PLT emulation #3 */
5205 + unsigned int sethi, jmpl, nop;
5206 +
5207 + err = get_user(sethi, (unsigned int *)regs->pc);
5208 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5209 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5210 +
5211 + if (err)
5212 + break;
5213 +
5214 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5215 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5216 + nop == 0x01000000U)
5217 + {
5218 + unsigned int addr;
5219 +
5220 + addr = (sethi & 0x003FFFFFU) << 10;
5221 + regs->u_regs[UREG_G1] = addr;
5222 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5223 + regs->pc = addr;
5224 + regs->npc = addr+4;
5225 + return 2;
5226 + }
5227 + } while (0);
5228 +
5229 + do { /* PaX: unpatched PLT emulation step 1 */
5230 + unsigned int sethi, ba, nop;
5231 +
5232 + err = get_user(sethi, (unsigned int *)regs->pc);
5233 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5234 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5235 +
5236 + if (err)
5237 + break;
5238 +
5239 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5240 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5241 + nop == 0x01000000U)
5242 + {
5243 + unsigned int addr, save, call;
5244 +
5245 + if ((ba & 0xFFC00000U) == 0x30800000U)
5246 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5247 + else
5248 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5249 +
5250 + err = get_user(save, (unsigned int *)addr);
5251 + err |= get_user(call, (unsigned int *)(addr+4));
5252 + err |= get_user(nop, (unsigned int *)(addr+8));
5253 + if (err)
5254 + break;
5255 +
5256 +#ifdef CONFIG_PAX_DLRESOLVE
5257 + if (save == 0x9DE3BFA8U &&
5258 + (call & 0xC0000000U) == 0x40000000U &&
5259 + nop == 0x01000000U)
5260 + {
5261 + struct vm_area_struct *vma;
5262 + unsigned long call_dl_resolve;
5263 +
5264 + down_read(&current->mm->mmap_sem);
5265 + call_dl_resolve = current->mm->call_dl_resolve;
5266 + up_read(&current->mm->mmap_sem);
5267 + if (likely(call_dl_resolve))
5268 + goto emulate;
5269 +
5270 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5271 +
5272 + down_write(&current->mm->mmap_sem);
5273 + if (current->mm->call_dl_resolve) {
5274 + call_dl_resolve = current->mm->call_dl_resolve;
5275 + up_write(&current->mm->mmap_sem);
5276 + if (vma)
5277 + kmem_cache_free(vm_area_cachep, vma);
5278 + goto emulate;
5279 + }
5280 +
5281 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5282 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5283 + up_write(&current->mm->mmap_sem);
5284 + if (vma)
5285 + kmem_cache_free(vm_area_cachep, vma);
5286 + return 1;
5287 + }
5288 +
5289 + if (pax_insert_vma(vma, call_dl_resolve)) {
5290 + up_write(&current->mm->mmap_sem);
5291 + kmem_cache_free(vm_area_cachep, vma);
5292 + return 1;
5293 + }
5294 +
5295 + current->mm->call_dl_resolve = call_dl_resolve;
5296 + up_write(&current->mm->mmap_sem);
5297 +
5298 +emulate:
5299 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5300 + regs->pc = call_dl_resolve;
5301 + regs->npc = addr+4;
5302 + return 3;
5303 + }
5304 +#endif
5305 +
5306 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5307 + if ((save & 0xFFC00000U) == 0x05000000U &&
5308 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5309 + nop == 0x01000000U)
5310 + {
5311 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5312 + regs->u_regs[UREG_G2] = addr + 4;
5313 + addr = (save & 0x003FFFFFU) << 10;
5314 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5315 + regs->pc = addr;
5316 + regs->npc = addr+4;
5317 + return 3;
5318 + }
5319 + }
5320 + } while (0);
5321 +
5322 + do { /* PaX: unpatched PLT emulation step 2 */
5323 + unsigned int save, call, nop;
5324 +
5325 + err = get_user(save, (unsigned int *)(regs->pc-4));
5326 + err |= get_user(call, (unsigned int *)regs->pc);
5327 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5328 + if (err)
5329 + break;
5330 +
5331 + if (save == 0x9DE3BFA8U &&
5332 + (call & 0xC0000000U) == 0x40000000U &&
5333 + nop == 0x01000000U)
5334 + {
5335 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5336 +
5337 + regs->u_regs[UREG_RETPC] = regs->pc;
5338 + regs->pc = dl_resolve;
5339 + regs->npc = dl_resolve+4;
5340 + return 3;
5341 + }
5342 + } while (0);
5343 +#endif
5344 +
5345 + return 1;
5346 +}
5347 +
5348 +void pax_report_insns(void *pc, void *sp)
5349 +{
5350 + unsigned long i;
5351 +
5352 + printk(KERN_ERR "PAX: bytes at PC: ");
5353 + for (i = 0; i < 8; i++) {
5354 + unsigned int c;
5355 + if (get_user(c, (unsigned int *)pc+i))
5356 + printk(KERN_CONT "???????? ");
5357 + else
5358 + printk(KERN_CONT "%08x ", c);
5359 + }
5360 + printk("\n");
5361 +}
5362 +#endif
5363 +
5364 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5365 unsigned long address)
5366 {
5367 @@ -231,6 +495,24 @@ good_area:
5368 if(!(vma->vm_flags & VM_WRITE))
5369 goto bad_area;
5370 } else {
5371 +
5372 +#ifdef CONFIG_PAX_PAGEEXEC
5373 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5374 + up_read(&mm->mmap_sem);
5375 + switch (pax_handle_fetch_fault(regs)) {
5376 +
5377 +#ifdef CONFIG_PAX_EMUPLT
5378 + case 2:
5379 + case 3:
5380 + return;
5381 +#endif
5382 +
5383 + }
5384 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5385 + do_group_exit(SIGKILL);
5386 + }
5387 +#endif
5388 +
5389 /* Allow reads even for write-only mappings */
5390 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5391 goto bad_area;
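
Most of the magic numbers in pax_handle_fetch_fault() are plain instruction decoding: the masks pick out sethi/jmpl/ba/call/nop opcodes, and expressions of the form ((insn | ~FIELD_MASK) ^ SIGN_BIT) + SIGN_BIT sign-extend an immediate field, for example the 13-bit simm13 of jmpl (SIGN_BIT = 0x1000) or, shifted left by 2 afterwards, the 22-bit disp22 of a branch. The handler then steers pc/npc to the decoded target (return 2) or into the dynamic linker's resolver (return 3), and pax_report_insns() dumps the opcode words when the fault is treated as an attack. The sign-extension idiom worked out for simm13, as a stand-alone sketch:

    #include <assert.h>

    /* Sign-extend the low 13 bits of a sparc instruction word the way the
     * emulation above does: OR sets every bit above the field, XOR flips the
     * sign bit, and the add either carries the high bits away (positive) or
     * completes the two's-complement extension (negative). */
    static unsigned int simm13_extend(unsigned int insn)
    {
            return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
    }

    int main(void)
    {
            assert(simm13_extend(0x81C06004U) == 0x00000004U);  /* jmpl ..., +4 */
            assert(simm13_extend(0x81C07FFCU) == 0xFFFFFFFCU);  /* jmpl ..., -4 */
            return 0;
    }
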
5392 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5393 --- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5394 +++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5395 @@ -20,6 +20,9 @@
5396 #include <linux/kprobes.h>
5397 #include <linux/kdebug.h>
5398 #include <linux/percpu.h>
5399 +#include <linux/slab.h>
5400 +#include <linux/pagemap.h>
5401 +#include <linux/compiler.h>
5402
5403 #include <asm/page.h>
5404 #include <asm/pgtable.h>
5405 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5406 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5407 regs->tpc);
5408 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5409 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5410 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5411 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5412 dump_stack();
5413 unhandled_fault(regs->tpc, current, regs);
5414 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5415 show_regs(regs);
5416 }
5417
5418 +#ifdef CONFIG_PAX_PAGEEXEC
5419 +#ifdef CONFIG_PAX_DLRESOLVE
5420 +static void pax_emuplt_close(struct vm_area_struct *vma)
5421 +{
5422 + vma->vm_mm->call_dl_resolve = 0UL;
5423 +}
5424 +
5425 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5426 +{
5427 + unsigned int *kaddr;
5428 +
5429 + vmf->page = alloc_page(GFP_HIGHUSER);
5430 + if (!vmf->page)
5431 + return VM_FAULT_OOM;
5432 +
5433 + kaddr = kmap(vmf->page);
5434 + memset(kaddr, 0, PAGE_SIZE);
5435 + kaddr[0] = 0x9DE3BFA8U; /* save */
5436 + flush_dcache_page(vmf->page);
5437 + kunmap(vmf->page);
5438 + return VM_FAULT_MAJOR;
5439 +}
5440 +
5441 +static const struct vm_operations_struct pax_vm_ops = {
5442 + .close = pax_emuplt_close,
5443 + .fault = pax_emuplt_fault
5444 +};
5445 +
5446 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5447 +{
5448 + int ret;
5449 +
5450 + vma->vm_mm = current->mm;
5451 + vma->vm_start = addr;
5452 + vma->vm_end = addr + PAGE_SIZE;
5453 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5454 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5455 + vma->vm_ops = &pax_vm_ops;
5456 +
5457 + ret = insert_vm_struct(current->mm, vma);
5458 + if (ret)
5459 + return ret;
5460 +
5461 + ++current->mm->total_vm;
5462 + return 0;
5463 +}
5464 +#endif
5465 +
5466 +/*
5467 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5468 + *
5469 + * returns 1 when task should be killed
5470 + * 2 when patched PLT trampoline was detected
5471 + * 3 when unpatched PLT trampoline was detected
5472 + */
5473 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5474 +{
5475 +
5476 +#ifdef CONFIG_PAX_EMUPLT
5477 + int err;
5478 +
5479 + do { /* PaX: patched PLT emulation #1 */
5480 + unsigned int sethi1, sethi2, jmpl;
5481 +
5482 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5483 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5484 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5485 +
5486 + if (err)
5487 + break;
5488 +
5489 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5490 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5491 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5492 + {
5493 + unsigned long addr;
5494 +
5495 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5496 + addr = regs->u_regs[UREG_G1];
5497 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5498 +
5499 + if (test_thread_flag(TIF_32BIT))
5500 + addr &= 0xFFFFFFFFUL;
5501 +
5502 + regs->tpc = addr;
5503 + regs->tnpc = addr+4;
5504 + return 2;
5505 + }
5506 + } while (0);
5507 +
5508 + { /* PaX: patched PLT emulation #2 */
5509 + unsigned int ba;
5510 +
5511 + err = get_user(ba, (unsigned int *)regs->tpc);
5512 +
5513 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5514 + unsigned long addr;
5515 +
5516 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5517 +
5518 + if (test_thread_flag(TIF_32BIT))
5519 + addr &= 0xFFFFFFFFUL;
5520 +
5521 + regs->tpc = addr;
5522 + regs->tnpc = addr+4;
5523 + return 2;
5524 + }
5525 + }
5526 +
5527 + do { /* PaX: patched PLT emulation #3 */
5528 + unsigned int sethi, jmpl, nop;
5529 +
5530 + err = get_user(sethi, (unsigned int *)regs->tpc);
5531 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5532 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5533 +
5534 + if (err)
5535 + break;
5536 +
5537 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5538 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5539 + nop == 0x01000000U)
5540 + {
5541 + unsigned long addr;
5542 +
5543 + addr = (sethi & 0x003FFFFFU) << 10;
5544 + regs->u_regs[UREG_G1] = addr;
5545 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5546 +
5547 + if (test_thread_flag(TIF_32BIT))
5548 + addr &= 0xFFFFFFFFUL;
5549 +
5550 + regs->tpc = addr;
5551 + regs->tnpc = addr+4;
5552 + return 2;
5553 + }
5554 + } while (0);
5555 +
5556 + do { /* PaX: patched PLT emulation #4 */
5557 + unsigned int sethi, mov1, call, mov2;
5558 +
5559 + err = get_user(sethi, (unsigned int *)regs->tpc);
5560 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5561 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5562 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5563 +
5564 + if (err)
5565 + break;
5566 +
5567 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5568 + mov1 == 0x8210000FU &&
5569 + (call & 0xC0000000U) == 0x40000000U &&
5570 + mov2 == 0x9E100001U)
5571 + {
5572 + unsigned long addr;
5573 +
5574 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5575 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5576 +
5577 + if (test_thread_flag(TIF_32BIT))
5578 + addr &= 0xFFFFFFFFUL;
5579 +
5580 + regs->tpc = addr;
5581 + regs->tnpc = addr+4;
5582 + return 2;
5583 + }
5584 + } while (0);
5585 +
5586 + do { /* PaX: patched PLT emulation #5 */
5587 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5588 +
5589 + err = get_user(sethi, (unsigned int *)regs->tpc);
5590 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5591 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5592 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5593 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5594 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5595 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5596 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5597 +
5598 + if (err)
5599 + break;
5600 +
5601 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5602 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5603 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5604 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5605 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5606 + sllx == 0x83287020U &&
5607 + jmpl == 0x81C04005U &&
5608 + nop == 0x01000000U)
5609 + {
5610 + unsigned long addr;
5611 +
5612 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5613 + regs->u_regs[UREG_G1] <<= 32;
5614 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5615 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5616 + regs->tpc = addr;
5617 + regs->tnpc = addr+4;
5618 + return 2;
5619 + }
5620 + } while (0);
5621 +
5622 + do { /* PaX: patched PLT emulation #6 */
5623 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5624 +
5625 + err = get_user(sethi, (unsigned int *)regs->tpc);
5626 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5627 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5628 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5629 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5630 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5631 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5632 +
5633 + if (err)
5634 + break;
5635 +
5636 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5637 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5638 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5639 + sllx == 0x83287020U &&
5640 + (or & 0xFFFFE000U) == 0x8A116000U &&
5641 + jmpl == 0x81C04005U &&
5642 + nop == 0x01000000U)
5643 + {
5644 + unsigned long addr;
5645 +
5646 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5647 + regs->u_regs[UREG_G1] <<= 32;
5648 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5649 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5650 + regs->tpc = addr;
5651 + regs->tnpc = addr+4;
5652 + return 2;
5653 + }
5654 + } while (0);
5655 +
5656 + do { /* PaX: unpatched PLT emulation step 1 */
5657 + unsigned int sethi, ba, nop;
5658 +
5659 + err = get_user(sethi, (unsigned int *)regs->tpc);
5660 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5661 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5662 +
5663 + if (err)
5664 + break;
5665 +
5666 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5667 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5668 + nop == 0x01000000U)
5669 + {
5670 + unsigned long addr;
5671 + unsigned int save, call;
5672 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5673 +
5674 + if ((ba & 0xFFC00000U) == 0x30800000U)
5675 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5676 + else
5677 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5678 +
5679 + if (test_thread_flag(TIF_32BIT))
5680 + addr &= 0xFFFFFFFFUL;
5681 +
5682 + err = get_user(save, (unsigned int *)addr);
5683 + err |= get_user(call, (unsigned int *)(addr+4));
5684 + err |= get_user(nop, (unsigned int *)(addr+8));
5685 + if (err)
5686 + break;
5687 +
5688 +#ifdef CONFIG_PAX_DLRESOLVE
5689 + if (save == 0x9DE3BFA8U &&
5690 + (call & 0xC0000000U) == 0x40000000U &&
5691 + nop == 0x01000000U)
5692 + {
5693 + struct vm_area_struct *vma;
5694 + unsigned long call_dl_resolve;
5695 +
5696 + down_read(&current->mm->mmap_sem);
5697 + call_dl_resolve = current->mm->call_dl_resolve;
5698 + up_read(&current->mm->mmap_sem);
5699 + if (likely(call_dl_resolve))
5700 + goto emulate;
5701 +
5702 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5703 +
5704 + down_write(&current->mm->mmap_sem);
5705 + if (current->mm->call_dl_resolve) {
5706 + call_dl_resolve = current->mm->call_dl_resolve;
5707 + up_write(&current->mm->mmap_sem);
5708 + if (vma)
5709 + kmem_cache_free(vm_area_cachep, vma);
5710 + goto emulate;
5711 + }
5712 +
5713 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5714 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5715 + up_write(&current->mm->mmap_sem);
5716 + if (vma)
5717 + kmem_cache_free(vm_area_cachep, vma);
5718 + return 1;
5719 + }
5720 +
5721 + if (pax_insert_vma(vma, call_dl_resolve)) {
5722 + up_write(&current->mm->mmap_sem);
5723 + kmem_cache_free(vm_area_cachep, vma);
5724 + return 1;
5725 + }
5726 +
5727 + current->mm->call_dl_resolve = call_dl_resolve;
5728 + up_write(&current->mm->mmap_sem);
5729 +
5730 +emulate:
5731 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5732 + regs->tpc = call_dl_resolve;
5733 + regs->tnpc = addr+4;
5734 + return 3;
5735 + }
5736 +#endif
5737 +
5738 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5739 + if ((save & 0xFFC00000U) == 0x05000000U &&
5740 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5741 + nop == 0x01000000U)
5742 + {
5743 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5744 + regs->u_regs[UREG_G2] = addr + 4;
5745 + addr = (save & 0x003FFFFFU) << 10;
5746 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5747 +
5748 + if (test_thread_flag(TIF_32BIT))
5749 + addr &= 0xFFFFFFFFUL;
5750 +
5751 + regs->tpc = addr;
5752 + regs->tnpc = addr+4;
5753 + return 3;
5754 + }
5755 +
5756 + /* PaX: 64-bit PLT stub */
5757 + err = get_user(sethi1, (unsigned int *)addr);
5758 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5759 + err |= get_user(or1, (unsigned int *)(addr+8));
5760 + err |= get_user(or2, (unsigned int *)(addr+12));
5761 + err |= get_user(sllx, (unsigned int *)(addr+16));
5762 + err |= get_user(add, (unsigned int *)(addr+20));
5763 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5764 + err |= get_user(nop, (unsigned int *)(addr+28));
5765 + if (err)
5766 + break;
5767 +
5768 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5769 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5770 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5771 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5772 + sllx == 0x89293020U &&
5773 + add == 0x8A010005U &&
5774 + jmpl == 0x89C14000U &&
5775 + nop == 0x01000000U)
5776 + {
5777 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5778 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5779 + regs->u_regs[UREG_G4] <<= 32;
5780 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5781 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5782 + regs->u_regs[UREG_G4] = addr + 24;
5783 + addr = regs->u_regs[UREG_G5];
5784 + regs->tpc = addr;
5785 + regs->tnpc = addr+4;
5786 + return 3;
5787 + }
5788 + }
5789 + } while (0);
5790 +
5791 +#ifdef CONFIG_PAX_DLRESOLVE
5792 + do { /* PaX: unpatched PLT emulation step 2 */
5793 + unsigned int save, call, nop;
5794 +
5795 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5796 + err |= get_user(call, (unsigned int *)regs->tpc);
5797 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5798 + if (err)
5799 + break;
5800 +
5801 + if (save == 0x9DE3BFA8U &&
5802 + (call & 0xC0000000U) == 0x40000000U &&
5803 + nop == 0x01000000U)
5804 + {
5805 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5806 +
5807 + if (test_thread_flag(TIF_32BIT))
5808 + dl_resolve &= 0xFFFFFFFFUL;
5809 +
5810 + regs->u_regs[UREG_RETPC] = regs->tpc;
5811 + regs->tpc = dl_resolve;
5812 + regs->tnpc = dl_resolve+4;
5813 + return 3;
5814 + }
5815 + } while (0);
5816 +#endif
5817 +
5818 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5819 + unsigned int sethi, ba, nop;
5820 +
5821 + err = get_user(sethi, (unsigned int *)regs->tpc);
5822 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5823 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5824 +
5825 + if (err)
5826 + break;
5827 +
5828 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5829 + (ba & 0xFFF00000U) == 0x30600000U &&
5830 + nop == 0x01000000U)
5831 + {
5832 + unsigned long addr;
5833 +
5834 + addr = (sethi & 0x003FFFFFU) << 10;
5835 + regs->u_regs[UREG_G1] = addr;
5836 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5837 +
5838 + if (test_thread_flag(TIF_32BIT))
5839 + addr &= 0xFFFFFFFFUL;
5840 +
5841 + regs->tpc = addr;
5842 + regs->tnpc = addr+4;
5843 + return 2;
5844 + }
5845 + } while (0);
5846 +
5847 +#endif
5848 +
5849 + return 1;
5850 +}
5851 +
5852 +void pax_report_insns(void *pc, void *sp)
5853 +{
5854 + unsigned long i;
5855 +
5856 + printk(KERN_ERR "PAX: bytes at PC: ");
5857 + for (i = 0; i < 8; i++) {
5858 + unsigned int c;
5859 + if (get_user(c, (unsigned int *)pc+i))
5860 + printk(KERN_CONT "???????? ");
5861 + else
5862 + printk(KERN_CONT "%08x ", c);
5863 + }
5864 + printk("\n");
5865 +}
5866 +#endif
5867 +
5868 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5869 {
5870 struct mm_struct *mm = current->mm;
5871 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5872 if (!vma)
5873 goto bad_area;
5874
5875 +#ifdef CONFIG_PAX_PAGEEXEC
5876 + /* PaX: detect ITLB misses on non-exec pages */
5877 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5878 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5879 + {
5880 + if (address != regs->tpc)
5881 + goto good_area;
5882 +
5883 + up_read(&mm->mmap_sem);
5884 + switch (pax_handle_fetch_fault(regs)) {
5885 +
5886 +#ifdef CONFIG_PAX_EMUPLT
5887 + case 2:
5888 + case 3:
5889 + return;
5890 +#endif
5891 +
5892 + }
5893 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5894 + do_group_exit(SIGKILL);
5895 + }
5896 +#endif
5897 +
5898 /* Pure DTLB misses do not tell us whether the fault causing
5899 * load/store/atomic was a write or not, it only says that there
5900 * was no match. So in such a case we (carefully) read the
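
The 64-bit handler repeats the same decoding with 64-bit constants and two additions: every computed target is masked to the low 4 GB when the faulting task has TIF_32BIT set, and the extra patterns (#4 through #7 plus the 64-bit PLT stub) cover the sequences 64-bit toolchains emit. The PAGEEXEC hook added to do_sparc64_fault() only engages on instruction-TLB faults (FAULT_CODE_ITLB) against non-VM_EXEC mappings, and only when the faulting address is the PC itself; everything else takes the normal path. The branch-target computation used by emulation #2, as a sketch:

    #include <assert.h>

    /* Sketch: target of a sparc branch-always whose opcode matches the
     * 0x30800000 pattern tested above; the low 22 bits are disp22,
     * sign-extended and scaled by the 4-byte instruction size. */
    static unsigned long ba_target(unsigned long tpc, unsigned int ba, int compat)
    {
            unsigned long addr = tpc +
                    ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);

            if (compat)
                    addr &= 0xFFFFFFFFUL;   /* a TIF_32BIT task's PC stays below 4 GB */
            return addr;
    }

    int main(void)
    {
            assert(ba_target(0x10000UL, 0x30800001U, 0) == 0x10004UL);  /* disp22 = +1 */
            assert(ba_target(0x10000UL, 0x30BFFFFFU, 0) == 0x0FFFCUL);  /* disp22 = -1 */
            return 0;
    }
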
5901 diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
5902 --- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5903 +++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5904 @@ -69,7 +69,7 @@ full_search:
5905 }
5906 return -ENOMEM;
5907 }
5908 - if (likely(!vma || addr + len <= vma->vm_start)) {
5909 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5910 /*
5911 * Remember the place where we stopped the search:
5912 */
5913 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5914 /* make sure it can fit in the remaining address space */
5915 if (likely(addr > len)) {
5916 vma = find_vma(mm, addr-len);
5917 - if (!vma || addr <= vma->vm_start) {
5918 + if (check_heap_stack_gap(vma, addr - len, len)) {
5919 /* remember the address as a hint for next time */
5920 return (mm->free_area_cache = addr-len);
5921 }
5922 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5923 if (unlikely(mm->mmap_base < len))
5924 goto bottomup;
5925
5926 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5927 + addr = mm->mmap_base - len;
5928
5929 do {
5930 + addr &= HPAGE_MASK;
5931 /*
5932 * Lookup failure means no vma is above this address,
5933 * else if new region fits below vma->vm_start,
5934 * return with success:
5935 */
5936 vma = find_vma(mm, addr);
5937 - if (likely(!vma || addr+len <= vma->vm_start)) {
5938 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5939 /* remember the address as a hint for next time */
5940 return (mm->free_area_cache = addr);
5941 }
5942 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5943 mm->cached_hole_size = vma->vm_start - addr;
5944
5945 /* try just below the current vma->vm_start */
5946 - addr = (vma->vm_start-len) & HPAGE_MASK;
5947 - } while (likely(len < vma->vm_start));
5948 + addr = skip_heap_stack_gap(vma, len);
5949 + } while (!IS_ERR_VALUE(addr));
5950
5951 bottomup:
5952 /*
5953 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5954 if (addr) {
5955 addr = ALIGN(addr, HPAGE_SIZE);
5956 vma = find_vma(mm, addr);
5957 - if (task_size - len >= addr &&
5958 - (!vma || addr + len <= vma->vm_start))
5959 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5960 return addr;
5961 }
5962 if (mm->get_unmapped_area == arch_get_unmapped_area)
5963 diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
5964 --- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5965 +++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5966 @@ -317,6 +317,9 @@ extern void device_scan(void);
5967 pgprot_t PAGE_SHARED __read_mostly;
5968 EXPORT_SYMBOL(PAGE_SHARED);
5969
5970 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5971 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5972 +
5973 void __init paging_init(void)
5974 {
5975 switch(sparc_cpu_model) {
5976 @@ -345,17 +348,17 @@ void __init paging_init(void)
5977
5978 /* Initialize the protection map with non-constant, MMU dependent values. */
5979 protection_map[0] = PAGE_NONE;
5980 - protection_map[1] = PAGE_READONLY;
5981 - protection_map[2] = PAGE_COPY;
5982 - protection_map[3] = PAGE_COPY;
5983 + protection_map[1] = PAGE_READONLY_NOEXEC;
5984 + protection_map[2] = PAGE_COPY_NOEXEC;
5985 + protection_map[3] = PAGE_COPY_NOEXEC;
5986 protection_map[4] = PAGE_READONLY;
5987 protection_map[5] = PAGE_READONLY;
5988 protection_map[6] = PAGE_COPY;
5989 protection_map[7] = PAGE_COPY;
5990 protection_map[8] = PAGE_NONE;
5991 - protection_map[9] = PAGE_READONLY;
5992 - protection_map[10] = PAGE_SHARED;
5993 - protection_map[11] = PAGE_SHARED;
5994 + protection_map[9] = PAGE_READONLY_NOEXEC;
5995 + protection_map[10] = PAGE_SHARED_NOEXEC;
5996 + protection_map[11] = PAGE_SHARED_NOEXEC;
5997 protection_map[12] = PAGE_READONLY;
5998 protection_map[13] = PAGE_READONLY;
5999 protection_map[14] = PAGE_SHARED;
6000 diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6001 --- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6002 +++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6003 @@ -2,7 +2,7 @@
6004 #
6005
6006 asflags-y := -ansi
6007 -ccflags-y := -Werror
6008 +#ccflags-y := -Werror
6009
6010 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6011 obj-y += fault_$(BITS).o
6012 diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6013 --- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6014 +++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6015 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6016 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6017 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6018 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6019 +
6020 +#ifdef CONFIG_PAX_PAGEEXEC
6021 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6022 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6023 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6024 +#endif
6025 +
6026 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6027 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6028
6029 diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6030 --- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6031 +++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6032 @@ -23,6 +23,7 @@ enum km_type {
6033 KM_IRQ1,
6034 KM_SOFTIRQ0,
6035 KM_SOFTIRQ1,
6036 + KM_CLEARPAGE,
6037 KM_TYPE_NR
6038 };
6039
6040 diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6041 --- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6042 +++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6043 @@ -14,6 +14,9 @@
6044 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6045 #define PAGE_MASK (~(PAGE_SIZE-1))
6046
6047 +#define ktla_ktva(addr) (addr)
6048 +#define ktva_ktla(addr) (addr)
6049 +
6050 #ifndef __ASSEMBLY__
6051
6052 struct page;
6053 diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6054 --- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6055 +++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6056 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6057 return 2;
6058 }
6059
6060 -/*
6061 - * Only x86 and x86_64 have an arch_align_stack().
6062 - * All other arches have "#define arch_align_stack(x) (x)"
6063 - * in their asm/system.h
6064 - * As this is included in UML from asm-um/system-generic.h,
6065 - * we can use it to behave as the subarch does.
6066 - */
6067 -#ifndef arch_align_stack
6068 -unsigned long arch_align_stack(unsigned long sp)
6069 -{
6070 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6071 - sp -= get_random_int() % 8192;
6072 - return sp & ~0xf;
6073 -}
6074 -#endif
6075 -
6076 unsigned long get_wchan(struct task_struct *p)
6077 {
6078 unsigned long stack_page, sp, ip;
6079 diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6080 --- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6081 +++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6082 @@ -11,6 +11,21 @@
6083 #include "asm/uaccess.h"
6084 #include "asm/unistd.h"
6085
6086 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6087 +{
6088 + unsigned long pax_task_size = TASK_SIZE;
6089 +
6090 +#ifdef CONFIG_PAX_SEGMEXEC
6091 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6092 + pax_task_size = SEGMEXEC_TASK_SIZE;
6093 +#endif
6094 +
6095 + if (len > pax_task_size || addr > pax_task_size - len)
6096 + return -EINVAL;
6097 +
6098 + return 0;
6099 +}
6100 +
6101 /*
6102 * Perform the select(nd, in, out, ex, tv) and mmap() system
6103 * calls. Linux/i386 didn't use to be able to handle more than
6104 diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6105 --- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6106 +++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6107 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6108 u8 v;
6109 const u32 *p = (const u32 *)addr;
6110
6111 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6112 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6113 return v;
6114 }
6115
6116 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6117
6118 static inline void set_bit(int nr, void *addr)
6119 {
6120 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6121 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6122 }
6123
6124 #endif /* BOOT_BITOPS_H */
6125 diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6126 --- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6127 +++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6128 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6129 static inline u16 ds(void)
6130 {
6131 u16 seg;
6132 - asm("movw %%ds,%0" : "=rm" (seg));
6133 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6134 return seg;
6135 }
6136
6137 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6138 static inline int memcmp(const void *s1, const void *s2, size_t len)
6139 {
6140 u8 diff;
6141 - asm("repe; cmpsb; setnz %0"
6142 + asm volatile("repe; cmpsb; setnz %0"
6143 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6144 return diff;
6145 }
6146 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6147 --- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6148 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6149 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6150 notl %eax
6151 andl %eax, %ebx
6152 #else
6153 - movl $LOAD_PHYSICAL_ADDR, %ebx
6154 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6155 #endif
6156
6157 /* Target address to relocate to for decompression */
6158 @@ -149,7 +149,7 @@ relocated:
6159 * and where it was actually loaded.
6160 */
6161 movl %ebp, %ebx
6162 - subl $LOAD_PHYSICAL_ADDR, %ebx
6163 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6164 jz 2f /* Nothing to be done if loaded at compiled addr. */
6165 /*
6166 * Process relocations.
6167 @@ -157,8 +157,7 @@ relocated:
6168
6169 1: subl $4, %edi
6170 movl (%edi), %ecx
6171 - testl %ecx, %ecx
6172 - jz 2f
6173 + jecxz 2f
6174 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6175 jmp 1b
6176 2:
6177 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6178 --- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6179 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6180 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6181 notl %eax
6182 andl %eax, %ebx
6183 #else
6184 - movl $LOAD_PHYSICAL_ADDR, %ebx
6185 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6186 #endif
6187
6188 /* Target address to relocate to for decompression */
6189 @@ -183,7 +183,7 @@ no_longmode:
6190 hlt
6191 jmp 1b
6192
6193 -#include "../../kernel/verify_cpu_64.S"
6194 +#include "../../kernel/verify_cpu.S"
6195
6196 /*
6197 * Be careful here startup_64 needs to be at a predictable
6198 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6199 notq %rax
6200 andq %rax, %rbp
6201 #else
6202 - movq $LOAD_PHYSICAL_ADDR, %rbp
6203 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6204 #endif
6205
6206 /* Target address to relocate to for decompression */
6207 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6208 --- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6209 +++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6210 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6211 KBUILD_CFLAGS += $(cflags-y)
6212 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6213 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6214 +ifdef CONSTIFY_PLUGIN
6215 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6216 +endif
6217
6218 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6219 GCOV_PROFILE := n
6220 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6221 --- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6222 +++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6223 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6224 case PT_LOAD:
6225 #ifdef CONFIG_RELOCATABLE
6226 dest = output;
6227 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6228 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6229 #else
6230 dest = (void *)(phdr->p_paddr);
6231 #endif
6232 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6233 error("Destination address too large");
6234 #endif
6235 #ifndef CONFIG_RELOCATABLE
6236 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6237 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6238 error("Wrong destination address");
6239 #endif
6240
6241 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6242 --- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6243 +++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6244 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6245
6246 offs = (olen > ilen) ? olen - ilen : 0;
6247 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6248 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6249 + offs += 64*1024; /* Add 64K bytes slack */
6250 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6251
6252 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6253 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6254 --- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6255 +++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6256 @@ -10,8 +10,11 @@
6257 #define USE_BSD
6258 #include <endian.h>
6259
6260 +#include "../../../../include/linux/autoconf.h"
6261 +
6262 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6263 static Elf32_Ehdr ehdr;
6264 +static Elf32_Phdr *phdr;
6265 static unsigned long reloc_count, reloc_idx;
6266 static unsigned long *relocs;
6267
6268 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6269
6270 static int is_safe_abs_reloc(const char* sym_name)
6271 {
6272 - int i;
6273 + unsigned int i;
6274
6275 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6276 if (!strcmp(sym_name, safe_abs_relocs[i]))
6277 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6278 }
6279 }
6280
6281 +static void read_phdrs(FILE *fp)
6282 +{
6283 + unsigned int i;
6284 +
6285 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6286 + if (!phdr) {
6287 + die("Unable to allocate %d program headers\n",
6288 + ehdr.e_phnum);
6289 + }
6290 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6291 + die("Seek to %d failed: %s\n",
6292 + ehdr.e_phoff, strerror(errno));
6293 + }
6294 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6295 + die("Cannot read ELF program headers: %s\n",
6296 + strerror(errno));
6297 + }
6298 + for(i = 0; i < ehdr.e_phnum; i++) {
6299 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6300 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6301 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6302 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6303 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6304 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6305 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6306 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6307 + }
6308 +
6309 +}
6310 +
6311 static void read_shdrs(FILE *fp)
6312 {
6313 - int i;
6314 + unsigned int i;
6315 Elf32_Shdr shdr;
6316
6317 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6318 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6319
6320 static void read_strtabs(FILE *fp)
6321 {
6322 - int i;
6323 + unsigned int i;
6324 for (i = 0; i < ehdr.e_shnum; i++) {
6325 struct section *sec = &secs[i];
6326 if (sec->shdr.sh_type != SHT_STRTAB) {
6327 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6328
6329 static void read_symtabs(FILE *fp)
6330 {
6331 - int i,j;
6332 + unsigned int i,j;
6333 for (i = 0; i < ehdr.e_shnum; i++) {
6334 struct section *sec = &secs[i];
6335 if (sec->shdr.sh_type != SHT_SYMTAB) {
6336 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6337
6338 static void read_relocs(FILE *fp)
6339 {
6340 - int i,j;
6341 + unsigned int i,j;
6342 + uint32_t base;
6343 +
6344 for (i = 0; i < ehdr.e_shnum; i++) {
6345 struct section *sec = &secs[i];
6346 if (sec->shdr.sh_type != SHT_REL) {
6347 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6348 die("Cannot read symbol table: %s\n",
6349 strerror(errno));
6350 }
6351 + base = 0;
6352 + for (j = 0; j < ehdr.e_phnum; j++) {
6353 + if (phdr[j].p_type != PT_LOAD )
6354 + continue;
6355 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6356 + continue;
6357 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6358 + break;
6359 + }
6360 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6361 Elf32_Rel *rel = &sec->reltab[j];
6362 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6363 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6364 rel->r_info = elf32_to_cpu(rel->r_info);
6365 }
6366 }
6367 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6368
6369 static void print_absolute_symbols(void)
6370 {
6371 - int i;
6372 + unsigned int i;
6373 printf("Absolute symbols\n");
6374 printf(" Num: Value Size Type Bind Visibility Name\n");
6375 for (i = 0; i < ehdr.e_shnum; i++) {
6376 struct section *sec = &secs[i];
6377 char *sym_strtab;
6378 Elf32_Sym *sh_symtab;
6379 - int j;
6380 + unsigned int j;
6381
6382 if (sec->shdr.sh_type != SHT_SYMTAB) {
6383 continue;
6384 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6385
6386 static void print_absolute_relocs(void)
6387 {
6388 - int i, printed = 0;
6389 + unsigned int i, printed = 0;
6390
6391 for (i = 0; i < ehdr.e_shnum; i++) {
6392 struct section *sec = &secs[i];
6393 struct section *sec_applies, *sec_symtab;
6394 char *sym_strtab;
6395 Elf32_Sym *sh_symtab;
6396 - int j;
6397 + unsigned int j;
6398 if (sec->shdr.sh_type != SHT_REL) {
6399 continue;
6400 }
6401 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6402
6403 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6404 {
6405 - int i;
6406 + unsigned int i;
6407 /* Walk through the relocations */
6408 for (i = 0; i < ehdr.e_shnum; i++) {
6409 char *sym_strtab;
6410 Elf32_Sym *sh_symtab;
6411 struct section *sec_applies, *sec_symtab;
6412 - int j;
6413 + unsigned int j;
6414 struct section *sec = &secs[i];
6415
6416 if (sec->shdr.sh_type != SHT_REL) {
6417 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6418 if (sym->st_shndx == SHN_ABS) {
6419 continue;
6420 }
6421 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6422 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6423 + continue;
6424 +
6425 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6426 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6427 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6428 + continue;
6429 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6430 + continue;
6431 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6432 + continue;
6433 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6434 + continue;
6435 +#endif
6436 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6437 /*
6438 * NONE can be ignored and and PC relative
6439 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6440
6441 static void emit_relocs(int as_text)
6442 {
6443 - int i;
6444 + unsigned int i;
6445 /* Count how many relocations I have and allocate space for them. */
6446 reloc_count = 0;
6447 walk_relocs(count_reloc);
6448 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6449 fname, strerror(errno));
6450 }
6451 read_ehdr(fp);
6452 + read_phdrs(fp);
6453 read_shdrs(fp);
6454 read_strtabs(fp);
6455 read_symtabs(fp);
6456 diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6457 --- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6458 +++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6459 @@ -74,7 +74,7 @@ static int has_fpu(void)
6460 u16 fcw = -1, fsw = -1;
6461 u32 cr0;
6462
6463 - asm("movl %%cr0,%0" : "=r" (cr0));
6464 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6465 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6466 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6467 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6468 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6469 {
6470 u32 f0, f1;
6471
6472 - asm("pushfl ; "
6473 + asm volatile("pushfl ; "
6474 "pushfl ; "
6475 "popl %0 ; "
6476 "movl %0,%1 ; "
6477 @@ -115,7 +115,7 @@ static void get_flags(void)
6478 set_bit(X86_FEATURE_FPU, cpu.flags);
6479
6480 if (has_eflag(X86_EFLAGS_ID)) {
6481 - asm("cpuid"
6482 + asm volatile("cpuid"
6483 : "=a" (max_intel_level),
6484 "=b" (cpu_vendor[0]),
6485 "=d" (cpu_vendor[1]),
6486 @@ -124,7 +124,7 @@ static void get_flags(void)
6487
6488 if (max_intel_level >= 0x00000001 &&
6489 max_intel_level <= 0x0000ffff) {
6490 - asm("cpuid"
6491 + asm volatile("cpuid"
6492 : "=a" (tfms),
6493 "=c" (cpu.flags[4]),
6494 "=d" (cpu.flags[0])
6495 @@ -136,7 +136,7 @@ static void get_flags(void)
6496 cpu.model += ((tfms >> 16) & 0xf) << 4;
6497 }
6498
6499 - asm("cpuid"
6500 + asm volatile("cpuid"
6501 : "=a" (max_amd_level)
6502 : "a" (0x80000000)
6503 : "ebx", "ecx", "edx");
6504 @@ -144,7 +144,7 @@ static void get_flags(void)
6505 if (max_amd_level >= 0x80000001 &&
6506 max_amd_level <= 0x8000ffff) {
6507 u32 eax = 0x80000001;
6508 - asm("cpuid"
6509 + asm volatile("cpuid"
6510 : "+a" (eax),
6511 "=c" (cpu.flags[6]),
6512 "=d" (cpu.flags[1])
6513 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6514 u32 ecx = MSR_K7_HWCR;
6515 u32 eax, edx;
6516
6517 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6518 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6519 eax &= ~(1 << 15);
6520 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6521 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6522
6523 get_flags(); /* Make sure it really did something */
6524 err = check_flags();
6525 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6526 u32 ecx = MSR_VIA_FCR;
6527 u32 eax, edx;
6528
6529 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6530 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6531 eax |= (1<<1)|(1<<7);
6532 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6533 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6534
6535 set_bit(X86_FEATURE_CX8, cpu.flags);
6536 err = check_flags();
6537 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6538 u32 eax, edx;
6539 u32 level = 1;
6540
6541 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6542 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6543 - asm("cpuid"
6544 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6545 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6546 + asm volatile("cpuid"
6547 : "+a" (level), "=d" (cpu.flags[0])
6548 : : "ecx", "ebx");
6549 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6550 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6551
6552 err = check_flags();
6553 }
6554 diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6555 --- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6556 +++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6557 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6558 # single linked list of
6559 # struct setup_data
6560
6561 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6562 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6563
6564 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6565 #define VO_INIT_SIZE (VO__end - VO__text)
6566 diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6567 --- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6568 +++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6569 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6570 $(call cc-option, -fno-stack-protector) \
6571 $(call cc-option, -mpreferred-stack-boundary=2)
6572 KBUILD_CFLAGS += $(call cc-option, -m32)
6573 +ifdef CONSTIFY_PLUGIN
6574 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6575 +endif
6576 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6577 GCOV_PROFILE := n
6578
6579 diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6580 --- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6581 +++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6582 @@ -19,7 +19,7 @@
6583
6584 static int detect_memory_e820(void)
6585 {
6586 - int count = 0;
6587 + unsigned int count = 0;
6588 struct biosregs ireg, oreg;
6589 struct e820entry *desc = boot_params.e820_map;
6590 static struct e820entry buf; /* static so it is zeroed */
6591 diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6592 --- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6593 +++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6594 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6595 static unsigned int get_entry(void)
6596 {
6597 char entry_buf[4];
6598 - int i, len = 0;
6599 + unsigned int i, len = 0;
6600 int key;
6601 unsigned int v;
6602
6603 diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6604 --- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6605 +++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6606 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6607
6608 boot_params.screen_info.vesapm_seg = oreg.es;
6609 boot_params.screen_info.vesapm_off = oreg.di;
6610 + boot_params.screen_info.vesapm_size = oreg.cx;
6611 }
6612
6613 /*
6614 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6615 --- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6616 +++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6617 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6618 unsigned long dump_start, dump_size;
6619 struct user32 dump;
6620
6621 + memset(&dump, 0, sizeof(dump));
6622 +
6623 fs = get_fs();
6624 set_fs(KERNEL_DS);
6625 has_dumped = 1;
6626 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6627 dump_size = dump.u_ssize << PAGE_SHIFT;
6628 DUMP_WRITE(dump_start, dump_size);
6629 }
6630 - /*
6631 - * Finally dump the task struct. Not be used by gdb, but
6632 - * could be useful
6633 - */
6634 - set_fs(KERNEL_DS);
6635 - DUMP_WRITE(current, sizeof(*current));
6636 end_coredump:
6637 set_fs(fs);
6638 return has_dumped;
6639 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6640 --- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6641 +++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6642 @@ -13,6 +13,7 @@
6643 #include <asm/thread_info.h>
6644 #include <asm/segment.h>
6645 #include <asm/irqflags.h>
6646 +#include <asm/pgtable.h>
6647 #include <linux/linkage.h>
6648
6649 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6650 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6651 ENDPROC(native_irq_enable_sysexit)
6652 #endif
6653
6654 + .macro pax_enter_kernel_user
6655 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6656 + call pax_enter_kernel_user
6657 +#endif
6658 + .endm
6659 +
6660 + .macro pax_exit_kernel_user
6661 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6662 + call pax_exit_kernel_user
6663 +#endif
6664 +#ifdef CONFIG_PAX_RANDKSTACK
6665 + pushq %rax
6666 + call pax_randomize_kstack
6667 + popq %rax
6668 +#endif
6669 + pax_erase_kstack
6670 + .endm
6671 +
6672 +.macro pax_erase_kstack
6673 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6674 + call pax_erase_kstack
6675 +#endif
6676 +.endm
6677 +
6678 /*
6679 * 32bit SYSENTER instruction entry.
6680 *
6681 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6682 CFI_REGISTER rsp,rbp
6683 SWAPGS_UNSAFE_STACK
6684 movq PER_CPU_VAR(kernel_stack), %rsp
6685 - addq $(KERNEL_STACK_OFFSET),%rsp
6686 + pax_enter_kernel_user
6687 /*
6688 * No need to follow this irqs on/off section: the syscall
6689 * disabled irqs, here we enable it straight after entry:
6690 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6691 pushfq
6692 CFI_ADJUST_CFA_OFFSET 8
6693 /*CFI_REL_OFFSET rflags,0*/
6694 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6695 + GET_THREAD_INFO(%r10)
6696 + movl TI_sysenter_return(%r10), %r10d
6697 CFI_REGISTER rip,r10
6698 pushq $__USER32_CS
6699 CFI_ADJUST_CFA_OFFSET 8
6700 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6701 SAVE_ARGS 0,0,1
6702 /* no need to do an access_ok check here because rbp has been
6703 32bit zero extended */
6704 +
6705 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6706 + mov $PAX_USER_SHADOW_BASE,%r10
6707 + add %r10,%rbp
6708 +#endif
6709 +
6710 1: movl (%rbp),%ebp
6711 .section __ex_table,"a"
6712 .quad 1b,ia32_badarg
6713 @@ -172,6 +204,7 @@ sysenter_dispatch:
6714 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6715 jnz sysexit_audit
6716 sysexit_from_sys_call:
6717 + pax_exit_kernel_user
6718 andl $~TS_COMPAT,TI_status(%r10)
6719 /* clear IF, that popfq doesn't enable interrupts early */
6720 andl $~0x200,EFLAGS-R11(%rsp)
6721 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6722 movl %eax,%esi /* 2nd arg: syscall number */
6723 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6724 call audit_syscall_entry
6725 +
6726 + pax_erase_kstack
6727 +
6728 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6729 cmpq $(IA32_NR_syscalls-1),%rax
6730 ja ia32_badsys
6731 @@ -252,6 +288,9 @@ sysenter_tracesys:
6732 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6733 movq %rsp,%rdi /* &pt_regs -> arg1 */
6734 call syscall_trace_enter
6735 +
6736 + pax_erase_kstack
6737 +
6738 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6739 RESTORE_REST
6740 cmpq $(IA32_NR_syscalls-1),%rax
6741 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6742 ENTRY(ia32_cstar_target)
6743 CFI_STARTPROC32 simple
6744 CFI_SIGNAL_FRAME
6745 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6746 + CFI_DEF_CFA rsp,0
6747 CFI_REGISTER rip,rcx
6748 /*CFI_REGISTER rflags,r11*/
6749 SWAPGS_UNSAFE_STACK
6750 movl %esp,%r8d
6751 CFI_REGISTER rsp,r8
6752 movq PER_CPU_VAR(kernel_stack),%rsp
6753 +
6754 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6755 + pax_enter_kernel_user
6756 +#endif
6757 +
6758 /*
6759 * No need to follow this irqs on/off section: the syscall
6760 * disabled irqs and here we enable it straight after entry:
6761 */
6762 ENABLE_INTERRUPTS(CLBR_NONE)
6763 - SAVE_ARGS 8,1,1
6764 + SAVE_ARGS 8*6,1,1
6765 movl %eax,%eax /* zero extension */
6766 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6767 movq %rcx,RIP-ARGOFFSET(%rsp)
6768 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6769 /* no need to do an access_ok check here because r8 has been
6770 32bit zero extended */
6771 /* hardware stack frame is complete now */
6772 +
6773 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6774 + mov $PAX_USER_SHADOW_BASE,%r10
6775 + add %r10,%r8
6776 +#endif
6777 +
6778 1: movl (%r8),%r9d
6779 .section __ex_table,"a"
6780 .quad 1b,ia32_badarg
6781 @@ -333,6 +383,7 @@ cstar_dispatch:
6782 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6783 jnz sysretl_audit
6784 sysretl_from_sys_call:
6785 + pax_exit_kernel_user
6786 andl $~TS_COMPAT,TI_status(%r10)
6787 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6788 movl RIP-ARGOFFSET(%rsp),%ecx
6789 @@ -370,6 +421,9 @@ cstar_tracesys:
6790 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6791 movq %rsp,%rdi /* &pt_regs -> arg1 */
6792 call syscall_trace_enter
6793 +
6794 + pax_erase_kstack
6795 +
6796 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6797 RESTORE_REST
6798 xchgl %ebp,%r9d
6799 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6800 CFI_REL_OFFSET rip,RIP-RIP
6801 PARAVIRT_ADJUST_EXCEPTION_FRAME
6802 SWAPGS
6803 + pax_enter_kernel_user
6804 /*
6805 * No need to follow this irqs on/off section: the syscall
6806 * disabled irqs and here we enable it straight after entry:
6807 @@ -448,6 +503,9 @@ ia32_tracesys:
6808 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6809 movq %rsp,%rdi /* &pt_regs -> arg1 */
6810 call syscall_trace_enter
6811 +
6812 + pax_erase_kstack
6813 +
6814 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6815 RESTORE_REST
6816 cmpq $(IA32_NR_syscalls-1),%rax
6817 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6818 --- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6819 +++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6820 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6821 sp -= frame_size;
6822 /* Align the stack pointer according to the i386 ABI,
6823 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6824 - sp = ((sp + 4) & -16ul) - 4;
6825 + sp = ((sp - 12) & -16ul) - 4;
6826 return (void __user *) sp;
6827 }
6828
6829 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6830 * These are actually not used anymore, but left because some
6831 * gdb versions depend on them as a marker.
6832 */
6833 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6834 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6835 } put_user_catch(err);
6836
6837 if (err)
6838 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6839 0xb8,
6840 __NR_ia32_rt_sigreturn,
6841 0x80cd,
6842 - 0,
6843 + 0
6844 };
6845
6846 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6847 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6848
6849 if (ka->sa.sa_flags & SA_RESTORER)
6850 restorer = ka->sa.sa_restorer;
6851 + else if (current->mm->context.vdso)
6852 + /* Return stub is in 32bit vsyscall page */
6853 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6854 else
6855 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6856 - rt_sigreturn);
6857 + restorer = &frame->retcode;
6858 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6859
6860 /*
6861 * Not actually used anymore, but left because some gdb
6862 * versions need it.
6863 */
6864 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6865 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6866 } put_user_catch(err);
6867
6868 if (err)
6869 diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6870 --- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6871 +++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6872 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6873 " .byte 662b-661b\n" /* sourcelen */ \
6874 " .byte 664f-663f\n" /* replacementlen */ \
6875 ".previous\n" \
6876 - ".section .altinstr_replacement, \"ax\"\n" \
6877 + ".section .altinstr_replacement, \"a\"\n" \
6878 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6879 ".previous"
6880
6881 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6882 --- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
6883 +++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
6884 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
6885
6886 #ifdef CONFIG_X86_LOCAL_APIC
6887
6888 -extern unsigned int apic_verbosity;
6889 +extern int apic_verbosity;
6890 extern int local_apic_timer_c2_ok;
6891
6892 extern int disable_apic;
6893 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
6894 --- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6895 +++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6896 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6897 __asm__ __volatile__(APM_DO_ZERO_SEGS
6898 "pushl %%edi\n\t"
6899 "pushl %%ebp\n\t"
6900 - "lcall *%%cs:apm_bios_entry\n\t"
6901 + "lcall *%%ss:apm_bios_entry\n\t"
6902 "setc %%al\n\t"
6903 "popl %%ebp\n\t"
6904 "popl %%edi\n\t"
6905 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6906 __asm__ __volatile__(APM_DO_ZERO_SEGS
6907 "pushl %%edi\n\t"
6908 "pushl %%ebp\n\t"
6909 - "lcall *%%cs:apm_bios_entry\n\t"
6910 + "lcall *%%ss:apm_bios_entry\n\t"
6911 "setc %%bl\n\t"
6912 "popl %%ebp\n\t"
6913 "popl %%edi\n\t"
6914 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
6915 --- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6916 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6917 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6918 }
6919
6920 /**
6921 + * atomic_read_unchecked - read atomic variable
6922 + * @v: pointer of type atomic_unchecked_t
6923 + *
6924 + * Atomically reads the value of @v.
6925 + */
6926 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6927 +{
6928 + return v->counter;
6929 +}
6930 +
6931 +/**
6932 * atomic_set - set atomic variable
6933 * @v: pointer of type atomic_t
6934 * @i: required value
6935 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6936 }
6937
6938 /**
6939 + * atomic_set_unchecked - set atomic variable
6940 + * @v: pointer of type atomic_unchecked_t
6941 + * @i: required value
6942 + *
6943 + * Atomically sets the value of @v to @i.
6944 + */
6945 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6946 +{
6947 + v->counter = i;
6948 +}
6949 +
6950 +/**
6951 * atomic_add - add integer to atomic variable
6952 * @i: integer value to add
6953 * @v: pointer of type atomic_t
6954 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6955 */
6956 static inline void atomic_add(int i, atomic_t *v)
6957 {
6958 - asm volatile(LOCK_PREFIX "addl %1,%0"
6959 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6960 +
6961 +#ifdef CONFIG_PAX_REFCOUNT
6962 + "jno 0f\n"
6963 + LOCK_PREFIX "subl %1,%0\n"
6964 + "int $4\n0:\n"
6965 + _ASM_EXTABLE(0b, 0b)
6966 +#endif
6967 +
6968 + : "+m" (v->counter)
6969 + : "ir" (i));
6970 +}
6971 +
6972 +/**
6973 + * atomic_add_unchecked - add integer to atomic variable
6974 + * @i: integer value to add
6975 + * @v: pointer of type atomic_unchecked_t
6976 + *
6977 + * Atomically adds @i to @v.
6978 + */
6979 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6980 +{
6981 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6982 : "+m" (v->counter)
6983 : "ir" (i));
6984 }
6985 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6986 */
6987 static inline void atomic_sub(int i, atomic_t *v)
6988 {
6989 - asm volatile(LOCK_PREFIX "subl %1,%0"
6990 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6991 +
6992 +#ifdef CONFIG_PAX_REFCOUNT
6993 + "jno 0f\n"
6994 + LOCK_PREFIX "addl %1,%0\n"
6995 + "int $4\n0:\n"
6996 + _ASM_EXTABLE(0b, 0b)
6997 +#endif
6998 +
6999 + : "+m" (v->counter)
7000 + : "ir" (i));
7001 +}
7002 +
7003 +/**
7004 + * atomic_sub_unchecked - subtract integer from atomic variable
7005 + * @i: integer value to subtract
7006 + * @v: pointer of type atomic_unchecked_t
7007 + *
7008 + * Atomically subtracts @i from @v.
7009 + */
7010 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7011 +{
7012 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7013 : "+m" (v->counter)
7014 : "ir" (i));
7015 }
7016 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7017 {
7018 unsigned char c;
7019
7020 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7021 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7022 +
7023 +#ifdef CONFIG_PAX_REFCOUNT
7024 + "jno 0f\n"
7025 + LOCK_PREFIX "addl %2,%0\n"
7026 + "int $4\n0:\n"
7027 + _ASM_EXTABLE(0b, 0b)
7028 +#endif
7029 +
7030 + "sete %1\n"
7031 : "+m" (v->counter), "=qm" (c)
7032 : "ir" (i) : "memory");
7033 return c;
7034 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7035 */
7036 static inline void atomic_inc(atomic_t *v)
7037 {
7038 - asm volatile(LOCK_PREFIX "incl %0"
7039 + asm volatile(LOCK_PREFIX "incl %0\n"
7040 +
7041 +#ifdef CONFIG_PAX_REFCOUNT
7042 + "jno 0f\n"
7043 + LOCK_PREFIX "decl %0\n"
7044 + "int $4\n0:\n"
7045 + _ASM_EXTABLE(0b, 0b)
7046 +#endif
7047 +
7048 + : "+m" (v->counter));
7049 +}
7050 +
7051 +/**
7052 + * atomic_inc_unchecked - increment atomic variable
7053 + * @v: pointer of type atomic_unchecked_t
7054 + *
7055 + * Atomically increments @v by 1.
7056 + */
7057 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7058 +{
7059 + asm volatile(LOCK_PREFIX "incl %0\n"
7060 : "+m" (v->counter));
7061 }
7062
7063 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7064 */
7065 static inline void atomic_dec(atomic_t *v)
7066 {
7067 - asm volatile(LOCK_PREFIX "decl %0"
7068 + asm volatile(LOCK_PREFIX "decl %0\n"
7069 +
7070 +#ifdef CONFIG_PAX_REFCOUNT
7071 + "jno 0f\n"
7072 + LOCK_PREFIX "incl %0\n"
7073 + "int $4\n0:\n"
7074 + _ASM_EXTABLE(0b, 0b)
7075 +#endif
7076 +
7077 + : "+m" (v->counter));
7078 +}
7079 +
7080 +/**
7081 + * atomic_dec_unchecked - decrement atomic variable
7082 + * @v: pointer of type atomic_unchecked_t
7083 + *
7084 + * Atomically decrements @v by 1.
7085 + */
7086 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7087 +{
7088 + asm volatile(LOCK_PREFIX "decl %0\n"
7089 : "+m" (v->counter));
7090 }
7091
7092 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7093 {
7094 unsigned char c;
7095
7096 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7097 + asm volatile(LOCK_PREFIX "decl %0\n"
7098 +
7099 +#ifdef CONFIG_PAX_REFCOUNT
7100 + "jno 0f\n"
7101 + LOCK_PREFIX "incl %0\n"
7102 + "int $4\n0:\n"
7103 + _ASM_EXTABLE(0b, 0b)
7104 +#endif
7105 +
7106 + "sete %1\n"
7107 : "+m" (v->counter), "=qm" (c)
7108 : : "memory");
7109 return c != 0;
7110 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7111 {
7112 unsigned char c;
7113
7114 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7115 + asm volatile(LOCK_PREFIX "incl %0\n"
7116 +
7117 +#ifdef CONFIG_PAX_REFCOUNT
7118 + "jno 0f\n"
7119 + LOCK_PREFIX "decl %0\n"
7120 + "into\n0:\n"
7121 + _ASM_EXTABLE(0b, 0b)
7122 +#endif
7123 +
7124 + "sete %1\n"
7125 + : "+m" (v->counter), "=qm" (c)
7126 + : : "memory");
7127 + return c != 0;
7128 +}
7129 +
7130 +/**
7131 + * atomic_inc_and_test_unchecked - increment and test
7132 + * @v: pointer of type atomic_unchecked_t
7133 + *
7134 + * Atomically increments @v by 1
7135 + * and returns true if the result is zero, or false for all
7136 + * other cases.
7137 + */
7138 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7139 +{
7140 + unsigned char c;
7141 +
7142 + asm volatile(LOCK_PREFIX "incl %0\n"
7143 + "sete %1\n"
7144 : "+m" (v->counter), "=qm" (c)
7145 : : "memory");
7146 return c != 0;
7147 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7148 {
7149 unsigned char c;
7150
7151 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7152 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7153 +
7154 +#ifdef CONFIG_PAX_REFCOUNT
7155 + "jno 0f\n"
7156 + LOCK_PREFIX "subl %2,%0\n"
7157 + "int $4\n0:\n"
7158 + _ASM_EXTABLE(0b, 0b)
7159 +#endif
7160 +
7161 + "sets %1\n"
7162 : "+m" (v->counter), "=qm" (c)
7163 : "ir" (i) : "memory");
7164 return c;
7165 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7166 #endif
7167 /* Modern 486+ processor */
7168 __i = i;
7169 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7170 +
7171 +#ifdef CONFIG_PAX_REFCOUNT
7172 + "jno 0f\n"
7173 + "movl %0, %1\n"
7174 + "int $4\n0:\n"
7175 + _ASM_EXTABLE(0b, 0b)
7176 +#endif
7177 +
7178 + : "+r" (i), "+m" (v->counter)
7179 + : : "memory");
7180 + return i + __i;
7181 +
7182 +#ifdef CONFIG_M386
7183 +no_xadd: /* Legacy 386 processor */
7184 + local_irq_save(flags);
7185 + __i = atomic_read(v);
7186 + atomic_set(v, i + __i);
7187 + local_irq_restore(flags);
7188 + return i + __i;
7189 +#endif
7190 +}
7191 +
7192 +/**
7193 + * atomic_add_return_unchecked - add integer and return
7194 + * @v: pointer of type atomic_unchecked_t
7195 + * @i: integer value to add
7196 + *
7197 + * Atomically adds @i to @v and returns @i + @v
7198 + */
7199 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7200 +{
7201 + int __i;
7202 +#ifdef CONFIG_M386
7203 + unsigned long flags;
7204 + if (unlikely(boot_cpu_data.x86 <= 3))
7205 + goto no_xadd;
7206 +#endif
7207 + /* Modern 486+ processor */
7208 + __i = i;
7209 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7210 : "+r" (i), "+m" (v->counter)
7211 : : "memory");
7212 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7213 return cmpxchg(&v->counter, old, new);
7214 }
7215
7216 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7217 +{
7218 + return cmpxchg(&v->counter, old, new);
7219 +}
7220 +
7221 static inline int atomic_xchg(atomic_t *v, int new)
7222 {
7223 return xchg(&v->counter, new);
7224 }
7225
7226 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7227 +{
7228 + return xchg(&v->counter, new);
7229 +}
7230 +
7231 /**
7232 * atomic_add_unless - add unless the number is already a given value
7233 * @v: pointer of type atomic_t
7234 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7235 */
7236 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7237 {
7238 - int c, old;
7239 + int c, old, new;
7240 c = atomic_read(v);
7241 for (;;) {
7242 - if (unlikely(c == (u)))
7243 + if (unlikely(c == u))
7244 break;
7245 - old = atomic_cmpxchg((v), c, c + (a));
7246 +
7247 + asm volatile("addl %2,%0\n"
7248 +
7249 +#ifdef CONFIG_PAX_REFCOUNT
7250 + "jno 0f\n"
7251 + "subl %2,%0\n"
7252 + "int $4\n0:\n"
7253 + _ASM_EXTABLE(0b, 0b)
7254 +#endif
7255 +
7256 + : "=r" (new)
7257 + : "0" (c), "ir" (a));
7258 +
7259 + old = atomic_cmpxchg(v, c, new);
7260 if (likely(old == c))
7261 break;
7262 c = old;
7263 }
7264 - return c != (u);
7265 + return c != u;
7266 }
7267
7268 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7269
7270 #define atomic_inc_return(v) (atomic_add_return(1, v))
7271 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7272 +{
7273 + return atomic_add_return_unchecked(1, v);
7274 +}
7275 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7276
7277 /* These are x86-specific, used by some header files */
7278 @@ -266,9 +495,18 @@ typedef struct {
7279 u64 __aligned(8) counter;
7280 } atomic64_t;
7281
7282 +#ifdef CONFIG_PAX_REFCOUNT
7283 +typedef struct {
7284 + u64 __aligned(8) counter;
7285 +} atomic64_unchecked_t;
7286 +#else
7287 +typedef atomic64_t atomic64_unchecked_t;
7288 +#endif
7289 +
7290 #define ATOMIC64_INIT(val) { (val) }
7291
7292 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7293 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7294
7295 /**
7296 * atomic64_xchg - xchg atomic64 variable
7297 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7298 * the old value.
7299 */
7300 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7301 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7302
7303 /**
7304 * atomic64_set - set atomic64 variable
7305 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7306 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7307
7308 /**
7309 + * atomic64_unchecked_set - set atomic64 variable
7310 + * @ptr: pointer to type atomic64_unchecked_t
7311 + * @new_val: value to assign
7312 + *
7313 + * Atomically sets the value of @ptr to @new_val.
7314 + */
7315 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7316 +
7317 +/**
7318 * atomic64_read - read atomic64 variable
7319 * @ptr: pointer to type atomic64_t
7320 *
7321 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7322 return res;
7323 }
7324
7325 -extern u64 atomic64_read(atomic64_t *ptr);
7326 +/**
7327 + * atomic64_read_unchecked - read atomic64 variable
7328 + * @ptr: pointer to type atomic64_unchecked_t
7329 + *
7330 + * Atomically reads the value of @ptr and returns it.
7331 + */
7332 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7333 +{
7334 + u64 res;
7335 +
7336 + /*
7337 + * Note, we inline this atomic64_unchecked_t primitive because
7338 + * it only clobbers EAX/EDX and leaves the others
7339 + * untouched. We also (somewhat subtly) rely on the
7340 + * fact that cmpxchg8b returns the current 64-bit value
7341 + * of the memory location we are touching:
7342 + */
7343 + asm volatile(
7344 + "mov %%ebx, %%eax\n\t"
7345 + "mov %%ecx, %%edx\n\t"
7346 + LOCK_PREFIX "cmpxchg8b %1\n"
7347 + : "=&A" (res)
7348 + : "m" (*ptr)
7349 + );
7350 +
7351 + return res;
7352 +}
7353
7354 /**
7355 * atomic64_add_return - add and return
7356 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7357 * Other variants with different arithmetic operators:
7358 */
7359 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7360 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7361 extern u64 atomic64_inc_return(atomic64_t *ptr);
7362 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7363 extern u64 atomic64_dec_return(atomic64_t *ptr);
7364 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7365
7366 /**
7367 * atomic64_add - add integer to atomic64 variable
7368 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7369 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7370
7371 /**
7372 + * atomic64_add_unchecked - add integer to atomic64 variable
7373 + * @delta: integer value to add
7374 + * @ptr: pointer to type atomic64_unchecked_t
7375 + *
7376 + * Atomically adds @delta to @ptr.
7377 + */
7378 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7379 +
7380 +/**
7381 * atomic64_sub - subtract the atomic64 variable
7382 * @delta: integer value to subtract
7383 * @ptr: pointer to type atomic64_t
7384 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7385 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7386
7387 /**
7388 + * atomic64_sub_unchecked - subtract the atomic64 variable
7389 + * @delta: integer value to subtract
7390 + * @ptr: pointer to type atomic64_unchecked_t
7391 + *
7392 + * Atomically subtracts @delta from @ptr.
7393 + */
7394 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7395 +
7396 +/**
7397 * atomic64_sub_and_test - subtract value from variable and test result
7398 * @delta: integer value to subtract
7399 * @ptr: pointer to type atomic64_t
7400 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7401 extern void atomic64_inc(atomic64_t *ptr);
7402
7403 /**
7404 + * atomic64_inc_unchecked - increment atomic64 variable
7405 + * @ptr: pointer to type atomic64_unchecked_t
7406 + *
7407 + * Atomically increments @ptr by 1.
7408 + */
7409 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7410 +
7411 +/**
7412 * atomic64_dec - decrement atomic64 variable
7413 * @ptr: pointer to type atomic64_t
7414 *
7415 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7416 extern void atomic64_dec(atomic64_t *ptr);
7417
7418 /**
7419 + * atomic64_dec_unchecked - decrement atomic64 variable
7420 + * @ptr: pointer to type atomic64_unchecked_t
7421 + *
7422 + * Atomically decrements @ptr by 1.
7423 + */
7424 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7425 +
7426 +/**
7427 * atomic64_dec_and_test - decrement and test
7428 * @ptr: pointer to type atomic64_t
7429 *
7430 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7431 --- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7432 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7433 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7434 }
7435
7436 /**
7437 + * atomic_read_unchecked - read atomic variable
7438 + * @v: pointer of type atomic_unchecked_t
7439 + *
7440 + * Atomically reads the value of @v.
7441 + */
7442 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7443 +{
7444 + return v->counter;
7445 +}
7446 +
7447 +/**
7448 * atomic_set - set atomic variable
7449 * @v: pointer of type atomic_t
7450 * @i: required value
7451 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7452 }
7453
7454 /**
7455 + * atomic_set_unchecked - set atomic variable
7456 + * @v: pointer of type atomic_unchecked_t
7457 + * @i: required value
7458 + *
7459 + * Atomically sets the value of @v to @i.
7460 + */
7461 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7462 +{
7463 + v->counter = i;
7464 +}
7465 +
7466 +/**
7467 * atomic_add - add integer to atomic variable
7468 * @i: integer value to add
7469 * @v: pointer of type atomic_t
7470 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7471 */
7472 static inline void atomic_add(int i, atomic_t *v)
7473 {
7474 - asm volatile(LOCK_PREFIX "addl %1,%0"
7475 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7476 +
7477 +#ifdef CONFIG_PAX_REFCOUNT
7478 + "jno 0f\n"
7479 + LOCK_PREFIX "subl %1,%0\n"
7480 + "int $4\n0:\n"
7481 + _ASM_EXTABLE(0b, 0b)
7482 +#endif
7483 +
7484 + : "=m" (v->counter)
7485 + : "ir" (i), "m" (v->counter));
7486 +}
7487 +
7488 +/**
7489 + * atomic_add_unchecked - add integer to atomic variable
7490 + * @i: integer value to add
7491 + * @v: pointer of type atomic_unchecked_t
7492 + *
7493 + * Atomically adds @i to @v.
7494 + */
7495 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7496 +{
7497 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7498 : "=m" (v->counter)
7499 : "ir" (i), "m" (v->counter));
7500 }
7501 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7502 */
7503 static inline void atomic_sub(int i, atomic_t *v)
7504 {
7505 - asm volatile(LOCK_PREFIX "subl %1,%0"
7506 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7507 +
7508 +#ifdef CONFIG_PAX_REFCOUNT
7509 + "jno 0f\n"
7510 + LOCK_PREFIX "addl %1,%0\n"
7511 + "int $4\n0:\n"
7512 + _ASM_EXTABLE(0b, 0b)
7513 +#endif
7514 +
7515 + : "=m" (v->counter)
7516 + : "ir" (i), "m" (v->counter));
7517 +}
7518 +
7519 +/**
7520 + * atomic_sub_unchecked - subtract the atomic variable
7521 + * @i: integer value to subtract
7522 + * @v: pointer of type atomic_unchecked_t
7523 + *
7524 + * Atomically subtracts @i from @v.
7525 + */
7526 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7527 +{
7528 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7529 : "=m" (v->counter)
7530 : "ir" (i), "m" (v->counter));
7531 }
7532 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7533 {
7534 unsigned char c;
7535
7536 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7537 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7538 +
7539 +#ifdef CONFIG_PAX_REFCOUNT
7540 + "jno 0f\n"
7541 + LOCK_PREFIX "addl %2,%0\n"
7542 + "int $4\n0:\n"
7543 + _ASM_EXTABLE(0b, 0b)
7544 +#endif
7545 +
7546 + "sete %1\n"
7547 : "=m" (v->counter), "=qm" (c)
7548 : "ir" (i), "m" (v->counter) : "memory");
7549 return c;
7550 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7551 */
7552 static inline void atomic_inc(atomic_t *v)
7553 {
7554 - asm volatile(LOCK_PREFIX "incl %0"
7555 + asm volatile(LOCK_PREFIX "incl %0\n"
7556 +
7557 +#ifdef CONFIG_PAX_REFCOUNT
7558 + "jno 0f\n"
7559 + LOCK_PREFIX "decl %0\n"
7560 + "int $4\n0:\n"
7561 + _ASM_EXTABLE(0b, 0b)
7562 +#endif
7563 +
7564 + : "=m" (v->counter)
7565 + : "m" (v->counter));
7566 +}
7567 +
7568 +/**
7569 + * atomic_inc_unchecked - increment atomic variable
7570 + * @v: pointer of type atomic_unchecked_t
7571 + *
7572 + * Atomically increments @v by 1.
7573 + */
7574 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7575 +{
7576 + asm volatile(LOCK_PREFIX "incl %0\n"
7577 : "=m" (v->counter)
7578 : "m" (v->counter));
7579 }
7580 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7581 */
7582 static inline void atomic_dec(atomic_t *v)
7583 {
7584 - asm volatile(LOCK_PREFIX "decl %0"
7585 + asm volatile(LOCK_PREFIX "decl %0\n"
7586 +
7587 +#ifdef CONFIG_PAX_REFCOUNT
7588 + "jno 0f\n"
7589 + LOCK_PREFIX "incl %0\n"
7590 + "int $4\n0:\n"
7591 + _ASM_EXTABLE(0b, 0b)
7592 +#endif
7593 +
7594 + : "=m" (v->counter)
7595 + : "m" (v->counter));
7596 +}
7597 +
7598 +/**
7599 + * atomic_dec_unchecked - decrement atomic variable
7600 + * @v: pointer of type atomic_unchecked_t
7601 + *
7602 + * Atomically decrements @v by 1.
7603 + */
7604 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7605 +{
7606 + asm volatile(LOCK_PREFIX "decl %0\n"
7607 : "=m" (v->counter)
7608 : "m" (v->counter));
7609 }
7610 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7611 {
7612 unsigned char c;
7613
7614 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7615 + asm volatile(LOCK_PREFIX "decl %0\n"
7616 +
7617 +#ifdef CONFIG_PAX_REFCOUNT
7618 + "jno 0f\n"
7619 + LOCK_PREFIX "incl %0\n"
7620 + "int $4\n0:\n"
7621 + _ASM_EXTABLE(0b, 0b)
7622 +#endif
7623 +
7624 + "sete %1\n"
7625 : "=m" (v->counter), "=qm" (c)
7626 : "m" (v->counter) : "memory");
7627 return c != 0;
7628 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7629 {
7630 unsigned char c;
7631
7632 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7633 + asm volatile(LOCK_PREFIX "incl %0\n"
7634 +
7635 +#ifdef CONFIG_PAX_REFCOUNT
7636 + "jno 0f\n"
7637 + LOCK_PREFIX "decl %0\n"
7638 + "int $4\n0:\n"
7639 + _ASM_EXTABLE(0b, 0b)
7640 +#endif
7641 +
7642 + "sete %1\n"
7643 + : "=m" (v->counter), "=qm" (c)
7644 + : "m" (v->counter) : "memory");
7645 + return c != 0;
7646 +}
7647 +
7648 +/**
7649 + * atomic_inc_and_test_unchecked - increment and test
7650 + * @v: pointer of type atomic_unchecked_t
7651 + *
7652 + * Atomically increments @v by 1
7653 + * and returns true if the result is zero, or false for all
7654 + * other cases.
7655 + */
7656 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7657 +{
7658 + unsigned char c;
7659 +
7660 + asm volatile(LOCK_PREFIX "incl %0\n"
7661 + "sete %1\n"
7662 : "=m" (v->counter), "=qm" (c)
7663 : "m" (v->counter) : "memory");
7664 return c != 0;
7665 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7666 {
7667 unsigned char c;
7668
7669 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7670 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7671 +
7672 +#ifdef CONFIG_PAX_REFCOUNT
7673 + "jno 0f\n"
7674 + LOCK_PREFIX "subl %2,%0\n"
7675 + "int $4\n0:\n"
7676 + _ASM_EXTABLE(0b, 0b)
7677 +#endif
7678 +
7679 + "sets %1\n"
7680 : "=m" (v->counter), "=qm" (c)
7681 : "ir" (i), "m" (v->counter) : "memory");
7682 return c;
7683 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7684 static inline int atomic_add_return(int i, atomic_t *v)
7685 {
7686 int __i = i;
7687 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7688 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7689 +
7690 +#ifdef CONFIG_PAX_REFCOUNT
7691 + "jno 0f\n"
7692 + "movl %0, %1\n"
7693 + "int $4\n0:\n"
7694 + _ASM_EXTABLE(0b, 0b)
7695 +#endif
7696 +
7697 + : "+r" (i), "+m" (v->counter)
7698 + : : "memory");
7699 + return i + __i;
7700 +}
7701 +
7702 +/**
7703 + * atomic_add_return_unchecked - add and return
7704 + * @i: integer value to add
7705 + * @v: pointer of type atomic_unchecked_t
7706 + *
7707 + * Atomically adds @i to @v and returns @i + @v
7708 + */
7709 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7710 +{
7711 + int __i = i;
7712 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7713 : "+r" (i), "+m" (v->counter)
7714 : : "memory");
7715 return i + __i;
7716 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7717 }
7718
7719 #define atomic_inc_return(v) (atomic_add_return(1, v))
7720 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7721 +{
7722 + return atomic_add_return_unchecked(1, v);
7723 +}
7724 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7725
7726 /* The 64-bit atomic type */
7727 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7728 }
7729
7730 /**
7731 + * atomic64_read_unchecked - read atomic64 variable
7732 + * @v: pointer of type atomic64_unchecked_t
7733 + *
7734 + * Atomically reads the value of @v.
7735 + * Doesn't imply a read memory barrier.
7736 + */
7737 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7738 +{
7739 + return v->counter;
7740 +}
7741 +
7742 +/**
7743 * atomic64_set - set atomic64 variable
7744 * @v: pointer to type atomic64_t
7745 * @i: required value
7746 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7747 }
7748
7749 /**
7750 + * atomic64_set_unchecked - set atomic64 variable
7751 + * @v: pointer to type atomic64_unchecked_t
7752 + * @i: required value
7753 + *
7754 + * Atomically sets the value of @v to @i.
7755 + */
7756 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7757 +{
7758 + v->counter = i;
7759 +}
7760 +
7761 +/**
7762 * atomic64_add - add integer to atomic64 variable
7763 * @i: integer value to add
7764 * @v: pointer to type atomic64_t
7765 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7766 */
7767 static inline void atomic64_add(long i, atomic64_t *v)
7768 {
7769 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7770 +
7771 +#ifdef CONFIG_PAX_REFCOUNT
7772 + "jno 0f\n"
7773 + LOCK_PREFIX "subq %1,%0\n"
7774 + "int $4\n0:\n"
7775 + _ASM_EXTABLE(0b, 0b)
7776 +#endif
7777 +
7778 + : "=m" (v->counter)
7779 + : "er" (i), "m" (v->counter));
7780 +}
7781 +
7782 +/**
7783 + * atomic64_add_unchecked - add integer to atomic64 variable
7784 + * @i: integer value to add
7785 + * @v: pointer to type atomic64_unchecked_t
7786 + *
7787 + * Atomically adds @i to @v.
7788 + */
7789 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7790 +{
7791 asm volatile(LOCK_PREFIX "addq %1,%0"
7792 : "=m" (v->counter)
7793 : "er" (i), "m" (v->counter));
7794 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7795 */
7796 static inline void atomic64_sub(long i, atomic64_t *v)
7797 {
7798 - asm volatile(LOCK_PREFIX "subq %1,%0"
7799 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7800 +
7801 +#ifdef CONFIG_PAX_REFCOUNT
7802 + "jno 0f\n"
7803 + LOCK_PREFIX "addq %1,%0\n"
7804 + "int $4\n0:\n"
7805 + _ASM_EXTABLE(0b, 0b)
7806 +#endif
7807 +
7808 : "=m" (v->counter)
7809 : "er" (i), "m" (v->counter));
7810 }
7811 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7812 {
7813 unsigned char c;
7814
7815 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7816 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7817 +
7818 +#ifdef CONFIG_PAX_REFCOUNT
7819 + "jno 0f\n"
7820 + LOCK_PREFIX "addq %2,%0\n"
7821 + "int $4\n0:\n"
7822 + _ASM_EXTABLE(0b, 0b)
7823 +#endif
7824 +
7825 + "sete %1\n"
7826 : "=m" (v->counter), "=qm" (c)
7827 : "er" (i), "m" (v->counter) : "memory");
7828 return c;
7829 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7830 */
7831 static inline void atomic64_inc(atomic64_t *v)
7832 {
7833 + asm volatile(LOCK_PREFIX "incq %0\n"
7834 +
7835 +#ifdef CONFIG_PAX_REFCOUNT
7836 + "jno 0f\n"
7837 + LOCK_PREFIX "decq %0\n"
7838 + "int $4\n0:\n"
7839 + _ASM_EXTABLE(0b, 0b)
7840 +#endif
7841 +
7842 + : "=m" (v->counter)
7843 + : "m" (v->counter));
7844 +}
7845 +
7846 +/**
7847 + * atomic64_inc_unchecked - increment atomic64 variable
7848 + * @v: pointer to type atomic64_unchecked_t
7849 + *
7850 + * Atomically increments @v by 1.
7851 + */
7852 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7853 +{
7854 asm volatile(LOCK_PREFIX "incq %0"
7855 : "=m" (v->counter)
7856 : "m" (v->counter));
7857 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7858 */
7859 static inline void atomic64_dec(atomic64_t *v)
7860 {
7861 - asm volatile(LOCK_PREFIX "decq %0"
7862 + asm volatile(LOCK_PREFIX "decq %0\n"
7863 +
7864 +#ifdef CONFIG_PAX_REFCOUNT
7865 + "jno 0f\n"
7866 + LOCK_PREFIX "incq %0\n"
7867 + "int $4\n0:\n"
7868 + _ASM_EXTABLE(0b, 0b)
7869 +#endif
7870 +
7871 + : "=m" (v->counter)
7872 + : "m" (v->counter));
7873 +}
7874 +
7875 +/**
7876 + * atomic64_dec_unchecked - decrement atomic64 variable
7877 + * @v: pointer to type atomic64_unchecked_t
7878 + *
7879 + * Atomically decrements @v by 1.
7880 + */
7881 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7882 +{
7883 + asm volatile(LOCK_PREFIX "decq %0\n"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886 }
7887 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7888 {
7889 unsigned char c;
7890
7891 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7892 + asm volatile(LOCK_PREFIX "decq %0\n"
7893 +
7894 +#ifdef CONFIG_PAX_REFCOUNT
7895 + "jno 0f\n"
7896 + LOCK_PREFIX "incq %0\n"
7897 + "int $4\n0:\n"
7898 + _ASM_EXTABLE(0b, 0b)
7899 +#endif
7900 +
7901 + "sete %1\n"
7902 : "=m" (v->counter), "=qm" (c)
7903 : "m" (v->counter) : "memory");
7904 return c != 0;
7905 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7906 {
7907 unsigned char c;
7908
7909 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7910 + asm volatile(LOCK_PREFIX "incq %0\n"
7911 +
7912 +#ifdef CONFIG_PAX_REFCOUNT
7913 + "jno 0f\n"
7914 + LOCK_PREFIX "decq %0\n"
7915 + "int $4\n0:\n"
7916 + _ASM_EXTABLE(0b, 0b)
7917 +#endif
7918 +
7919 + "sete %1\n"
7920 : "=m" (v->counter), "=qm" (c)
7921 : "m" (v->counter) : "memory");
7922 return c != 0;
7923 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7924 {
7925 unsigned char c;
7926
7927 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7928 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7929 +
7930 +#ifdef CONFIG_PAX_REFCOUNT
7931 + "jno 0f\n"
7932 + LOCK_PREFIX "subq %2,%0\n"
7933 + "int $4\n0:\n"
7934 + _ASM_EXTABLE(0b, 0b)
7935 +#endif
7936 +
7937 + "sets %1\n"
7938 : "=m" (v->counter), "=qm" (c)
7939 : "er" (i), "m" (v->counter) : "memory");
7940 return c;
7941 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7942 static inline long atomic64_add_return(long i, atomic64_t *v)
7943 {
7944 long __i = i;
7945 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7946 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7947 +
7948 +#ifdef CONFIG_PAX_REFCOUNT
7949 + "jno 0f\n"
7950 + "movq %0, %1\n"
7951 + "int $4\n0:\n"
7952 + _ASM_EXTABLE(0b, 0b)
7953 +#endif
7954 +
7955 + : "+r" (i), "+m" (v->counter)
7956 + : : "memory");
7957 + return i + __i;
7958 +}
7959 +
7960 +/**
7961 + * atomic64_add_return_unchecked - add and return
7962 + * @i: integer value to add
7963 + * @v: pointer to type atomic64_unchecked_t
7964 + *
7965 + * Atomically adds @i to @v and returns @i + @v
7966 + */
7967 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7968 +{
7969 + long __i = i;
7970 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7971 : "+r" (i), "+m" (v->counter)
7972 : : "memory");
7973 return i + __i;
7974 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7975 }
7976
7977 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7978 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7979 +{
7980 + return atomic64_add_return_unchecked(1, v);
7981 +}
7982 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7983
7984 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7985 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7986 return cmpxchg(&v->counter, old, new);
7987 }
7988
7989 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7990 +{
7991 + return cmpxchg(&v->counter, old, new);
7992 +}
7993 +
7994 static inline long atomic64_xchg(atomic64_t *v, long new)
7995 {
7996 return xchg(&v->counter, new);
7997 }
7998
7999 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8000 +{
8001 + return xchg(&v->counter, new);
8002 +}
8003 +
8004 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8005 {
8006 return cmpxchg(&v->counter, old, new);
8007 }
8008
8009 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8010 +{
8011 + return cmpxchg(&v->counter, old, new);
8012 +}
8013 +
8014 static inline long atomic_xchg(atomic_t *v, int new)
8015 {
8016 return xchg(&v->counter, new);
8017 }
8018
8019 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8020 +{
8021 + return xchg(&v->counter, new);
8022 +}
8023 +
8024 /**
8025 * atomic_add_unless - add unless the number is a given value
8026 * @v: pointer of type atomic_t
8027 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8028 */
8029 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8030 {
8031 - int c, old;
8032 + int c, old, new;
8033 c = atomic_read(v);
8034 for (;;) {
8035 - if (unlikely(c == (u)))
8036 + if (unlikely(c == u))
8037 break;
8038 - old = atomic_cmpxchg((v), c, c + (a));
8039 +
8040 + asm volatile("addl %2,%0\n"
8041 +
8042 +#ifdef CONFIG_PAX_REFCOUNT
8043 + "jno 0f\n"
8044 + "subl %2,%0\n"
8045 + "int $4\n0:\n"
8046 + _ASM_EXTABLE(0b, 0b)
8047 +#endif
8048 +
8049 + : "=r" (new)
8050 + : "0" (c), "ir" (a));
8051 +
8052 + old = atomic_cmpxchg(v, c, new);
8053 if (likely(old == c))
8054 break;
8055 c = old;
8056 }
8057 - return c != (u);
8058 + return c != u;
8059 }
8060
8061 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8062 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8063 */
8064 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8065 {
8066 - long c, old;
8067 + long c, old, new;
8068 c = atomic64_read(v);
8069 for (;;) {
8070 - if (unlikely(c == (u)))
8071 + if (unlikely(c == u))
8072 break;
8073 - old = atomic64_cmpxchg((v), c, c + (a));
8074 +
8075 + asm volatile("addq %2,%0\n"
8076 +
8077 +#ifdef CONFIG_PAX_REFCOUNT
8078 + "jno 0f\n"
8079 + "subq %2,%0\n"
8080 + "int $4\n0:\n"
8081 + _ASM_EXTABLE(0b, 0b)
8082 +#endif
8083 +
8084 + : "=r" (new)
8085 + : "0" (c), "er" (a));
8086 +
8087 + old = atomic64_cmpxchg(v, c, new);
8088 if (likely(old == c))
8089 break;
8090 c = old;
8091 }
8092 - return c != (u);
8093 + return c != u;
8094 }
8095
8096 /**
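
The pattern repeated throughout the atomic hunks above is the PAX_REFCOUNT check: each LOCK-prefixed arithmetic instruction is followed by "jno", which skips the slow path when the signed result did not overflow; on overflow the operation is undone with the inverse instruction and "int $4" raises the overflow trap, while the _ASM_EXTABLE(0b, 0b) entry gives the trap handler a fixup address just past the faulting instruction so it can report the event instead of oopsing. The *_unchecked variants keep plain wrap-around semantics for counters where wrapping is harmless. A minimal user-space sketch of the same idea (not from the patch; it sets a flag instead of raising int $4 so it stays runnable):

/* refcount_overflow_demo.c - build: gcc -O2 refcount_overflow_demo.c */
#include <stdio.h>
#include <limits.h>

static int atomic_add_checked(int *counter, int i)
{
	int overflowed = 0;

	asm volatile("lock addl %2, %0\n\t"
		     "jno 1f\n\t"
		     "lock subl %2, %0\n\t"	/* undo the wrapped add */
		     "movl $1, %1\n"
		     "1:"
		     : "+m" (*counter), "+r" (overflowed)
		     : "ir" (i)
		     : "memory", "cc");
	return overflowed;
}

int main(void)
{
	int refcount = INT_MAX;

	if (atomic_add_checked(&refcount, 1))
		printf("overflow caught, counter held at %d\n", refcount);
	return 0;
}
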
8097 diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8098 --- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8099 +++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8100 @@ -38,7 +38,7 @@
8101 * a mask operation on a byte.
8102 */
8103 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8104 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8105 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8106 #define CONST_MASK(nr) (1 << ((nr) & 7))
8107
8108 /**
8109 diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8110 --- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8111 +++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8112 @@ -11,10 +11,15 @@
8113 #include <asm/pgtable_types.h>
8114
8115 /* Physical address where kernel should be loaded. */
8116 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8117 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8118 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8119 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8120
8121 +#ifndef __ASSEMBLY__
8122 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8123 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8124 +#endif
8125 +
8126 /* Minimum kernel alignment, as a power of two */
8127 #ifdef CONFIG_X86_64
8128 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
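
The boot.h hunk splits LOAD_PHYSICAL_ADDR in two: ____LOAD_PHYSICAL_ADDR keeps the old compile-time arithmetic, while LOAD_PHYSICAL_ADDR itself becomes the address of an extern symbol, so C code no longer bakes the load address in as an immediate and the value is fixed up at link time (the __LOAD_PHYSICAL_ADDR symbol is presumably supplied by the kernel linker script, which is outside these hunks). The idiom, the address of an extern array carrying the value, works the same way in user space:

/* linker_symbol_demo.c - the value lives in the symbol's address, chosen at
 * link time, e.g.:
 *   gcc -no-pie linker_symbol_demo.c -Wl,--defsym,__LOAD_PHYSICAL_ADDR=0x1000000
 */
#include <stdio.h>

extern unsigned char __LOAD_PHYSICAL_ADDR[];
#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)

int main(void)
{
	printf("LOAD_PHYSICAL_ADDR = %#lx\n", LOAD_PHYSICAL_ADDR);
	return 0;
}
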
8129 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8130 --- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8131 +++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8132 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8133 static inline unsigned long get_page_memtype(struct page *pg)
8134 {
8135 if (!PageUncached(pg) && !PageWC(pg))
8136 - return -1;
8137 + return ~0UL;
8138 else if (!PageUncached(pg) && PageWC(pg))
8139 return _PAGE_CACHE_WC;
8140 else if (PageUncached(pg) && !PageWC(pg))
8141 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8142 SetPageWC(pg);
8143 break;
8144 default:
8145 - case -1:
8146 + case ~0UL:
8147 ClearPageUncached(pg);
8148 ClearPageWC(pg);
8149 break;
8150 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8151 --- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8152 +++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8153 @@ -5,9 +5,10 @@
8154
8155 /* L1 cache line size */
8156 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8157 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8158 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8159
8160 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8161 +#define __read_only __attribute__((__section__(".data.read_only")))
8162
8163 #ifdef CONFIG_X86_VSMP
8164 /* vSMP Internode cacheline shift */
8165 diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8166 --- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8167 +++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8168 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8169 int len, __wsum sum,
8170 int *src_err_ptr, int *dst_err_ptr);
8171
8172 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8173 + int len, __wsum sum,
8174 + int *src_err_ptr, int *dst_err_ptr);
8175 +
8176 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8177 + int len, __wsum sum,
8178 + int *src_err_ptr, int *dst_err_ptr);
8179 +
8180 /*
8181 * Note: when you get a NULL pointer exception here this means someone
8182 * passed in an incorrect kernel address to one of these functions.
8183 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8184 int *err_ptr)
8185 {
8186 might_sleep();
8187 - return csum_partial_copy_generic((__force void *)src, dst,
8188 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8189 len, sum, err_ptr, NULL);
8190 }
8191
8192 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8193 {
8194 might_sleep();
8195 if (access_ok(VERIFY_WRITE, dst, len))
8196 - return csum_partial_copy_generic(src, (__force void *)dst,
8197 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8198 len, sum, NULL, err_ptr);
8199
8200 if (len)
8201 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8202 --- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8203 +++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8204 @@ -31,6 +31,12 @@ struct desc_struct {
8205 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8206 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8207 };
8208 + struct {
8209 + u16 offset_low;
8210 + u16 seg;
8211 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8212 + unsigned offset_high: 16;
8213 + } gate;
8214 };
8215 } __attribute__((packed));
8216
8217 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8218 --- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8219 +++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8220 @@ -4,6 +4,7 @@
8221 #include <asm/desc_defs.h>
8222 #include <asm/ldt.h>
8223 #include <asm/mmu.h>
8224 +#include <asm/pgtable.h>
8225 #include <linux/smp.h>
8226
8227 static inline void fill_ldt(struct desc_struct *desc,
8228 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8229 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8230 desc->type = (info->read_exec_only ^ 1) << 1;
8231 desc->type |= info->contents << 2;
8232 + desc->type |= info->seg_not_present ^ 1;
8233 desc->s = 1;
8234 desc->dpl = 0x3;
8235 desc->p = info->seg_not_present ^ 1;
8236 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8237 }
8238
8239 extern struct desc_ptr idt_descr;
8240 -extern gate_desc idt_table[];
8241 -
8242 -struct gdt_page {
8243 - struct desc_struct gdt[GDT_ENTRIES];
8244 -} __attribute__((aligned(PAGE_SIZE)));
8245 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8246 +extern gate_desc idt_table[256];
8247
8248 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8249 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8250 {
8251 - return per_cpu(gdt_page, cpu).gdt;
8252 + return cpu_gdt_table[cpu];
8253 }
8254
8255 #ifdef CONFIG_X86_64
8256 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8257 unsigned long base, unsigned dpl, unsigned flags,
8258 unsigned short seg)
8259 {
8260 - gate->a = (seg << 16) | (base & 0xffff);
8261 - gate->b = (base & 0xffff0000) |
8262 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8263 + gate->gate.offset_low = base;
8264 + gate->gate.seg = seg;
8265 + gate->gate.reserved = 0;
8266 + gate->gate.type = type;
8267 + gate->gate.s = 0;
8268 + gate->gate.dpl = dpl;
8269 + gate->gate.p = 1;
8270 + gate->gate.offset_high = base >> 16;
8271 }
8272
8273 #endif
8274 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8275 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8276 const gate_desc *gate)
8277 {
8278 + pax_open_kernel();
8279 memcpy(&idt[entry], gate, sizeof(*gate));
8280 + pax_close_kernel();
8281 }
8282
8283 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8284 const void *desc)
8285 {
8286 + pax_open_kernel();
8287 memcpy(&ldt[entry], desc, 8);
8288 + pax_close_kernel();
8289 }
8290
8291 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8292 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8293 size = sizeof(struct desc_struct);
8294 break;
8295 }
8296 +
8297 + pax_open_kernel();
8298 memcpy(&gdt[entry], desc, size);
8299 + pax_close_kernel();
8300 }
8301
8302 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8303 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8304
8305 static inline void native_load_tr_desc(void)
8306 {
8307 + pax_open_kernel();
8308 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8309 + pax_close_kernel();
8310 }
8311
8312 static inline void native_load_gdt(const struct desc_ptr *dtr)
8313 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8314 unsigned int i;
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316
8317 + pax_open_kernel();
8318 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8319 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8320 + pax_close_kernel();
8321 }
8322
8323 #define _LDT_empty(info) \
8324 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8325 desc->limit = (limit >> 16) & 0xf;
8326 }
8327
8328 -static inline void _set_gate(int gate, unsigned type, void *addr,
8329 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8330 unsigned dpl, unsigned ist, unsigned seg)
8331 {
8332 gate_desc s;
8333 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8334 * Pentium F0 0F bugfix can have resulted in the mapped
8335 * IDT being write-protected.
8336 */
8337 -static inline void set_intr_gate(unsigned int n, void *addr)
8338 +static inline void set_intr_gate(unsigned int n, const void *addr)
8339 {
8340 BUG_ON((unsigned)n > 0xFF);
8341 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8342 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8343 /*
8344 * This routine sets up an interrupt gate at directory privilege level 3.
8345 */
8346 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8347 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8348 {
8349 BUG_ON((unsigned)n > 0xFF);
8350 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8351 }
8352
8353 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8354 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8355 {
8356 BUG_ON((unsigned)n > 0xFF);
8357 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8358 }
8359
8360 -static inline void set_trap_gate(unsigned int n, void *addr)
8361 +static inline void set_trap_gate(unsigned int n, const void *addr)
8362 {
8363 BUG_ON((unsigned)n > 0xFF);
8364 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8365 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8366 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8367 {
8368 BUG_ON((unsigned)n > 0xFF);
8369 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8370 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8371 }
8372
8373 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8374 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8375 {
8376 BUG_ON((unsigned)n > 0xFF);
8377 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8378 }
8379
8380 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8381 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8382 {
8383 BUG_ON((unsigned)n > 0xFF);
8384 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8385 }
8386
8387 +#ifdef CONFIG_X86_32
8388 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8389 +{
8390 + struct desc_struct d;
8391 +
8392 + if (likely(limit))
8393 + limit = (limit - 1UL) >> PAGE_SHIFT;
8394 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8395 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8396 +}
8397 +#endif
8398 +
8399 #endif /* _ASM_X86_DESC_H */
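
Two things happen in the desc_defs.h/desc.h pair above: the open-coded shift-and-mask packing of a gate descriptor (gate->a / gate->b) is replaced by a named "gate" bitfield view, and every IDT/GDT/LDT/TR write is bracketed with pax_open_kernel()/pax_close_kernel() so those tables can stay read-only the rest of the time. That the bitfield layout reproduces the old two-word encoding can be checked in user space; the sketch below assumes GCC's little-endian bitfield allocation (as on x86) and models only the 32-bit gate:

/* gate_pack_demo.c */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct gate {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
	uint32_t base = 0xc0101234, seg = 0x60, type = 0xe, dpl = 0;
	struct gate g = {
		.offset_low = (uint16_t)base, .seg = (uint16_t)seg,
		.reserved = 0, .type = type, .s = 0, .dpl = dpl, .p = 1,
		.offset_high = base >> 16,
	};
	uint32_t a = (seg << 16) | (base & 0xffff);
	uint32_t b = (base & 0xffff0000) |
		     (((0x80 | type | (dpl << 5)) & 0xff) << 8);
	uint32_t w[2];

	memcpy(w, &g, sizeof(w));
	printf("bitfield:   %08x %08x\n", w[0], w[1]);
	printf("shift/mask: %08x %08x\n", a, b);
	return 0;
}
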
8400 diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8401 --- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8402 +++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8403 @@ -6,7 +6,7 @@ struct dev_archdata {
8404 void *acpi_handle;
8405 #endif
8406 #ifdef CONFIG_X86_64
8407 -struct dma_map_ops *dma_ops;
8408 + const struct dma_map_ops *dma_ops;
8409 #endif
8410 #ifdef CONFIG_DMAR
8411 void *iommu; /* hook for IOMMU specific extension */
8412 diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8413 --- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8414 +++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8415 @@ -25,9 +25,9 @@ extern int iommu_merge;
8416 extern struct device x86_dma_fallback_dev;
8417 extern int panic_on_overflow;
8418
8419 -extern struct dma_map_ops *dma_ops;
8420 +extern const struct dma_map_ops *dma_ops;
8421
8422 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8423 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8424 {
8425 #ifdef CONFIG_X86_32
8426 return dma_ops;
8427 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8428 /* Make sure we keep the same behaviour */
8429 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8430 {
8431 - struct dma_map_ops *ops = get_dma_ops(dev);
8432 + const struct dma_map_ops *ops = get_dma_ops(dev);
8433 if (ops->mapping_error)
8434 return ops->mapping_error(dev, dma_addr);
8435
8436 @@ -122,7 +122,7 @@ static inline void *
8437 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8438 gfp_t gfp)
8439 {
8440 - struct dma_map_ops *ops = get_dma_ops(dev);
8441 + const struct dma_map_ops *ops = get_dma_ops(dev);
8442 void *memory;
8443
8444 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8445 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8446 static inline void dma_free_coherent(struct device *dev, size_t size,
8447 void *vaddr, dma_addr_t bus)
8448 {
8449 - struct dma_map_ops *ops = get_dma_ops(dev);
8450 + const struct dma_map_ops *ops = get_dma_ops(dev);
8451
8452 WARN_ON(irqs_disabled()); /* for portability */
8453
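
device.h and dma-mapping.h (and iommu.h, kvm_host.h, microcode.h and pci_x86.h further down) all make the same constification move: tables of function pointers become const so they end up in read-only data and cannot be retargeted by a runtime memory write, and every pointer that hands them around is const-qualified to match. The shape of the pattern, with stand-in names rather than the kernel's:

/* const_ops_demo.c - dummy_map_ops / nommu_mapping_error are illustrative
 * stand-ins, not the kernel's definitions. */
#include <stdio.h>

struct dummy_map_ops {
	int (*mapping_error)(unsigned long dma_addr);
};

static int nommu_mapping_error(unsigned long dma_addr)
{
	return dma_addr == 0;			/* toy policy */
}

static const struct dummy_map_ops nommu_ops = {	/* lands in .rodata */
	.mapping_error = nommu_mapping_error,
};

static int mapping_error(const struct dummy_map_ops *ops, unsigned long dma_addr)
{
	return ops->mapping_error ? ops->mapping_error(dma_addr) : 0;
}

int main(void)
{
	printf("mapping_error(0) = %d\n", mapping_error(&nommu_ops, 0));
	return 0;
}
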
8454 diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8455 --- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8456 +++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8457 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8458 #define ISA_END_ADDRESS 0x100000
8459 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8460
8461 -#define BIOS_BEGIN 0x000a0000
8462 +#define BIOS_BEGIN 0x000c0000
8463 #define BIOS_END 0x00100000
8464
8465 #ifdef __KERNEL__
8466 diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8467 --- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8468 +++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8469 @@ -257,7 +257,25 @@ extern int force_personality32;
8470 the loader. We need to make sure that it is out of the way of the program
8471 that it will "exec", and that there is sufficient room for the brk. */
8472
8473 +#ifdef CONFIG_PAX_SEGMEXEC
8474 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8475 +#else
8476 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8477 +#endif
8478 +
8479 +#ifdef CONFIG_PAX_ASLR
8480 +#ifdef CONFIG_X86_32
8481 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8482 +
8483 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8484 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8485 +#else
8486 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8487 +
8488 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8489 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8490 +#endif
8491 +#endif
8492
8493 /* This yields a mask that user programs can use to figure out what
8494 instruction set this CPU supports. This could be done in user space,
8495 @@ -311,8 +329,7 @@ do { \
8496 #define ARCH_DLINFO \
8497 do { \
8498 if (vdso_enabled) \
8499 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8500 - (unsigned long)current->mm->context.vdso); \
8501 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8502 } while (0)
8503
8504 #define AT_SYSINFO 32
8505 @@ -323,7 +340,7 @@ do { \
8506
8507 #endif /* !CONFIG_X86_32 */
8508
8509 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8510 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8511
8512 #define VDSO_ENTRY \
8513 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8514 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8515 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8516 #define compat_arch_setup_additional_pages syscall32_setup_pages
8517
8518 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8519 -#define arch_randomize_brk arch_randomize_brk
8520 -
8521 #endif /* _ASM_X86_ELF_H */
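
The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values added to elf.h above are bit counts: how many page-granular bits of randomness PaX ASLR gives the mmap and stack bases for each personality (16 on plain i386, 15 under SEGMEXEC, 16 for 32-bit tasks on x86_64, and TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3, i.e. 32 with the usual TASK_SIZE_MAX_SHIFT of 47, for native 64-bit tasks). Assuming the usual PaX convention that the delta is "len" random bits shifted left by PAGE_SHIFT (the consuming code is not in these hunks), the randomisation ranges work out as:

/* aslr_range_demo.c - sketch under the assumption above */
#include <stdio.h>

#define PAGE_SHIFT 12UL

static unsigned long long range_bytes(unsigned int len)
{
	return (1ULL << len) << PAGE_SHIFT;	/* 2^len pages */
}

int main(void)
{
	printf("i386 mmap,   16 bits: %llu MiB\n", range_bytes(16) >> 20);
	printf("SEGMEXEC,    15 bits: %llu MiB\n", range_bytes(15) >> 20);
	printf("x86_64 mmap, 32 bits: %llu TiB\n", range_bytes(32) >> 40);
	return 0;
}
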
8522 diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8523 --- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8524 +++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8525 @@ -15,6 +15,6 @@ enum reboot_type {
8526
8527 extern enum reboot_type reboot_type;
8528
8529 -extern void machine_emergency_restart(void);
8530 +extern void machine_emergency_restart(void) __noreturn;
8531
8532 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8533 diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8534 --- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8535 +++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8536 @@ -12,16 +12,18 @@
8537 #include <asm/system.h>
8538
8539 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8540 + typecheck(u32 *, uaddr); \
8541 asm volatile("1:\t" insn "\n" \
8542 "2:\t.section .fixup,\"ax\"\n" \
8543 "3:\tmov\t%3, %1\n" \
8544 "\tjmp\t2b\n" \
8545 "\t.previous\n" \
8546 _ASM_EXTABLE(1b, 3b) \
8547 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8548 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8549 : "i" (-EFAULT), "0" (oparg), "1" (0))
8550
8551 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8552 + typecheck(u32 *, uaddr); \
8553 asm volatile("1:\tmovl %2, %0\n" \
8554 "\tmovl\t%0, %3\n" \
8555 "\t" insn "\n" \
8556 @@ -34,10 +36,10 @@
8557 _ASM_EXTABLE(1b, 4b) \
8558 _ASM_EXTABLE(2b, 4b) \
8559 : "=&a" (oldval), "=&r" (ret), \
8560 - "+m" (*uaddr), "=&r" (tem) \
8561 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8562 : "r" (oparg), "i" (-EFAULT), "1" (0))
8563
8564 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8565 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8566 {
8567 int op = (encoded_op >> 28) & 7;
8568 int cmp = (encoded_op >> 24) & 15;
8569 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8570
8571 switch (op) {
8572 case FUTEX_OP_SET:
8573 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8574 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8575 break;
8576 case FUTEX_OP_ADD:
8577 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8578 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8579 uaddr, oparg);
8580 break;
8581 case FUTEX_OP_OR:
8582 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8583 return ret;
8584 }
8585
8586 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8587 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8588 int newval)
8589 {
8590
8591 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8592 return -ENOSYS;
8593 #endif
8594
8595 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8596 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8597 return -EFAULT;
8598
8599 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8600 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8601 "2:\t.section .fixup, \"ax\"\n"
8602 "3:\tmov %2, %0\n"
8603 "\tjmp 2b\n"
8604 "\t.previous\n"
8605 _ASM_EXTABLE(1b, 3b)
8606 - : "=a" (oldval), "+m" (*uaddr)
8607 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8608 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8609 : "memory"
8610 );
8611 diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8612 --- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8613 +++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8614 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8615 extern void enable_IO_APIC(void);
8616
8617 /* Statistics */
8618 -extern atomic_t irq_err_count;
8619 -extern atomic_t irq_mis_count;
8620 +extern atomic_unchecked_t irq_err_count;
8621 +extern atomic_unchecked_t irq_mis_count;
8622
8623 /* EISA */
8624 extern void eisa_set_level_irq(unsigned int irq);
8625 diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8626 --- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8627 +++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8628 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8629 {
8630 int err;
8631
8632 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8633 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8634 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8635 +#endif
8636 +
8637 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8638 "2:\n"
8639 ".section .fixup,\"ax\"\n"
8640 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8641 {
8642 int err;
8643
8644 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8645 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8646 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8647 +#endif
8648 +
8649 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8650 "2:\n"
8651 ".section .fixup,\"ax\"\n"
8652 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8653 }
8654
8655 /* We need a safe address that is cheap to find and that is already
8656 - in L1 during context switch. The best choices are unfortunately
8657 - different for UP and SMP */
8658 -#ifdef CONFIG_SMP
8659 -#define safe_address (__per_cpu_offset[0])
8660 -#else
8661 -#define safe_address (kstat_cpu(0).cpustat.user)
8662 -#endif
8663 + in L1 during context switch. */
8664 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8665
8666 /*
8667 * These must be called with preempt disabled
8668 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8669 struct thread_info *me = current_thread_info();
8670 preempt_disable();
8671 if (me->status & TS_USEDFPU)
8672 - __save_init_fpu(me->task);
8673 + __save_init_fpu(current);
8674 else
8675 clts();
8676 }
8677 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8678 --- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8679 +++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8680 @@ -3,6 +3,7 @@
8681
8682 #include <linux/string.h>
8683 #include <linux/compiler.h>
8684 +#include <asm/processor.h>
8685
8686 /*
8687 * This file contains the definitions for the x86 IO instructions
8688 @@ -42,6 +43,17 @@
8689
8690 #ifdef __KERNEL__
8691
8692 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8693 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8694 +{
8695 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8696 +}
8697 +
8698 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8699 +{
8700 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8701 +}
8702 +
8703 #include <asm-generic/iomap.h>
8704
8705 #include <linux/vmalloc.h>
8706 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8707 --- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8708 +++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8709 @@ -140,6 +140,17 @@ __OUTS(l)
8710
8711 #include <linux/vmalloc.h>
8712
8713 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8714 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8715 +{
8716 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8717 +}
8718 +
8719 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8720 +{
8721 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8722 +}
8723 +
8724 #include <asm-generic/iomap.h>
8725
8726 void __memcpy_fromio(void *, unsigned long, unsigned);
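
The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added to io_32.h and io_64.h bound /dev/mem style accesses by the CPU's real physical address width: the end of the requested window, rounded up to a page, must stay below 2^(x86_phys_bits - PAGE_SHIFT) pages. The same arithmetic as a user-space check (phys_bits hard-coded here, the kernel reads boot_cpu_data.x86_phys_bits; an LP64 host is assumed so unsigned long is 64-bit):

/* phys_range_demo.c */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long addr, size_t count,
				 unsigned int phys_bits)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
	unsigned int phys_bits = 36;	/* e.g. a PAE-era CPU, 64 GiB */

	/* well inside the physical window */
	printf("%d\n", valid_phys_addr_range(0xffff0000UL, 0x1000, phys_bits));
	/* straddles the top of the physical address space: rejected */
	printf("%d\n", valid_phys_addr_range((1UL << 36) - PAGE_SIZE,
					     2 * PAGE_SIZE, phys_bits));
	return 0;
}
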
8727 diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8728 --- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8729 +++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8730 @@ -3,7 +3,7 @@
8731
8732 extern void pci_iommu_shutdown(void);
8733 extern void no_iommu_init(void);
8734 -extern struct dma_map_ops nommu_dma_ops;
8735 +extern const struct dma_map_ops nommu_dma_ops;
8736 extern int force_iommu, no_iommu;
8737 extern int iommu_detected;
8738 extern int iommu_pass_through;
8739 diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8740 --- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8741 +++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8742 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8743 sti; \
8744 sysexit
8745
8746 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8747 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8748 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8749 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8750 +
8751 #else
8752 #define INTERRUPT_RETURN iret
8753 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8754 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8755 --- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8756 +++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8757 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8758 #define BREAKPOINT_INSTRUCTION 0xcc
8759 #define RELATIVEJUMP_INSTRUCTION 0xe9
8760 #define MAX_INSN_SIZE 16
8761 -#define MAX_STACK_SIZE 64
8762 -#define MIN_STACK_SIZE(ADDR) \
8763 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8764 - THREAD_SIZE - (unsigned long)(ADDR))) \
8765 - ? (MAX_STACK_SIZE) \
8766 - : (((unsigned long)current_thread_info()) + \
8767 - THREAD_SIZE - (unsigned long)(ADDR)))
8768 +#define MAX_STACK_SIZE 64UL
8769 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8770
8771 #define flush_insn_slot(p) do { } while (0)
8772
8773 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8774 --- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8775 +++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8776 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8777 const struct trace_print_flags *exit_reasons_str;
8778 };
8779
8780 -extern struct kvm_x86_ops *kvm_x86_ops;
8781 +extern const struct kvm_x86_ops *kvm_x86_ops;
8782
8783 int kvm_mmu_module_init(void);
8784 void kvm_mmu_module_exit(void);
8785 diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8786 --- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8787 +++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8788 @@ -18,26 +18,58 @@ typedef struct {
8789
8790 static inline void local_inc(local_t *l)
8791 {
8792 - asm volatile(_ASM_INC "%0"
8793 + asm volatile(_ASM_INC "%0\n"
8794 +
8795 +#ifdef CONFIG_PAX_REFCOUNT
8796 + "jno 0f\n"
8797 + _ASM_DEC "%0\n"
8798 + "int $4\n0:\n"
8799 + _ASM_EXTABLE(0b, 0b)
8800 +#endif
8801 +
8802 : "+m" (l->a.counter));
8803 }
8804
8805 static inline void local_dec(local_t *l)
8806 {
8807 - asm volatile(_ASM_DEC "%0"
8808 + asm volatile(_ASM_DEC "%0\n"
8809 +
8810 +#ifdef CONFIG_PAX_REFCOUNT
8811 + "jno 0f\n"
8812 + _ASM_INC "%0\n"
8813 + "int $4\n0:\n"
8814 + _ASM_EXTABLE(0b, 0b)
8815 +#endif
8816 +
8817 : "+m" (l->a.counter));
8818 }
8819
8820 static inline void local_add(long i, local_t *l)
8821 {
8822 - asm volatile(_ASM_ADD "%1,%0"
8823 + asm volatile(_ASM_ADD "%1,%0\n"
8824 +
8825 +#ifdef CONFIG_PAX_REFCOUNT
8826 + "jno 0f\n"
8827 + _ASM_SUB "%1,%0\n"
8828 + "int $4\n0:\n"
8829 + _ASM_EXTABLE(0b, 0b)
8830 +#endif
8831 +
8832 : "+m" (l->a.counter)
8833 : "ir" (i));
8834 }
8835
8836 static inline void local_sub(long i, local_t *l)
8837 {
8838 - asm volatile(_ASM_SUB "%1,%0"
8839 + asm volatile(_ASM_SUB "%1,%0\n"
8840 +
8841 +#ifdef CONFIG_PAX_REFCOUNT
8842 + "jno 0f\n"
8843 + _ASM_ADD "%1,%0\n"
8844 + "int $4\n0:\n"
8845 + _ASM_EXTABLE(0b, 0b)
8846 +#endif
8847 +
8848 : "+m" (l->a.counter)
8849 : "ir" (i));
8850 }
8851 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8852 {
8853 unsigned char c;
8854
8855 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8856 + asm volatile(_ASM_SUB "%2,%0\n"
8857 +
8858 +#ifdef CONFIG_PAX_REFCOUNT
8859 + "jno 0f\n"
8860 + _ASM_ADD "%2,%0\n"
8861 + "int $4\n0:\n"
8862 + _ASM_EXTABLE(0b, 0b)
8863 +#endif
8864 +
8865 + "sete %1\n"
8866 : "+m" (l->a.counter), "=qm" (c)
8867 : "ir" (i) : "memory");
8868 return c;
8869 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8870 {
8871 unsigned char c;
8872
8873 - asm volatile(_ASM_DEC "%0; sete %1"
8874 + asm volatile(_ASM_DEC "%0\n"
8875 +
8876 +#ifdef CONFIG_PAX_REFCOUNT
8877 + "jno 0f\n"
8878 + _ASM_INC "%0\n"
8879 + "int $4\n0:\n"
8880 + _ASM_EXTABLE(0b, 0b)
8881 +#endif
8882 +
8883 + "sete %1\n"
8884 : "+m" (l->a.counter), "=qm" (c)
8885 : : "memory");
8886 return c != 0;
8887 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8888 {
8889 unsigned char c;
8890
8891 - asm volatile(_ASM_INC "%0; sete %1"
8892 + asm volatile(_ASM_INC "%0\n"
8893 +
8894 +#ifdef CONFIG_PAX_REFCOUNT
8895 + "jno 0f\n"
8896 + _ASM_DEC "%0\n"
8897 + "int $4\n0:\n"
8898 + _ASM_EXTABLE(0b, 0b)
8899 +#endif
8900 +
8901 + "sete %1\n"
8902 : "+m" (l->a.counter), "=qm" (c)
8903 : : "memory");
8904 return c != 0;
8905 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8906 {
8907 unsigned char c;
8908
8909 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8910 + asm volatile(_ASM_ADD "%2,%0\n"
8911 +
8912 +#ifdef CONFIG_PAX_REFCOUNT
8913 + "jno 0f\n"
8914 + _ASM_SUB "%2,%0\n"
8915 + "int $4\n0:\n"
8916 + _ASM_EXTABLE(0b, 0b)
8917 +#endif
8918 +
8919 + "sets %1\n"
8920 : "+m" (l->a.counter), "=qm" (c)
8921 : "ir" (i) : "memory");
8922 return c;
8923 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8924 #endif
8925 /* Modern 486+ processor */
8926 __i = i;
8927 - asm volatile(_ASM_XADD "%0, %1;"
8928 + asm volatile(_ASM_XADD "%0, %1\n"
8929 +
8930 +#ifdef CONFIG_PAX_REFCOUNT
8931 + "jno 0f\n"
8932 + _ASM_MOV "%0,%1\n"
8933 + "int $4\n0:\n"
8934 + _ASM_EXTABLE(0b, 0b)
8935 +#endif
8936 +
8937 : "+r" (i), "+m" (l->a.counter)
8938 : : "memory");
8939 return i + __i;
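
One detail worth spelling out in the *_add_return and local_add_return hunks: xadd writes the old value of the memory operand back into its source register, so after the instruction "i" holds the previous counter while "__i" still holds the increment, and "i + __i" is the post-add value without a second memory read. The REFCOUNT recovery path relies on the same property: the "mov %0, %1" executed on overflow writes that old value straight back to memory, undoing the add. A runnable x86-64 sketch of the unchecked path:

/* xadd_return_demo.c - x86-64 only (xaddq) */
#include <stdio.h>

static long add_return(long i, long *counter)
{
	long __i = i;

	asm volatile("lock xaddq %0, %1"
		     : "+r" (i), "+m" (*counter)
		     : : "memory");
	return i + __i;		/* old value + delta = new value */
}

int main(void)
{
	long c = 40;
	long ret = add_return(2, &c);

	printf("returned %ld, counter now %ld\n", ret, c);	/* 42, 42 */
	return 0;
}
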
8940 diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
8941 --- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8942 +++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8943 @@ -12,13 +12,13 @@ struct device;
8944 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8945
8946 struct microcode_ops {
8947 - enum ucode_state (*request_microcode_user) (int cpu,
8948 + enum ucode_state (* const request_microcode_user) (int cpu,
8949 const void __user *buf, size_t size);
8950
8951 - enum ucode_state (*request_microcode_fw) (int cpu,
8952 + enum ucode_state (* const request_microcode_fw) (int cpu,
8953 struct device *device);
8954
8955 - void (*microcode_fini_cpu) (int cpu);
8956 + void (* const microcode_fini_cpu) (int cpu);
8957
8958 /*
8959 * The generic 'microcode_core' part guarantees that
8960 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8961 extern struct ucode_cpu_info ucode_cpu_info[];
8962
8963 #ifdef CONFIG_MICROCODE_INTEL
8964 -extern struct microcode_ops * __init init_intel_microcode(void);
8965 +extern const struct microcode_ops * __init init_intel_microcode(void);
8966 #else
8967 -static inline struct microcode_ops * __init init_intel_microcode(void)
8968 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8969 {
8970 return NULL;
8971 }
8972 #endif /* CONFIG_MICROCODE_INTEL */
8973
8974 #ifdef CONFIG_MICROCODE_AMD
8975 -extern struct microcode_ops * __init init_amd_microcode(void);
8976 +extern const struct microcode_ops * __init init_amd_microcode(void);
8977 #else
8978 -static inline struct microcode_ops * __init init_amd_microcode(void)
8979 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8980 {
8981 return NULL;
8982 }
8983 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
8984 --- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8985 +++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8986 @@ -5,4 +5,14 @@
8987
8988 #include <asm-generic/mman.h>
8989
8990 +#ifdef __KERNEL__
8991 +#ifndef __ASSEMBLY__
8992 +#ifdef CONFIG_X86_32
8993 +#define arch_mmap_check i386_mmap_check
8994 +int i386_mmap_check(unsigned long addr, unsigned long len,
8995 + unsigned long flags);
8996 +#endif
8997 +#endif
8998 +#endif
8999 +
9000 #endif /* _ASM_X86_MMAN_H */
9001 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9002 --- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9003 +++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-17 19:46:53.000000000 -0400
9004 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
9005
9006 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9007 {
9008 +
9009 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9010 + unsigned int i;
9011 + pgd_t *pgd;
9012 +
9013 + pax_open_kernel();
9014 + pgd = get_cpu_pgd(smp_processor_id());
9015 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9016 + if (paravirt_enabled())
9017 + set_pgd(pgd+i, native_make_pgd(0));
9018 + else
9019 + pgd[i] = native_make_pgd(0);
9020 + pax_close_kernel();
9021 +#endif
9022 +
9023 #ifdef CONFIG_SMP
9024 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9025 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9026 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
9027 struct task_struct *tsk)
9028 {
9029 unsigned cpu = smp_processor_id();
9030 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9031 + int tlbstate = TLBSTATE_OK;
9032 +#endif
9033
9034 if (likely(prev != next)) {
9035 #ifdef CONFIG_SMP
9036 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9037 + tlbstate = percpu_read(cpu_tlbstate.state);
9038 +#endif
9039 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9040 percpu_write(cpu_tlbstate.active_mm, next);
9041 #endif
9042 cpumask_set_cpu(cpu, mm_cpumask(next));
9043
9044 /* Re-load page tables */
9045 +#ifdef CONFIG_PAX_PER_CPU_PGD
9046 + pax_open_kernel();
9047 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9048 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9049 + pax_close_kernel();
9050 + load_cr3(get_cpu_pgd(cpu));
9051 +#else
9052 load_cr3(next->pgd);
9053 +#endif
9054
9055 /* stop flush ipis for the previous mm */
9056 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9057 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9058 */
9059 if (unlikely(prev->context.ldt != next->context.ldt))
9060 load_LDT_nolock(&next->context);
9061 - }
9062 +
9063 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9064 + if (!nx_enabled) {
9065 + smp_mb__before_clear_bit();
9066 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9067 + smp_mb__after_clear_bit();
9068 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9069 + }
9070 +#endif
9071 +
9072 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9073 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9074 + prev->context.user_cs_limit != next->context.user_cs_limit))
9075 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9076 #ifdef CONFIG_SMP
9077 + else if (unlikely(tlbstate != TLBSTATE_OK))
9078 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9079 +#endif
9080 +#endif
9081 +
9082 + }
9083 else {
9084 +
9085 +#ifdef CONFIG_PAX_PER_CPU_PGD
9086 + pax_open_kernel();
9087 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9088 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9089 + pax_close_kernel();
9090 + load_cr3(get_cpu_pgd(cpu));
9091 +#endif
9092 +
9093 +#ifdef CONFIG_SMP
9094 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9095 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9096
9097 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9098 * tlb flush IPI delivery. We must reload CR3
9099 * to make sure to use no freed page tables.
9100 */
9101 +
9102 +#ifndef CONFIG_PAX_PER_CPU_PGD
9103 load_cr3(next->pgd);
9104 +#endif
9105 +
9106 load_LDT_nolock(&next->context);
9107 +
9108 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9109 + if (!nx_enabled)
9110 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9111 +#endif
9112 +
9113 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9114 +#ifdef CONFIG_PAX_PAGEEXEC
9115 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9116 +#endif
9117 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9118 +#endif
9119 +
9120 }
9121 - }
9122 #endif
9123 + }
9124 }
9125
9126 #define activate_mm(prev, next) \
9127 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9128 --- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9129 +++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9130 @@ -9,10 +9,23 @@
9131 * we put the segment information here.
9132 */
9133 typedef struct {
9134 - void *ldt;
9135 + struct desc_struct *ldt;
9136 int size;
9137 struct mutex lock;
9138 - void *vdso;
9139 + unsigned long vdso;
9140 +
9141 +#ifdef CONFIG_X86_32
9142 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9143 + unsigned long user_cs_base;
9144 + unsigned long user_cs_limit;
9145 +
9146 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9147 + cpumask_t cpu_user_cs_mask;
9148 +#endif
9149 +
9150 +#endif
9151 +#endif
9152 +
9153 } mm_context_t;
9154
9155 #ifdef CONFIG_SMP
9156 diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9157 --- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9158 +++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9159 @@ -5,6 +5,7 @@
9160
9161 #ifdef CONFIG_X86_64
9162 /* X86_64 does not define MODULE_PROC_FAMILY */
9163 +#define MODULE_PROC_FAMILY ""
9164 #elif defined CONFIG_M386
9165 #define MODULE_PROC_FAMILY "386 "
9166 #elif defined CONFIG_M486
9167 @@ -59,13 +60,36 @@
9168 #error unknown processor family
9169 #endif
9170
9171 -#ifdef CONFIG_X86_32
9172 -# ifdef CONFIG_4KSTACKS
9173 -# define MODULE_STACKSIZE "4KSTACKS "
9174 -# else
9175 -# define MODULE_STACKSIZE ""
9176 -# endif
9177 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9178 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9179 +#define MODULE_PAX_UDEREF "UDEREF "
9180 +#else
9181 +#define MODULE_PAX_UDEREF ""
9182 +#endif
9183 +
9184 +#ifdef CONFIG_PAX_KERNEXEC
9185 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9186 +#else
9187 +#define MODULE_PAX_KERNEXEC ""
9188 +#endif
9189 +
9190 +#ifdef CONFIG_PAX_REFCOUNT
9191 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9192 +#else
9193 +#define MODULE_PAX_REFCOUNT ""
9194 #endif
9195
9196 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9197 +#define MODULE_STACKSIZE "4KSTACKS "
9198 +#else
9199 +#define MODULE_STACKSIZE ""
9200 +#endif
9201 +
9202 +#ifdef CONFIG_GRKERNSEC
9203 +#define MODULE_GRSEC "GRSECURITY "
9204 +#else
9205 +#define MODULE_GRSEC ""
9206 +#endif
9207 +
9208 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9209 +
9210 #endif /* _ASM_X86_MODULE_H */
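
The module.h rework folds the enabled hardening features into MODULE_ARCH_VERMAGIC by plain adjacent-string-literal concatenation, so a module built without GRKERNSEC, KERNEXEC, UDEREF or REFCOUNT carries a different vermagic string and is rejected by the module loader of a kernel that has them. The mechanism in isolation, with the feature macros forced on:

/* vermagic_demo.c */
#include <stdio.h>

#define MODULE_PROC_FAMILY  ""
#define MODULE_STACKSIZE    ""
#define MODULE_GRSEC        "GRSECURITY "
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
	MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	printf("arch vermagic: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}
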
9211 diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9212 --- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9213 +++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9214 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9215
9216 /* duplicated to the one in bootmem.h */
9217 extern unsigned long max_pfn;
9218 -extern unsigned long phys_base;
9219 +extern const unsigned long phys_base;
9220
9221 extern unsigned long __phys_addr(unsigned long);
9222 #define __phys_reloc_hide(x) (x)
9223 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9224 --- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9225 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9226 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9227 pv_mmu_ops.set_fixmap(idx, phys, flags);
9228 }
9229
9230 +#ifdef CONFIG_PAX_KERNEXEC
9231 +static inline unsigned long pax_open_kernel(void)
9232 +{
9233 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9234 +}
9235 +
9236 +static inline unsigned long pax_close_kernel(void)
9237 +{
9238 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9239 +}
9240 +#else
9241 +static inline unsigned long pax_open_kernel(void) { return 0; }
9242 +static inline unsigned long pax_close_kernel(void) { return 0; }
9243 +#endif
9244 +
9245 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9246
9247 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9248 @@ -945,7 +960,7 @@ extern void default_banner(void);
9249
9250 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9251 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9252 -#define PARA_INDIRECT(addr) *%cs:addr
9253 +#define PARA_INDIRECT(addr) *%ss:addr
9254 #endif
9255
9256 #define INTERRUPT_RETURN \
9257 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9258 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9259 CLBR_NONE, \
9260 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9261 +
9262 +#define GET_CR0_INTO_RDI \
9263 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9264 + mov %rax,%rdi
9265 +
9266 +#define SET_RDI_INTO_CR0 \
9267 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9268 +
9269 +#define GET_CR3_INTO_RDI \
9270 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9271 + mov %rax,%rdi
9272 +
9273 +#define SET_RDI_INTO_CR3 \
9274 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9275 +
9276 #endif /* CONFIG_X86_32 */
9277
9278 #endif /* __ASSEMBLY__ */
9279 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9280 --- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9281 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9282 @@ -78,19 +78,19 @@ struct pv_init_ops {
9283 */
9284 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9285 unsigned long addr, unsigned len);
9286 -};
9287 +} __no_const;
9288
9289
9290 struct pv_lazy_ops {
9291 /* Set deferred update mode, used for batching operations. */
9292 void (*enter)(void);
9293 void (*leave)(void);
9294 -};
9295 +} __no_const;
9296
9297 struct pv_time_ops {
9298 unsigned long long (*sched_clock)(void);
9299 unsigned long (*get_tsc_khz)(void);
9300 -};
9301 +} __no_const;
9302
9303 struct pv_cpu_ops {
9304 /* hooks for various privileged instructions */
9305 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
9306
9307 void (*start_context_switch)(struct task_struct *prev);
9308 void (*end_context_switch)(struct task_struct *next);
9309 -};
9310 +} __no_const;
9311
9312 struct pv_irq_ops {
9313 /*
9314 @@ -217,7 +217,7 @@ struct pv_apic_ops {
9315 unsigned long start_eip,
9316 unsigned long start_esp);
9317 #endif
9318 -};
9319 +} __no_const;
9320
9321 struct pv_mmu_ops {
9322 unsigned long (*read_cr2)(void);
9323 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9324 an mfn. We can tell which is which from the index. */
9325 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9326 phys_addr_t phys, pgprot_t flags);
9327 +
9328 +#ifdef CONFIG_PAX_KERNEXEC
9329 + unsigned long (*pax_open_kernel)(void);
9330 + unsigned long (*pax_close_kernel)(void);
9331 +#endif
9332 +
9333 };
9334
9335 struct raw_spinlock;
9336 @@ -326,7 +332,7 @@ struct pv_lock_ops {
9337 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9338 int (*spin_trylock)(struct raw_spinlock *lock);
9339 void (*spin_unlock)(struct raw_spinlock *lock);
9340 -};
9341 +} __no_const;
9342
9343 /* This contains all the paravirt structures: we get a convenient
9344 * number for each function using the offset which we use to indicate
9345 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9346 --- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9347 +++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9348 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9349 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9350
9351 struct pci_raw_ops {
9352 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9353 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9354 int reg, int len, u32 *val);
9355 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9356 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9357 int reg, int len, u32 val);
9358 };
9359
9360 -extern struct pci_raw_ops *raw_pci_ops;
9361 -extern struct pci_raw_ops *raw_pci_ext_ops;
9362 +extern const struct pci_raw_ops *raw_pci_ops;
9363 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9364
9365 -extern struct pci_raw_ops pci_direct_conf1;
9366 +extern const struct pci_raw_ops pci_direct_conf1;
9367 extern bool port_cf9_safe;
9368
9369 /* arch_initcall level */
9370 diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9371 --- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9372 +++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9373 @@ -78,6 +78,7 @@ do { \
9374 if (0) { \
9375 T__ tmp__; \
9376 tmp__ = (val); \
9377 + (void)tmp__; \
9378 } \
9379 switch (sizeof(var)) { \
9380 case 1: \
9381 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9382 --- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9383 +++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9384 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9385 pmd_t *pmd, pte_t *pte)
9386 {
9387 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9388 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9389 +}
9390 +
9391 +static inline void pmd_populate_user(struct mm_struct *mm,
9392 + pmd_t *pmd, pte_t *pte)
9393 +{
9394 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9395 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9396 }
9397
9398 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9399 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9400 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9401 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9402
9403 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9404 {
9405 + pax_open_kernel();
9406 *pmdp = pmd;
9407 + pax_close_kernel();
9408 }
9409
9410 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9411 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9412 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9413 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9414 @@ -26,9 +26,6 @@
9415 struct mm_struct;
9416 struct vm_area_struct;
9417
9418 -extern pgd_t swapper_pg_dir[1024];
9419 -extern pgd_t trampoline_pg_dir[1024];
9420 -
9421 static inline void pgtable_cache_init(void) { }
9422 static inline void check_pgt_cache(void) { }
9423 void paging_init(void);
9424 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9425 # include <asm/pgtable-2level.h>
9426 #endif
9427
9428 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9429 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9430 +#ifdef CONFIG_X86_PAE
9431 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9432 +#endif
9433 +
9434 #if defined(CONFIG_HIGHPTE)
9435 #define __KM_PTE \
9436 (in_nmi() ? KM_NMI_PTE : \
9437 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9438 /* Clear a kernel PTE and flush it from the TLB */
9439 #define kpte_clear_flush(ptep, vaddr) \
9440 do { \
9441 + pax_open_kernel(); \
9442 pte_clear(&init_mm, (vaddr), (ptep)); \
9443 + pax_close_kernel(); \
9444 __flush_tlb_one((vaddr)); \
9445 } while (0)
9446
9447 @@ -85,6 +90,9 @@ do { \
9448
9449 #endif /* !__ASSEMBLY__ */
9450
9451 +#define HAVE_ARCH_UNMAPPED_AREA
9452 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9453 +
9454 /*
9455 * kern_addr_valid() is (1) for FLATMEM and (0) for
9456 * SPARSEMEM and DISCONTIGMEM
9457 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9458 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9459 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9460 @@ -8,7 +8,7 @@
9461 */
9462 #ifdef CONFIG_X86_PAE
9463 # include <asm/pgtable-3level_types.h>
9464 -# define PMD_SIZE (1UL << PMD_SHIFT)
9465 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9466 # define PMD_MASK (~(PMD_SIZE - 1))
9467 #else
9468 # include <asm/pgtable-2level_types.h>
9469 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9470 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9471 #endif
9472
9473 +#ifdef CONFIG_PAX_KERNEXEC
9474 +#ifndef __ASSEMBLY__
9475 +extern unsigned char MODULES_EXEC_VADDR[];
9476 +extern unsigned char MODULES_EXEC_END[];
9477 +#endif
9478 +#include <asm/boot.h>
9479 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9480 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9481 +#else
9482 +#define ktla_ktva(addr) (addr)
9483 +#define ktva_ktla(addr) (addr)
9484 +#endif
9485 +
9486 #define MODULES_VADDR VMALLOC_START
9487 #define MODULES_END VMALLOC_END
9488 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9489 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9490 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9491 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9492 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9493
9494 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9495 {
9496 + pax_open_kernel();
9497 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9498 + pax_close_kernel();
9499 }
9500
9501 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9502 {
9503 + pax_open_kernel();
9504 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9505 + pax_close_kernel();
9506 }
9507
9508 /*
9509 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9510 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9511 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9512 @@ -16,10 +16,13 @@
9513
9514 extern pud_t level3_kernel_pgt[512];
9515 extern pud_t level3_ident_pgt[512];
9516 +extern pud_t level3_vmalloc_pgt[512];
9517 +extern pud_t level3_vmemmap_pgt[512];
9518 +extern pud_t level2_vmemmap_pgt[512];
9519 extern pmd_t level2_kernel_pgt[512];
9520 extern pmd_t level2_fixmap_pgt[512];
9521 -extern pmd_t level2_ident_pgt[512];
9522 -extern pgd_t init_level4_pgt[];
9523 +extern pmd_t level2_ident_pgt[512*2];
9524 +extern pgd_t init_level4_pgt[512];
9525
9526 #define swapper_pg_dir init_level4_pgt
9527
9528 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9529
9530 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9531 {
9532 + pax_open_kernel();
9533 *pmdp = pmd;
9534 + pax_close_kernel();
9535 }
9536
9537 static inline void native_pmd_clear(pmd_t *pmd)
9538 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9539
9540 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9541 {
9542 + pax_open_kernel();
9543 *pgdp = pgd;
9544 + pax_close_kernel();
9545 }
9546
9547 static inline void native_pgd_clear(pgd_t *pgd)
9548 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9549 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9550 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9551 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9552 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9553 #define MODULES_END _AC(0xffffffffff000000, UL)
9554 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9555 +#define MODULES_EXEC_VADDR MODULES_VADDR
9556 +#define MODULES_EXEC_END MODULES_END
9557 +
9558 +#define ktla_ktva(addr) (addr)
9559 +#define ktva_ktla(addr) (addr)
9560
9561 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9562 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9563 --- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9564 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9565 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9566
9567 #define arch_end_context_switch(prev) do {} while(0)
9568
9569 +#define pax_open_kernel() native_pax_open_kernel()
9570 +#define pax_close_kernel() native_pax_close_kernel()
9571 #endif /* CONFIG_PARAVIRT */
9572
9573 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9574 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9575 +
9576 +#ifdef CONFIG_PAX_KERNEXEC
9577 +static inline unsigned long native_pax_open_kernel(void)
9578 +{
9579 + unsigned long cr0;
9580 +
9581 + preempt_disable();
9582 + barrier();
9583 + cr0 = read_cr0() ^ X86_CR0_WP;
9584 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9585 + write_cr0(cr0);
9586 + return cr0 ^ X86_CR0_WP;
9587 +}
9588 +
9589 +static inline unsigned long native_pax_close_kernel(void)
9590 +{
9591 + unsigned long cr0;
9592 +
9593 + cr0 = read_cr0() ^ X86_CR0_WP;
9594 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9595 + write_cr0(cr0);
9596 + barrier();
9597 + preempt_enable_no_resched();
9598 + return cr0 ^ X86_CR0_WP;
9599 +}
9600 +#else
9601 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9602 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9603 +#endif
9604 +
9605 /*
9606 * The following only work if pte_present() is true.
9607 * Undefined behaviour if not..
9608 */
9609 +static inline int pte_user(pte_t pte)
9610 +{
9611 + return pte_val(pte) & _PAGE_USER;
9612 +}
9613 +
9614 static inline int pte_dirty(pte_t pte)
9615 {
9616 return pte_flags(pte) & _PAGE_DIRTY;
9617 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9618 return pte_clear_flags(pte, _PAGE_RW);
9619 }
9620
9621 +static inline pte_t pte_mkread(pte_t pte)
9622 +{
9623 + return __pte(pte_val(pte) | _PAGE_USER);
9624 +}
9625 +
9626 static inline pte_t pte_mkexec(pte_t pte)
9627 {
9628 - return pte_clear_flags(pte, _PAGE_NX);
9629 +#ifdef CONFIG_X86_PAE
9630 + if (__supported_pte_mask & _PAGE_NX)
9631 + return pte_clear_flags(pte, _PAGE_NX);
9632 + else
9633 +#endif
9634 + return pte_set_flags(pte, _PAGE_USER);
9635 +}
9636 +
9637 +static inline pte_t pte_exprotect(pte_t pte)
9638 +{
9639 +#ifdef CONFIG_X86_PAE
9640 + if (__supported_pte_mask & _PAGE_NX)
9641 + return pte_set_flags(pte, _PAGE_NX);
9642 + else
9643 +#endif
9644 + return pte_clear_flags(pte, _PAGE_USER);
9645 }
9646
9647 static inline pte_t pte_mkdirty(pte_t pte)
9648 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9649 #endif
9650
9651 #ifndef __ASSEMBLY__
9652 +
9653 +#ifdef CONFIG_PAX_PER_CPU_PGD
9654 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9655 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9656 +{
9657 + return cpu_pgd[cpu];
9658 +}
9659 +#endif
9660 +
9661 #include <linux/mm_types.h>
9662
9663 static inline int pte_none(pte_t pte)
9664 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9665
9666 static inline int pgd_bad(pgd_t pgd)
9667 {
9668 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9669 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9670 }
9671
9672 static inline int pgd_none(pgd_t pgd)
9673 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9674 * pgd_offset() returns a (pgd_t *)
9675 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9676 */
9677 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9678 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9679 +
9680 +#ifdef CONFIG_PAX_PER_CPU_PGD
9681 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9682 +#endif
9683 +
9684 /*
9685 * a shortcut which implies the use of the kernel's pgd, instead
9686 * of a process's
9687 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9688 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9689 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9690
9691 +#ifdef CONFIG_X86_32
9692 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9693 +#else
9694 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9695 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9696 +
9697 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9698 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9699 +#else
9700 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9701 +#endif
9702 +
9703 +#endif
9704 +
9705 #ifndef __ASSEMBLY__
9706
9707 extern int direct_gbpages;
9708 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9709 * dst and src can be on the same page, but the range must not overlap,
9710 * and must not cross a page boundary.
9711 */
9712 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9713 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9714 {
9715 - memcpy(dst, src, count * sizeof(pgd_t));
9716 + pax_open_kernel();
9717 + while (count--)
9718 + *dst++ = *src++;
9719 + pax_close_kernel();
9720 }
9721
9722 +#ifdef CONFIG_PAX_PER_CPU_PGD
9723 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9724 +#endif
9725 +
9726 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9727 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9728 +#else
9729 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9730 +#endif
9731
9732 #include <asm-generic/pgtable.h>
9733 #endif /* __ASSEMBLY__ */
9734 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9735 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9736 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9737 @@ -16,12 +16,11 @@
9738 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9739 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9740 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9741 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9742 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9743 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9744 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9745 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9746 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9747 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9748 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9749 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9750
9751 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9752 @@ -39,7 +38,6 @@
9753 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9754 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9755 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9756 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9757 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9758 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9759 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9760 @@ -55,8 +53,10 @@
9761
9762 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9763 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9764 -#else
9765 +#elif defined(CONFIG_KMEMCHECK)
9766 #define _PAGE_NX (_AT(pteval_t, 0))
9767 +#else
9768 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9769 #endif
9770
9771 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9772 @@ -93,6 +93,9 @@
9773 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9774 _PAGE_ACCESSED)
9775
9776 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9777 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9778 +
9779 #define __PAGE_KERNEL_EXEC \
9780 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9781 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9782 @@ -103,8 +106,8 @@
9783 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9784 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9785 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9786 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9787 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9788 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9789 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9790 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9791 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9792 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9793 @@ -163,8 +166,8 @@
9794 * bits are combined, this will alow user to access the high address mapped
9795 * VDSO in the presence of CONFIG_COMPAT_VDSO
9796 */
9797 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9798 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9799 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9800 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9801 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9802 #endif
9803
9804 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9805 {
9806 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9807 }
9808 +#endif
9809
9810 +#if PAGETABLE_LEVELS == 3
9811 +#include <asm-generic/pgtable-nopud.h>
9812 +#endif
9813 +
9814 +#if PAGETABLE_LEVELS == 2
9815 +#include <asm-generic/pgtable-nopmd.h>
9816 +#endif
9817 +
9818 +#ifndef __ASSEMBLY__
9819 #if PAGETABLE_LEVELS > 3
9820 typedef struct { pudval_t pud; } pud_t;
9821
9822 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9823 return pud.pud;
9824 }
9825 #else
9826 -#include <asm-generic/pgtable-nopud.h>
9827 -
9828 static inline pudval_t native_pud_val(pud_t pud)
9829 {
9830 return native_pgd_val(pud.pgd);
9831 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9832 return pmd.pmd;
9833 }
9834 #else
9835 -#include <asm-generic/pgtable-nopmd.h>
9836 -
9837 static inline pmdval_t native_pmd_val(pmd_t pmd)
9838 {
9839 return native_pgd_val(pmd.pud.pgd);
9840 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9841
9842 extern pteval_t __supported_pte_mask;
9843 extern void set_nx(void);
9844 +
9845 +#ifdef CONFIG_X86_32
9846 +#ifdef CONFIG_X86_PAE
9847 extern int nx_enabled;
9848 +#else
9849 +#define nx_enabled (0)
9850 +#endif
9851 +#else
9852 +#define nx_enabled (1)
9853 +#endif
9854
9855 #define pgprot_writecombine pgprot_writecombine
9856 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9857 diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
9858 --- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9859 +++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9860 @@ -272,7 +272,7 @@ struct tss_struct {
9861
9862 } ____cacheline_aligned;
9863
9864 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9865 +extern struct tss_struct init_tss[NR_CPUS];
9866
9867 /*
9868 * Save the original ist values for checking stack pointers during debugging
9869 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9870 */
9871 #define TASK_SIZE PAGE_OFFSET
9872 #define TASK_SIZE_MAX TASK_SIZE
9873 +
9874 +#ifdef CONFIG_PAX_SEGMEXEC
9875 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9876 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9877 +#else
9878 #define STACK_TOP TASK_SIZE
9879 -#define STACK_TOP_MAX STACK_TOP
9880 +#endif
9881 +
9882 +#define STACK_TOP_MAX TASK_SIZE
9883
9884 #define INIT_THREAD { \
9885 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9886 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9887 .vm86_info = NULL, \
9888 .sysenter_cs = __KERNEL_CS, \
9889 .io_bitmap_ptr = NULL, \
9890 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9891 */
9892 #define INIT_TSS { \
9893 .x86_tss = { \
9894 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9895 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9896 .ss0 = __KERNEL_DS, \
9897 .ss1 = __KERNEL_CS, \
9898 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9899 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9900 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9901
9902 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9903 -#define KSTK_TOP(info) \
9904 -({ \
9905 - unsigned long *__ptr = (unsigned long *)(info); \
9906 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9907 -})
9908 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9909
9910 /*
9911 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9912 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9913 #define task_pt_regs(task) \
9914 ({ \
9915 struct pt_regs *__regs__; \
9916 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9917 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9918 __regs__ - 1; \
9919 })
9920
9921 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9922 /*
9923 * User space process size. 47bits minus one guard page.
9924 */
9925 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9926 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9927
9928 /* This decides where the kernel will search for a free chunk of vm
9929 * space during mmap's.
9930 */
9931 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9932 - 0xc0000000 : 0xFFFFe000)
9933 + 0xc0000000 : 0xFFFFf000)
9934
9935 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9936 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9937 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9938 #define STACK_TOP_MAX TASK_SIZE_MAX
9939
9940 #define INIT_THREAD { \
9941 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9942 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9943 }
9944
9945 #define INIT_TSS { \
9946 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9947 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9948 }
9949
9950 /*
9951 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9952 */
9953 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9954
9955 +#ifdef CONFIG_PAX_SEGMEXEC
9956 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9957 +#endif
9958 +
9959 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9960
9961 /* Get/set a process' ability to use the timestamp counter instruction */
9962 diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
9963 --- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9964 +++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9965 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9966 }
9967
9968 /*
9969 - * user_mode_vm(regs) determines whether a register set came from user mode.
9970 + * user_mode(regs) determines whether a register set came from user mode.
9971 * This is true if V8086 mode was enabled OR if the register set was from
9972 * protected mode with RPL-3 CS value. This tricky test checks that with
9973 * one comparison. Many places in the kernel can bypass this full check
9974 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9975 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9976 + * be used.
9977 */
9978 -static inline int user_mode(struct pt_regs *regs)
9979 +static inline int user_mode_novm(struct pt_regs *regs)
9980 {
9981 #ifdef CONFIG_X86_32
9982 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9983 #else
9984 - return !!(regs->cs & 3);
9985 + return !!(regs->cs & SEGMENT_RPL_MASK);
9986 #endif
9987 }
9988
9989 -static inline int user_mode_vm(struct pt_regs *regs)
9990 +static inline int user_mode(struct pt_regs *regs)
9991 {
9992 #ifdef CONFIG_X86_32
9993 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9994 USER_RPL;
9995 #else
9996 - return user_mode(regs);
9997 + return user_mode_novm(regs);
9998 #endif
9999 }
10000
10001 diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10002 --- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10003 +++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10004 @@ -6,19 +6,19 @@
10005 struct pt_regs;
10006
10007 struct machine_ops {
10008 - void (*restart)(char *cmd);
10009 - void (*halt)(void);
10010 - void (*power_off)(void);
10011 + void (* __noreturn restart)(char *cmd);
10012 + void (* __noreturn halt)(void);
10013 + void (* __noreturn power_off)(void);
10014 void (*shutdown)(void);
10015 void (*crash_shutdown)(struct pt_regs *);
10016 - void (*emergency_restart)(void);
10017 -};
10018 + void (* __noreturn emergency_restart)(void);
10019 +} __no_const;
10020
10021 extern struct machine_ops machine_ops;
10022
10023 void native_machine_crash_shutdown(struct pt_regs *regs);
10024 void native_machine_shutdown(void);
10025 -void machine_real_restart(const unsigned char *code, int length);
10026 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10027
10028 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10029 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10030 diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10031 --- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10032 +++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10033 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10034 {
10035 asm volatile("# beginning down_read\n\t"
10036 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10037 +
10038 +#ifdef CONFIG_PAX_REFCOUNT
10039 + "jno 0f\n"
10040 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10041 + "int $4\n0:\n"
10042 + _ASM_EXTABLE(0b, 0b)
10043 +#endif
10044 +
10045 /* adds 0x00000001, returns the old value */
10046 " jns 1f\n"
10047 " call call_rwsem_down_read_failed\n"
10048 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10049 "1:\n\t"
10050 " mov %1,%2\n\t"
10051 " add %3,%2\n\t"
10052 +
10053 +#ifdef CONFIG_PAX_REFCOUNT
10054 + "jno 0f\n"
10055 + "sub %3,%2\n"
10056 + "int $4\n0:\n"
10057 + _ASM_EXTABLE(0b, 0b)
10058 +#endif
10059 +
10060 " jle 2f\n\t"
10061 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10062 " jnz 1b\n\t"
10063 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10064 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10065 asm volatile("# beginning down_write\n\t"
10066 LOCK_PREFIX " xadd %1,(%2)\n\t"
10067 +
10068 +#ifdef CONFIG_PAX_REFCOUNT
10069 + "jno 0f\n"
10070 + "mov %1,(%2)\n"
10071 + "int $4\n0:\n"
10072 + _ASM_EXTABLE(0b, 0b)
10073 +#endif
10074 +
10075 /* subtract 0x0000ffff, returns the old value */
10076 " test %1,%1\n\t"
10077 /* was the count 0 before? */
10078 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10079 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10080 asm volatile("# beginning __up_read\n\t"
10081 LOCK_PREFIX " xadd %1,(%2)\n\t"
10082 +
10083 +#ifdef CONFIG_PAX_REFCOUNT
10084 + "jno 0f\n"
10085 + "mov %1,(%2)\n"
10086 + "int $4\n0:\n"
10087 + _ASM_EXTABLE(0b, 0b)
10088 +#endif
10089 +
10090 /* subtracts 1, returns the old value */
10091 " jns 1f\n\t"
10092 " call call_rwsem_wake\n"
10093 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10094 rwsem_count_t tmp;
10095 asm volatile("# beginning __up_write\n\t"
10096 LOCK_PREFIX " xadd %1,(%2)\n\t"
10097 +
10098 +#ifdef CONFIG_PAX_REFCOUNT
10099 + "jno 0f\n"
10100 + "mov %1,(%2)\n"
10101 + "int $4\n0:\n"
10102 + _ASM_EXTABLE(0b, 0b)
10103 +#endif
10104 +
10105 /* tries to transition
10106 0xffff0001 -> 0x00000000 */
10107 " jz 1f\n"
10108 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10109 {
10110 asm volatile("# beginning __downgrade_write\n\t"
10111 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10112 +
10113 +#ifdef CONFIG_PAX_REFCOUNT
10114 + "jno 0f\n"
10115 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10116 + "int $4\n0:\n"
10117 + _ASM_EXTABLE(0b, 0b)
10118 +#endif
10119 +
10120 /*
10121 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10122 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10123 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10124 static inline void rwsem_atomic_add(rwsem_count_t delta,
10125 struct rw_semaphore *sem)
10126 {
10127 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10128 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10129 +
10130 +#ifdef CONFIG_PAX_REFCOUNT
10131 + "jno 0f\n"
10132 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10133 + "int $4\n0:\n"
10134 + _ASM_EXTABLE(0b, 0b)
10135 +#endif
10136 +
10137 : "+m" (sem->count)
10138 : "er" (delta));
10139 }
10140 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10141 {
10142 rwsem_count_t tmp = delta;
10143
10144 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10145 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10146 +
10147 +#ifdef CONFIG_PAX_REFCOUNT
10148 + "jno 0f\n"
10149 + "mov %0,%1\n"
10150 + "int $4\n0:\n"
10151 + _ASM_EXTABLE(0b, 0b)
10152 +#endif
10153 +
10154 : "+r" (tmp), "+m" (sem->count)
10155 : : "memory");
10156
10157 diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10158 --- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10159 +++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10160 @@ -62,8 +62,8 @@
10161 * 26 - ESPFIX small SS
10162 * 27 - per-cpu [ offset to per-cpu data area ]
10163 * 28 - stack_canary-20 [ for stack protector ]
10164 - * 29 - unused
10165 - * 30 - unused
10166 + * 29 - PCI BIOS CS
10167 + * 30 - PCI BIOS DS
10168 * 31 - TSS for double fault handler
10169 */
10170 #define GDT_ENTRY_TLS_MIN 6
10171 @@ -77,6 +77,8 @@
10172
10173 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10174
10175 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10176 +
10177 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10178
10179 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10180 @@ -88,7 +90,7 @@
10181 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10182 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10183
10184 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10185 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10186 #ifdef CONFIG_SMP
10187 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10188 #else
10189 @@ -102,6 +104,12 @@
10190 #define __KERNEL_STACK_CANARY 0
10191 #endif
10192
10193 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10194 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10195 +
10196 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10197 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10198 +
10199 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10200
10201 /*
10202 @@ -139,7 +147,7 @@
10203 */
10204
10205 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10206 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10207 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10208
10209
10210 #else
10211 @@ -163,6 +171,8 @@
10212 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10213 #define __USER32_DS __USER_DS
10214
10215 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10216 +
10217 #define GDT_ENTRY_TSS 8 /* needs two entries */
10218 #define GDT_ENTRY_LDT 10 /* needs two entries */
10219 #define GDT_ENTRY_TLS_MIN 12
10220 @@ -183,6 +193,7 @@
10221 #endif
10222
10223 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10224 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10225 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10226 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10227 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10228 diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10229 --- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10230 +++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10231 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10232 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10233 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10234 DECLARE_PER_CPU(u16, cpu_llc_id);
10235 -DECLARE_PER_CPU(int, cpu_number);
10236 +DECLARE_PER_CPU(unsigned int, cpu_number);
10237
10238 static inline struct cpumask *cpu_sibling_mask(int cpu)
10239 {
10240 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10241 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10242
10243 /* Static state in head.S used to set up a CPU */
10244 -extern struct {
10245 - void *sp;
10246 - unsigned short ss;
10247 -} stack_start;
10248 +extern unsigned long stack_start; /* Initial stack pointer address */
10249
10250 struct smp_ops {
10251 void (*smp_prepare_boot_cpu)(void);
10252 @@ -60,7 +57,7 @@ struct smp_ops {
10253
10254 void (*send_call_func_ipi)(const struct cpumask *mask);
10255 void (*send_call_func_single_ipi)(int cpu);
10256 -};
10257 +} __no_const;
10258
10259 /* Globals due to paravirt */
10260 extern void set_cpu_sibling_map(int cpu);
10261 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10262 extern int safe_smp_processor_id(void);
10263
10264 #elif defined(CONFIG_X86_64_SMP)
10265 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10266 -
10267 -#define stack_smp_processor_id() \
10268 -({ \
10269 - struct thread_info *ti; \
10270 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10271 - ti->cpu; \
10272 -})
10273 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10274 +#define stack_smp_processor_id() raw_smp_processor_id()
10275 #define safe_smp_processor_id() smp_processor_id()
10276
10277 #endif
10278 diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10279 --- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10280 +++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10281 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10282 static inline void __raw_read_lock(raw_rwlock_t *rw)
10283 {
10284 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10285 +
10286 +#ifdef CONFIG_PAX_REFCOUNT
10287 + "jno 0f\n"
10288 + LOCK_PREFIX " addl $1,(%0)\n"
10289 + "int $4\n0:\n"
10290 + _ASM_EXTABLE(0b, 0b)
10291 +#endif
10292 +
10293 "jns 1f\n"
10294 "call __read_lock_failed\n\t"
10295 "1:\n"
10296 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10297 static inline void __raw_write_lock(raw_rwlock_t *rw)
10298 {
10299 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10300 +
10301 +#ifdef CONFIG_PAX_REFCOUNT
10302 + "jno 0f\n"
10303 + LOCK_PREFIX " addl %1,(%0)\n"
10304 + "int $4\n0:\n"
10305 + _ASM_EXTABLE(0b, 0b)
10306 +#endif
10307 +
10308 "jz 1f\n"
10309 "call __write_lock_failed\n\t"
10310 "1:\n"
10311 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10312
10313 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10314 {
10315 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10316 + asm volatile(LOCK_PREFIX "incl %0\n"
10317 +
10318 +#ifdef CONFIG_PAX_REFCOUNT
10319 + "jno 0f\n"
10320 + LOCK_PREFIX "decl %0\n"
10321 + "int $4\n0:\n"
10322 + _ASM_EXTABLE(0b, 0b)
10323 +#endif
10324 +
10325 + :"+m" (rw->lock) : : "memory");
10326 }
10327
10328 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10329 {
10330 - asm volatile(LOCK_PREFIX "addl %1, %0"
10331 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10332 +
10333 +#ifdef CONFIG_PAX_REFCOUNT
10334 + "jno 0f\n"
10335 + LOCK_PREFIX "subl %1, %0\n"
10336 + "int $4\n0:\n"
10337 + _ASM_EXTABLE(0b, 0b)
10338 +#endif
10339 +
10340 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10341 }
10342
10343 diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10344 --- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10345 +++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10346 @@ -48,7 +48,7 @@
10347 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10348 */
10349 #define GDT_STACK_CANARY_INIT \
10350 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10351 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10352
10353 /*
10354 * Initialize the stackprotector canary value.
10355 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10356
10357 static inline void load_stack_canary_segment(void)
10358 {
10359 -#ifdef CONFIG_X86_32
10360 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10361 asm volatile ("mov %0, %%gs" : : "r" (0));
10362 #endif
10363 }
10364 diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10365 --- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10366 +++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10367 @@ -132,7 +132,7 @@ do { \
10368 "thread_return:\n\t" \
10369 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10370 __switch_canary \
10371 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10372 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10373 "movq %%rax,%%rdi\n\t" \
10374 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10375 "jnz ret_from_fork\n\t" \
10376 @@ -143,7 +143,7 @@ do { \
10377 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10378 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10379 [_tif_fork] "i" (_TIF_FORK), \
10380 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10381 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10382 [current_task] "m" (per_cpu_var(current_task)) \
10383 __switch_canary_iparam \
10384 : "memory", "cc" __EXTRA_CLOBBER)
10385 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10386 {
10387 unsigned long __limit;
10388 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10389 - return __limit + 1;
10390 + return __limit;
10391 }
10392
10393 static inline void native_clts(void)
10394 @@ -340,12 +340,12 @@ void enable_hlt(void);
10395
10396 void cpu_idle_wait(void);
10397
10398 -extern unsigned long arch_align_stack(unsigned long sp);
10399 +#define arch_align_stack(x) ((x) & ~0xfUL)
10400 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10401
10402 void default_idle(void);
10403
10404 -void stop_this_cpu(void *dummy);
10405 +void stop_this_cpu(void *dummy) __noreturn;
10406
10407 /*
10408 * Force strict CPU ordering.
10409 diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10410 --- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10411 +++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10412 @@ -10,6 +10,7 @@
10413 #include <linux/compiler.h>
10414 #include <asm/page.h>
10415 #include <asm/types.h>
10416 +#include <asm/percpu.h>
10417
10418 /*
10419 * low level task data that entry.S needs immediate access to
10420 @@ -24,7 +25,6 @@ struct exec_domain;
10421 #include <asm/atomic.h>
10422
10423 struct thread_info {
10424 - struct task_struct *task; /* main task structure */
10425 struct exec_domain *exec_domain; /* execution domain */
10426 __u32 flags; /* low level flags */
10427 __u32 status; /* thread synchronous flags */
10428 @@ -34,18 +34,12 @@ struct thread_info {
10429 mm_segment_t addr_limit;
10430 struct restart_block restart_block;
10431 void __user *sysenter_return;
10432 -#ifdef CONFIG_X86_32
10433 - unsigned long previous_esp; /* ESP of the previous stack in
10434 - case of nested (IRQ) stacks
10435 - */
10436 - __u8 supervisor_stack[0];
10437 -#endif
10438 + unsigned long lowest_stack;
10439 int uaccess_err;
10440 };
10441
10442 -#define INIT_THREAD_INFO(tsk) \
10443 +#define INIT_THREAD_INFO \
10444 { \
10445 - .task = &tsk, \
10446 .exec_domain = &default_exec_domain, \
10447 .flags = 0, \
10448 .cpu = 0, \
10449 @@ -56,7 +50,7 @@ struct thread_info {
10450 }, \
10451 }
10452
10453 -#define init_thread_info (init_thread_union.thread_info)
10454 +#define init_thread_info (init_thread_union.stack)
10455 #define init_stack (init_thread_union.stack)
10456
10457 #else /* !__ASSEMBLY__ */
10458 @@ -163,6 +157,23 @@ struct thread_info {
10459 #define alloc_thread_info(tsk) \
10460 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10461
10462 +#ifdef __ASSEMBLY__
10463 +/* how to get the thread information struct from ASM */
10464 +#define GET_THREAD_INFO(reg) \
10465 + mov PER_CPU_VAR(current_tinfo), reg
10466 +
10467 +/* use this one if reg already contains %esp */
10468 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10469 +#else
10470 +/* how to get the thread information struct from C */
10471 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10472 +
10473 +static __always_inline struct thread_info *current_thread_info(void)
10474 +{
10475 + return percpu_read_stable(current_tinfo);
10476 +}
10477 +#endif
10478 +
10479 #ifdef CONFIG_X86_32
10480
10481 #define STACK_WARN (THREAD_SIZE/8)
10482 @@ -173,35 +184,13 @@ struct thread_info {
10483 */
10484 #ifndef __ASSEMBLY__
10485
10486 -
10487 /* how to get the current stack pointer from C */
10488 register unsigned long current_stack_pointer asm("esp") __used;
10489
10490 -/* how to get the thread information struct from C */
10491 -static inline struct thread_info *current_thread_info(void)
10492 -{
10493 - return (struct thread_info *)
10494 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10495 -}
10496 -
10497 -#else /* !__ASSEMBLY__ */
10498 -
10499 -/* how to get the thread information struct from ASM */
10500 -#define GET_THREAD_INFO(reg) \
10501 - movl $-THREAD_SIZE, reg; \
10502 - andl %esp, reg
10503 -
10504 -/* use this one if reg already contains %esp */
10505 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10506 - andl $-THREAD_SIZE, reg
10507 -
10508 #endif
10509
10510 #else /* X86_32 */
10511
10512 -#include <asm/percpu.h>
10513 -#define KERNEL_STACK_OFFSET (5*8)
10514 -
10515 /*
10516 * macros/functions for gaining access to the thread information structure
10517 * preempt_count needs to be 1 initially, until the scheduler is functional.
10518 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10519 #ifndef __ASSEMBLY__
10520 DECLARE_PER_CPU(unsigned long, kernel_stack);
10521
10522 -static inline struct thread_info *current_thread_info(void)
10523 -{
10524 - struct thread_info *ti;
10525 - ti = (void *)(percpu_read_stable(kernel_stack) +
10526 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10527 - return ti;
10528 -}
10529 -
10530 -#else /* !__ASSEMBLY__ */
10531 -
10532 -/* how to get the thread information struct from ASM */
10533 -#define GET_THREAD_INFO(reg) \
10534 - movq PER_CPU_VAR(kernel_stack),reg ; \
10535 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10536 -
10537 +/* how to get the current stack pointer from C */
10538 +register unsigned long current_stack_pointer asm("rsp") __used;
10539 #endif
10540
10541 #endif /* !X86_32 */
10542 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10543 extern void free_thread_info(struct thread_info *ti);
10544 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10545 #define arch_task_cache_init arch_task_cache_init
10546 +
10547 +#define __HAVE_THREAD_FUNCTIONS
10548 +#define task_thread_info(task) (&(task)->tinfo)
10549 +#define task_stack_page(task) ((task)->stack)
10550 +#define setup_thread_stack(p, org) do {} while (0)
10551 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10552 +
10553 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10554 +extern struct task_struct *alloc_task_struct(void);
10555 +extern void free_task_struct(struct task_struct *);
10556 +
10557 #endif
10558 #endif /* _ASM_X86_THREAD_INFO_H */
10559 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10560 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10561 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10562 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10563 static __always_inline unsigned long __must_check
10564 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10565 {
10566 + pax_track_stack();
10567 +
10568 + if ((long)n < 0)
10569 + return n;
10570 +
10571 if (__builtin_constant_p(n)) {
10572 unsigned long ret;
10573
10574 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10575 return ret;
10576 }
10577 }
10578 + if (!__builtin_constant_p(n))
10579 + check_object_size(from, n, true);
10580 return __copy_to_user_ll(to, from, n);
10581 }
10582
10583 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10584 __copy_to_user(void __user *to, const void *from, unsigned long n)
10585 {
10586 might_fault();
10587 +
10588 return __copy_to_user_inatomic(to, from, n);
10589 }
10590
10591 static __always_inline unsigned long
10592 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10593 {
10594 + if ((long)n < 0)
10595 + return n;
10596 +
10597 /* Avoid zeroing the tail if the copy fails..
10598 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10599 * but as the zeroing behaviour is only significant when n is not
10600 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10601 __copy_from_user(void *to, const void __user *from, unsigned long n)
10602 {
10603 might_fault();
10604 +
10605 + pax_track_stack();
10606 +
10607 + if ((long)n < 0)
10608 + return n;
10609 +
10610 if (__builtin_constant_p(n)) {
10611 unsigned long ret;
10612
10613 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10614 return ret;
10615 }
10616 }
10617 + if (!__builtin_constant_p(n))
10618 + check_object_size(to, n, false);
10619 return __copy_from_user_ll(to, from, n);
10620 }
10621
10622 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10623 const void __user *from, unsigned long n)
10624 {
10625 might_fault();
10626 +
10627 + if ((long)n < 0)
10628 + return n;
10629 +
10630 if (__builtin_constant_p(n)) {
10631 unsigned long ret;
10632
10633 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10634 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10635 unsigned long n)
10636 {
10637 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10638 + if ((long)n < 0)
10639 + return n;
10640 +
10641 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10642 +}
10643 +
10644 +/**
10645 + * copy_to_user: - Copy a block of data into user space.
10646 + * @to: Destination address, in user space.
10647 + * @from: Source address, in kernel space.
10648 + * @n: Number of bytes to copy.
10649 + *
10650 + * Context: User context only. This function may sleep.
10651 + *
10652 + * Copy data from kernel space to user space.
10653 + *
10654 + * Returns number of bytes that could not be copied.
10655 + * On success, this will be zero.
10656 + */
10657 +static __always_inline unsigned long __must_check
10658 +copy_to_user(void __user *to, const void *from, unsigned long n)
10659 +{
10660 + if (access_ok(VERIFY_WRITE, to, n))
10661 + n = __copy_to_user(to, from, n);
10662 + return n;
10663 +}
10664 +
10665 +/**
10666 + * copy_from_user: - Copy a block of data from user space.
10667 + * @to: Destination address, in kernel space.
10668 + * @from: Source address, in user space.
10669 + * @n: Number of bytes to copy.
10670 + *
10671 + * Context: User context only. This function may sleep.
10672 + *
10673 + * Copy data from user space to kernel space.
10674 + *
10675 + * Returns number of bytes that could not be copied.
10676 + * On success, this will be zero.
10677 + *
10678 + * If some data could not be copied, this function will pad the copied
10679 + * data to the requested size using zero bytes.
10680 + */
10681 +static __always_inline unsigned long __must_check
10682 +copy_from_user(void *to, const void __user *from, unsigned long n)
10683 +{
10684 + if (access_ok(VERIFY_READ, from, n))
10685 + n = __copy_from_user(to, from, n);
10686 + else if ((long)n > 0) {
10687 + if (!__builtin_constant_p(n))
10688 + check_object_size(to, n, false);
10689 + memset(to, 0, n);
10690 + }
10691 + return n;
10692 }
10693
10694 -unsigned long __must_check copy_to_user(void __user *to,
10695 - const void *from, unsigned long n);
10696 -unsigned long __must_check copy_from_user(void *to,
10697 - const void __user *from,
10698 - unsigned long n);
10699 long __must_check strncpy_from_user(char *dst, const char __user *src,
10700 long count);
10701 long __must_check __strncpy_from_user(char *dst,
10702 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10703 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10704 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10705 @@ -9,6 +9,9 @@
10706 #include <linux/prefetch.h>
10707 #include <linux/lockdep.h>
10708 #include <asm/page.h>
10709 +#include <asm/pgtable.h>
10710 +
10711 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10712
10713 /*
10714 * Copy To/From Userspace
10715 @@ -19,113 +22,203 @@ __must_check unsigned long
10716 copy_user_generic(void *to, const void *from, unsigned len);
10717
10718 __must_check unsigned long
10719 -copy_to_user(void __user *to, const void *from, unsigned len);
10720 -__must_check unsigned long
10721 -copy_from_user(void *to, const void __user *from, unsigned len);
10722 -__must_check unsigned long
10723 copy_in_user(void __user *to, const void __user *from, unsigned len);
10724
10725 static __always_inline __must_check
10726 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10727 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10728 {
10729 - int ret = 0;
10730 + unsigned ret = 0;
10731
10732 might_fault();
10733 - if (!__builtin_constant_p(size))
10734 - return copy_user_generic(dst, (__force void *)src, size);
10735 +
10736 + if ((int)size < 0)
10737 + return size;
10738 +
10739 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10740 + if (!__access_ok(VERIFY_READ, src, size))
10741 + return size;
10742 +#endif
10743 +
10744 + if (!__builtin_constant_p(size)) {
10745 + check_object_size(dst, size, false);
10746 +
10747 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10748 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10749 + src += PAX_USER_SHADOW_BASE;
10750 +#endif
10751 +
10752 + return copy_user_generic(dst, (__force const void *)src, size);
10753 + }
10754 switch (size) {
10755 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10756 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10757 ret, "b", "b", "=q", 1);
10758 return ret;
10759 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10760 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10761 ret, "w", "w", "=r", 2);
10762 return ret;
10763 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10764 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10765 ret, "l", "k", "=r", 4);
10766 return ret;
10767 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10768 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10769 ret, "q", "", "=r", 8);
10770 return ret;
10771 case 10:
10772 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10773 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10774 ret, "q", "", "=r", 10);
10775 if (unlikely(ret))
10776 return ret;
10777 __get_user_asm(*(u16 *)(8 + (char *)dst),
10778 - (u16 __user *)(8 + (char __user *)src),
10779 + (const u16 __user *)(8 + (const char __user *)src),
10780 ret, "w", "w", "=r", 2);
10781 return ret;
10782 case 16:
10783 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10784 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10785 ret, "q", "", "=r", 16);
10786 if (unlikely(ret))
10787 return ret;
10788 __get_user_asm(*(u64 *)(8 + (char *)dst),
10789 - (u64 __user *)(8 + (char __user *)src),
10790 + (const u64 __user *)(8 + (const char __user *)src),
10791 ret, "q", "", "=r", 8);
10792 return ret;
10793 default:
10794 - return copy_user_generic(dst, (__force void *)src, size);
10795 +
10796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10797 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10798 + src += PAX_USER_SHADOW_BASE;
10799 +#endif
10800 +
10801 + return copy_user_generic(dst, (__force const void *)src, size);
10802 }
10803 }
10804
10805 static __always_inline __must_check
10806 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10807 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10808 {
10809 - int ret = 0;
10810 + unsigned ret = 0;
10811
10812 might_fault();
10813 - if (!__builtin_constant_p(size))
10814 +
10815 + pax_track_stack();
10816 +
10817 + if ((int)size < 0)
10818 + return size;
10819 +
10820 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10821 + if (!__access_ok(VERIFY_WRITE, dst, size))
10822 + return size;
10823 +#endif
10824 +
10825 + if (!__builtin_constant_p(size)) {
10826 + check_object_size(src, size, true);
10827 +
10828 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10829 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10830 + dst += PAX_USER_SHADOW_BASE;
10831 +#endif
10832 +
10833 return copy_user_generic((__force void *)dst, src, size);
10834 + }
10835 switch (size) {
10836 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10837 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10838 ret, "b", "b", "iq", 1);
10839 return ret;
10840 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10841 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10842 ret, "w", "w", "ir", 2);
10843 return ret;
10844 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10845 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10846 ret, "l", "k", "ir", 4);
10847 return ret;
10848 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10849 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10850 ret, "q", "", "er", 8);
10851 return ret;
10852 case 10:
10853 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10854 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10855 ret, "q", "", "er", 10);
10856 if (unlikely(ret))
10857 return ret;
10858 asm("":::"memory");
10859 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10860 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10861 ret, "w", "w", "ir", 2);
10862 return ret;
10863 case 16:
10864 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10865 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10866 ret, "q", "", "er", 16);
10867 if (unlikely(ret))
10868 return ret;
10869 asm("":::"memory");
10870 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10871 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10872 ret, "q", "", "er", 8);
10873 return ret;
10874 default:
10875 +
10876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10877 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10878 + dst += PAX_USER_SHADOW_BASE;
10879 +#endif
10880 +
10881 return copy_user_generic((__force void *)dst, src, size);
10882 }
10883 }
10884
10885 static __always_inline __must_check
10886 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10887 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10888 +{
10889 + if (access_ok(VERIFY_WRITE, to, len))
10890 + len = __copy_to_user(to, from, len);
10891 + return len;
10892 +}
10893 +
10894 +static __always_inline __must_check
10895 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10896 +{
10897 + if ((int)len < 0)
10898 + return len;
10899 +
10900 + if (access_ok(VERIFY_READ, from, len))
10901 + len = __copy_from_user(to, from, len);
10902 + else if ((int)len > 0) {
10903 + if (!__builtin_constant_p(len))
10904 + check_object_size(to, len, false);
10905 + memset(to, 0, len);
10906 + }
10907 + return len;
10908 +}
10909 +
10910 +static __always_inline __must_check
10911 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10912 {
10913 - int ret = 0;
10914 + unsigned ret = 0;
10915
10916 might_fault();
10917 - if (!__builtin_constant_p(size))
10918 +
10919 + pax_track_stack();
10920 +
10921 + if ((int)size < 0)
10922 + return size;
10923 +
10924 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10925 + if (!__access_ok(VERIFY_READ, src, size))
10926 + return size;
10927 + if (!__access_ok(VERIFY_WRITE, dst, size))
10928 + return size;
10929 +#endif
10930 +
10931 + if (!__builtin_constant_p(size)) {
10932 +
10933 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10934 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10935 + src += PAX_USER_SHADOW_BASE;
10936 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10937 + dst += PAX_USER_SHADOW_BASE;
10938 +#endif
10939 +
10940 return copy_user_generic((__force void *)dst,
10941 - (__force void *)src, size);
10942 + (__force const void *)src, size);
10943 + }
10944 switch (size) {
10945 case 1: {
10946 u8 tmp;
10947 - __get_user_asm(tmp, (u8 __user *)src,
10948 + __get_user_asm(tmp, (const u8 __user *)src,
10949 ret, "b", "b", "=q", 1);
10950 if (likely(!ret))
10951 __put_user_asm(tmp, (u8 __user *)dst,
10952 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10953 }
10954 case 2: {
10955 u16 tmp;
10956 - __get_user_asm(tmp, (u16 __user *)src,
10957 + __get_user_asm(tmp, (const u16 __user *)src,
10958 ret, "w", "w", "=r", 2);
10959 if (likely(!ret))
10960 __put_user_asm(tmp, (u16 __user *)dst,
10961 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10962
10963 case 4: {
10964 u32 tmp;
10965 - __get_user_asm(tmp, (u32 __user *)src,
10966 + __get_user_asm(tmp, (const u32 __user *)src,
10967 ret, "l", "k", "=r", 4);
10968 if (likely(!ret))
10969 __put_user_asm(tmp, (u32 __user *)dst,
10970 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10971 }
10972 case 8: {
10973 u64 tmp;
10974 - __get_user_asm(tmp, (u64 __user *)src,
10975 + __get_user_asm(tmp, (const u64 __user *)src,
10976 ret, "q", "", "=r", 8);
10977 if (likely(!ret))
10978 __put_user_asm(tmp, (u64 __user *)dst,
10979 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10980 return ret;
10981 }
10982 default:
10983 +
10984 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10985 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10986 + src += PAX_USER_SHADOW_BASE;
10987 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10988 + dst += PAX_USER_SHADOW_BASE;
10989 +#endif
10990 +
10991 return copy_user_generic((__force void *)dst,
10992 - (__force void *)src, size);
10993 + (__force const void *)src, size);
10994 }
10995 }
10996
10997 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10998 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10999 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11000
11001 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11002 - unsigned size);
11003 +static __must_check __always_inline unsigned long
11004 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11005 +{
11006 + pax_track_stack();
11007 +
11008 + if ((int)size < 0)
11009 + return size;
11010
11011 -static __must_check __always_inline int
11012 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11013 + if (!__access_ok(VERIFY_READ, src, size))
11014 + return size;
11015 +
11016 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11017 + src += PAX_USER_SHADOW_BASE;
11018 +#endif
11019 +
11020 + return copy_user_generic(dst, (__force const void *)src, size);
11021 +}
11022 +
11023 +static __must_check __always_inline unsigned long
11024 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11025 {
11026 + if ((int)size < 0)
11027 + return size;
11028 +
11029 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11030 + if (!__access_ok(VERIFY_WRITE, dst, size))
11031 + return size;
11032 +
11033 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11034 + dst += PAX_USER_SHADOW_BASE;
11035 +#endif
11036 +
11037 return copy_user_generic((__force void *)dst, src, size);
11038 }
11039
11040 -extern long __copy_user_nocache(void *dst, const void __user *src,
11041 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11042 unsigned size, int zerorest);
11043
11044 -static inline int
11045 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11046 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11047 {
11048 might_sleep();
11049 +
11050 + if ((int)size < 0)
11051 + return size;
11052 +
11053 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11054 + if (!__access_ok(VERIFY_READ, src, size))
11055 + return size;
11056 +#endif
11057 +
11058 return __copy_user_nocache(dst, src, size, 1);
11059 }
11060
11061 -static inline int
11062 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11063 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11064 unsigned size)
11065 {
11066 + if ((int)size < 0)
11067 + return size;
11068 +
11069 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11070 + if (!__access_ok(VERIFY_READ, src, size))
11071 + return size;
11072 +#endif
11073 +
11074 return __copy_user_nocache(dst, src, size, 0);
11075 }
11076
11077 -unsigned long
11078 +extern unsigned long
11079 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11080
11081 #endif /* _ASM_X86_UACCESS_64_H */
11082 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11083 --- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11084 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11085 @@ -8,12 +8,15 @@
11086 #include <linux/thread_info.h>
11087 #include <linux/prefetch.h>
11088 #include <linux/string.h>
11089 +#include <linux/sched.h>
11090 #include <asm/asm.h>
11091 #include <asm/page.h>
11092
11093 #define VERIFY_READ 0
11094 #define VERIFY_WRITE 1
11095
11096 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11097 +
11098 /*
11099 * The fs value determines whether argument validity checking should be
11100 * performed or not. If get_fs() == USER_DS, checking is performed, with
11101 @@ -29,7 +32,12 @@
11102
11103 #define get_ds() (KERNEL_DS)
11104 #define get_fs() (current_thread_info()->addr_limit)
11105 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11106 +void __set_fs(mm_segment_t x);
11107 +void set_fs(mm_segment_t x);
11108 +#else
11109 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11110 +#endif
11111
11112 #define segment_eq(a, b) ((a).seg == (b).seg)
11113
11114 @@ -77,7 +85,33 @@
11115 * checks that the pointer is in the user space range - after calling
11116 * this function, memory access functions may still return -EFAULT.
11117 */
11118 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11119 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11120 +#define access_ok(type, addr, size) \
11121 +({ \
11122 + long __size = size; \
11123 + unsigned long __addr = (unsigned long)addr; \
11124 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11125 + unsigned long __end_ao = __addr + __size - 1; \
11126 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11127 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11128 + while(__addr_ao <= __end_ao) { \
11129 + char __c_ao; \
11130 + __addr_ao += PAGE_SIZE; \
11131 + if (__size > PAGE_SIZE) \
11132 + cond_resched(); \
11133 + if (__get_user(__c_ao, (char __user *)__addr)) \
11134 + break; \
11135 + if (type != VERIFY_WRITE) { \
11136 + __addr = __addr_ao; \
11137 + continue; \
11138 + } \
11139 + if (__put_user(__c_ao, (char __user *)__addr)) \
11140 + break; \
11141 + __addr = __addr_ao; \
11142 + } \
11143 + } \
11144 + __ret_ao; \
11145 +})
11146
11147 /*
11148 * The exception table consists of pairs of addresses: the first is the
11149 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11150 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11151 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11152
11153 -
11154 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11155 +#define __copyuser_seg "gs;"
11156 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11157 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11158 +#else
11159 +#define __copyuser_seg
11160 +#define __COPYUSER_SET_ES
11161 +#define __COPYUSER_RESTORE_ES
11162 +#endif
11163
11164 #ifdef CONFIG_X86_32
11165 #define __put_user_asm_u64(x, addr, err, errret) \
11166 - asm volatile("1: movl %%eax,0(%2)\n" \
11167 - "2: movl %%edx,4(%2)\n" \
11168 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11169 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11170 "3:\n" \
11171 ".section .fixup,\"ax\"\n" \
11172 "4: movl %3,%0\n" \
11173 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11174 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11175
11176 #define __put_user_asm_ex_u64(x, addr) \
11177 - asm volatile("1: movl %%eax,0(%1)\n" \
11178 - "2: movl %%edx,4(%1)\n" \
11179 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11180 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11181 "3:\n" \
11182 _ASM_EXTABLE(1b, 2b - 1b) \
11183 _ASM_EXTABLE(2b, 3b - 2b) \
11184 @@ -374,7 +416,7 @@ do { \
11185 } while (0)
11186
11187 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11188 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11189 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11190 "2:\n" \
11191 ".section .fixup,\"ax\"\n" \
11192 "3: mov %3,%0\n" \
11193 @@ -382,7 +424,7 @@ do { \
11194 " jmp 2b\n" \
11195 ".previous\n" \
11196 _ASM_EXTABLE(1b, 3b) \
11197 - : "=r" (err), ltype(x) \
11198 + : "=r" (err), ltype (x) \
11199 : "m" (__m(addr)), "i" (errret), "0" (err))
11200
11201 #define __get_user_size_ex(x, ptr, size) \
11202 @@ -407,7 +449,7 @@ do { \
11203 } while (0)
11204
11205 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11206 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11207 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11208 "2:\n" \
11209 _ASM_EXTABLE(1b, 2b - 1b) \
11210 : ltype(x) : "m" (__m(addr)))
11211 @@ -424,13 +466,24 @@ do { \
11212 int __gu_err; \
11213 unsigned long __gu_val; \
11214 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11215 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11216 + (x) = (__typeof__(*(ptr)))__gu_val; \
11217 __gu_err; \
11218 })
11219
11220 /* FIXME: this hack is definitely wrong -AK */
11221 struct __large_struct { unsigned long buf[100]; };
11222 -#define __m(x) (*(struct __large_struct __user *)(x))
11223 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11224 +#define ____m(x) \
11225 +({ \
11226 + unsigned long ____x = (unsigned long)(x); \
11227 + if (____x < PAX_USER_SHADOW_BASE) \
11228 + ____x += PAX_USER_SHADOW_BASE; \
11229 + (void __user *)____x; \
11230 +})
11231 +#else
11232 +#define ____m(x) (x)
11233 +#endif
11234 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11235
11236 /*
11237 * Tell gcc we read from memory instead of writing: this is because
11238 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11239 * aliasing issues.
11240 */
11241 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11242 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11243 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11244 "2:\n" \
11245 ".section .fixup,\"ax\"\n" \
11246 "3: mov %3,%0\n" \
11247 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11248 ".previous\n" \
11249 _ASM_EXTABLE(1b, 3b) \
11250 : "=r"(err) \
11251 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11252 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11253
11254 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11255 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11256 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11257 "2:\n" \
11258 _ASM_EXTABLE(1b, 2b - 1b) \
11259 : : ltype(x), "m" (__m(addr)))
11260 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11261 * On error, the variable @x is set to zero.
11262 */
11263
11264 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11265 +#define __get_user(x, ptr) get_user((x), (ptr))
11266 +#else
11267 #define __get_user(x, ptr) \
11268 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11269 +#endif
11270
11271 /**
11272 * __put_user: - Write a simple value into user space, with less checking.
11273 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11274 * Returns zero on success, or -EFAULT on error.
11275 */
11276
11277 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11278 +#define __put_user(x, ptr) put_user((x), (ptr))
11279 +#else
11280 #define __put_user(x, ptr) \
11281 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11282 +#endif
11283
11284 #define __get_user_unaligned __get_user
11285 #define __put_user_unaligned __put_user
11286 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11287 #define get_user_ex(x, ptr) do { \
11288 unsigned long __gue_val; \
11289 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11290 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11291 + (x) = (__typeof__(*(ptr)))__gue_val; \
11292 } while (0)
11293
11294 #ifdef CONFIG_X86_WP_WORKS_OK
11295 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11296
11297 #define ARCH_HAS_NOCACHE_UACCESS 1
11298
11299 +#define ARCH_HAS_SORT_EXTABLE
11300 #ifdef CONFIG_X86_32
11301 # include "uaccess_32.h"
11302 #else
11303 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11304 --- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11305 +++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11306 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11307 int sysctl_enabled;
11308 struct timezone sys_tz;
11309 struct { /* extract of a clocksource struct */
11310 + char name[8];
11311 cycle_t (*vread)(void);
11312 cycle_t cycle_last;
11313 cycle_t mask;
11314 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11315 --- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11316 +++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11317 @@ -191,6 +191,7 @@ struct vrom_header {
11318 u8 reserved[96]; /* Reserved for headers */
11319 char vmi_init[8]; /* VMI_Init jump point */
11320 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11321 + char rom_data[8048]; /* rest of the option ROM */
11322 } __attribute__((packed));
11323
11324 struct pnp_header {
11325 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11326 --- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11327 +++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11328 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11329 int (*wallclock_updated)(void);
11330 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11331 void (*cancel_alarm)(u32 flags);
11332 -} vmi_timer_ops;
11333 +} __no_const vmi_timer_ops;
11334
11335 /* Prototypes */
11336 extern void __init vmi_time_init(void);
11337 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11338 --- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11339 +++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11340 @@ -15,9 +15,10 @@ enum vsyscall_num {
11341
11342 #ifdef __KERNEL__
11343 #include <linux/seqlock.h>
11344 +#include <linux/getcpu.h>
11345 +#include <linux/time.h>
11346
11347 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11348 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11349
11350 /* Definitions for CONFIG_GENERIC_TIME definitions */
11351 #define __section_vsyscall_gtod_data __attribute__ \
11352 @@ -31,7 +32,6 @@ enum vsyscall_num {
11353 #define VGETCPU_LSL 2
11354
11355 extern int __vgetcpu_mode;
11356 -extern volatile unsigned long __jiffies;
11357
11358 /* kernel space (writeable) */
11359 extern int vgetcpu_mode;
11360 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11361
11362 extern void map_vsyscall(void);
11363
11364 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11365 +extern time_t vtime(time_t *t);
11366 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11367 #endif /* __KERNEL__ */
11368
11369 #endif /* _ASM_X86_VSYSCALL_H */
11370 diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11371 --- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11372 +++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11373 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11374 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11375 void (*find_smp_config)(unsigned int reserve);
11376 void (*get_smp_config)(unsigned int early);
11377 -};
11378 +} __no_const;
11379
11380 /**
11381 * struct x86_init_resources - platform specific resource related ops
11382 @@ -42,7 +42,7 @@ struct x86_init_resources {
11383 void (*probe_roms)(void);
11384 void (*reserve_resources)(void);
11385 char *(*memory_setup)(void);
11386 -};
11387 +} __no_const;
11388
11389 /**
11390 * struct x86_init_irqs - platform specific interrupt setup
11391 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11392 void (*pre_vector_init)(void);
11393 void (*intr_init)(void);
11394 void (*trap_init)(void);
11395 -};
11396 +} __no_const;
11397
11398 /**
11399 * struct x86_init_oem - oem platform specific customizing functions
11400 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11401 struct x86_init_oem {
11402 void (*arch_setup)(void);
11403 void (*banner)(void);
11404 -};
11405 +} __no_const;
11406
11407 /**
11408 * struct x86_init_paging - platform specific paging functions
11409 @@ -75,7 +75,7 @@ struct x86_init_oem {
11410 struct x86_init_paging {
11411 void (*pagetable_setup_start)(pgd_t *base);
11412 void (*pagetable_setup_done)(pgd_t *base);
11413 -};
11414 +} __no_const;
11415
11416 /**
11417 * struct x86_init_timers - platform specific timer setup
11418 @@ -88,7 +88,7 @@ struct x86_init_timers {
11419 void (*setup_percpu_clockev)(void);
11420 void (*tsc_pre_init)(void);
11421 void (*timer_init)(void);
11422 -};
11423 +} __no_const;
11424
11425 /**
11426 * struct x86_init_ops - functions for platform specific setup
11427 @@ -101,7 +101,7 @@ struct x86_init_ops {
11428 struct x86_init_oem oem;
11429 struct x86_init_paging paging;
11430 struct x86_init_timers timers;
11431 -};
11432 +} __no_const;
11433
11434 /**
11435 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11436 @@ -109,7 +109,7 @@ struct x86_init_ops {
11437 */
11438 struct x86_cpuinit_ops {
11439 void (*setup_percpu_clockev)(void);
11440 -};
11441 +} __no_const;
11442
11443 /**
11444 * struct x86_platform_ops - platform specific runtime functions
11445 @@ -121,7 +121,7 @@ struct x86_platform_ops {
11446 unsigned long (*calibrate_tsc)(void);
11447 unsigned long (*get_wallclock)(void);
11448 int (*set_wallclock)(unsigned long nowtime);
11449 -};
11450 +} __no_const;
11451
11452 extern struct x86_init_ops x86_init;
11453 extern struct x86_cpuinit_ops x86_cpuinit;
11454 diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11455 --- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11456 +++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11457 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11458 static inline int xsave_user(struct xsave_struct __user *buf)
11459 {
11460 int err;
11461 +
11462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11463 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11464 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11465 +#endif
11466 +
11467 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11468 "2:\n"
11469 ".section .fixup,\"ax\"\n"
11470 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11471 u32 lmask = mask;
11472 u32 hmask = mask >> 32;
11473
11474 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11475 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11476 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11477 +#endif
11478 +
11479 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11480 "2:\n"
11481 ".section .fixup,\"ax\"\n"
11482 diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11483 --- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11484 +++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11485 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11486
11487 config X86_32_LAZY_GS
11488 def_bool y
11489 - depends on X86_32 && !CC_STACKPROTECTOR
11490 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11491
11492 config KTIME_SCALAR
11493 def_bool X86_32
11494 @@ -1008,7 +1008,7 @@ choice
11495
11496 config NOHIGHMEM
11497 bool "off"
11498 - depends on !X86_NUMAQ
11499 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11500 ---help---
11501 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11502 However, the address space of 32-bit x86 processors is only 4
11503 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11504
11505 config HIGHMEM4G
11506 bool "4GB"
11507 - depends on !X86_NUMAQ
11508 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11509 ---help---
11510 Select this if you have a 32-bit processor and between 1 and 4
11511 gigabytes of physical RAM.
11512 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11513 hex
11514 default 0xB0000000 if VMSPLIT_3G_OPT
11515 default 0x80000000 if VMSPLIT_2G
11516 - default 0x78000000 if VMSPLIT_2G_OPT
11517 + default 0x70000000 if VMSPLIT_2G_OPT
11518 default 0x40000000 if VMSPLIT_1G
11519 default 0xC0000000
11520 depends on X86_32
11521 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11522
11523 config EFI
11524 bool "EFI runtime service support"
11525 - depends on ACPI
11526 + depends on ACPI && !PAX_KERNEXEC
11527 ---help---
11528 This enables the kernel to use EFI runtime services that are
11529 available (such as the EFI variable services).
11530 @@ -1460,6 +1460,7 @@ config SECCOMP
11531
11532 config CC_STACKPROTECTOR
11533 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11534 + depends on X86_64 || !PAX_MEMORY_UDEREF
11535 ---help---
11536 This option turns on the -fstack-protector GCC feature. This
11537 feature puts, at the beginning of functions, a canary value on
11538 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11539 config PHYSICAL_START
11540 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11541 default "0x1000000"
11542 + range 0x400000 0x40000000
11543 ---help---
11544 This gives the physical address where the kernel is loaded.
11545
11546 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11547 hex
11548 prompt "Alignment value to which kernel should be aligned" if X86_32
11549 default "0x1000000"
11550 + range 0x400000 0x1000000 if PAX_KERNEXEC
11551 range 0x2000 0x1000000
11552 ---help---
11553 This value puts the alignment restrictions on physical address
11554 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11555 Say N if you want to disable CPU hotplug.
11556
11557 config COMPAT_VDSO
11558 - def_bool y
11559 + def_bool n
11560 prompt "Compat VDSO support"
11561 depends on X86_32 || IA32_EMULATION
11562 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11563 ---help---
11564 Map the 32-bit VDSO to the predictable old-style address too.
11565 ---help---
11566 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11567 --- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11568 +++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11569 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11570
11571 config X86_F00F_BUG
11572 def_bool y
11573 - depends on M586MMX || M586TSC || M586 || M486 || M386
11574 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11575
11576 config X86_WP_WORKS_OK
11577 def_bool y
11578 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11579
11580 config X86_ALIGNMENT_16
11581 def_bool y
11582 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11583 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11584
11585 config X86_INTEL_USERCOPY
11586 def_bool y
11587 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11588 # generates cmov.
11589 config X86_CMOV
11590 def_bool y
11591 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11592 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11593
11594 config X86_MINIMUM_CPU_FAMILY
11595 int
11596 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11597 --- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11598 +++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11599 @@ -99,7 +99,7 @@ config X86_PTDUMP
11600 config DEBUG_RODATA
11601 bool "Write protect kernel read-only data structures"
11602 default y
11603 - depends on DEBUG_KERNEL
11604 + depends on DEBUG_KERNEL && BROKEN
11605 ---help---
11606 Mark the kernel read-only data as write-protected in the pagetables,
11607 in order to catch accidental (and incorrect) writes to such const
11608 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11609 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11610 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11611 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11612 $(call cc-option, -fno-stack-protector) \
11613 $(call cc-option, -mpreferred-stack-boundary=2)
11614 KBUILD_CFLAGS += $(call cc-option, -m32)
11615 +ifdef CONSTIFY_PLUGIN
11616 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11617 +endif
11618 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11619 GCOV_PROFILE := n
11620
11621 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11622 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11623 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11624 @@ -91,6 +91,9 @@ _start:
11625 /* Do any other stuff... */
11626
11627 #ifndef CONFIG_64BIT
11628 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11629 + call verify_cpu
11630 +
11631 /* This could also be done in C code... */
11632 movl pmode_cr3, %eax
11633 movl %eax, %cr3
11634 @@ -104,7 +107,7 @@ _start:
11635 movl %eax, %ecx
11636 orl %edx, %ecx
11637 jz 1f
11638 - movl $0xc0000080, %ecx
11639 + mov $MSR_EFER, %ecx
11640 wrmsr
11641 1:
11642
11643 @@ -114,6 +117,7 @@ _start:
11644 movl pmode_cr0, %eax
11645 movl %eax, %cr0
11646 jmp pmode_return
11647 +# include "../../verify_cpu.S"
11648 #else
11649 pushw $0
11650 pushw trampoline_segment
11651 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11652 --- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11653 +++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11654 @@ -11,11 +11,12 @@
11655 #include <linux/cpumask.h>
11656 #include <asm/segment.h>
11657 #include <asm/desc.h>
11658 +#include <asm/e820.h>
11659
11660 #include "realmode/wakeup.h"
11661 #include "sleep.h"
11662
11663 -unsigned long acpi_wakeup_address;
11664 +unsigned long acpi_wakeup_address = 0x2000;
11665 unsigned long acpi_realmode_flags;
11666
11667 /* address in low memory of the wakeup routine. */
11668 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11669 #else /* CONFIG_64BIT */
11670 header->trampoline_segment = setup_trampoline() >> 4;
11671 #ifdef CONFIG_SMP
11672 - stack_start.sp = temp_stack + sizeof(temp_stack);
11673 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11674 +
11675 + pax_open_kernel();
11676 early_gdt_descr.address =
11677 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11678 + pax_close_kernel();
11679 +
11680 initial_gs = per_cpu_offset(smp_processor_id());
11681 #endif
11682 initial_code = (unsigned long)wakeup_long64;
11683 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11684 return;
11685 }
11686
11687 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11688 -
11689 - if (!acpi_realmode) {
11690 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11691 - return;
11692 - }
11693 -
11694 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11695 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11696 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11697 }
11698
11699
11700 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11701 --- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11702 +++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11703 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11704 # and restore the stack ... but you need gdt for this to work
11705 movl saved_context_esp, %esp
11706
11707 - movl %cs:saved_magic, %eax
11708 - cmpl $0x12345678, %eax
11709 + cmpl $0x12345678, saved_magic
11710 jne bogus_magic
11711
11712 # jump to place where we left off
11713 - movl saved_eip, %eax
11714 - jmp *%eax
11715 + jmp *(saved_eip)
11716
11717 bogus_magic:
11718 jmp bogus_magic
11719 diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11720 --- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11721 +++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11722 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11723
11724 BUG_ON(p->len > MAX_PATCH_LEN);
11725 /* prep the buffer with the original instructions */
11726 - memcpy(insnbuf, p->instr, p->len);
11727 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11728 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11729 (unsigned long)p->instr, p->len);
11730
11731 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11732 if (smp_alt_once)
11733 free_init_pages("SMP alternatives",
11734 (unsigned long)__smp_locks,
11735 - (unsigned long)__smp_locks_end);
11736 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11737
11738 restart_nmi();
11739 }
11740 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11741 * instructions. And on the local CPU you need to be protected again NMI or MCE
11742 * handlers seeing an inconsistent instruction while you patch.
11743 */
11744 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11745 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11746 size_t len)
11747 {
11748 unsigned long flags;
11749 local_irq_save(flags);
11750 - memcpy(addr, opcode, len);
11751 +
11752 + pax_open_kernel();
11753 + memcpy(ktla_ktva(addr), opcode, len);
11754 sync_core();
11755 + pax_close_kernel();
11756 +
11757 local_irq_restore(flags);
11758 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11759 that causes hangs on some VIA CPUs. */
11760 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11761 */
11762 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11763 {
11764 - unsigned long flags;
11765 - char *vaddr;
11766 + unsigned char *vaddr = ktla_ktva(addr);
11767 struct page *pages[2];
11768 - int i;
11769 + size_t i;
11770
11771 if (!core_kernel_text((unsigned long)addr)) {
11772 - pages[0] = vmalloc_to_page(addr);
11773 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11774 + pages[0] = vmalloc_to_page(vaddr);
11775 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11776 } else {
11777 - pages[0] = virt_to_page(addr);
11778 + pages[0] = virt_to_page(vaddr);
11779 WARN_ON(!PageReserved(pages[0]));
11780 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11781 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11782 }
11783 BUG_ON(!pages[0]);
11784 - local_irq_save(flags);
11785 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11786 - if (pages[1])
11787 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11788 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11789 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11790 - clear_fixmap(FIX_TEXT_POKE0);
11791 - if (pages[1])
11792 - clear_fixmap(FIX_TEXT_POKE1);
11793 - local_flush_tlb();
11794 - sync_core();
11795 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11796 - that causes hangs on some VIA CPUs. */
11797 + text_poke_early(addr, opcode, len);
11798 for (i = 0; i < len; i++)
11799 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11800 - local_irq_restore(flags);
11801 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11802 return addr;
11803 }
11804 diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11805 --- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11806 +++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11807 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11808 }
11809 }
11810
11811 -static struct dma_map_ops amd_iommu_dma_ops = {
11812 +static const struct dma_map_ops amd_iommu_dma_ops = {
11813 .alloc_coherent = alloc_coherent,
11814 .free_coherent = free_coherent,
11815 .map_page = map_page,
11816 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11817 --- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11818 +++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11819 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11820 /*
11821 * Debug level, exported for io_apic.c
11822 */
11823 -unsigned int apic_verbosity;
11824 +int apic_verbosity;
11825
11826 int pic_mode;
11827
11828 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11829 apic_write(APIC_ESR, 0);
11830 v1 = apic_read(APIC_ESR);
11831 ack_APIC_irq();
11832 - atomic_inc(&irq_err_count);
11833 + atomic_inc_unchecked(&irq_err_count);
11834
11835 /*
11836 * Here is what the APIC error bits mean:
11837 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11838 u16 *bios_cpu_apicid;
11839 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11840
11841 + pax_track_stack();
11842 +
11843 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11844 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11845
11846 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
11847 --- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11848 +++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11849 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11850 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11851 GFP_ATOMIC);
11852 if (!ioapic_entries)
11853 - return 0;
11854 + return NULL;
11855
11856 for (apic = 0; apic < nr_ioapics; apic++) {
11857 ioapic_entries[apic] =
11858 @@ -733,7 +733,7 @@ nomem:
11859 kfree(ioapic_entries[apic]);
11860 kfree(ioapic_entries);
11861
11862 - return 0;
11863 + return NULL;
11864 }
11865
11866 /*
11867 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11868 }
11869 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11870
11871 -void lock_vector_lock(void)
11872 +void lock_vector_lock(void) __acquires(vector_lock)
11873 {
11874 /* Used to the online set of cpus does not change
11875 * during assign_irq_vector.
11876 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11877 spin_lock(&vector_lock);
11878 }
11879
11880 -void unlock_vector_lock(void)
11881 +void unlock_vector_lock(void) __releases(vector_lock)
11882 {
11883 spin_unlock(&vector_lock);
11884 }
11885 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11886 ack_APIC_irq();
11887 }
11888
11889 -atomic_t irq_mis_count;
11890 +atomic_unchecked_t irq_mis_count;
11891
11892 static void ack_apic_level(unsigned int irq)
11893 {
11894 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11895
11896 /* Tail end of version 0x11 I/O APIC bug workaround */
11897 if (!(v & (1 << (i & 0x1f)))) {
11898 - atomic_inc(&irq_mis_count);
11899 + atomic_inc_unchecked(&irq_mis_count);
11900 spin_lock(&ioapic_lock);
11901 __mask_and_edge_IO_APIC_irq(cfg);
11902 __unmask_and_level_IO_APIC_irq(cfg);
11903 diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
11904 --- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11905 +++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11906 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11907 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11908 * even though they are called in protected mode.
11909 */
11910 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11911 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11912 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11913
11914 static const char driver_version[] = "1.16ac"; /* no spaces */
11915 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11916 BUG_ON(cpu != 0);
11917 gdt = get_cpu_gdt_table(cpu);
11918 save_desc_40 = gdt[0x40 / 8];
11919 +
11920 + pax_open_kernel();
11921 gdt[0x40 / 8] = bad_bios_desc;
11922 + pax_close_kernel();
11923
11924 apm_irq_save(flags);
11925 APM_DO_SAVE_SEGS;
11926 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11927 &call->esi);
11928 APM_DO_RESTORE_SEGS;
11929 apm_irq_restore(flags);
11930 +
11931 + pax_open_kernel();
11932 gdt[0x40 / 8] = save_desc_40;
11933 + pax_close_kernel();
11934 +
11935 put_cpu();
11936
11937 return call->eax & 0xff;
11938 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11939 BUG_ON(cpu != 0);
11940 gdt = get_cpu_gdt_table(cpu);
11941 save_desc_40 = gdt[0x40 / 8];
11942 +
11943 + pax_open_kernel();
11944 gdt[0x40 / 8] = bad_bios_desc;
11945 + pax_close_kernel();
11946
11947 apm_irq_save(flags);
11948 APM_DO_SAVE_SEGS;
11949 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11950 &call->eax);
11951 APM_DO_RESTORE_SEGS;
11952 apm_irq_restore(flags);
11953 +
11954 + pax_open_kernel();
11955 gdt[0x40 / 8] = save_desc_40;
11956 + pax_close_kernel();
11957 +
11958 put_cpu();
11959 return error;
11960 }
11961 @@ -975,7 +989,7 @@ recalc:
11962
11963 static void apm_power_off(void)
11964 {
11965 - unsigned char po_bios_call[] = {
11966 + const unsigned char po_bios_call[] = {
11967 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11968 0x8e, 0xd0, /* movw ax,ss */
11969 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11970 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11971 * code to that CPU.
11972 */
11973 gdt = get_cpu_gdt_table(0);
11974 +
11975 + pax_open_kernel();
11976 set_desc_base(&gdt[APM_CS >> 3],
11977 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11978 set_desc_base(&gdt[APM_CS_16 >> 3],
11979 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11980 set_desc_base(&gdt[APM_DS >> 3],
11981 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11982 + pax_close_kernel();
11983
11984 proc_create("apm", 0, NULL, &apm_file_ops);
11985
11986 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
11987 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11988 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11989 @@ -51,7 +51,6 @@ void foo(void)
11990 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11991 BLANK();
11992
11993 - OFFSET(TI_task, thread_info, task);
11994 OFFSET(TI_exec_domain, thread_info, exec_domain);
11995 OFFSET(TI_flags, thread_info, flags);
11996 OFFSET(TI_status, thread_info, status);
11997 @@ -60,6 +59,8 @@ void foo(void)
11998 OFFSET(TI_restart_block, thread_info, restart_block);
11999 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12000 OFFSET(TI_cpu, thread_info, cpu);
12001 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12002 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12003 BLANK();
12004
12005 OFFSET(GDS_size, desc_ptr, size);
12006 @@ -99,6 +100,7 @@ void foo(void)
12007
12008 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12009 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12010 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12011 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12012 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12013 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12014 @@ -115,6 +117,11 @@ void foo(void)
12015 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12016 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12017 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12018 +
12019 +#ifdef CONFIG_PAX_KERNEXEC
12020 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12021 +#endif
12022 +
12023 #endif
12024
12025 #ifdef CONFIG_XEN
12026 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12027 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12028 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
12029 @@ -44,6 +44,8 @@ int main(void)
12030 ENTRY(addr_limit);
12031 ENTRY(preempt_count);
12032 ENTRY(status);
12033 + ENTRY(lowest_stack);
12034 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12035 #ifdef CONFIG_IA32_EMULATION
12036 ENTRY(sysenter_return);
12037 #endif
12038 @@ -63,6 +65,18 @@ int main(void)
12039 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12040 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12041 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12042 +
12043 +#ifdef CONFIG_PAX_KERNEXEC
12044 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12045 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12046 +#endif
12047 +
12048 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12049 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12050 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12051 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
12052 +#endif
12053 +
12054 #endif
12055
12056
12057 @@ -115,6 +129,7 @@ int main(void)
12058 ENTRY(cr8);
12059 BLANK();
12060 #undef ENTRY
12061 + DEFINE(TSS_size, sizeof(struct tss_struct));
12062 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12063 BLANK();
12064 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12065 @@ -130,6 +145,7 @@ int main(void)
12066
12067 BLANK();
12068 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12069 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12070 #ifdef CONFIG_XEN
12071 BLANK();
12072 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12073 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12074 --- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12075 +++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12076 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12077 unsigned int size)
12078 {
12079 /* AMD errata T13 (order #21922) */
12080 - if ((c->x86 == 6)) {
12081 + if (c->x86 == 6) {
12082 /* Duron Rev A0 */
12083 if (c->x86_model == 3 && c->x86_mask == 0)
12084 size = 64;
12085 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12086 --- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12087 +++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12088 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12089
12090 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12091
12092 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12093 -#ifdef CONFIG_X86_64
12094 - /*
12095 - * We need valid kernel segments for data and code in long mode too
12096 - * IRET will check the segment types kkeil 2000/10/28
12097 - * Also sysret mandates a special GDT layout
12098 - *
12099 - * TLS descriptors are currently at a different place compared to i386.
12100 - * Hopefully nobody expects them at a fixed place (Wine?)
12101 - */
12102 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12103 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12104 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12105 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12106 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12107 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12108 -#else
12109 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12110 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12111 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12112 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12113 - /*
12114 - * Segments used for calling PnP BIOS have byte granularity.
12115 - * They code segments and data segments have fixed 64k limits,
12116 - * the transfer segment sizes are set at run time.
12117 - */
12118 - /* 32-bit code */
12119 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12120 - /* 16-bit code */
12121 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12122 - /* 16-bit data */
12123 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12124 - /* 16-bit data */
12125 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12126 - /* 16-bit data */
12127 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12128 - /*
12129 - * The APM segments have byte granularity and their bases
12130 - * are set at run time. All have 64k limits.
12131 - */
12132 - /* 32-bit code */
12133 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12134 - /* 16-bit code */
12135 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12136 - /* data */
12137 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12138 -
12139 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12140 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12141 - GDT_STACK_CANARY_INIT
12142 -#endif
12143 -} };
12144 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12145 -
12146 static int __init x86_xsave_setup(char *s)
12147 {
12148 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12149 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12150 {
12151 struct desc_ptr gdt_descr;
12152
12153 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12154 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12155 gdt_descr.size = GDT_SIZE - 1;
12156 load_gdt(&gdt_descr);
12157 /* Reload the per-cpu base */
12158 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12159 /* Filter out anything that depends on CPUID levels we don't have */
12160 filter_cpuid_features(c, true);
12161
12162 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12163 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12164 +#endif
12165 +
12166 /* If the model name is still unset, do table lookup. */
12167 if (!c->x86_model_id[0]) {
12168 const char *p;
12169 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12170 }
12171 __setup("clearcpuid=", setup_disablecpuid);
12172
12173 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12174 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12175 +
12176 #ifdef CONFIG_X86_64
12177 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12178
12179 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12180 EXPORT_PER_CPU_SYMBOL(current_task);
12181
12182 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12183 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12184 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12185 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12186
12187 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12188 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12189 {
12190 memset(regs, 0, sizeof(struct pt_regs));
12191 regs->fs = __KERNEL_PERCPU;
12192 - regs->gs = __KERNEL_STACK_CANARY;
12193 + savesegment(gs, regs->gs);
12194
12195 return regs;
12196 }
12197 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12198 int i;
12199
12200 cpu = stack_smp_processor_id();
12201 - t = &per_cpu(init_tss, cpu);
12202 + t = init_tss + cpu;
12203 orig_ist = &per_cpu(orig_ist, cpu);
12204
12205 #ifdef CONFIG_NUMA
12206 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12207 switch_to_new_gdt(cpu);
12208 loadsegment(fs, 0);
12209
12210 - load_idt((const struct desc_ptr *)&idt_descr);
12211 + load_idt(&idt_descr);
12212
12213 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12214 syscall_init();
12215 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12216 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12217 barrier();
12218
12219 - check_efer();
12220 if (cpu != 0)
12221 enable_x2apic();
12222
12223 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12224 {
12225 int cpu = smp_processor_id();
12226 struct task_struct *curr = current;
12227 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12228 + struct tss_struct *t = init_tss + cpu;
12229 struct thread_struct *thread = &curr->thread;
12230
12231 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12232 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12233 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12234 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12235 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12236 * Update the IDT descriptor and reload the IDT so that
12237 * it uses the read-only mapped virtual address.
12238 */
12239 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12240 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12241 load_idt(&idt_descr);
12242 }
12243 #endif
12244 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12245 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12246 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12247 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12248 return ret;
12249 }
12250
12251 -static struct sysfs_ops sysfs_ops = {
12252 +static const struct sysfs_ops sysfs_ops = {
12253 .show = show,
12254 .store = store,
12255 };
12256 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12257 --- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12258 +++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12259 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12260 CFLAGS_REMOVE_common.o = -pg
12261 endif
12262
12263 -# Make sure load_percpu_segment has no stackprotector
12264 -nostackp := $(call cc-option, -fno-stack-protector)
12265 -CFLAGS_common.o := $(nostackp)
12266 -
12267 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12268 obj-y += proc.o capflags.o powerflags.o common.o
12269 obj-y += vmware.o hypervisor.o sched.o
12270 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12271 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12272 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12273 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12274 return ret;
12275 }
12276
12277 -static struct sysfs_ops threshold_ops = {
12278 +static const struct sysfs_ops threshold_ops = {
12279 .show = show,
12280 .store = store,
12281 };
12282 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12283 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12284 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12285 @@ -43,6 +43,7 @@
12286 #include <asm/ipi.h>
12287 #include <asm/mce.h>
12288 #include <asm/msr.h>
12289 +#include <asm/local.h>
12290
12291 #include "mce-internal.h"
12292
12293 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12294 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12295 m->cs, m->ip);
12296
12297 - if (m->cs == __KERNEL_CS)
12298 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12299 print_symbol("{%s}", m->ip);
12300 pr_cont("\n");
12301 }
12302 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12303
12304 #define PANIC_TIMEOUT 5 /* 5 seconds */
12305
12306 -static atomic_t mce_paniced;
12307 +static atomic_unchecked_t mce_paniced;
12308
12309 static int fake_panic;
12310 -static atomic_t mce_fake_paniced;
12311 +static atomic_unchecked_t mce_fake_paniced;
12312
12313 /* Panic in progress. Enable interrupts and wait for final IPI */
12314 static void wait_for_panic(void)
12315 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12316 /*
12317 * Make sure only one CPU runs in machine check panic
12318 */
12319 - if (atomic_inc_return(&mce_paniced) > 1)
12320 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12321 wait_for_panic();
12322 barrier();
12323
12324 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12325 console_verbose();
12326 } else {
12327 /* Don't log too much for fake panic */
12328 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12329 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12330 return;
12331 }
12332 print_mce_head();
12333 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12334 * might have been modified by someone else.
12335 */
12336 rmb();
12337 - if (atomic_read(&mce_paniced))
12338 + if (atomic_read_unchecked(&mce_paniced))
12339 wait_for_panic();
12340 if (!monarch_timeout)
12341 goto out;
12342 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12343 */
12344
12345 static DEFINE_SPINLOCK(mce_state_lock);
12346 -static int open_count; /* #times opened */
12347 +static local_t open_count; /* #times opened */
12348 static int open_exclu; /* already open exclusive? */
12349
12350 static int mce_open(struct inode *inode, struct file *file)
12351 {
12352 spin_lock(&mce_state_lock);
12353
12354 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12355 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12356 spin_unlock(&mce_state_lock);
12357
12358 return -EBUSY;
12359 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12360
12361 if (file->f_flags & O_EXCL)
12362 open_exclu = 1;
12363 - open_count++;
12364 + local_inc(&open_count);
12365
12366 spin_unlock(&mce_state_lock);
12367
12368 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12369 {
12370 spin_lock(&mce_state_lock);
12371
12372 - open_count--;
12373 + local_dec(&open_count);
12374 open_exclu = 0;
12375
12376 spin_unlock(&mce_state_lock);
12377 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12378 static void mce_reset(void)
12379 {
12380 cpu_missing = 0;
12381 - atomic_set(&mce_fake_paniced, 0);
12382 + atomic_set_unchecked(&mce_fake_paniced, 0);
12383 atomic_set(&mce_executing, 0);
12384 atomic_set(&mce_callin, 0);
12385 atomic_set(&global_nwo, 0);
12386 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12387 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12388 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12389 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12390 static int inject_init(void)
12391 {
12392 printk(KERN_INFO "Machine check injector initialized\n");
12393 - mce_chrdev_ops.write = mce_write;
12394 + pax_open_kernel();
12395 + *(void **)&mce_chrdev_ops.write = mce_write;
12396 + pax_close_kernel();
12397 register_die_notifier(&mce_raise_nb);
12398 return 0;
12399 }
12400 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12401 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12402 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12403 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12404 return 0;
12405 }
12406
12407 -static struct mtrr_ops amd_mtrr_ops = {
12408 +static const struct mtrr_ops amd_mtrr_ops = {
12409 .vendor = X86_VENDOR_AMD,
12410 .set = amd_set_mtrr,
12411 .get = amd_get_mtrr,
12412 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12413 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12414 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12415 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12416 return 0;
12417 }
12418
12419 -static struct mtrr_ops centaur_mtrr_ops = {
12420 +static const struct mtrr_ops centaur_mtrr_ops = {
12421 .vendor = X86_VENDOR_CENTAUR,
12422 .set = centaur_set_mcr,
12423 .get = centaur_get_mcr,
12424 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12425 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12426 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12427 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12428 post_set();
12429 }
12430
12431 -static struct mtrr_ops cyrix_mtrr_ops = {
12432 +static const struct mtrr_ops cyrix_mtrr_ops = {
12433 .vendor = X86_VENDOR_CYRIX,
12434 .set_all = cyrix_set_all,
12435 .set = cyrix_set_arr,
12436 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12437 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12438 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12439 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12440 /*
12441 * Generic structure...
12442 */
12443 -struct mtrr_ops generic_mtrr_ops = {
12444 +const struct mtrr_ops generic_mtrr_ops = {
12445 .use_intel_if = 1,
12446 .set_all = generic_set_all,
12447 .get = generic_get_mtrr,
12448 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12449 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12450 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12451 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12452 u64 size_or_mask, size_and_mask;
12453 static bool mtrr_aps_delayed_init;
12454
12455 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12456 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12457
12458 -struct mtrr_ops *mtrr_if;
12459 +const struct mtrr_ops *mtrr_if;
12460
12461 static void set_mtrr(unsigned int reg, unsigned long base,
12462 unsigned long size, mtrr_type type);
12463
12464 -void set_mtrr_ops(struct mtrr_ops *ops)
12465 +void set_mtrr_ops(const struct mtrr_ops *ops)
12466 {
12467 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12468 mtrr_ops[ops->vendor] = ops;
12469 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12470 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12471 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12472 @@ -12,19 +12,19 @@
12473 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12474
12475 struct mtrr_ops {
12476 - u32 vendor;
12477 - u32 use_intel_if;
12478 - void (*set)(unsigned int reg, unsigned long base,
12479 + const u32 vendor;
12480 + const u32 use_intel_if;
12481 + void (* const set)(unsigned int reg, unsigned long base,
12482 unsigned long size, mtrr_type type);
12483 - void (*set_all)(void);
12484 + void (* const set_all)(void);
12485
12486 - void (*get)(unsigned int reg, unsigned long *base,
12487 + void (* const get)(unsigned int reg, unsigned long *base,
12488 unsigned long *size, mtrr_type *type);
12489 - int (*get_free_region)(unsigned long base, unsigned long size,
12490 + int (* const get_free_region)(unsigned long base, unsigned long size,
12491 int replace_reg);
12492 - int (*validate_add_page)(unsigned long base, unsigned long size,
12493 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12494 unsigned int type);
12495 - int (*have_wrcomb)(void);
12496 + int (* const have_wrcomb)(void);
12497 };
12498
12499 extern int generic_get_free_region(unsigned long base, unsigned long size,
12500 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12501 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12502 unsigned int type);
12503
12504 -extern struct mtrr_ops generic_mtrr_ops;
12505 +extern const struct mtrr_ops generic_mtrr_ops;
12506
12507 extern int positive_have_wrcomb(void);
12508
12509 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12510 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12511 void get_mtrr_state(void);
12512
12513 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12514 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12515
12516 extern u64 size_or_mask, size_and_mask;
12517 -extern struct mtrr_ops *mtrr_if;
12518 +extern const struct mtrr_ops *mtrr_if;
12519
12520 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12521 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12522 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12523 --- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12524 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12525 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12526
12527 /* Interface defining a CPU specific perfctr watchdog */
12528 struct wd_ops {
12529 - int (*reserve)(void);
12530 - void (*unreserve)(void);
12531 - int (*setup)(unsigned nmi_hz);
12532 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12533 - void (*stop)(void);
12534 + int (* const reserve)(void);
12535 + void (* const unreserve)(void);
12536 + int (* const setup)(unsigned nmi_hz);
12537 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12538 + void (* const stop)(void);
12539 unsigned perfctr;
12540 unsigned evntsel;
12541 u64 checkbit;
12542 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12543 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12544 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12545
12546 +/* cannot be const */
12547 static struct wd_ops intel_arch_wd_ops;
12548
12549 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12550 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12551 return 1;
12552 }
12553
12554 +/* cannot be const */
12555 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12556 .reserve = single_msr_reserve,
12557 .unreserve = single_msr_unreserve,
12558 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12559 --- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12560 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12561 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12562 * count to the generic event atomically:
12563 */
12564 again:
12565 - prev_raw_count = atomic64_read(&hwc->prev_count);
12566 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12567 rdmsrl(hwc->event_base + idx, new_raw_count);
12568
12569 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12570 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12571 new_raw_count) != prev_raw_count)
12572 goto again;
12573
12574 @@ -741,7 +741,7 @@ again:
12575 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12576 delta >>= shift;
12577
12578 - atomic64_add(delta, &event->count);
12579 + atomic64_add_unchecked(delta, &event->count);
12580 atomic64_sub(delta, &hwc->period_left);
12581
12582 return new_raw_count;
12583 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12584 * The hw event starts counting from this event offset,
12585 * mark it to be able to extra future deltas:
12586 */
12587 - atomic64_set(&hwc->prev_count, (u64)-left);
12588 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12589
12590 err = checking_wrmsrl(hwc->event_base + idx,
12591 (u64)(-left) & x86_pmu.event_mask);
12592 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12593 break;
12594
12595 callchain_store(entry, frame.return_address);
12596 - fp = frame.next_frame;
12597 + fp = (__force const void __user *)frame.next_frame;
12598 }
12599 }
12600
12601 diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12602 --- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12603 +++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12604 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12605 regs = args->regs;
12606
12607 #ifdef CONFIG_X86_32
12608 - if (!user_mode_vm(regs)) {
12609 + if (!user_mode(regs)) {
12610 crash_fixup_ss_esp(&fixed_regs, regs);
12611 regs = &fixed_regs;
12612 }
12613 diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12614 --- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12615 +++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12616 @@ -11,7 +11,7 @@
12617
12618 #define DOUBLEFAULT_STACKSIZE (1024)
12619 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12620 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12621 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12622
12623 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12624
12625 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12626 unsigned long gdt, tss;
12627
12628 store_gdt(&gdt_desc);
12629 - gdt = gdt_desc.address;
12630 + gdt = (unsigned long)gdt_desc.address;
12631
12632 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12633
12634 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12635 /* 0x2 bit is always set */
12636 .flags = X86_EFLAGS_SF | 0x2,
12637 .sp = STACK_START,
12638 - .es = __USER_DS,
12639 + .es = __KERNEL_DS,
12640 .cs = __KERNEL_CS,
12641 .ss = __KERNEL_DS,
12642 - .ds = __USER_DS,
12643 + .ds = __KERNEL_DS,
12644 .fs = __KERNEL_PERCPU,
12645
12646 .__cr3 = __pa_nodebug(swapper_pg_dir),
12647 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12648 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12649 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12650 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12651 #endif
12652
12653 for (;;) {
12654 - struct thread_info *context;
12655 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12656 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12657
12658 - context = (struct thread_info *)
12659 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12660 - bp = print_context_stack(context, stack, bp, ops,
12661 - data, NULL, &graph);
12662 -
12663 - stack = (unsigned long *)context->previous_esp;
12664 - if (!stack)
12665 + if (stack_start == task_stack_page(task))
12666 break;
12667 + stack = *(unsigned long **)stack_start;
12668 if (ops->stack(data, "IRQ") < 0)
12669 break;
12670 touch_nmi_watchdog();
12671 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12672 * When in-kernel, we also print out the stack and code at the
12673 * time of the fault..
12674 */
12675 - if (!user_mode_vm(regs)) {
12676 + if (!user_mode(regs)) {
12677 unsigned int code_prologue = code_bytes * 43 / 64;
12678 unsigned int code_len = code_bytes;
12679 unsigned char c;
12680 u8 *ip;
12681 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12682
12683 printk(KERN_EMERG "Stack:\n");
12684 show_stack_log_lvl(NULL, regs, &regs->sp,
12685 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12686
12687 printk(KERN_EMERG "Code: ");
12688
12689 - ip = (u8 *)regs->ip - code_prologue;
12690 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12691 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12692 /* try starting at IP */
12693 - ip = (u8 *)regs->ip;
12694 + ip = (u8 *)regs->ip + cs_base;
12695 code_len = code_len - code_prologue + 1;
12696 }
12697 for (i = 0; i < code_len; i++, ip++) {
12698 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12699 printk(" Bad EIP value.");
12700 break;
12701 }
12702 - if (ip == (u8 *)regs->ip)
12703 + if (ip == (u8 *)regs->ip + cs_base)
12704 printk("<%02x> ", c);
12705 else
12706 printk("%02x ", c);
12707 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12708 {
12709 unsigned short ud2;
12710
12711 + ip = ktla_ktva(ip);
12712 if (ip < PAGE_OFFSET)
12713 return 0;
12714 if (probe_kernel_address((unsigned short *)ip, ud2))
12715 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12716 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12717 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12718 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12719 unsigned long *irq_stack_end =
12720 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12721 unsigned used = 0;
12722 - struct thread_info *tinfo;
12723 int graph = 0;
12724 + void *stack_start;
12725
12726 if (!task)
12727 task = current;
12728 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12729 * current stack address. If the stacks consist of nested
12730 * exceptions
12731 */
12732 - tinfo = task_thread_info(task);
12733 for (;;) {
12734 char *id;
12735 unsigned long *estack_end;
12736 +
12737 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12738 &used, &id);
12739
12740 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12741 if (ops->stack(data, id) < 0)
12742 break;
12743
12744 - bp = print_context_stack(tinfo, stack, bp, ops,
12745 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12746 data, estack_end, &graph);
12747 ops->stack(data, "<EOE>");
12748 /*
12749 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12750 if (stack >= irq_stack && stack < irq_stack_end) {
12751 if (ops->stack(data, "IRQ") < 0)
12752 break;
12753 - bp = print_context_stack(tinfo, stack, bp,
12754 + bp = print_context_stack(task, irq_stack, stack, bp,
12755 ops, data, irq_stack_end, &graph);
12756 /*
12757 * We link to the next stack (which would be
12758 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12759 /*
12760 * This handles the process stack:
12761 */
12762 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12763 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12764 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12765 put_cpu();
12766 }
12767 EXPORT_SYMBOL(dump_trace);
12768 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12769 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12770 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12771 @@ -2,6 +2,9 @@
12772 * Copyright (C) 1991, 1992 Linus Torvalds
12773 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12774 */
12775 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12776 +#define __INCLUDED_BY_HIDESYM 1
12777 +#endif
12778 #include <linux/kallsyms.h>
12779 #include <linux/kprobes.h>
12780 #include <linux/uaccess.h>
12781 @@ -28,7 +31,7 @@ static int die_counter;
12782
12783 void printk_address(unsigned long address, int reliable)
12784 {
12785 - printk(" [<%p>] %s%pS\n", (void *) address,
12786 + printk(" [<%p>] %s%pA\n", (void *) address,
12787 reliable ? "" : "? ", (void *) address);
12788 }
12789
12790 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12791 static void
12792 print_ftrace_graph_addr(unsigned long addr, void *data,
12793 const struct stacktrace_ops *ops,
12794 - struct thread_info *tinfo, int *graph)
12795 + struct task_struct *task, int *graph)
12796 {
12797 - struct task_struct *task = tinfo->task;
12798 unsigned long ret_addr;
12799 int index = task->curr_ret_stack;
12800
12801 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12802 static inline void
12803 print_ftrace_graph_addr(unsigned long addr, void *data,
12804 const struct stacktrace_ops *ops,
12805 - struct thread_info *tinfo, int *graph)
12806 + struct task_struct *task, int *graph)
12807 { }
12808 #endif
12809
12810 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12811 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12812 */
12813
12814 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12815 - void *p, unsigned int size, void *end)
12816 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12817 {
12818 - void *t = tinfo;
12819 if (end) {
12820 if (p < end && p >= (end-THREAD_SIZE))
12821 return 1;
12822 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12823 }
12824
12825 unsigned long
12826 -print_context_stack(struct thread_info *tinfo,
12827 +print_context_stack(struct task_struct *task, void *stack_start,
12828 unsigned long *stack, unsigned long bp,
12829 const struct stacktrace_ops *ops, void *data,
12830 unsigned long *end, int *graph)
12831 {
12832 struct stack_frame *frame = (struct stack_frame *)bp;
12833
12834 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12835 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12836 unsigned long addr;
12837
12838 addr = *stack;
12839 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12840 } else {
12841 ops->address(data, addr, 0);
12842 }
12843 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12844 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12845 }
12846 stack++;
12847 }
12848 @@ -180,7 +180,7 @@ void dump_stack(void)
12849 #endif
12850
12851 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12852 - current->pid, current->comm, print_tainted(),
12853 + task_pid_nr(current), current->comm, print_tainted(),
12854 init_utsname()->release,
12855 (int)strcspn(init_utsname()->version, " "),
12856 init_utsname()->version);
12857 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12858 return flags;
12859 }
12860
12861 +extern void gr_handle_kernel_exploit(void);
12862 +
12863 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12864 {
12865 if (regs && kexec_should_crash(current))
12866 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12867 panic("Fatal exception in interrupt");
12868 if (panic_on_oops)
12869 panic("Fatal exception");
12870 - do_exit(signr);
12871 +
12872 + gr_handle_kernel_exploit();
12873 +
12874 + do_group_exit(signr);
12875 }
12876
12877 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12878 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12879 unsigned long flags = oops_begin();
12880 int sig = SIGSEGV;
12881
12882 - if (!user_mode_vm(regs))
12883 + if (!user_mode(regs))
12884 report_bug(regs->ip, regs);
12885
12886 if (__die(str, regs, err))
12887 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
12888 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12889 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12890 @@ -15,7 +15,7 @@
12891 #endif
12892
12893 extern unsigned long
12894 -print_context_stack(struct thread_info *tinfo,
12895 +print_context_stack(struct task_struct *task, void *stack_start,
12896 unsigned long *stack, unsigned long bp,
12897 const struct stacktrace_ops *ops, void *data,
12898 unsigned long *end, int *graph);
12899 diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
12900 --- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12901 +++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12902 @@ -733,7 +733,7 @@ struct early_res {
12903 };
12904 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12905 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12906 - {}
12907 + { 0, 0, {0}, 0 }
12908 };
12909
12910 static int __init find_overlapped_early(u64 start, u64 end)
12911 diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
12912 --- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12913 +++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12914 @@ -7,6 +7,7 @@
12915 #include <linux/pci_regs.h>
12916 #include <linux/pci_ids.h>
12917 #include <linux/errno.h>
12918 +#include <linux/sched.h>
12919 #include <asm/io.h>
12920 #include <asm/processor.h>
12921 #include <asm/fcntl.h>
12922 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12923 int n;
12924 va_list ap;
12925
12926 + pax_track_stack();
12927 +
12928 va_start(ap, fmt);
12929 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12930 early_console->write(early_console, buf, n);
12931 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
12932 --- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12933 +++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12934 @@ -38,70 +38,38 @@
12935 */
12936
12937 static unsigned long efi_rt_eflags;
12938 -static pgd_t efi_bak_pg_dir_pointer[2];
12939 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12940
12941 -void efi_call_phys_prelog(void)
12942 +void __init efi_call_phys_prelog(void)
12943 {
12944 - unsigned long cr4;
12945 - unsigned long temp;
12946 struct desc_ptr gdt_descr;
12947
12948 local_irq_save(efi_rt_eflags);
12949
12950 - /*
12951 - * If I don't have PAE, I should just duplicate two entries in page
12952 - * directory. If I have PAE, I just need to duplicate one entry in
12953 - * page directory.
12954 - */
12955 - cr4 = read_cr4_safe();
12956
12957 - if (cr4 & X86_CR4_PAE) {
12958 - efi_bak_pg_dir_pointer[0].pgd =
12959 - swapper_pg_dir[pgd_index(0)].pgd;
12960 - swapper_pg_dir[0].pgd =
12961 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12962 - } else {
12963 - efi_bak_pg_dir_pointer[0].pgd =
12964 - swapper_pg_dir[pgd_index(0)].pgd;
12965 - efi_bak_pg_dir_pointer[1].pgd =
12966 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12967 - swapper_pg_dir[pgd_index(0)].pgd =
12968 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12969 - temp = PAGE_OFFSET + 0x400000;
12970 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12971 - swapper_pg_dir[pgd_index(temp)].pgd;
12972 - }
12973 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12974 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12975 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12976
12977 /*
12978 * After the lock is released, the original page table is restored.
12979 */
12980 __flush_tlb_all();
12981
12982 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12983 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12984 gdt_descr.size = GDT_SIZE - 1;
12985 load_gdt(&gdt_descr);
12986 }
12987
12988 -void efi_call_phys_epilog(void)
12989 +void __init efi_call_phys_epilog(void)
12990 {
12991 - unsigned long cr4;
12992 struct desc_ptr gdt_descr;
12993
12994 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12995 + gdt_descr.address = get_cpu_gdt_table(0);
12996 gdt_descr.size = GDT_SIZE - 1;
12997 load_gdt(&gdt_descr);
12998
12999 - cr4 = read_cr4_safe();
13000 -
13001 - if (cr4 & X86_CR4_PAE) {
13002 - swapper_pg_dir[pgd_index(0)].pgd =
13003 - efi_bak_pg_dir_pointer[0].pgd;
13004 - } else {
13005 - swapper_pg_dir[pgd_index(0)].pgd =
13006 - efi_bak_pg_dir_pointer[0].pgd;
13007 - swapper_pg_dir[pgd_index(0x400000)].pgd =
13008 - efi_bak_pg_dir_pointer[1].pgd;
13009 - }
13010 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13011
13012 /*
13013 * After the lock is released, the original page table is restored.
13014 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13015 --- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13016 +++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13017 @@ -6,6 +6,7 @@
13018 */
13019
13020 #include <linux/linkage.h>
13021 +#include <linux/init.h>
13022 #include <asm/page_types.h>
13023
13024 /*
13025 @@ -20,7 +21,7 @@
13026 * service functions will comply with gcc calling convention, too.
13027 */
13028
13029 -.text
13030 +__INIT
13031 ENTRY(efi_call_phys)
13032 /*
13033 * 0. The function can only be called in Linux kernel. So CS has been
13034 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13035 * The mapping of lower virtual memory has been created in prelog and
13036 * epilog.
13037 */
13038 - movl $1f, %edx
13039 - subl $__PAGE_OFFSET, %edx
13040 - jmp *%edx
13041 + jmp 1f-__PAGE_OFFSET
13042 1:
13043
13044 /*
13045 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13046 * parameter 2, ..., param n. To make things easy, we save the return
13047 * address of efi_call_phys in a global variable.
13048 */
13049 - popl %edx
13050 - movl %edx, saved_return_addr
13051 - /* get the function pointer into ECX*/
13052 - popl %ecx
13053 - movl %ecx, efi_rt_function_ptr
13054 - movl $2f, %edx
13055 - subl $__PAGE_OFFSET, %edx
13056 - pushl %edx
13057 + popl (saved_return_addr)
13058 + popl (efi_rt_function_ptr)
13059
13060 /*
13061 * 3. Clear PG bit in %CR0.
13062 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13063 /*
13064 * 5. Call the physical function.
13065 */
13066 - jmp *%ecx
13067 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
13068
13069 -2:
13070 /*
13071 * 6. After EFI runtime service returns, control will return to
13072 * following instruction. We'd better readjust stack pointer first.
13073 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13074 movl %cr0, %edx
13075 orl $0x80000000, %edx
13076 movl %edx, %cr0
13077 - jmp 1f
13078 -1:
13079 +
13080 /*
13081 * 8. Now restore the virtual mode from flat mode by
13082 * adding EIP with PAGE_OFFSET.
13083 */
13084 - movl $1f, %edx
13085 - jmp *%edx
13086 + jmp 1f+__PAGE_OFFSET
13087 1:
13088
13089 /*
13090 * 9. Balance the stack. And because EAX contain the return value,
13091 * we'd better not clobber it.
13092 */
13093 - leal efi_rt_function_ptr, %edx
13094 - movl (%edx), %ecx
13095 - pushl %ecx
13096 + pushl (efi_rt_function_ptr)
13097
13098 /*
13099 - * 10. Push the saved return address onto the stack and return.
13100 + * 10. Return to the saved return address.
13101 */
13102 - leal saved_return_addr, %edx
13103 - movl (%edx), %ecx
13104 - pushl %ecx
13105 - ret
13106 + jmpl *(saved_return_addr)
13107 ENDPROC(efi_call_phys)
13108 .previous
13109
13110 -.data
13111 +__INITDATA
13112 saved_return_addr:
13113 .long 0
13114 efi_rt_function_ptr:
13115 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13116 --- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13117 +++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13118 @@ -185,13 +185,146 @@
13119 /*CFI_REL_OFFSET gs, PT_GS*/
13120 .endm
13121 .macro SET_KERNEL_GS reg
13122 +
13123 +#ifdef CONFIG_CC_STACKPROTECTOR
13124 movl $(__KERNEL_STACK_CANARY), \reg
13125 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13126 + movl $(__USER_DS), \reg
13127 +#else
13128 + xorl \reg, \reg
13129 +#endif
13130 +
13131 movl \reg, %gs
13132 .endm
13133
13134 #endif /* CONFIG_X86_32_LAZY_GS */
13135
13136 -.macro SAVE_ALL
13137 +.macro pax_enter_kernel
13138 +#ifdef CONFIG_PAX_KERNEXEC
13139 + call pax_enter_kernel
13140 +#endif
13141 +.endm
13142 +
13143 +.macro pax_exit_kernel
13144 +#ifdef CONFIG_PAX_KERNEXEC
13145 + call pax_exit_kernel
13146 +#endif
13147 +.endm
13148 +
13149 +#ifdef CONFIG_PAX_KERNEXEC
13150 +ENTRY(pax_enter_kernel)
13151 +#ifdef CONFIG_PARAVIRT
13152 + pushl %eax
13153 + pushl %ecx
13154 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13155 + mov %eax, %esi
13156 +#else
13157 + mov %cr0, %esi
13158 +#endif
13159 + bts $16, %esi
13160 + jnc 1f
13161 + mov %cs, %esi
13162 + cmp $__KERNEL_CS, %esi
13163 + jz 3f
13164 + ljmp $__KERNEL_CS, $3f
13165 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13166 +2:
13167 +#ifdef CONFIG_PARAVIRT
13168 + mov %esi, %eax
13169 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13170 +#else
13171 + mov %esi, %cr0
13172 +#endif
13173 +3:
13174 +#ifdef CONFIG_PARAVIRT
13175 + popl %ecx
13176 + popl %eax
13177 +#endif
13178 + ret
13179 +ENDPROC(pax_enter_kernel)
13180 +
13181 +ENTRY(pax_exit_kernel)
13182 +#ifdef CONFIG_PARAVIRT
13183 + pushl %eax
13184 + pushl %ecx
13185 +#endif
13186 + mov %cs, %esi
13187 + cmp $__KERNEXEC_KERNEL_CS, %esi
13188 + jnz 2f
13189 +#ifdef CONFIG_PARAVIRT
13190 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13191 + mov %eax, %esi
13192 +#else
13193 + mov %cr0, %esi
13194 +#endif
13195 + btr $16, %esi
13196 + ljmp $__KERNEL_CS, $1f
13197 +1:
13198 +#ifdef CONFIG_PARAVIRT
13199 + mov %esi, %eax
13200 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13201 +#else
13202 + mov %esi, %cr0
13203 +#endif
13204 +2:
13205 +#ifdef CONFIG_PARAVIRT
13206 + popl %ecx
13207 + popl %eax
13208 +#endif
13209 + ret
13210 +ENDPROC(pax_exit_kernel)
13211 +#endif
13212 +
13213 +.macro pax_erase_kstack
13214 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13215 + call pax_erase_kstack
13216 +#endif
13217 +.endm
13218 +
13219 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13220 +/*
13221 + * ebp: thread_info
13222 + * ecx, edx: can be clobbered
13223 + */
13224 +ENTRY(pax_erase_kstack)
13225 + pushl %edi
13226 + pushl %eax
13227 +
13228 + mov TI_lowest_stack(%ebp), %edi
13229 + mov $-0xBEEF, %eax
13230 + std
13231 +
13232 +1: mov %edi, %ecx
13233 + and $THREAD_SIZE_asm - 1, %ecx
13234 + shr $2, %ecx
13235 + repne scasl
13236 + jecxz 2f
13237 +
13238 + cmp $2*16, %ecx
13239 + jc 2f
13240 +
13241 + mov $2*16, %ecx
13242 + repe scasl
13243 + jecxz 2f
13244 + jne 1b
13245 +
13246 +2: cld
13247 + mov %esp, %ecx
13248 + sub %edi, %ecx
13249 + shr $2, %ecx
13250 + rep stosl
13251 +
13252 + mov TI_task_thread_sp0(%ebp), %edi
13253 + sub $128, %edi
13254 + mov %edi, TI_lowest_stack(%ebp)
13255 +
13256 + popl %eax
13257 + popl %edi
13258 + ret
13259 +ENDPROC(pax_erase_kstack)
13260 +#endif
13261 +
13262 +.macro __SAVE_ALL _DS
13263 cld
13264 PUSH_GS
13265 pushl %fs
13266 @@ -224,7 +357,7 @@
13267 pushl %ebx
13268 CFI_ADJUST_CFA_OFFSET 4
13269 CFI_REL_OFFSET ebx, 0
13270 - movl $(__USER_DS), %edx
13271 + movl $\_DS, %edx
13272 movl %edx, %ds
13273 movl %edx, %es
13274 movl $(__KERNEL_PERCPU), %edx
13275 @@ -232,6 +365,15 @@
13276 SET_KERNEL_GS %edx
13277 .endm
13278
13279 +.macro SAVE_ALL
13280 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13281 + __SAVE_ALL __KERNEL_DS
13282 + pax_enter_kernel
13283 +#else
13284 + __SAVE_ALL __USER_DS
13285 +#endif
13286 +.endm
13287 +
13288 .macro RESTORE_INT_REGS
13289 popl %ebx
13290 CFI_ADJUST_CFA_OFFSET -4
13291 @@ -352,7 +494,15 @@ check_userspace:
13292 movb PT_CS(%esp), %al
13293 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13294 cmpl $USER_RPL, %eax
13295 +
13296 +#ifdef CONFIG_PAX_KERNEXEC
13297 + jae resume_userspace
13298 +
13299 + PAX_EXIT_KERNEL
13300 + jmp resume_kernel
13301 +#else
13302 jb resume_kernel # not returning to v8086 or userspace
13303 +#endif
13304
13305 ENTRY(resume_userspace)
13306 LOCKDEP_SYS_EXIT
13307 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13308 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13309 # int/exception return?
13310 jne work_pending
13311 - jmp restore_all
13312 + jmp restore_all_pax
13313 END(ret_from_exception)
13314
13315 #ifdef CONFIG_PREEMPT
13316 @@ -414,25 +564,36 @@ sysenter_past_esp:
13317 /*CFI_REL_OFFSET cs, 0*/
13318 /*
13319 * Push current_thread_info()->sysenter_return to the stack.
13320 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13321 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13322 */
13323 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13324 + pushl $0
13325 CFI_ADJUST_CFA_OFFSET 4
13326 CFI_REL_OFFSET eip, 0
13327
13328 pushl %eax
13329 CFI_ADJUST_CFA_OFFSET 4
13330 SAVE_ALL
13331 + GET_THREAD_INFO(%ebp)
13332 + movl TI_sysenter_return(%ebp),%ebp
13333 + movl %ebp,PT_EIP(%esp)
13334 ENABLE_INTERRUPTS(CLBR_NONE)
13335
13336 /*
13337 * Load the potential sixth argument from user stack.
13338 * Careful about security.
13339 */
13340 + movl PT_OLDESP(%esp),%ebp
13341 +
13342 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13343 + mov PT_OLDSS(%esp),%ds
13344 +1: movl %ds:(%ebp),%ebp
13345 + push %ss
13346 + pop %ds
13347 +#else
13348 cmpl $__PAGE_OFFSET-3,%ebp
13349 jae syscall_fault
13350 1: movl (%ebp),%ebp
13351 +#endif
13352 +
13353 movl %ebp,PT_EBP(%esp)
13354 .section __ex_table,"a"
13355 .align 4
13356 @@ -455,12 +616,23 @@ sysenter_do_call:
13357 testl $_TIF_ALLWORK_MASK, %ecx
13358 jne sysexit_audit
13359 sysenter_exit:
13360 +
13361 +#ifdef CONFIG_PAX_RANDKSTACK
13362 + pushl_cfi %eax
13363 + call pax_randomize_kstack
13364 + popl_cfi %eax
13365 +#endif
13366 +
13367 + pax_erase_kstack
13368 +
13369 /* if something modifies registers it must also disable sysexit */
13370 movl PT_EIP(%esp), %edx
13371 movl PT_OLDESP(%esp), %ecx
13372 xorl %ebp,%ebp
13373 TRACE_IRQS_ON
13374 1: mov PT_FS(%esp), %fs
13375 +2: mov PT_DS(%esp), %ds
13376 +3: mov PT_ES(%esp), %es
13377 PTGS_TO_GS
13378 ENABLE_INTERRUPTS_SYSEXIT
13379
13380 @@ -477,6 +649,9 @@ sysenter_audit:
13381 movl %eax,%edx /* 2nd arg: syscall number */
13382 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13383 call audit_syscall_entry
13384 +
13385 + pax_erase_kstack
13386 +
13387 pushl %ebx
13388 CFI_ADJUST_CFA_OFFSET 4
13389 movl PT_EAX(%esp),%eax /* reload syscall number */
13390 @@ -504,11 +679,17 @@ sysexit_audit:
13391
13392 CFI_ENDPROC
13393 .pushsection .fixup,"ax"
13394 -2: movl $0,PT_FS(%esp)
13395 +4: movl $0,PT_FS(%esp)
13396 + jmp 1b
13397 +5: movl $0,PT_DS(%esp)
13398 + jmp 1b
13399 +6: movl $0,PT_ES(%esp)
13400 jmp 1b
13401 .section __ex_table,"a"
13402 .align 4
13403 - .long 1b,2b
13404 + .long 1b,4b
13405 + .long 2b,5b
13406 + .long 3b,6b
13407 .popsection
13408 PTGS_TO_GS_EX
13409 ENDPROC(ia32_sysenter_target)
13410 @@ -538,6 +719,14 @@ syscall_exit:
13411 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13412 jne syscall_exit_work
13413
13414 +restore_all_pax:
13415 +
13416 +#ifdef CONFIG_PAX_RANDKSTACK
13417 + call pax_randomize_kstack
13418 +#endif
13419 +
13420 + pax_erase_kstack
13421 +
13422 restore_all:
13423 TRACE_IRQS_IRET
13424 restore_all_notrace:
13425 @@ -602,7 +791,13 @@ ldt_ss:
13426 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13427 mov %dx, %ax /* eax: new kernel esp */
13428 sub %eax, %edx /* offset (low word is 0) */
13429 - PER_CPU(gdt_page, %ebx)
13430 +#ifdef CONFIG_SMP
13431 + movl PER_CPU_VAR(cpu_number), %ebx
13432 + shll $PAGE_SHIFT_asm, %ebx
13433 + addl $cpu_gdt_table, %ebx
13434 +#else
13435 + movl $cpu_gdt_table, %ebx
13436 +#endif
13437 shr $16, %edx
13438 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13439 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13440 @@ -636,31 +831,25 @@ work_resched:
13441 movl TI_flags(%ebp), %ecx
13442 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13443 # than syscall tracing?
13444 - jz restore_all
13445 + jz restore_all_pax
13446 testb $_TIF_NEED_RESCHED, %cl
13447 jnz work_resched
13448
13449 work_notifysig: # deal with pending signals and
13450 # notify-resume requests
13451 + movl %esp, %eax
13452 #ifdef CONFIG_VM86
13453 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13454 - movl %esp, %eax
13455 - jne work_notifysig_v86 # returning to kernel-space or
13456 + jz 1f # returning to kernel-space or
13457 # vm86-space
13458 - xorl %edx, %edx
13459 - call do_notify_resume
13460 - jmp resume_userspace_sig
13461
13462 - ALIGN
13463 -work_notifysig_v86:
13464 pushl %ecx # save ti_flags for do_notify_resume
13465 CFI_ADJUST_CFA_OFFSET 4
13466 call save_v86_state # %eax contains pt_regs pointer
13467 popl %ecx
13468 CFI_ADJUST_CFA_OFFSET -4
13469 movl %eax, %esp
13470 -#else
13471 - movl %esp, %eax
13472 +1:
13473 #endif
13474 xorl %edx, %edx
13475 call do_notify_resume
13476 @@ -673,6 +862,9 @@ syscall_trace_entry:
13477 movl $-ENOSYS,PT_EAX(%esp)
13478 movl %esp, %eax
13479 call syscall_trace_enter
13480 +
13481 + pax_erase_kstack
13482 +
13483 /* What it returned is what we'll actually use. */
13484 cmpl $(nr_syscalls), %eax
13485 jnae syscall_call
13486 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13487
13488 RING0_INT_FRAME # can't unwind into user space anyway
13489 syscall_fault:
13490 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13491 + push %ss
13492 + pop %ds
13493 +#endif
13494 GET_THREAD_INFO(%ebp)
13495 movl $-EFAULT,PT_EAX(%esp)
13496 jmp resume_userspace
13497 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13498 PTREGSCALL(vm86)
13499 PTREGSCALL(vm86old)
13500
13501 + ALIGN;
13502 +ENTRY(kernel_execve)
13503 + push %ebp
13504 + sub $PT_OLDSS+4,%esp
13505 + push %edi
13506 + push %ecx
13507 + push %eax
13508 + lea 3*4(%esp),%edi
13509 + mov $PT_OLDSS/4+1,%ecx
13510 + xorl %eax,%eax
13511 + rep stosl
13512 + pop %eax
13513 + pop %ecx
13514 + pop %edi
13515 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13516 + mov %eax,PT_EBX(%esp)
13517 + mov %edx,PT_ECX(%esp)
13518 + mov %ecx,PT_EDX(%esp)
13519 + mov %esp,%eax
13520 + call sys_execve
13521 + GET_THREAD_INFO(%ebp)
13522 + test %eax,%eax
13523 + jz syscall_exit
13524 + add $PT_OLDSS+4,%esp
13525 + pop %ebp
13526 + ret
13527 +
13528 .macro FIXUP_ESPFIX_STACK
13529 /*
13530 * Switch back for ESPFIX stack to the normal zerobased stack
13531 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13532 * normal stack and adjusts ESP with the matching offset.
13533 */
13534 /* fixup the stack */
13535 - PER_CPU(gdt_page, %ebx)
13536 +#ifdef CONFIG_SMP
13537 + movl PER_CPU_VAR(cpu_number), %ebx
13538 + shll $PAGE_SHIFT_asm, %ebx
13539 + addl $cpu_gdt_table, %ebx
13540 +#else
13541 + movl $cpu_gdt_table, %ebx
13542 +#endif
13543 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13544 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13545 shl $16, %eax
13546 @@ -1198,7 +1427,6 @@ return_to_handler:
13547 ret
13548 #endif
13549
13550 -.section .rodata,"a"
13551 #include "syscall_table_32.S"
13552
13553 syscall_table_size=(.-sys_call_table)
13554 @@ -1255,9 +1483,12 @@ error_code:
13555 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13556 REG_TO_PTGS %ecx
13557 SET_KERNEL_GS %ecx
13558 - movl $(__USER_DS), %ecx
13559 + movl $(__KERNEL_DS), %ecx
13560 movl %ecx, %ds
13561 movl %ecx, %es
13562 +
13563 + pax_enter_kernel
13564 +
13565 TRACE_IRQS_OFF
13566 movl %esp,%eax # pt_regs pointer
13567 call *%edi
13568 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13569 xorl %edx,%edx # zero error code
13570 movl %esp,%eax # pt_regs pointer
13571 call do_nmi
13572 +
13573 + pax_exit_kernel
13574 +
13575 jmp restore_all_notrace
13576 CFI_ENDPROC
13577
13578 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13579 FIXUP_ESPFIX_STACK # %eax == %esp
13580 xorl %edx,%edx # zero error code
13581 call do_nmi
13582 +
13583 + pax_exit_kernel
13584 +
13585 RESTORE_REGS
13586 lss 12+4(%esp), %esp # back to espfix stack
13587 CFI_ADJUST_CFA_OFFSET -24
13588 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13589 --- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13590 +++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13591 @@ -53,6 +53,7 @@
13592 #include <asm/paravirt.h>
13593 #include <asm/ftrace.h>
13594 #include <asm/percpu.h>
13595 +#include <asm/pgtable.h>
13596
13597 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13598 #include <linux/elf-em.h>
13599 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13600 ENDPROC(native_usergs_sysret64)
13601 #endif /* CONFIG_PARAVIRT */
13602
13603 + .macro ljmpq sel, off
13604 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13605 + .byte 0x48; ljmp *1234f(%rip)
13606 + .pushsection .rodata
13607 + .align 16
13608 + 1234: .quad \off; .word \sel
13609 + .popsection
13610 +#else
13611 + pushq $\sel
13612 + pushq $\off
13613 + lretq
13614 +#endif
13615 + .endm
13616 +
13617 + .macro pax_enter_kernel
13618 +#ifdef CONFIG_PAX_KERNEXEC
13619 + call pax_enter_kernel
13620 +#endif
13621 + .endm
13622 +
13623 + .macro pax_exit_kernel
13624 +#ifdef CONFIG_PAX_KERNEXEC
13625 + call pax_exit_kernel
13626 +#endif
13627 + .endm
13628 +
13629 +#ifdef CONFIG_PAX_KERNEXEC
13630 +ENTRY(pax_enter_kernel)
13631 + pushq %rdi
13632 +
13633 +#ifdef CONFIG_PARAVIRT
13634 + PV_SAVE_REGS(CLBR_RDI)
13635 +#endif
13636 +
13637 + GET_CR0_INTO_RDI
13638 + bts $16,%rdi
13639 + jnc 1f
13640 + mov %cs,%edi
13641 + cmp $__KERNEL_CS,%edi
13642 + jz 3f
13643 + ljmpq __KERNEL_CS,3f
13644 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13645 +2: SET_RDI_INTO_CR0
13646 +3:
13647 +
13648 +#ifdef CONFIG_PARAVIRT
13649 + PV_RESTORE_REGS(CLBR_RDI)
13650 +#endif
13651 +
13652 + popq %rdi
13653 + retq
13654 +ENDPROC(pax_enter_kernel)
13655 +
13656 +ENTRY(pax_exit_kernel)
13657 + pushq %rdi
13658 +
13659 +#ifdef CONFIG_PARAVIRT
13660 + PV_SAVE_REGS(CLBR_RDI)
13661 +#endif
13662 +
13663 + mov %cs,%rdi
13664 + cmp $__KERNEXEC_KERNEL_CS,%edi
13665 + jnz 2f
13666 + GET_CR0_INTO_RDI
13667 + btr $16,%rdi
13668 + ljmpq __KERNEL_CS,1f
13669 +1: SET_RDI_INTO_CR0
13670 +2:
13671 +
13672 +#ifdef CONFIG_PARAVIRT
13673 + PV_RESTORE_REGS(CLBR_RDI);
13674 +#endif
13675 +
13676 + popq %rdi
13677 + retq
13678 +ENDPROC(pax_exit_kernel)
13679 +#endif
13680 +
13681 + .macro pax_enter_kernel_user
13682 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13683 + call pax_enter_kernel_user
13684 +#endif
13685 + .endm
13686 +
13687 + .macro pax_exit_kernel_user
13688 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13689 + call pax_exit_kernel_user
13690 +#endif
13691 +#ifdef CONFIG_PAX_RANDKSTACK
13692 + push %rax
13693 + call pax_randomize_kstack
13694 + pop %rax
13695 +#endif
13696 + pax_erase_kstack
13697 + .endm
13698 +
13699 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13700 +ENTRY(pax_enter_kernel_user)
13701 + pushq %rdi
13702 + pushq %rbx
13703 +
13704 +#ifdef CONFIG_PARAVIRT
13705 + PV_SAVE_REGS(CLBR_RDI)
13706 +#endif
13707 +
13708 + GET_CR3_INTO_RDI
13709 + mov %rdi,%rbx
13710 + add $__START_KERNEL_map,%rbx
13711 + sub phys_base(%rip),%rbx
13712 +
13713 +#ifdef CONFIG_PARAVIRT
13714 + pushq %rdi
13715 + cmpl $0, pv_info+PARAVIRT_enabled
13716 + jz 1f
13717 + i = 0
13718 + .rept USER_PGD_PTRS
13719 + mov i*8(%rbx),%rsi
13720 + mov $0,%sil
13721 + lea i*8(%rbx),%rdi
13722 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13723 + i = i + 1
13724 + .endr
13725 + jmp 2f
13726 +1:
13727 +#endif
13728 +
13729 + i = 0
13730 + .rept USER_PGD_PTRS
13731 + movb $0,i*8(%rbx)
13732 + i = i + 1
13733 + .endr
13734 +
13735 +#ifdef CONFIG_PARAVIRT
13736 +2: popq %rdi
13737 +#endif
13738 + SET_RDI_INTO_CR3
13739 +
13740 +#ifdef CONFIG_PAX_KERNEXEC
13741 + GET_CR0_INTO_RDI
13742 + bts $16,%rdi
13743 + SET_RDI_INTO_CR0
13744 +#endif
13745 +
13746 +#ifdef CONFIG_PARAVIRT
13747 + PV_RESTORE_REGS(CLBR_RDI)
13748 +#endif
13749 +
13750 + popq %rbx
13751 + popq %rdi
13752 + retq
13753 +ENDPROC(pax_enter_kernel_user)
13754 +
13755 +ENTRY(pax_exit_kernel_user)
13756 + push %rdi
13757 +
13758 +#ifdef CONFIG_PARAVIRT
13759 + pushq %rbx
13760 + PV_SAVE_REGS(CLBR_RDI)
13761 +#endif
13762 +
13763 +#ifdef CONFIG_PAX_KERNEXEC
13764 + GET_CR0_INTO_RDI
13765 + btr $16,%rdi
13766 + SET_RDI_INTO_CR0
13767 +#endif
13768 +
13769 + GET_CR3_INTO_RDI
13770 + add $__START_KERNEL_map,%rdi
13771 + sub phys_base(%rip),%rdi
13772 +
13773 +#ifdef CONFIG_PARAVIRT
13774 + cmpl $0, pv_info+PARAVIRT_enabled
13775 + jz 1f
13776 + mov %rdi,%rbx
13777 + i = 0
13778 + .rept USER_PGD_PTRS
13779 + mov i*8(%rbx),%rsi
13780 + mov $0x67,%sil
13781 + lea i*8(%rbx),%rdi
13782 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13783 + i = i + 1
13784 + .endr
13785 + jmp 2f
13786 +1:
13787 +#endif
13788 +
13789 + i = 0
13790 + .rept USER_PGD_PTRS
13791 + movb $0x67,i*8(%rdi)
13792 + i = i + 1
13793 + .endr
13794 +
13795 +#ifdef CONFIG_PARAVIRT
13796 +2: PV_RESTORE_REGS(CLBR_RDI)
13797 + popq %rbx
13798 +#endif
13799 +
13800 + popq %rdi
13801 + retq
13802 +ENDPROC(pax_exit_kernel_user)
13803 +#endif
13804 +
13805 +.macro pax_erase_kstack
13806 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13807 + call pax_erase_kstack
13808 +#endif
13809 +.endm
13810 +
13811 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13812 +/*
13813 + * r10: thread_info
13814 + * rcx, rdx: can be clobbered
13815 + */
13816 +ENTRY(pax_erase_kstack)
13817 + pushq %rdi
13818 + pushq %rax
13819 +
13820 + GET_THREAD_INFO(%r10)
13821 + mov TI_lowest_stack(%r10), %rdi
13822 + mov $-0xBEEF, %rax
13823 + std
13824 +
13825 +1: mov %edi, %ecx
13826 + and $THREAD_SIZE_asm - 1, %ecx
13827 + shr $3, %ecx
13828 + repne scasq
13829 + jecxz 2f
13830 +
13831 + cmp $2*8, %ecx
13832 + jc 2f
13833 +
13834 + mov $2*8, %ecx
13835 + repe scasq
13836 + jecxz 2f
13837 + jne 1b
13838 +
13839 +2: cld
13840 + mov %esp, %ecx
13841 + sub %edi, %ecx
13842 + shr $3, %ecx
13843 + rep stosq
13844 +
13845 + mov TI_task_thread_sp0(%r10), %rdi
13846 + sub $256, %rdi
13847 + mov %rdi, TI_lowest_stack(%r10)
13848 +
13849 + popq %rax
13850 + popq %rdi
13851 + ret
13852 +ENDPROC(pax_erase_kstack)
13853 +#endif
13854
13855 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13856 #ifdef CONFIG_TRACE_IRQFLAGS
13857 @@ -317,7 +569,7 @@ ENTRY(save_args)
13858 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13859 movq_cfi rbp, 8 /* push %rbp */
13860 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13861 - testl $3, CS(%rdi)
13862 + testb $3, CS(%rdi)
13863 je 1f
13864 SWAPGS
13865 /*
13866 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13867
13868 RESTORE_REST
13869
13870 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13871 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13872 je int_ret_from_sys_call
13873
13874 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13875 @@ -455,7 +707,7 @@ END(ret_from_fork)
13876 ENTRY(system_call)
13877 CFI_STARTPROC simple
13878 CFI_SIGNAL_FRAME
13879 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13880 + CFI_DEF_CFA rsp,0
13881 CFI_REGISTER rip,rcx
13882 /*CFI_REGISTER rflags,r11*/
13883 SWAPGS_UNSAFE_STACK
13884 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13885
13886 movq %rsp,PER_CPU_VAR(old_rsp)
13887 movq PER_CPU_VAR(kernel_stack),%rsp
13888 + pax_enter_kernel_user
13889 /*
13890 * No need to follow this irqs off/on section - it's straight
13891 * and short:
13892 */
13893 ENABLE_INTERRUPTS(CLBR_NONE)
13894 - SAVE_ARGS 8,1
13895 + SAVE_ARGS 8*6,1
13896 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13897 movq %rcx,RIP-ARGOFFSET(%rsp)
13898 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13899 @@ -502,6 +755,7 @@ sysret_check:
13900 andl %edi,%edx
13901 jnz sysret_careful
13902 CFI_REMEMBER_STATE
13903 + pax_exit_kernel_user
13904 /*
13905 * sysretq will re-enable interrupts:
13906 */
13907 @@ -562,6 +816,9 @@ auditsys:
13908 movq %rax,%rsi /* 2nd arg: syscall number */
13909 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13910 call audit_syscall_entry
13911 +
13912 + pax_erase_kstack
13913 +
13914 LOAD_ARGS 0 /* reload call-clobbered registers */
13915 jmp system_call_fastpath
13916
13917 @@ -592,6 +849,9 @@ tracesys:
13918 FIXUP_TOP_OF_STACK %rdi
13919 movq %rsp,%rdi
13920 call syscall_trace_enter
13921 +
13922 + pax_erase_kstack
13923 +
13924 /*
13925 * Reload arg registers from stack in case ptrace changed them.
13926 * We don't reload %rax because syscall_trace_enter() returned
13927 @@ -613,7 +873,7 @@ tracesys:
13928 GLOBAL(int_ret_from_sys_call)
13929 DISABLE_INTERRUPTS(CLBR_NONE)
13930 TRACE_IRQS_OFF
13931 - testl $3,CS-ARGOFFSET(%rsp)
13932 + testb $3,CS-ARGOFFSET(%rsp)
13933 je retint_restore_args
13934 movl $_TIF_ALLWORK_MASK,%edi
13935 /* edi: mask to check */
13936 @@ -800,6 +1060,16 @@ END(interrupt)
13937 CFI_ADJUST_CFA_OFFSET 10*8
13938 call save_args
13939 PARTIAL_FRAME 0
13940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13941 + testb $3, CS(%rdi)
13942 + jnz 1f
13943 + pax_enter_kernel
13944 + jmp 2f
13945 +1: pax_enter_kernel_user
13946 +2:
13947 +#else
13948 + pax_enter_kernel
13949 +#endif
13950 call \func
13951 .endm
13952
13953 @@ -822,7 +1092,7 @@ ret_from_intr:
13954 CFI_ADJUST_CFA_OFFSET -8
13955 exit_intr:
13956 GET_THREAD_INFO(%rcx)
13957 - testl $3,CS-ARGOFFSET(%rsp)
13958 + testb $3,CS-ARGOFFSET(%rsp)
13959 je retint_kernel
13960
13961 /* Interrupt came from user space */
13962 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13963 * The iretq could re-enable interrupts:
13964 */
13965 DISABLE_INTERRUPTS(CLBR_ANY)
13966 + pax_exit_kernel_user
13967 TRACE_IRQS_IRETQ
13968 SWAPGS
13969 jmp restore_args
13970
13971 retint_restore_args: /* return to kernel space */
13972 DISABLE_INTERRUPTS(CLBR_ANY)
13973 + pax_exit_kernel
13974 /*
13975 * The iretq could re-enable interrupts:
13976 */
13977 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13978 CFI_ADJUST_CFA_OFFSET 15*8
13979 call error_entry
13980 DEFAULT_FRAME 0
13981 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13982 + testb $3, CS(%rsp)
13983 + jnz 1f
13984 + pax_enter_kernel
13985 + jmp 2f
13986 +1: pax_enter_kernel_user
13987 +2:
13988 +#else
13989 + pax_enter_kernel
13990 +#endif
13991 movq %rsp,%rdi /* pt_regs pointer */
13992 xorl %esi,%esi /* no error code */
13993 call \do_sym
13994 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13995 subq $15*8, %rsp
13996 call save_paranoid
13997 TRACE_IRQS_OFF
13998 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13999 + testb $3, CS(%rsp)
14000 + jnz 1f
14001 + pax_enter_kernel
14002 + jmp 2f
14003 +1: pax_enter_kernel_user
14004 +2:
14005 +#else
14006 + pax_enter_kernel
14007 +#endif
14008 movq %rsp,%rdi /* pt_regs pointer */
14009 xorl %esi,%esi /* no error code */
14010 call \do_sym
14011 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
14012 subq $15*8, %rsp
14013 call save_paranoid
14014 TRACE_IRQS_OFF
14015 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14016 + testb $3, CS(%rsp)
14017 + jnz 1f
14018 + pax_enter_kernel
14019 + jmp 2f
14020 +1: pax_enter_kernel_user
14021 +2:
14022 +#else
14023 + pax_enter_kernel
14024 +#endif
14025 movq %rsp,%rdi /* pt_regs pointer */
14026 xorl %esi,%esi /* no error code */
14027 - PER_CPU(init_tss, %rbp)
14028 +#ifdef CONFIG_SMP
14029 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14030 + lea init_tss(%rbp), %rbp
14031 +#else
14032 + lea init_tss(%rip), %rbp
14033 +#endif
14034 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14035 call \do_sym
14036 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14037 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
14038 CFI_ADJUST_CFA_OFFSET 15*8
14039 call error_entry
14040 DEFAULT_FRAME 0
14041 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14042 + testb $3, CS(%rsp)
14043 + jnz 1f
14044 + pax_enter_kernel
14045 + jmp 2f
14046 +1: pax_enter_kernel_user
14047 +2:
14048 +#else
14049 + pax_enter_kernel
14050 +#endif
14051 movq %rsp,%rdi /* pt_regs pointer */
14052 movq ORIG_RAX(%rsp),%rsi /* get error code */
14053 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14054 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
14055 call save_paranoid
14056 DEFAULT_FRAME 0
14057 TRACE_IRQS_OFF
14058 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14059 + testb $3, CS(%rsp)
14060 + jnz 1f
14061 + pax_enter_kernel
14062 + jmp 2f
14063 +1: pax_enter_kernel_user
14064 +2:
14065 +#else
14066 + pax_enter_kernel
14067 +#endif
14068 movq %rsp,%rdi /* pt_regs pointer */
14069 movq ORIG_RAX(%rsp),%rsi /* get error code */
14070 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14071 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14072 TRACE_IRQS_OFF
14073 testl %ebx,%ebx /* swapgs needed? */
14074 jnz paranoid_restore
14075 - testl $3,CS(%rsp)
14076 + testb $3,CS(%rsp)
14077 jnz paranoid_userspace
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 + pax_exit_kernel
14080 + TRACE_IRQS_IRETQ 0
14081 + SWAPGS_UNSAFE_STACK
14082 + RESTORE_ALL 8
14083 + jmp irq_return
14084 +#endif
14085 paranoid_swapgs:
14086 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14087 + pax_exit_kernel_user
14088 +#else
14089 + pax_exit_kernel
14090 +#endif
14091 TRACE_IRQS_IRETQ 0
14092 SWAPGS_UNSAFE_STACK
14093 RESTORE_ALL 8
14094 jmp irq_return
14095 paranoid_restore:
14096 + pax_exit_kernel
14097 TRACE_IRQS_IRETQ 0
14098 RESTORE_ALL 8
14099 jmp irq_return
14100 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14101 movq_cfi r14, R14+8
14102 movq_cfi r15, R15+8
14103 xorl %ebx,%ebx
14104 - testl $3,CS+8(%rsp)
14105 + testb $3,CS+8(%rsp)
14106 je error_kernelspace
14107 error_swapgs:
14108 SWAPGS
14109 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
14110 CFI_ADJUST_CFA_OFFSET 15*8
14111 call save_paranoid
14112 DEFAULT_FRAME 0
14113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14114 + testb $3, CS(%rsp)
14115 + jnz 1f
14116 + pax_enter_kernel
14117 + jmp 2f
14118 +1: pax_enter_kernel_user
14119 +2:
14120 +#else
14121 + pax_enter_kernel
14122 +#endif
14123 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14124 movq %rsp,%rdi
14125 movq $-1,%rsi
14126 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
14127 DISABLE_INTERRUPTS(CLBR_NONE)
14128 testl %ebx,%ebx /* swapgs needed? */
14129 jnz nmi_restore
14130 - testl $3,CS(%rsp)
14131 + testb $3,CS(%rsp)
14132 jnz nmi_userspace
14133 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14134 + pax_exit_kernel
14135 + SWAPGS_UNSAFE_STACK
14136 + RESTORE_ALL 8
14137 + jmp irq_return
14138 +#endif
14139 nmi_swapgs:
14140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14141 + pax_exit_kernel_user
14142 +#else
14143 + pax_exit_kernel
14144 +#endif
14145 SWAPGS_UNSAFE_STACK
14146 + RESTORE_ALL 8
14147 + jmp irq_return
14148 nmi_restore:
14149 + pax_exit_kernel
14150 RESTORE_ALL 8
14151 jmp irq_return
14152 nmi_userspace:
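The entry_64.S hunks above repeatedly turn testl $3,CS(%rsp) into testb $3,CS(%rsp) and, under CONFIG_PAX_MEMORY_UDEREF, use the same test to choose between pax_enter_kernel and pax_enter_kernel_user. The test works because the low two bits of the saved CS selector carry the privilege level of the interrupted context, so only the low byte needs to be examined (and testb encodes shorter). Below is a minimal user-space C sketch of that check; the selector values and helper names are illustrative stand-ins, not taken from the patch.

        #include <stdio.h>

        /* Stand-ins for the entry-path helpers; the real ones are asm macros. */
        static void enter_from_kernel(void) { puts("interrupted kernel mode"); }
        static void enter_from_user(void)   { puts("interrupted user mode"); }

        /* The low two bits of a selector are its privilege level; for the saved
         * CS of a trapped context this is the CPL: 0 = kernel, 3 = user. */
        static int trapped_user_mode(unsigned short cs)
        {
                return (cs & 3) != 0;   /* same condition as "testb $3, CS(%rsp)" */
        }

        int main(void)
        {
                unsigned short kernel_cs = 0x10;   /* e.g. a DPL-0 kernel code selector */
                unsigned short user_cs   = 0x33;   /* e.g. a DPL-3 user code selector */

                if (trapped_user_mode(kernel_cs))
                        enter_from_user();
                else
                        enter_from_kernel();

                if (trapped_user_mode(user_cs))
                        enter_from_user();
                else
                        enter_from_kernel();
                return 0;
        }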
14153 diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14154 --- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14155 +++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14156 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14157 static void *mod_code_newcode; /* holds the text to write to the IP */
14158
14159 static unsigned nmi_wait_count;
14160 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14161 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14162
14163 int ftrace_arch_read_dyn_info(char *buf, int size)
14164 {
14165 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14166
14167 r = snprintf(buf, size, "%u %u",
14168 nmi_wait_count,
14169 - atomic_read(&nmi_update_count));
14170 + atomic_read_unchecked(&nmi_update_count));
14171 return r;
14172 }
14173
14174 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14175 {
14176 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14177 smp_rmb();
14178 + pax_open_kernel();
14179 ftrace_mod_code();
14180 - atomic_inc(&nmi_update_count);
14181 + pax_close_kernel();
14182 + atomic_inc_unchecked(&nmi_update_count);
14183 }
14184 /* Must have previous changes seen before executions */
14185 smp_mb();
14186 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14187
14188
14189
14190 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14191 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14192
14193 static unsigned char *ftrace_nop_replace(void)
14194 {
14195 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14196 {
14197 unsigned char replaced[MCOUNT_INSN_SIZE];
14198
14199 + ip = ktla_ktva(ip);
14200 +
14201 /*
14202 * Note: Due to modules and __init, code can
14203 * disappear and change, we need to protect against faulting
14204 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14205 unsigned char old[MCOUNT_INSN_SIZE], *new;
14206 int ret;
14207
14208 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14209 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14210 new = ftrace_call_replace(ip, (unsigned long)func);
14211 ret = ftrace_modify_code(ip, old, new);
14212
14213 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14214 switch (faulted) {
14215 case 0:
14216 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14217 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14218 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14219 break;
14220 case 1:
14221 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14222 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14223 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14224 break;
14225 case 2:
14226 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14227 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14228 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14229 break;
14230 }
14231
14232 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14233 {
14234 unsigned char code[MCOUNT_INSN_SIZE];
14235
14236 + ip = ktla_ktva(ip);
14237 +
14238 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14239 return -EFAULT;
14240
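The ftrace.c hunk above (like the i8259.c and irq.c hunks further down) moves a pure statistics counter from atomic_t to atomic_unchecked_t. Under the PaX reference-count protection the ordinary atomic operations trap on signed overflow, so counters that are allowed to wrap are switched to the _unchecked variants. The real implementation is per-architecture assembly; the following is only a rough user-space model of the distinction, using the GCC/Clang overflow builtins.

        #include <limits.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* Rough model of a checked counter: refuse to wrap past INT_MAX. */
        static void checked_inc(int *v)
        {
                int result;
                if (__builtin_add_overflow(*v, 1, &result))
                        abort();        /* stands in for the trap the kernel variant raises */
                *v = result;
        }

        /* Model of the "unchecked" variant: wrapping is harmless for pure
         * statistics such as nmi_update_count or irq_err_count. */
        static void unchecked_inc(unsigned int *v)
        {
                *v += 1;                /* unsigned wrap-around is well defined */
        }

        int main(void)
        {
                unsigned int stat = UINT_MAX;
                unchecked_inc(&stat);   /* wraps to 0, which is fine for a statistic */
                printf("stat after wrap: %u\n", stat);

                int refcount = INT_MAX;
                checked_inc(&refcount); /* aborts: a real refcount must never wrap */
                return 0;
        }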
14241 diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14242 --- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14243 +++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14244 @@ -16,6 +16,7 @@
14245 #include <asm/apic.h>
14246 #include <asm/io_apic.h>
14247 #include <asm/bios_ebda.h>
14248 +#include <asm/boot.h>
14249
14250 static void __init i386_default_early_setup(void)
14251 {
14252 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14253 {
14254 reserve_trampoline_memory();
14255
14256 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14257 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14258
14259 #ifdef CONFIG_BLK_DEV_INITRD
14260 /* Reserve INITRD */
14261 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14262 --- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14263 +++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14264 @@ -19,10 +19,17 @@
14265 #include <asm/setup.h>
14266 #include <asm/processor-flags.h>
14267 #include <asm/percpu.h>
14268 +#include <asm/msr-index.h>
14269
14270 /* Physical address */
14271 #define pa(X) ((X) - __PAGE_OFFSET)
14272
14273 +#ifdef CONFIG_PAX_KERNEXEC
14274 +#define ta(X) (X)
14275 +#else
14276 +#define ta(X) ((X) - __PAGE_OFFSET)
14277 +#endif
14278 +
14279 /*
14280 * References to members of the new_cpu_data structure.
14281 */
14282 @@ -52,11 +59,7 @@
14283 * and small than max_low_pfn, otherwise will waste some page table entries
14284 */
14285
14286 -#if PTRS_PER_PMD > 1
14287 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14288 -#else
14289 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14290 -#endif
14291 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14292
14293 /* Enough space to fit pagetables for the low memory linear map */
14294 MAPPING_BEYOND_END = \
14295 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14296 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14297
14298 /*
14299 + * Real beginning of normal "text" segment
14300 + */
14301 +ENTRY(stext)
14302 +ENTRY(_stext)
14303 +
14304 +/*
14305 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14306 * %esi points to the real-mode code as a 32-bit pointer.
14307 * CS and DS must be 4 GB flat segments, but we don't depend on
14308 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14309 * can.
14310 */
14311 __HEAD
14312 +
14313 +#ifdef CONFIG_PAX_KERNEXEC
14314 + jmp startup_32
14315 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14316 +.fill PAGE_SIZE-5,1,0xcc
14317 +#endif
14318 +
14319 ENTRY(startup_32)
14320 + movl pa(stack_start),%ecx
14321 +
14322 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14323 us to not reload segments */
14324 testb $(1<<6), BP_loadflags(%esi)
14325 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14326 movl %eax,%es
14327 movl %eax,%fs
14328 movl %eax,%gs
14329 + movl %eax,%ss
14330 2:
14331 + leal -__PAGE_OFFSET(%ecx),%esp
14332 +
14333 +#ifdef CONFIG_SMP
14334 + movl $pa(cpu_gdt_table),%edi
14335 + movl $__per_cpu_load,%eax
14336 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14337 + rorl $16,%eax
14338 + movb %al,__KERNEL_PERCPU + 4(%edi)
14339 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14340 + movl $__per_cpu_end - 1,%eax
14341 + subl $__per_cpu_start,%eax
14342 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14343 +#endif
14344 +
14345 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14346 + movl $NR_CPUS,%ecx
14347 + movl $pa(cpu_gdt_table),%edi
14348 +1:
14349 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14350 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14351 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14352 + addl $PAGE_SIZE_asm,%edi
14353 + loop 1b
14354 +#endif
14355 +
14356 +#ifdef CONFIG_PAX_KERNEXEC
14357 + movl $pa(boot_gdt),%edi
14358 + movl $__LOAD_PHYSICAL_ADDR,%eax
14359 + movw %ax,__BOOT_CS + 2(%edi)
14360 + rorl $16,%eax
14361 + movb %al,__BOOT_CS + 4(%edi)
14362 + movb %ah,__BOOT_CS + 7(%edi)
14363 + rorl $16,%eax
14364 +
14365 + ljmp $(__BOOT_CS),$1f
14366 +1:
14367 +
14368 + movl $NR_CPUS,%ecx
14369 + movl $pa(cpu_gdt_table),%edi
14370 + addl $__PAGE_OFFSET,%eax
14371 +1:
14372 + movw %ax,__KERNEL_CS + 2(%edi)
14373 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14374 + rorl $16,%eax
14375 + movb %al,__KERNEL_CS + 4(%edi)
14376 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14377 + movb %ah,__KERNEL_CS + 7(%edi)
14378 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14379 + rorl $16,%eax
14380 + addl $PAGE_SIZE_asm,%edi
14381 + loop 1b
14382 +#endif
14383
14384 /*
14385 * Clear BSS first so that there are no surprises...
14386 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14387 cmpl $num_subarch_entries, %eax
14388 jae bad_subarch
14389
14390 - movl pa(subarch_entries)(,%eax,4), %eax
14391 - subl $__PAGE_OFFSET, %eax
14392 - jmp *%eax
14393 + jmp *pa(subarch_entries)(,%eax,4)
14394
14395 bad_subarch:
14396 WEAK(lguest_entry)
14397 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14398 __INITDATA
14399
14400 subarch_entries:
14401 - .long default_entry /* normal x86/PC */
14402 - .long lguest_entry /* lguest hypervisor */
14403 - .long xen_entry /* Xen hypervisor */
14404 - .long default_entry /* Moorestown MID */
14405 + .long ta(default_entry) /* normal x86/PC */
14406 + .long ta(lguest_entry) /* lguest hypervisor */
14407 + .long ta(xen_entry) /* Xen hypervisor */
14408 + .long ta(default_entry) /* Moorestown MID */
14409 num_subarch_entries = (. - subarch_entries) / 4
14410 .previous
14411 #endif /* CONFIG_PARAVIRT */
14412 @@ -218,8 +287,11 @@ default_entry:
14413 movl %eax, pa(max_pfn_mapped)
14414
14415 /* Do early initialization of the fixmap area */
14416 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14417 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14418 +#ifdef CONFIG_COMPAT_VDSO
14419 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14420 +#else
14421 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14422 +#endif
14423 #else /* Not PAE */
14424
14425 page_pde_offset = (__PAGE_OFFSET >> 20);
14426 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14427 movl %eax, pa(max_pfn_mapped)
14428
14429 /* Do early initialization of the fixmap area */
14430 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14431 - movl %eax,pa(swapper_pg_dir+0xffc)
14432 +#ifdef CONFIG_COMPAT_VDSO
14433 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14434 +#else
14435 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14436 +#endif
14437 #endif
14438 jmp 3f
14439 /*
14440 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14441 movl %eax,%es
14442 movl %eax,%fs
14443 movl %eax,%gs
14444 + movl pa(stack_start),%ecx
14445 + movl %eax,%ss
14446 + leal -__PAGE_OFFSET(%ecx),%esp
14447 #endif /* CONFIG_SMP */
14448 3:
14449
14450 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14451 orl %edx,%eax
14452 movl %eax,%cr4
14453
14454 +#ifdef CONFIG_X86_PAE
14455 btl $5, %eax # check if PAE is enabled
14456 jnc 6f
14457
14458 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14459 cpuid
14460 cmpl $0x80000000, %eax
14461 jbe 6f
14462 +
14463 + /* Clear bogus XD_DISABLE bits */
14464 + call verify_cpu
14465 +
14466 mov $0x80000001, %eax
14467 cpuid
14468 /* Execute Disable bit supported? */
14469 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14470 jnc 6f
14471
14472 /* Setup EFER (Extended Feature Enable Register) */
14473 - movl $0xc0000080, %ecx
14474 + movl $MSR_EFER, %ecx
14475 rdmsr
14476
14477 btsl $11, %eax
14478 /* Make changes effective */
14479 wrmsr
14480
14481 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14482 + movl $1,pa(nx_enabled)
14483 +#endif
14484 +
14485 6:
14486
14487 /*
14488 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14489 movl %eax,%cr0 /* ..and set paging (PG) bit */
14490 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14491 1:
14492 - /* Set up the stack pointer */
14493 - lss stack_start,%esp
14494 + /* Shift the stack pointer to a virtual address */
14495 + addl $__PAGE_OFFSET, %esp
14496
14497 /*
14498 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14499 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14500
14501 #ifdef CONFIG_SMP
14502 cmpb $0, ready
14503 - jz 1f /* Initial CPU cleans BSS */
14504 - jmp checkCPUtype
14505 -1:
14506 + jnz checkCPUtype
14507 #endif /* CONFIG_SMP */
14508
14509 /*
14510 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14511 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14512 movl %eax,%ss # after changing gdt.
14513
14514 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14515 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14516 movl %eax,%ds
14517 movl %eax,%es
14518
14519 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14520 */
14521 cmpb $0,ready
14522 jne 1f
14523 - movl $per_cpu__gdt_page,%eax
14524 + movl $cpu_gdt_table,%eax
14525 movl $per_cpu__stack_canary,%ecx
14526 +#ifdef CONFIG_SMP
14527 + addl $__per_cpu_load,%ecx
14528 +#endif
14529 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14530 shrl $16, %ecx
14531 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14532 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14533 1:
14534 -#endif
14535 movl $(__KERNEL_STACK_CANARY),%eax
14536 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14537 + movl $(__USER_DS),%eax
14538 +#else
14539 + xorl %eax,%eax
14540 +#endif
14541 movl %eax,%gs
14542
14543 xorl %eax,%eax # Clear LDT
14544 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14545
14546 cld # gcc2 wants the direction flag cleared at all times
14547 pushl $0 # fake return address for unwinder
14548 -#ifdef CONFIG_SMP
14549 - movb ready, %cl
14550 movb $1, ready
14551 - cmpb $0,%cl # the first CPU calls start_kernel
14552 - je 1f
14553 - movl (stack_start), %esp
14554 -1:
14555 -#endif /* CONFIG_SMP */
14556 jmp *(initial_code)
14557
14558 /*
14559 @@ -546,22 +631,22 @@ early_page_fault:
14560 jmp early_fault
14561
14562 early_fault:
14563 - cld
14564 #ifdef CONFIG_PRINTK
14565 + cmpl $1,%ss:early_recursion_flag
14566 + je hlt_loop
14567 + incl %ss:early_recursion_flag
14568 + cld
14569 pusha
14570 movl $(__KERNEL_DS),%eax
14571 movl %eax,%ds
14572 movl %eax,%es
14573 - cmpl $2,early_recursion_flag
14574 - je hlt_loop
14575 - incl early_recursion_flag
14576 movl %cr2,%eax
14577 pushl %eax
14578 pushl %edx /* trapno */
14579 pushl $fault_msg
14580 call printk
14581 +; call dump_stack
14582 #endif
14583 - call dump_stack
14584 hlt_loop:
14585 hlt
14586 jmp hlt_loop
14587 @@ -569,8 +654,11 @@ hlt_loop:
14588 /* This is the default interrupt "handler" :-) */
14589 ALIGN
14590 ignore_int:
14591 - cld
14592 #ifdef CONFIG_PRINTK
14593 + cmpl $2,%ss:early_recursion_flag
14594 + je hlt_loop
14595 + incl %ss:early_recursion_flag
14596 + cld
14597 pushl %eax
14598 pushl %ecx
14599 pushl %edx
14600 @@ -579,9 +667,6 @@ ignore_int:
14601 movl $(__KERNEL_DS),%eax
14602 movl %eax,%ds
14603 movl %eax,%es
14604 - cmpl $2,early_recursion_flag
14605 - je hlt_loop
14606 - incl early_recursion_flag
14607 pushl 16(%esp)
14608 pushl 24(%esp)
14609 pushl 32(%esp)
14610 @@ -600,6 +685,8 @@ ignore_int:
14611 #endif
14612 iret
14613
14614 +#include "verify_cpu.S"
14615 +
14616 __REFDATA
14617 .align 4
14618 ENTRY(initial_code)
14619 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14620 /*
14621 * BSS section
14622 */
14623 -__PAGE_ALIGNED_BSS
14624 - .align PAGE_SIZE_asm
14625 #ifdef CONFIG_X86_PAE
14626 +.section .swapper_pg_pmd,"a",@progbits
14627 swapper_pg_pmd:
14628 .fill 1024*KPMDS,4,0
14629 #else
14630 +.section .swapper_pg_dir,"a",@progbits
14631 ENTRY(swapper_pg_dir)
14632 .fill 1024,4,0
14633 #endif
14634 +.section .swapper_pg_fixmap,"a",@progbits
14635 swapper_pg_fixmap:
14636 .fill 1024,4,0
14637 #ifdef CONFIG_X86_TRAMPOLINE
14638 +.section .trampoline_pg_dir,"a",@progbits
14639 ENTRY(trampoline_pg_dir)
14640 +#ifdef CONFIG_X86_PAE
14641 + .fill 4,8,0
14642 +#else
14643 .fill 1024,4,0
14644 #endif
14645 +#endif
14646 +
14647 +.section .empty_zero_page,"a",@progbits
14648 ENTRY(empty_zero_page)
14649 .fill 4096,1,0
14650
14651 /*
14652 + * The IDT has to be page-aligned to simplify the Pentium
14653 + * F0 0F bug workaround.. We have a special link segment
14654 + * for this.
14655 + */
14656 +.section .idt,"a",@progbits
14657 +ENTRY(idt_table)
14658 + .fill 256,8,0
14659 +
14660 +/*
14661 * This starts the data section.
14662 */
14663 #ifdef CONFIG_X86_PAE
14664 -__PAGE_ALIGNED_DATA
14665 - /* Page-aligned for the benefit of paravirt? */
14666 - .align PAGE_SIZE_asm
14667 +.section .swapper_pg_dir,"a",@progbits
14668 +
14669 ENTRY(swapper_pg_dir)
14670 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14671 # if KPMDS == 3
14672 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14673 # error "Kernel PMDs should be 1, 2 or 3"
14674 # endif
14675 .align PAGE_SIZE_asm /* needs to be page-sized too */
14676 +
14677 +#ifdef CONFIG_PAX_PER_CPU_PGD
14678 +ENTRY(cpu_pgd)
14679 + .rept NR_CPUS
14680 + .fill 4,8,0
14681 + .endr
14682 +#endif
14683 +
14684 #endif
14685
14686 .data
14687 +.balign 4
14688 ENTRY(stack_start)
14689 - .long init_thread_union+THREAD_SIZE
14690 - .long __BOOT_DS
14691 + .long init_thread_union+THREAD_SIZE-8
14692
14693 ready: .byte 0
14694
14695 +.section .rodata,"a",@progbits
14696 early_recursion_flag:
14697 .long 0
14698
14699 @@ -697,7 +809,7 @@ fault_msg:
14700 .word 0 # 32 bit align gdt_desc.address
14701 boot_gdt_descr:
14702 .word __BOOT_DS+7
14703 - .long boot_gdt - __PAGE_OFFSET
14704 + .long pa(boot_gdt)
14705
14706 .word 0 # 32-bit align idt_desc.address
14707 idt_descr:
14708 @@ -708,7 +820,7 @@ idt_descr:
14709 .word 0 # 32 bit align gdt_desc.address
14710 ENTRY(early_gdt_descr)
14711 .word GDT_ENTRIES*8-1
14712 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14713 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14714
14715 /*
14716 * The boot_gdt must mirror the equivalent in setup.S and is
14717 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14718 .align L1_CACHE_BYTES
14719 ENTRY(boot_gdt)
14720 .fill GDT_ENTRY_BOOT_CS,8,0
14721 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14722 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14723 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14724 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14725 +
14726 + .align PAGE_SIZE_asm
14727 +ENTRY(cpu_gdt_table)
14728 + .rept NR_CPUS
14729 + .quad 0x0000000000000000 /* NULL descriptor */
14730 + .quad 0x0000000000000000 /* 0x0b reserved */
14731 + .quad 0x0000000000000000 /* 0x13 reserved */
14732 + .quad 0x0000000000000000 /* 0x1b reserved */
14733 +
14734 +#ifdef CONFIG_PAX_KERNEXEC
14735 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14736 +#else
14737 + .quad 0x0000000000000000 /* 0x20 unused */
14738 +#endif
14739 +
14740 + .quad 0x0000000000000000 /* 0x28 unused */
14741 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14742 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14743 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14744 + .quad 0x0000000000000000 /* 0x4b reserved */
14745 + .quad 0x0000000000000000 /* 0x53 reserved */
14746 + .quad 0x0000000000000000 /* 0x5b reserved */
14747 +
14748 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14749 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14750 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14751 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14752 +
14753 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14754 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14755 +
14756 + /*
14757 + * Segments used for calling PnP BIOS have byte granularity.
14758 + * The code segments and data segments have fixed 64k limits,
14759 + * the transfer segment sizes are set at run time.
14760 + */
14761 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14762 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14763 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14764 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14765 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14766 +
14767 + /*
14768 + * The APM segments have byte granularity and their bases
14769 + * are set at run time. All have 64k limits.
14770 + */
14771 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14772 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14773 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14774 +
14775 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14776 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14777 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14778 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14779 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14780 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14781 +
14782 + /* Be sure this is zeroed to avoid false validations in Xen */
14783 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14784 + .endr
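The cpu_gdt_table added above spells out every segment as a raw 8-byte descriptor such as 0x00cf9b000000ffff ("kernel 4GB code"). Those constants can be checked mechanically, since base, limit and the access byte sit at fixed bit positions of a legacy descriptor. The decoder below is a generic stand-alone illustration, not code from the patch.

        #include <stdint.h>
        #include <stdio.h>

        /* Decode the fields of a legacy x86 segment descriptor, i.e. one of the
         * 8-byte constants in cpu_gdt_table above (layout per the CPU manuals). */
        static void decode_descriptor(uint64_t d)
        {
                uint32_t limit  = (d & 0xffff) | ((d >> 32) & 0xf0000);
                uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
                uint8_t  access = (d >> 40) & 0xff;     /* P, DPL, S, type */
                int      gran   = (d >> 55) & 1;        /* G: limit counted in 4K pages */

                printf("base=0x%08x limit=0x%05x (%s) access=0x%02x DPL=%u %s\n",
                       base, limit, gran ? "4K pages" : "bytes",
                       access, (unsigned)((access >> 5) & 3),
                       (access & 0x08) ? "code" : "data");
        }

        int main(void)
        {
                decode_descriptor(0x00cf9b000000ffffULL); /* kernel 4GB code, DPL 0 */
                decode_descriptor(0x00cff3000000ffffULL); /* user 4GB data, DPL 3 */
                decode_descriptor(0x0040910000000017ULL); /* STACK_CANARY: 24-byte data at base 0 */
                return 0;
        }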
14785 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14786 --- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14787 +++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14788 @@ -19,6 +19,7 @@
14789 #include <asm/cache.h>
14790 #include <asm/processor-flags.h>
14791 #include <asm/percpu.h>
14792 +#include <asm/cpufeature.h>
14793
14794 #ifdef CONFIG_PARAVIRT
14795 #include <asm/asm-offsets.h>
14796 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14797 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14798 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14799 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14800 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14801 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14802 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14803 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14804
14805 .text
14806 __HEAD
14807 @@ -85,35 +90,22 @@ startup_64:
14808 */
14809 addq %rbp, init_level4_pgt + 0(%rip)
14810 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14811 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14812 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14813 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14814
14815 addq %rbp, level3_ident_pgt + 0(%rip)
14816 +#ifndef CONFIG_XEN
14817 + addq %rbp, level3_ident_pgt + 8(%rip)
14818 +#endif
14819
14820 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14821 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14822 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14823
14824 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14825 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14826 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14827
14828 - /* Add an Identity mapping if I am above 1G */
14829 - leaq _text(%rip), %rdi
14830 - andq $PMD_PAGE_MASK, %rdi
14831 -
14832 - movq %rdi, %rax
14833 - shrq $PUD_SHIFT, %rax
14834 - andq $(PTRS_PER_PUD - 1), %rax
14835 - jz ident_complete
14836 -
14837 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14838 - leaq level3_ident_pgt(%rip), %rbx
14839 - movq %rdx, 0(%rbx, %rax, 8)
14840 -
14841 - movq %rdi, %rax
14842 - shrq $PMD_SHIFT, %rax
14843 - andq $(PTRS_PER_PMD - 1), %rax
14844 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14845 - leaq level2_spare_pgt(%rip), %rbx
14846 - movq %rdx, 0(%rbx, %rax, 8)
14847 -ident_complete:
14848 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14849 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14850
14851 /*
14852 * Fixup the kernel text+data virtual addresses. Note that
14853 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14854 * after the boot processor executes this code.
14855 */
14856
14857 - /* Enable PAE mode and PGE */
14858 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14859 + /* Enable PAE mode and PSE/PGE */
14860 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14861 movq %rax, %cr4
14862
14863 /* Setup early boot stage 4 level pagetables. */
14864 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14865 movl $MSR_EFER, %ecx
14866 rdmsr
14867 btsl $_EFER_SCE, %eax /* Enable System Call */
14868 - btl $20,%edi /* No Execute supported? */
14869 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14870 jnc 1f
14871 btsl $_EFER_NX, %eax
14872 + leaq init_level4_pgt(%rip), %rdi
14873 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14874 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14875 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14876 1: wrmsr /* Make changes effective */
14877
14878 /* Setup cr0 */
14879 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14880 .quad x86_64_start_kernel
14881 ENTRY(initial_gs)
14882 .quad INIT_PER_CPU_VAR(irq_stack_union)
14883 - __FINITDATA
14884
14885 ENTRY(stack_start)
14886 .quad init_thread_union+THREAD_SIZE-8
14887 .word 0
14888 + __FINITDATA
14889
14890 bad_address:
14891 jmp bad_address
14892
14893 - .section ".init.text","ax"
14894 + __INIT
14895 #ifdef CONFIG_EARLY_PRINTK
14896 .globl early_idt_handlers
14897 early_idt_handlers:
14898 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14899 #endif /* EARLY_PRINTK */
14900 1: hlt
14901 jmp 1b
14902 + .previous
14903
14904 #ifdef CONFIG_EARLY_PRINTK
14905 + __INITDATA
14906 early_recursion_flag:
14907 .long 0
14908 + .previous
14909
14910 + .section .rodata,"a",@progbits
14911 early_idt_msg:
14912 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14913 early_idt_ripmsg:
14914 .asciz "RIP %s\n"
14915 -#endif /* CONFIG_EARLY_PRINTK */
14916 .previous
14917 +#endif /* CONFIG_EARLY_PRINTK */
14918
14919 + .section .rodata,"a",@progbits
14920 #define NEXT_PAGE(name) \
14921 .balign PAGE_SIZE; \
14922 ENTRY(name)
14923 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14924 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14925 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14926 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14927 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14928 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14929 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14930 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14931 .org init_level4_pgt + L4_START_KERNEL*8, 0
14932 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14933 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14934
14935 +#ifdef CONFIG_PAX_PER_CPU_PGD
14936 +NEXT_PAGE(cpu_pgd)
14937 + .rept NR_CPUS
14938 + .fill 512,8,0
14939 + .endr
14940 +#endif
14941 +
14942 NEXT_PAGE(level3_ident_pgt)
14943 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14944 +#ifdef CONFIG_XEN
14945 .fill 511,8,0
14946 +#else
14947 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14948 + .fill 510,8,0
14949 +#endif
14950 +
14951 +NEXT_PAGE(level3_vmalloc_pgt)
14952 + .fill 512,8,0
14953 +
14954 +NEXT_PAGE(level3_vmemmap_pgt)
14955 + .fill L3_VMEMMAP_START,8,0
14956 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14957
14958 NEXT_PAGE(level3_kernel_pgt)
14959 .fill L3_START_KERNEL,8,0
14960 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14961 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14962 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14963
14964 +NEXT_PAGE(level2_vmemmap_pgt)
14965 + .fill 512,8,0
14966 +
14967 NEXT_PAGE(level2_fixmap_pgt)
14968 - .fill 506,8,0
14969 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14970 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14971 - .fill 5,8,0
14972 + .fill 507,8,0
14973 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14974 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14975 + .fill 4,8,0
14976
14977 -NEXT_PAGE(level1_fixmap_pgt)
14978 +NEXT_PAGE(level1_vsyscall_pgt)
14979 .fill 512,8,0
14980
14981 -NEXT_PAGE(level2_ident_pgt)
14982 - /* Since I easily can, map the first 1G.
14983 + /* Since I easily can, map the first 2G.
14984 * Don't set NX because code runs from these pages.
14985 */
14986 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14987 +NEXT_PAGE(level2_ident_pgt)
14988 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14989
14990 NEXT_PAGE(level2_kernel_pgt)
14991 /*
14992 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14993 * If you want to increase this then increase MODULES_VADDR
14994 * too.)
14995 */
14996 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14997 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14998 -
14999 -NEXT_PAGE(level2_spare_pgt)
15000 - .fill 512, 8, 0
15001 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15002
15003 #undef PMDS
15004 #undef NEXT_PAGE
15005
15006 - .data
15007 + .align PAGE_SIZE
15008 +ENTRY(cpu_gdt_table)
15009 + .rept NR_CPUS
15010 + .quad 0x0000000000000000 /* NULL descriptor */
15011 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15012 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15013 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15014 + .quad 0x00cffb000000ffff /* __USER32_CS */
15015 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15016 + .quad 0x00affb000000ffff /* __USER_CS */
15017 +
15018 +#ifdef CONFIG_PAX_KERNEXEC
15019 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15020 +#else
15021 + .quad 0x0 /* unused */
15022 +#endif
15023 +
15024 + .quad 0,0 /* TSS */
15025 + .quad 0,0 /* LDT */
15026 + .quad 0,0,0 /* three TLS descriptors */
15027 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15028 + /* asm/segment.h:GDT_ENTRIES must match this */
15029 +
15030 + /* zero the remaining page */
15031 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15032 + .endr
15033 +
15034 .align 16
15035 .globl early_gdt_descr
15036 early_gdt_descr:
15037 .word GDT_ENTRIES*8-1
15038 early_gdt_descr_base:
15039 - .quad INIT_PER_CPU_VAR(gdt_page)
15040 + .quad cpu_gdt_table
15041
15042 ENTRY(phys_base)
15043 /* This must match the first entry in level2_kernel_pgt */
15044 .quad 0x0000000000000000
15045
15046 #include "../../x86/xen/xen-head.S"
15047 -
15048 - .section .bss, "aw", @nobits
15049 +
15050 + .section .rodata,"a",@progbits
15051 .align L1_CACHE_BYTES
15052 ENTRY(idt_table)
15053 - .skip IDT_ENTRIES * 16
15054 + .fill 512,8,0
15055
15056 __PAGE_ALIGNED_BSS
15057 .align PAGE_SIZE
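The head_64.S changes above add L4_VMALLOC_START and L4_VMEMMAP_START and rely on pgd_index() to find the slot for each new level-3 table; the comment "(2^48-(2*1024*1024*1024))/(2^39) = 511" in the hunk is the same arithmetic for the kernel image mapping. With 4-level paging a PGD entry spans 2^39 bytes, so the index is simply bits 47..39 of the virtual address. A small stand-alone check follows, using what I believe are the 2.6.32 x86_64 layout constants.

        #include <stdio.h>

        #define PGDIR_SHIFT   39        /* each PGD entry maps 2^39 bytes */
        #define PTRS_PER_PGD  512

        static unsigned pgd_index(unsigned long long addr)
        {
                return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
        }

        int main(void)
        {
                /* assumed 2.6.32 x86_64 layout constants, for illustration */
                unsigned long long page_offset      = 0xffff880000000000ULL; /* direct map */
                unsigned long long vmalloc_start    = 0xffffc90000000000ULL;
                unsigned long long vmemmap_start    = 0xffffea0000000000ULL;
                unsigned long long start_kernel_map = 0xffffffff80000000ULL;

                printf("L4_PAGE_OFFSET   = %u\n", pgd_index(page_offset));      /* 272 */
                printf("L4_VMALLOC_START = %u\n", pgd_index(vmalloc_start));    /* 402 */
                printf("L4_VMEMMAP_START = %u\n", pgd_index(vmemmap_start));    /* 468 */
                printf("L4_START_KERNEL  = %u\n", pgd_index(start_kernel_map)); /* 511 */
                return 0;
        }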
15058 diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15059 --- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15060 +++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15061 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15062 EXPORT_SYMBOL(cmpxchg8b_emu);
15063 #endif
15064
15065 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15066 +
15067 /* Networking helper routines. */
15068 EXPORT_SYMBOL(csum_partial_copy_generic);
15069 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15070 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15071
15072 EXPORT_SYMBOL(__get_user_1);
15073 EXPORT_SYMBOL(__get_user_2);
15074 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15075
15076 EXPORT_SYMBOL(csum_partial);
15077 EXPORT_SYMBOL(empty_zero_page);
15078 +
15079 +#ifdef CONFIG_PAX_KERNEXEC
15080 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15081 +#endif
15082 diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15083 --- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15084 +++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15085 @@ -208,7 +208,7 @@ spurious_8259A_irq:
15086 "spurious 8259A interrupt: IRQ%d.\n", irq);
15087 spurious_irq_mask |= irqmask;
15088 }
15089 - atomic_inc(&irq_err_count);
15090 + atomic_inc_unchecked(&irq_err_count);
15091 /*
15092 * Theoretically we do not have to handle this IRQ,
15093 * but in Linux this does not cause problems and is
15094 diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15095 --- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15096 +++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15097 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15098 * way process stacks are handled. This is done by having a special
15099 * "init_task" linker map entry..
15100 */
15101 -union thread_union init_thread_union __init_task_data =
15102 - { INIT_THREAD_INFO(init_task) };
15103 +union thread_union init_thread_union __init_task_data;
15104
15105 /*
15106 * Initial task structure.
15107 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15108 * section. Since TSS's are completely CPU-local, we want them
15109 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15110 */
15111 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15112 -
15113 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15114 +EXPORT_SYMBOL(init_tss);
15115 diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15116 --- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15117 +++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15118 @@ -6,6 +6,7 @@
15119 #include <linux/sched.h>
15120 #include <linux/kernel.h>
15121 #include <linux/capability.h>
15122 +#include <linux/security.h>
15123 #include <linux/errno.h>
15124 #include <linux/types.h>
15125 #include <linux/ioport.h>
15126 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15127
15128 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15129 return -EINVAL;
15130 +#ifdef CONFIG_GRKERNSEC_IO
15131 + if (turn_on && grsec_disable_privio) {
15132 + gr_handle_ioperm();
15133 + return -EPERM;
15134 + }
15135 +#endif
15136 if (turn_on && !capable(CAP_SYS_RAWIO))
15137 return -EPERM;
15138
15139 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15140 * because the ->io_bitmap_max value must match the bitmap
15141 * contents:
15142 */
15143 - tss = &per_cpu(init_tss, get_cpu());
15144 + tss = init_tss + get_cpu();
15145
15146 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15147
15148 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15149 return -EINVAL;
15150 /* Trying to gain more privileges? */
15151 if (level > old) {
15152 +#ifdef CONFIG_GRKERNSEC_IO
15153 + if (grsec_disable_privio) {
15154 + gr_handle_iopl();
15155 + return -EPERM;
15156 + }
15157 +#endif
15158 if (!capable(CAP_SYS_RAWIO))
15159 return -EPERM;
15160 }
15161 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15162 --- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15163 +++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15164 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15165 __asm__ __volatile__("andl %%esp,%0" :
15166 "=r" (sp) : "0" (THREAD_SIZE - 1));
15167
15168 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15169 + return sp < STACK_WARN;
15170 }
15171
15172 static void print_stack_overflow(void)
15173 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15174 * per-CPU IRQ handling contexts (thread information and stack)
15175 */
15176 union irq_ctx {
15177 - struct thread_info tinfo;
15178 - u32 stack[THREAD_SIZE/sizeof(u32)];
15179 -} __attribute__((aligned(PAGE_SIZE)));
15180 + unsigned long previous_esp;
15181 + u32 stack[THREAD_SIZE/sizeof(u32)];
15182 +} __attribute__((aligned(THREAD_SIZE)));
15183
15184 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15185 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15186 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15187 static inline int
15188 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15189 {
15190 - union irq_ctx *curctx, *irqctx;
15191 + union irq_ctx *irqctx;
15192 u32 *isp, arg1, arg2;
15193
15194 - curctx = (union irq_ctx *) current_thread_info();
15195 irqctx = __get_cpu_var(hardirq_ctx);
15196
15197 /*
15198 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15199 * handler) we can't do that and just have to keep using the
15200 * current stack (which is the irq stack already after all)
15201 */
15202 - if (unlikely(curctx == irqctx))
15203 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15204 return 0;
15205
15206 /* build the stack frame on the IRQ stack */
15207 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15208 - irqctx->tinfo.task = curctx->tinfo.task;
15209 - irqctx->tinfo.previous_esp = current_stack_pointer;
15210 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15211 + irqctx->previous_esp = current_stack_pointer;
15212
15213 - /*
15214 - * Copy the softirq bits in preempt_count so that the
15215 - * softirq checks work in the hardirq context.
15216 - */
15217 - irqctx->tinfo.preempt_count =
15218 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15219 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15220 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15221 + __set_fs(MAKE_MM_SEG(0));
15222 +#endif
15223
15224 if (unlikely(overflow))
15225 call_on_stack(print_stack_overflow, isp);
15226 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15227 : "0" (irq), "1" (desc), "2" (isp),
15228 "D" (desc->handle_irq)
15229 : "memory", "cc", "ecx");
15230 +
15231 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15232 + __set_fs(current_thread_info()->addr_limit);
15233 +#endif
15234 +
15235 return 1;
15236 }
15237
15238 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15239 */
15240 void __cpuinit irq_ctx_init(int cpu)
15241 {
15242 - union irq_ctx *irqctx;
15243 -
15244 if (per_cpu(hardirq_ctx, cpu))
15245 return;
15246
15247 - irqctx = &per_cpu(hardirq_stack, cpu);
15248 - irqctx->tinfo.task = NULL;
15249 - irqctx->tinfo.exec_domain = NULL;
15250 - irqctx->tinfo.cpu = cpu;
15251 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15252 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15253 -
15254 - per_cpu(hardirq_ctx, cpu) = irqctx;
15255 -
15256 - irqctx = &per_cpu(softirq_stack, cpu);
15257 - irqctx->tinfo.task = NULL;
15258 - irqctx->tinfo.exec_domain = NULL;
15259 - irqctx->tinfo.cpu = cpu;
15260 - irqctx->tinfo.preempt_count = 0;
15261 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15262 -
15263 - per_cpu(softirq_ctx, cpu) = irqctx;
15264 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15265 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15266
15267 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15268 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15269 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15270 asmlinkage void do_softirq(void)
15271 {
15272 unsigned long flags;
15273 - struct thread_info *curctx;
15274 union irq_ctx *irqctx;
15275 u32 *isp;
15276
15277 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15278 local_irq_save(flags);
15279
15280 if (local_softirq_pending()) {
15281 - curctx = current_thread_info();
15282 irqctx = __get_cpu_var(softirq_ctx);
15283 - irqctx->tinfo.task = curctx->task;
15284 - irqctx->tinfo.previous_esp = current_stack_pointer;
15285 + irqctx->previous_esp = current_stack_pointer;
15286
15287 /* build the stack frame on the softirq stack */
15288 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15289 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15290 +
15291 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15292 + __set_fs(MAKE_MM_SEG(0));
15293 +#endif
15294
15295 call_on_stack(__do_softirq, isp);
15296 +
15297 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15298 + __set_fs(current_thread_info()->addr_limit);
15299 +#endif
15300 +
15301 /*
15302 * Shouldnt happen, we returned above if in_interrupt():
15303 */
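The irq_32.c rework above removes the struct thread_info header from the per-CPU IRQ stacks and aligns union irq_ctx to THREAD_SIZE instead, so the old pointer-equality test against current_thread_info() becomes plain distance arithmetic: the saved stack pointer lies on the IRQ stack exactly when its offset from the irq_ctx base is below THREAD_SIZE. A small model of that test, assuming the usual 8K stacks of 32-bit kernels; the names are stand-ins.

        #include <stdint.h>
        #include <stdio.h>

        #define THREAD_SIZE 8192u       /* assumed 8K stacks, as on i386 in this era */

        /* A THREAD_SIZE-aligned stack, standing in for union irq_ctx. */
        static _Alignas(THREAD_SIZE) unsigned char irq_stack[THREAD_SIZE];

        /* Same idea as the patched execute_on_irq_stack() check:
         * sp is inside the stack iff (sp - base) < THREAD_SIZE
         * (the subtraction wraps to a huge value when sp is below the base). */
        static int on_irq_stack(uintptr_t sp)
        {
                return sp - (uintptr_t)irq_stack < THREAD_SIZE;
        }

        int main(void)
        {
                uintptr_t inside  = (uintptr_t)irq_stack + THREAD_SIZE - 64;
                uintptr_t outside = (uintptr_t)&inside;  /* a local on main's stack */

                printf("inside:  %d\n", on_irq_stack(inside));   /* 1 */
                printf("outside: %d\n", on_irq_stack(outside));  /* 0: main's stack is elsewhere */
                return 0;
        }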
15304 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15305 --- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15306 +++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15307 @@ -15,7 +15,7 @@
15308 #include <asm/mce.h>
15309 #include <asm/hw_irq.h>
15310
15311 -atomic_t irq_err_count;
15312 +atomic_unchecked_t irq_err_count;
15313
15314 /* Function pointer for generic interrupt vector handling */
15315 void (*generic_interrupt_extension)(void) = NULL;
15316 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15317 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15318 seq_printf(p, " Machine check polls\n");
15319 #endif
15320 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15321 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15322 #if defined(CONFIG_X86_IO_APIC)
15323 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15324 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15325 #endif
15326 return 0;
15327 }
15328 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15329
15330 u64 arch_irq_stat(void)
15331 {
15332 - u64 sum = atomic_read(&irq_err_count);
15333 + u64 sum = atomic_read_unchecked(&irq_err_count);
15334
15335 #ifdef CONFIG_X86_IO_APIC
15336 - sum += atomic_read(&irq_mis_count);
15337 + sum += atomic_read_unchecked(&irq_mis_count);
15338 #endif
15339 return sum;
15340 }
15341 diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15342 --- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15343 +++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15344 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15345
15346 /* clear the trace bit */
15347 linux_regs->flags &= ~X86_EFLAGS_TF;
15348 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15349 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15350
15351 /* set the trace bit if we're stepping */
15352 if (remcomInBuffer[0] == 's') {
15353 linux_regs->flags |= X86_EFLAGS_TF;
15354 kgdb_single_step = 1;
15355 - atomic_set(&kgdb_cpu_doing_single_step,
15356 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15357 raw_smp_processor_id());
15358 }
15359
15360 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15361 break;
15362
15363 case DIE_DEBUG:
15364 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15365 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15366 raw_smp_processor_id()) {
15367 if (user_mode(regs))
15368 return single_step_cont(regs, args);
15369 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15370 return instruction_pointer(regs);
15371 }
15372
15373 -struct kgdb_arch arch_kgdb_ops = {
15374 +const struct kgdb_arch arch_kgdb_ops = {
15375 /* Breakpoint instruction: */
15376 .gdb_bpt_instr = { 0xcc },
15377 .flags = KGDB_HW_BREAKPOINT,
15378 diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15379 --- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15380 +++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15381 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15382 char op;
15383 s32 raddr;
15384 } __attribute__((packed)) * jop;
15385 - jop = (struct __arch_jmp_op *)from;
15386 +
15387 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15388 +
15389 + pax_open_kernel();
15390 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15391 jop->op = RELATIVEJUMP_INSTRUCTION;
15392 + pax_close_kernel();
15393 }
15394
15395 /*
15396 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15397 kprobe_opcode_t opcode;
15398 kprobe_opcode_t *orig_opcodes = opcodes;
15399
15400 - if (search_exception_tables((unsigned long)opcodes))
15401 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15402 return 0; /* Page fault may occur on this address. */
15403
15404 retry:
15405 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15406 disp = (u8 *) p->addr + *((s32 *) insn) -
15407 (u8 *) p->ainsn.insn;
15408 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15409 + pax_open_kernel();
15410 *(s32 *)insn = (s32) disp;
15411 + pax_close_kernel();
15412 }
15413 }
15414 #endif
15415 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15416
15417 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15418 {
15419 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15420 + pax_open_kernel();
15421 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15422 + pax_close_kernel();
15423
15424 fix_riprel(p);
15425
15426 - if (can_boost(p->addr))
15427 + if (can_boost(ktla_ktva(p->addr)))
15428 p->ainsn.boostable = 0;
15429 else
15430 p->ainsn.boostable = -1;
15431
15432 - p->opcode = *p->addr;
15433 + p->opcode = *(ktla_ktva(p->addr));
15434 }
15435
15436 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15437 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15438 if (p->opcode == BREAKPOINT_INSTRUCTION)
15439 regs->ip = (unsigned long)p->addr;
15440 else
15441 - regs->ip = (unsigned long)p->ainsn.insn;
15442 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15443 }
15444
15445 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15446 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15447 if (p->ainsn.boostable == 1 && !p->post_handler) {
15448 /* Boost up -- we can execute copied instructions directly */
15449 reset_current_kprobe();
15450 - regs->ip = (unsigned long)p->ainsn.insn;
15451 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15452 preempt_enable_no_resched();
15453 return;
15454 }
15455 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15456 struct kprobe_ctlblk *kcb;
15457
15458 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15459 - if (*addr != BREAKPOINT_INSTRUCTION) {
15460 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15461 /*
15462 * The breakpoint instruction was removed right
15463 * after we hit it. Another cpu has removed
15464 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15465 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15466 {
15467 unsigned long *tos = stack_addr(regs);
15468 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15469 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15470 unsigned long orig_ip = (unsigned long)p->addr;
15471 kprobe_opcode_t *insn = p->ainsn.insn;
15472
15473 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15474 struct die_args *args = data;
15475 int ret = NOTIFY_DONE;
15476
15477 - if (args->regs && user_mode_vm(args->regs))
15478 + if (args->regs && user_mode(args->regs))
15479 return ret;
15480
15481 switch (val) {
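The kprobes.c hunks above (like the ftrace.c and module.c ones elsewhere in this patch) bracket every write to kernel text with pax_open_kernel()/pax_close_kernel(), because under KERNEXEC the text is normally mapped read-only and is reached through its ktla_ktva() alias for patching. The real primitives manipulate the paging write-protection rather than calling anything like mprotect(), so the sketch below is only a loose user-space analogy of the open/patch/close pattern.

        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long pagesz = sysconf(_SC_PAGESIZE);

                /* A read-only page standing in for a page of kernel text. */
                unsigned char *text = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (text == MAP_FAILED)
                        return 1;
                memset(text, 0x90, pagesz);             /* fill with NOPs */
                mprotect(text, pagesz, PROT_READ);      /* "kernel text is read-only" */

                /* pax_open_kernel(): make the text writable for the duration of the patch */
                mprotect(text, pagesz, PROT_READ | PROT_WRITE);
                text[0] = 0xcc;                         /* plant a breakpoint, as kprobes does */
                /* pax_close_kernel(): restore the read-only mapping */
                mprotect(text, pagesz, PROT_READ);

                printf("first byte is now 0x%02x\n", text[0]);
                munmap(text, pagesz);
                return 0;
        }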
15482 diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15483 --- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15484 +++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15485 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15486 if (reload) {
15487 #ifdef CONFIG_SMP
15488 preempt_disable();
15489 - load_LDT(pc);
15490 + load_LDT_nolock(pc);
15491 if (!cpumask_equal(mm_cpumask(current->mm),
15492 cpumask_of(smp_processor_id())))
15493 smp_call_function(flush_ldt, current->mm, 1);
15494 preempt_enable();
15495 #else
15496 - load_LDT(pc);
15497 + load_LDT_nolock(pc);
15498 #endif
15499 }
15500 if (oldsize) {
15501 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15502 return err;
15503
15504 for (i = 0; i < old->size; i++)
15505 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15506 + write_ldt_entry(new->ldt, i, old->ldt + i);
15507 return 0;
15508 }
15509
15510 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15511 retval = copy_ldt(&mm->context, &old_mm->context);
15512 mutex_unlock(&old_mm->context.lock);
15513 }
15514 +
15515 + if (tsk == current) {
15516 + mm->context.vdso = 0;
15517 +
15518 +#ifdef CONFIG_X86_32
15519 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15520 + mm->context.user_cs_base = 0UL;
15521 + mm->context.user_cs_limit = ~0UL;
15522 +
15523 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15524 + cpus_clear(mm->context.cpu_user_cs_mask);
15525 +#endif
15526 +
15527 +#endif
15528 +#endif
15529 +
15530 + }
15531 +
15532 return retval;
15533 }
15534
15535 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15536 }
15537 }
15538
15539 +#ifdef CONFIG_PAX_SEGMEXEC
15540 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15541 + error = -EINVAL;
15542 + goto out_unlock;
15543 + }
15544 +#endif
15545 +
15546 fill_ldt(&ldt, &ldt_info);
15547 if (oldmode)
15548 ldt.avl = 0;
15549 diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15550 --- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15551 +++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15552 @@ -26,7 +26,7 @@
15553 #include <asm/system.h>
15554 #include <asm/cacheflush.h>
15555
15556 -static void set_idt(void *newidt, __u16 limit)
15557 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15558 {
15559 struct desc_ptr curidt;
15560
15561 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15562 }
15563
15564
15565 -static void set_gdt(void *newgdt, __u16 limit)
15566 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15567 {
15568 struct desc_ptr curgdt;
15569
15570 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15571 }
15572
15573 control_page = page_address(image->control_code_page);
15574 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15575 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15576
15577 relocate_kernel_ptr = control_page;
15578 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15579 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15580 --- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15581 +++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15582 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15583 uci->mc = NULL;
15584 }
15585
15586 -static struct microcode_ops microcode_amd_ops = {
15587 +static const struct microcode_ops microcode_amd_ops = {
15588 .request_microcode_user = request_microcode_user,
15589 .request_microcode_fw = request_microcode_fw,
15590 .collect_cpu_info = collect_cpu_info_amd,
15591 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15592 .microcode_fini_cpu = microcode_fini_cpu_amd,
15593 };
15594
15595 -struct microcode_ops * __init init_amd_microcode(void)
15596 +const struct microcode_ops * __init init_amd_microcode(void)
15597 {
15598 return &microcode_amd_ops;
15599 }
15600 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15601 --- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15602 +++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15603 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15604
15605 #define MICROCODE_VERSION "2.00"
15606
15607 -static struct microcode_ops *microcode_ops;
15608 +static const struct microcode_ops *microcode_ops;
15609
15610 /*
15611 * Synchronization.
15612 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15613 --- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15614 +++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15615 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15616
15617 static int get_ucode_user(void *to, const void *from, size_t n)
15618 {
15619 - return copy_from_user(to, from, n);
15620 + return copy_from_user(to, (__force const void __user *)from, n);
15621 }
15622
15623 static enum ucode_state
15624 request_microcode_user(int cpu, const void __user *buf, size_t size)
15625 {
15626 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15627 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15628 }
15629
15630 static void microcode_fini_cpu(int cpu)
15631 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15632 uci->mc = NULL;
15633 }
15634
15635 -static struct microcode_ops microcode_intel_ops = {
15636 +static const struct microcode_ops microcode_intel_ops = {
15637 .request_microcode_user = request_microcode_user,
15638 .request_microcode_fw = request_microcode_fw,
15639 .collect_cpu_info = collect_cpu_info,
15640 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15641 .microcode_fini_cpu = microcode_fini_cpu,
15642 };
15643
15644 -struct microcode_ops * __init init_intel_microcode(void)
15645 +const struct microcode_ops * __init init_intel_microcode(void)
15646 {
15647 return &microcode_intel_ops;
15648 }
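The microcode_amd.c, microcode_core.c and microcode_intel.c hunks (and the kgdb.c one earlier) only add const to tables of function pointers, so that once such a structure lives in a read-only section a memory-corruption bug cannot be used to redirect its pointers at run time. The same idiom in plain C, with made-up names:

        #include <stdio.h>

        struct ops {
                int  (*load)(const char *name);
                void (*fini)(void);
        };

        static int  dummy_load(const char *name) { printf("loading %s\n", name); return 0; }
        static void dummy_fini(void)             { puts("done"); }

        /* Declared const: the object can be placed in .rodata and the compiler
         * rejects any later assignment such as  dummy_ops.load = evil;  */
        static const struct ops dummy_ops = {
                .load = dummy_load,
                .fini = dummy_fini,
        };

        int main(void)
        {
                dummy_ops.load("example");
                dummy_ops.fini();
                return 0;
        }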
15649 diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15650 --- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15651 +++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15652 @@ -34,7 +34,7 @@
15653 #define DEBUGP(fmt...)
15654 #endif
15655
15656 -void *module_alloc(unsigned long size)
15657 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15658 {
15659 struct vm_struct *area;
15660
15661 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15662 if (!area)
15663 return NULL;
15664
15665 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15666 - PAGE_KERNEL_EXEC);
15667 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15668 +}
15669 +
15670 +void *module_alloc(unsigned long size)
15671 +{
15672 +
15673 +#ifdef CONFIG_PAX_KERNEXEC
15674 + return __module_alloc(size, PAGE_KERNEL);
15675 +#else
15676 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15677 +#endif
15678 +
15679 }
15680
15681 /* Free memory returned from module_alloc */
15682 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15683 vfree(module_region);
15684 }
15685
15686 +#ifdef CONFIG_PAX_KERNEXEC
15687 +#ifdef CONFIG_X86_32
15688 +void *module_alloc_exec(unsigned long size)
15689 +{
15690 + struct vm_struct *area;
15691 +
15692 + if (size == 0)
15693 + return NULL;
15694 +
15695 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15696 + return area ? area->addr : NULL;
15697 +}
15698 +EXPORT_SYMBOL(module_alloc_exec);
15699 +
15700 +void module_free_exec(struct module *mod, void *module_region)
15701 +{
15702 + vunmap(module_region);
15703 +}
15704 +EXPORT_SYMBOL(module_free_exec);
15705 +#else
15706 +void module_free_exec(struct module *mod, void *module_region)
15707 +{
15708 + module_free(mod, module_region);
15709 +}
15710 +EXPORT_SYMBOL(module_free_exec);
15711 +
15712 +void *module_alloc_exec(unsigned long size)
15713 +{
15714 + return __module_alloc(size, PAGE_KERNEL_RX);
15715 +}
15716 +EXPORT_SYMBOL(module_alloc_exec);
15717 +#endif
15718 +#endif
15719 +
15720 /* We don't need anything special. */
15721 int module_frob_arch_sections(Elf_Ehdr *hdr,
15722 Elf_Shdr *sechdrs,
15723 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15724 unsigned int i;
15725 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15726 Elf32_Sym *sym;
15727 - uint32_t *location;
15728 + uint32_t *plocation, location;
15729
15730 DEBUGP("Applying relocate section %u to %u\n", relsec,
15731 sechdrs[relsec].sh_info);
15732 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15733 /* This is where to make the change */
15734 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15735 - + rel[i].r_offset;
15736 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15737 + location = (uint32_t)plocation;
15738 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15739 + plocation = ktla_ktva((void *)plocation);
15740 /* This is the symbol it is referring to. Note that all
15741 undefined symbols have been resolved. */
15742 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15743 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15744 switch (ELF32_R_TYPE(rel[i].r_info)) {
15745 case R_386_32:
15746 /* We add the value into the location given */
15747 - *location += sym->st_value;
15748 + pax_open_kernel();
15749 + *plocation += sym->st_value;
15750 + pax_close_kernel();
15751 break;
15752 case R_386_PC32:
15753 /* Add the value, subtract its postition */
15754 - *location += sym->st_value - (uint32_t)location;
15755 + pax_open_kernel();
15756 + *plocation += sym->st_value - location;
15757 + pax_close_kernel();
15758 break;
15759 default:
15760 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15761 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15762 case R_X86_64_NONE:
15763 break;
15764 case R_X86_64_64:
15765 + pax_open_kernel();
15766 *(u64 *)loc = val;
15767 + pax_close_kernel();
15768 break;
15769 case R_X86_64_32:
15770 + pax_open_kernel();
15771 *(u32 *)loc = val;
15772 + pax_close_kernel();
15773 if (val != *(u32 *)loc)
15774 goto overflow;
15775 break;
15776 case R_X86_64_32S:
15777 + pax_open_kernel();
15778 *(s32 *)loc = val;
15779 + pax_close_kernel();
15780 if ((s64)val != *(s32 *)loc)
15781 goto overflow;
15782 break;
15783 case R_X86_64_PC32:
15784 val -= (u64)loc;
15785 + pax_open_kernel();
15786 *(u32 *)loc = val;
15787 + pax_close_kernel();
15788 +
15789 #if 0
15790 if ((s64)val != *(s32 *)loc)
15791 goto overflow;
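
The pax_open_kernel()/pax_close_kernel() pairs wrapped around the relocation stores above are needed because, with CONFIG_PAX_KERNEXEC, module images are no longer mapped writable and executable at once (module_alloc() switches to PAGE_KERNEL and the executable mapping is handled via module_alloc_exec()), so the relocator must briefly lift write protection for each patched word. The user-space program below is only an analogy for that open/write/close pattern, using mprotect(2) on an ordinary read-only page; it is not the patch's implementation:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* a read-only page standing in for read-only module text */
        unsigned char *text = mmap(NULL, pagesz, PROT_READ,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (text == MAP_FAILED)
                return 1;

        mprotect(text, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()"  */
        memcpy(text, "\x90\x90\xc3", 3);                 /* apply a "relocation" */
        mprotect(text, pagesz, PROT_READ);               /* "pax_close_kernel()" */

        printf("patched first byte: %#x\n", text[0]);
        return 0;
}
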
15792 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15793 --- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15794 +++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15795 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15796 {
15797 return x;
15798 }
15799 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15800 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15801 +#endif
15802
15803 void __init default_banner(void)
15804 {
15805 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15806 * corresponding structure. */
15807 static void *get_call_destination(u8 type)
15808 {
15809 - struct paravirt_patch_template tmpl = {
15810 + const struct paravirt_patch_template tmpl = {
15811 .pv_init_ops = pv_init_ops,
15812 .pv_time_ops = pv_time_ops,
15813 .pv_cpu_ops = pv_cpu_ops,
15814 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15815 .pv_lock_ops = pv_lock_ops,
15816 #endif
15817 };
15818 +
15819 + pax_track_stack();
15820 return *((void **)&tmpl + type);
15821 }
15822
15823 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15824 if (opfunc == NULL)
15825 /* If there's no function, patch it with a ud2a (BUG) */
15826 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15827 - else if (opfunc == _paravirt_nop)
15828 + else if (opfunc == (void *)_paravirt_nop)
15829 /* If the operation is a nop, then nop the callsite */
15830 ret = paravirt_patch_nop();
15831
15832 /* identity functions just return their single argument */
15833 - else if (opfunc == _paravirt_ident_32)
15834 + else if (opfunc == (void *)_paravirt_ident_32)
15835 ret = paravirt_patch_ident_32(insnbuf, len);
15836 - else if (opfunc == _paravirt_ident_64)
15837 + else if (opfunc == (void *)_paravirt_ident_64)
15838 + ret = paravirt_patch_ident_64(insnbuf, len);
15839 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15840 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15841 ret = paravirt_patch_ident_64(insnbuf, len);
15842 +#endif
15843
15844 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15845 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15846 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15847 if (insn_len > len || start == NULL)
15848 insn_len = len;
15849 else
15850 - memcpy(insnbuf, start, insn_len);
15851 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15852
15853 return insn_len;
15854 }
15855 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15856 preempt_enable();
15857 }
15858
15859 -struct pv_info pv_info = {
15860 +struct pv_info pv_info __read_only = {
15861 .name = "bare hardware",
15862 .paravirt_enabled = 0,
15863 .kernel_rpl = 0,
15864 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15865 };
15866
15867 -struct pv_init_ops pv_init_ops = {
15868 +struct pv_init_ops pv_init_ops __read_only = {
15869 .patch = native_patch,
15870 };
15871
15872 -struct pv_time_ops pv_time_ops = {
15873 +struct pv_time_ops pv_time_ops __read_only = {
15874 .sched_clock = native_sched_clock,
15875 };
15876
15877 -struct pv_irq_ops pv_irq_ops = {
15878 +struct pv_irq_ops pv_irq_ops __read_only = {
15879 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15880 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15881 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15882 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15883 #endif
15884 };
15885
15886 -struct pv_cpu_ops pv_cpu_ops = {
15887 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15888 .cpuid = native_cpuid,
15889 .get_debugreg = native_get_debugreg,
15890 .set_debugreg = native_set_debugreg,
15891 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15892 .end_context_switch = paravirt_nop,
15893 };
15894
15895 -struct pv_apic_ops pv_apic_ops = {
15896 +struct pv_apic_ops pv_apic_ops __read_only = {
15897 #ifdef CONFIG_X86_LOCAL_APIC
15898 .startup_ipi_hook = paravirt_nop,
15899 #endif
15900 };
15901
15902 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15903 +#ifdef CONFIG_X86_32
15904 +#ifdef CONFIG_X86_PAE
15905 +/* 64-bit pagetable entries */
15906 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15907 +#else
15908 /* 32-bit pagetable entries */
15909 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15910 +#endif
15911 #else
15912 /* 64-bit pagetable entries */
15913 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15914 #endif
15915
15916 -struct pv_mmu_ops pv_mmu_ops = {
15917 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15918
15919 .read_cr2 = native_read_cr2,
15920 .write_cr2 = native_write_cr2,
15921 @@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15922 },
15923
15924 .set_fixmap = native_set_fixmap,
15925 +
15926 +#ifdef CONFIG_PAX_KERNEXEC
15927 + .pax_open_kernel = native_pax_open_kernel,
15928 + .pax_close_kernel = native_pax_close_kernel,
15929 +#endif
15930 +
15931 };
15932
15933 EXPORT_SYMBOL_GPL(pv_time_ops);
15934 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
15935 --- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15936 +++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15937 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15938 __raw_spin_lock(lock);
15939 }
15940
15941 -struct pv_lock_ops pv_lock_ops = {
15942 +struct pv_lock_ops pv_lock_ops __read_only = {
15943 #ifdef CONFIG_SMP
15944 .spin_is_locked = __ticket_spin_is_locked,
15945 .spin_is_contended = __ticket_spin_is_contended,
15946 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
15947 --- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15948 +++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15949 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15950 free_pages((unsigned long)vaddr, get_order(size));
15951 }
15952
15953 -static struct dma_map_ops calgary_dma_ops = {
15954 +static const struct dma_map_ops calgary_dma_ops = {
15955 .alloc_coherent = calgary_alloc_coherent,
15956 .free_coherent = calgary_free_coherent,
15957 .map_sg = calgary_map_sg,
15958 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
15959 --- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15960 +++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15961 @@ -14,7 +14,7 @@
15962
15963 static int forbid_dac __read_mostly;
15964
15965 -struct dma_map_ops *dma_ops;
15966 +const struct dma_map_ops *dma_ops;
15967 EXPORT_SYMBOL(dma_ops);
15968
15969 static int iommu_sac_force __read_mostly;
15970 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15971
15972 int dma_supported(struct device *dev, u64 mask)
15973 {
15974 - struct dma_map_ops *ops = get_dma_ops(dev);
15975 + const struct dma_map_ops *ops = get_dma_ops(dev);
15976
15977 #ifdef CONFIG_PCI
15978 if (mask > 0xffffffff && forbid_dac > 0) {
15979 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
15980 --- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15981 +++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15982 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15983 return -1;
15984 }
15985
15986 -static struct dma_map_ops gart_dma_ops = {
15987 +static const struct dma_map_ops gart_dma_ops = {
15988 .map_sg = gart_map_sg,
15989 .unmap_sg = gart_unmap_sg,
15990 .map_page = gart_map_page,
15991 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
15992 --- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15993 +++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15994 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15995 flush_write_buffers();
15996 }
15997
15998 -struct dma_map_ops nommu_dma_ops = {
15999 +const struct dma_map_ops nommu_dma_ops = {
16000 .alloc_coherent = dma_generic_alloc_coherent,
16001 .free_coherent = nommu_free_coherent,
16002 .map_sg = nommu_map_sg,
16003 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16004 --- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16005 +++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16006 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16007 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16008 }
16009
16010 -static struct dma_map_ops swiotlb_dma_ops = {
16011 +static const struct dma_map_ops swiotlb_dma_ops = {
16012 .mapping_error = swiotlb_dma_mapping_error,
16013 .alloc_coherent = x86_swiotlb_alloc_coherent,
16014 .free_coherent = swiotlb_free_coherent,
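
The dma_map_ops, microcode_ops and pv_*_ops changes running through this stretch all apply one idiom: operation tables whose function pointers never change after initialisation are declared const (or marked __read_only) so they land in read-only memory and cannot serve as a convenient function-pointer overwrite target. A stand-alone sketch of the idiom with made-up names, not taken from the patch:

#include <stdio.h>

struct demo_dma_ops {
        int (*map)(unsigned long addr, unsigned long len);
        void (*unmap)(unsigned long addr);
};

static int demo_map(unsigned long addr, unsigned long len)
{
        printf("map   %#lx (+%lu)\n", addr, len);
        return 0;
}

static void demo_unmap(unsigned long addr)
{
        printf("unmap %#lx\n", addr);
}

/* const => the pointer table is emitted into .rodata; "demo_ops.map = evil;"
 * is rejected at compile time and the page is not writable at run time. */
static const struct demo_dma_ops demo_ops = {
        .map   = demo_map,
        .unmap = demo_unmap,
};

int main(void)
{
        demo_ops.map(0x1000, 4096);
        demo_ops.unmap(0x1000);
        return 0;
}
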
16015 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16016 --- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16017 +++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16018 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16019 unsigned long thread_saved_pc(struct task_struct *tsk)
16020 {
16021 return ((unsigned long *)tsk->thread.sp)[3];
16022 +//XXX return tsk->thread.eip;
16023 }
16024
16025 #ifndef CONFIG_SMP
16026 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16027 unsigned short ss, gs;
16028 const char *board;
16029
16030 - if (user_mode_vm(regs)) {
16031 + if (user_mode(regs)) {
16032 sp = regs->sp;
16033 ss = regs->ss & 0xffff;
16034 - gs = get_user_gs(regs);
16035 } else {
16036 sp = (unsigned long) (&regs->sp);
16037 savesegment(ss, ss);
16038 - savesegment(gs, gs);
16039 }
16040 + gs = get_user_gs(regs);
16041
16042 printk("\n");
16043
16044 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16045 regs.bx = (unsigned long) fn;
16046 regs.dx = (unsigned long) arg;
16047
16048 - regs.ds = __USER_DS;
16049 - regs.es = __USER_DS;
16050 + regs.ds = __KERNEL_DS;
16051 + regs.es = __KERNEL_DS;
16052 regs.fs = __KERNEL_PERCPU;
16053 - regs.gs = __KERNEL_STACK_CANARY;
16054 + savesegment(gs, regs.gs);
16055 regs.orig_ax = -1;
16056 regs.ip = (unsigned long) kernel_thread_helper;
16057 regs.cs = __KERNEL_CS | get_kernel_rpl();
16058 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16059 struct task_struct *tsk;
16060 int err;
16061
16062 - childregs = task_pt_regs(p);
16063 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16064 *childregs = *regs;
16065 childregs->ax = 0;
16066 childregs->sp = sp;
16067
16068 p->thread.sp = (unsigned long) childregs;
16069 p->thread.sp0 = (unsigned long) (childregs+1);
16070 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16071
16072 p->thread.ip = (unsigned long) ret_from_fork;
16073
16074 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16075 struct thread_struct *prev = &prev_p->thread,
16076 *next = &next_p->thread;
16077 int cpu = smp_processor_id();
16078 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16079 + struct tss_struct *tss = init_tss + cpu;
16080 bool preload_fpu;
16081
16082 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16083 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16084 */
16085 lazy_save_gs(prev->gs);
16086
16087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16088 + __set_fs(task_thread_info(next_p)->addr_limit);
16089 +#endif
16090 +
16091 /*
16092 * Load the per-thread Thread-Local Storage descriptor.
16093 */
16094 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16095 */
16096 arch_end_context_switch(next_p);
16097
16098 + percpu_write(current_task, next_p);
16099 + percpu_write(current_tinfo, &next_p->tinfo);
16100 +
16101 if (preload_fpu)
16102 __math_state_restore();
16103
16104 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16105 if (prev->gs | next->gs)
16106 lazy_load_gs(next->gs);
16107
16108 - percpu_write(current_task, next_p);
16109 -
16110 return prev_p;
16111 }
16112
16113 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16114 } while (count++ < 16);
16115 return 0;
16116 }
16117 -
16118 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16119 --- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16120 +++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16121 @@ -91,7 +91,7 @@ static void __exit_idle(void)
16122 void exit_idle(void)
16123 {
16124 /* idle loop has pid 0 */
16125 - if (current->pid)
16126 + if (task_pid_nr(current))
16127 return;
16128 __exit_idle();
16129 }
16130 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16131 if (!board)
16132 board = "";
16133 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16134 - current->pid, current->comm, print_tainted(),
16135 + task_pid_nr(current), current->comm, print_tainted(),
16136 init_utsname()->release,
16137 (int)strcspn(init_utsname()->version, " "),
16138 init_utsname()->version, board);
16139 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16140 struct pt_regs *childregs;
16141 struct task_struct *me = current;
16142
16143 - childregs = ((struct pt_regs *)
16144 - (THREAD_SIZE + task_stack_page(p))) - 1;
16145 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16146 *childregs = *regs;
16147
16148 childregs->ax = 0;
16149 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16150 p->thread.sp = (unsigned long) childregs;
16151 p->thread.sp0 = (unsigned long) (childregs+1);
16152 p->thread.usersp = me->thread.usersp;
16153 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16154
16155 set_tsk_thread_flag(p, TIF_FORK);
16156
16157 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16158 struct thread_struct *prev = &prev_p->thread;
16159 struct thread_struct *next = &next_p->thread;
16160 int cpu = smp_processor_id();
16161 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16162 + struct tss_struct *tss = init_tss + cpu;
16163 unsigned fsindex, gsindex;
16164 bool preload_fpu;
16165
16166 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16167 prev->usersp = percpu_read(old_rsp);
16168 percpu_write(old_rsp, next->usersp);
16169 percpu_write(current_task, next_p);
16170 + percpu_write(current_tinfo, &next_p->tinfo);
16171
16172 - percpu_write(kernel_stack,
16173 - (unsigned long)task_stack_page(next_p) +
16174 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16175 + percpu_write(kernel_stack, next->sp0);
16176
16177 /*
16178 * Now maybe reload the debug registers and handle I/O bitmaps
16179 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16180 if (!p || p == current || p->state == TASK_RUNNING)
16181 return 0;
16182 stack = (unsigned long)task_stack_page(p);
16183 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16184 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16185 return 0;
16186 fp = *(u64 *)(p->thread.sp);
16187 do {
16188 - if (fp < (unsigned long)stack ||
16189 - fp >= (unsigned long)stack+THREAD_SIZE)
16190 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16191 return 0;
16192 ip = *(u64 *)(fp+8);
16193 if (!in_sched_functions(ip))
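
In both copy_thread() hunks (32-bit in process_32.c above, 64-bit here) the child's register frame is no longer placed flush against the top of the kernel stack: mainline uses task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs), while the patch subtracts a further 8 bytes on 32-bit and 16 bytes on 64-bit. Taking the usual x86_64 values for this kernel as an example (THREAD_SIZE = 8192, sizeof(struct pt_regs) = 168), the frame moves from offset 8024 down to 8008, leaving a small reserved area at the very top of the stack that matches the "- 16" used for kernel_stack in the smpboot.c hunk further down and the tightened bounds in get_wchan() just above.
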
16194 diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16195 --- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16196 +++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16197 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16198
16199 void free_thread_info(struct thread_info *ti)
16200 {
16201 - free_thread_xstate(ti->task);
16202 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16203 }
16204
16205 +static struct kmem_cache *task_struct_cachep;
16206 +
16207 void arch_task_cache_init(void)
16208 {
16209 - task_xstate_cachep =
16210 - kmem_cache_create("task_xstate", xstate_size,
16211 + /* create a slab on which task_structs can be allocated */
16212 + task_struct_cachep =
16213 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16214 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16215 +
16216 + task_xstate_cachep =
16217 + kmem_cache_create("task_xstate", xstate_size,
16218 __alignof__(union thread_xstate),
16219 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16220 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16221 +}
16222 +
16223 +struct task_struct *alloc_task_struct(void)
16224 +{
16225 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16226 +}
16227 +
16228 +void free_task_struct(struct task_struct *task)
16229 +{
16230 + free_thread_xstate(task);
16231 + kmem_cache_free(task_struct_cachep, task);
16232 }
16233
16234 /*
16235 @@ -73,7 +90,7 @@ void exit_thread(void)
16236 unsigned long *bp = t->io_bitmap_ptr;
16237
16238 if (bp) {
16239 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16240 + struct tss_struct *tss = init_tss + get_cpu();
16241
16242 t->io_bitmap_ptr = NULL;
16243 clear_thread_flag(TIF_IO_BITMAP);
16244 @@ -93,6 +110,9 @@ void flush_thread(void)
16245
16246 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16247
16248 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16249 + loadsegment(gs, 0);
16250 +#endif
16251 tsk->thread.debugreg0 = 0;
16252 tsk->thread.debugreg1 = 0;
16253 tsk->thread.debugreg2 = 0;
16254 @@ -307,7 +327,7 @@ void default_idle(void)
16255 EXPORT_SYMBOL(default_idle);
16256 #endif
16257
16258 -void stop_this_cpu(void *dummy)
16259 +__noreturn void stop_this_cpu(void *dummy)
16260 {
16261 local_irq_disable();
16262 /*
16263 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16264 }
16265 early_param("idle", idle_setup);
16266
16267 -unsigned long arch_align_stack(unsigned long sp)
16268 +#ifdef CONFIG_PAX_RANDKSTACK
16269 +asmlinkage void pax_randomize_kstack(void)
16270 {
16271 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16272 - sp -= get_random_int() % 8192;
16273 - return sp & ~0xf;
16274 -}
16275 + struct thread_struct *thread = &current->thread;
16276 + unsigned long time;
16277
16278 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16279 -{
16280 - unsigned long range_end = mm->brk + 0x02000000;
16281 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16282 + if (!randomize_va_space)
16283 + return;
16284 +
16285 + rdtscl(time);
16286 +
16287 + /* P4 seems to return a 0 LSB, ignore it */
16288 +#ifdef CONFIG_MPENTIUM4
16289 + time &= 0x3EUL;
16290 + time <<= 2;
16291 +#elif defined(CONFIG_X86_64)
16292 + time &= 0xFUL;
16293 + time <<= 4;
16294 +#else
16295 + time &= 0x1FUL;
16296 + time <<= 3;
16297 +#endif
16298 +
16299 + thread->sp0 ^= time;
16300 + load_sp0(init_tss + smp_processor_id(), thread);
16301 +
16302 +#ifdef CONFIG_X86_64
16303 + percpu_write(kernel_stack, thread->sp0);
16304 +#endif
16305 }
16306 +#endif
16307
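
pax_randomize_kstack() above re-randomises the kernel stack top on the syscall path: it reads the low TSC bits, masks and shifts them so only a small, suitably aligned set of offsets can result (0x1F << 3 on generic 32-bit keeps 8-byte alignment within 248 bytes; 0xF << 4 on 64-bit keeps 16-byte alignment within 240 bytes), and XORs that into thread->sp0 before reloading it. The user-space snippet below reproduces just the 64-bit arithmetic, with the compiler's TSC builtin standing in for rdtscl() and a made-up starting sp0:

#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
        unsigned long sp0 = 0xffff88000421c000UL;   /* made-up stack top */

        for (int i = 0; i < 4; i++) {
                unsigned long t = (unsigned long)__rdtsc();

                t &= 0xFUL;    /* keep 4 low TSC bits ... */
                t <<= 4;       /* ... as a 16-byte-aligned offset, 0x00-0xF0 */

                sp0 ^= t;      /* same update the hunk applies to thread->sp0 */
                printf("offset %#04lx -> sp0 %#lx\n", t, sp0);
        }
        return 0;
}
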
16308 diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16309 --- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16310 +++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16311 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16312 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16313 {
16314 int ret;
16315 - unsigned long __user *datap = (unsigned long __user *)data;
16316 + unsigned long __user *datap = (__force unsigned long __user *)data;
16317
16318 switch (request) {
16319 /* read the word at location addr in the USER area. */
16320 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16321 if (addr < 0)
16322 return -EIO;
16323 ret = do_get_thread_area(child, addr,
16324 - (struct user_desc __user *) data);
16325 + (__force struct user_desc __user *) data);
16326 break;
16327
16328 case PTRACE_SET_THREAD_AREA:
16329 if (addr < 0)
16330 return -EIO;
16331 ret = do_set_thread_area(child, addr,
16332 - (struct user_desc __user *) data, 0);
16333 + (__force struct user_desc __user *) data, 0);
16334 break;
16335 #endif
16336
16337 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16338 #ifdef CONFIG_X86_PTRACE_BTS
16339 case PTRACE_BTS_CONFIG:
16340 ret = ptrace_bts_config
16341 - (child, data, (struct ptrace_bts_config __user *)addr);
16342 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16343 break;
16344
16345 case PTRACE_BTS_STATUS:
16346 ret = ptrace_bts_status
16347 - (child, data, (struct ptrace_bts_config __user *)addr);
16348 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16349 break;
16350
16351 case PTRACE_BTS_SIZE:
16352 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16353
16354 case PTRACE_BTS_GET:
16355 ret = ptrace_bts_read_record
16356 - (child, data, (struct bts_struct __user *) addr);
16357 + (child, data, (__force struct bts_struct __user *) addr);
16358 break;
16359
16360 case PTRACE_BTS_CLEAR:
16361 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16362
16363 case PTRACE_BTS_DRAIN:
16364 ret = ptrace_bts_drain
16365 - (child, data, (struct bts_struct __user *) addr);
16366 + (child, data, (__force struct bts_struct __user *) addr);
16367 break;
16368 #endif /* CONFIG_X86_PTRACE_BTS */
16369
16370 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16371 info.si_code = si_code;
16372
16373 /* User-mode ip? */
16374 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16375 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16376
16377 /* Send us the fake SIGTRAP */
16378 force_sig_info(SIGTRAP, &info, tsk);
16379 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16380 * We must return the syscall number to actually look up in the table.
16381 * This can be -1L to skip running any syscall at all.
16382 */
16383 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16384 +long syscall_trace_enter(struct pt_regs *regs)
16385 {
16386 long ret = 0;
16387
16388 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16389 return ret ?: regs->orig_ax;
16390 }
16391
16392 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16393 +void syscall_trace_leave(struct pt_regs *regs)
16394 {
16395 if (unlikely(current->audit_context))
16396 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16397 diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16398 --- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16399 +++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16400 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16401 EXPORT_SYMBOL(pm_power_off);
16402
16403 static const struct desc_ptr no_idt = {};
16404 -static int reboot_mode;
16405 +static unsigned short reboot_mode;
16406 enum reboot_type reboot_type = BOOT_KBD;
16407 int reboot_force;
16408
16409 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16410 controller to pulse the CPU reset line, which is more thorough, but
16411 doesn't work with at least one type of 486 motherboard. It is easy
16412 to stop this code working; hence the copious comments. */
16413 -static const unsigned long long
16414 -real_mode_gdt_entries [3] =
16415 +static struct desc_struct
16416 +real_mode_gdt_entries [3] __read_only =
16417 {
16418 - 0x0000000000000000ULL, /* Null descriptor */
16419 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16420 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16421 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16422 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16423 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16424 };
16425
16426 static const struct desc_ptr
16427 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16428 * specified by the code and length parameters.
16429 * We assume that length will aways be less that 100!
16430 */
16431 -void machine_real_restart(const unsigned char *code, int length)
16432 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16433 {
16434 local_irq_disable();
16435
16436 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16437 /* Remap the kernel at virtual address zero, as well as offset zero
16438 from the kernel segment. This assumes the kernel segment starts at
16439 virtual address PAGE_OFFSET. */
16440 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16441 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16442 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16443 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16444
16445 /*
16446 * Use `swapper_pg_dir' as our page directory.
16447 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16448 boot)". This seems like a fairly standard thing that gets set by
16449 REBOOT.COM programs, and the previous reset routine did this
16450 too. */
16451 - *((unsigned short *)0x472) = reboot_mode;
16452 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16453
16454 /* For the switch to real mode, copy some code to low memory. It has
16455 to be in the first 64k because it is running in 16-bit mode, and it
16456 has to have the same physical and virtual address, because it turns
16457 off paging. Copy it near the end of the first page, out of the way
16458 of BIOS variables. */
16459 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16460 - real_mode_switch, sizeof (real_mode_switch));
16461 - memcpy((void *)(0x1000 - 100), code, length);
16462 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16463 + memcpy(__va(0x1000 - 100), code, length);
16464
16465 /* Set up the IDT for real mode. */
16466 load_idt(&real_mode_idt);
16467 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16468 __asm__ __volatile__ ("ljmp $0x0008,%0"
16469 :
16470 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16471 + do { } while (1);
16472 }
16473 #ifdef CONFIG_APM_MODULE
16474 EXPORT_SYMBOL(machine_real_restart);
16475 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16476 {
16477 }
16478
16479 -static void native_machine_emergency_restart(void)
16480 +__noreturn static void native_machine_emergency_restart(void)
16481 {
16482 int i;
16483
16484 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16485 #endif
16486 }
16487
16488 -static void __machine_emergency_restart(int emergency)
16489 +static __noreturn void __machine_emergency_restart(int emergency)
16490 {
16491 reboot_emergency = emergency;
16492 machine_ops.emergency_restart();
16493 }
16494
16495 -static void native_machine_restart(char *__unused)
16496 +static __noreturn void native_machine_restart(char *__unused)
16497 {
16498 printk("machine restart\n");
16499
16500 @@ -674,7 +674,7 @@ static void native_machine_restart(char
16501 __machine_emergency_restart(0);
16502 }
16503
16504 -static void native_machine_halt(void)
16505 +static __noreturn void native_machine_halt(void)
16506 {
16507 /* stop other cpus and apics */
16508 machine_shutdown();
16509 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
16510 stop_this_cpu(NULL);
16511 }
16512
16513 -static void native_machine_power_off(void)
16514 +__noreturn static void native_machine_power_off(void)
16515 {
16516 if (pm_power_off) {
16517 if (!reboot_force)
16518 @@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16519 }
16520 /* a fallback in case there is no PM info available */
16521 tboot_shutdown(TB_SHUTDOWN_HALT);
16522 + do { } while (1);
16523 }
16524
16525 struct machine_ops machine_ops = {
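
The low-memory stores in machine_real_restart() above now go through __va() (for instance __va(0x472) is simply PAGE_OFFSET + 0x472, i.e. 0xC0000472 with the default 3G/1G split), so the BIOS warm-boot flag and the real-mode trampoline are written via the kernel's linear mapping, presumably because with the KERNEXEC/UDEREF changes the code can no longer rely on low virtual addresses such as 0x472 being identity-mapped at that point.
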
16526 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16527 --- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16528 +++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16529 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16530
16531 if (!boot_params.hdr.root_flags)
16532 root_mountflags &= ~MS_RDONLY;
16533 - init_mm.start_code = (unsigned long) _text;
16534 - init_mm.end_code = (unsigned long) _etext;
16535 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16536 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16537 init_mm.end_data = (unsigned long) _edata;
16538 init_mm.brk = _brk_end;
16539
16540 - code_resource.start = virt_to_phys(_text);
16541 - code_resource.end = virt_to_phys(_etext)-1;
16542 - data_resource.start = virt_to_phys(_etext);
16543 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16544 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16545 + data_resource.start = virt_to_phys(_sdata);
16546 data_resource.end = virt_to_phys(_edata)-1;
16547 bss_resource.start = virt_to_phys(&__bss_start);
16548 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16549 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16550 --- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16551 +++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16552 @@ -25,19 +25,17 @@
16553 # define DBG(x...)
16554 #endif
16555
16556 -DEFINE_PER_CPU(int, cpu_number);
16557 +#ifdef CONFIG_SMP
16558 +DEFINE_PER_CPU(unsigned int, cpu_number);
16559 EXPORT_PER_CPU_SYMBOL(cpu_number);
16560 +#endif
16561
16562 -#ifdef CONFIG_X86_64
16563 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16564 -#else
16565 -#define BOOT_PERCPU_OFFSET 0
16566 -#endif
16567
16568 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16569 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16570
16571 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16572 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16573 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16574 };
16575 EXPORT_SYMBOL(__per_cpu_offset);
16576 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16577 {
16578 #ifdef CONFIG_X86_32
16579 struct desc_struct gdt;
16580 + unsigned long base = per_cpu_offset(cpu);
16581
16582 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16583 - 0x2 | DESCTYPE_S, 0x8);
16584 - gdt.s = 1;
16585 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16586 + 0x83 | DESCTYPE_S, 0xC);
16587 write_gdt_entry(get_cpu_gdt_table(cpu),
16588 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16589 #endif
16590 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16591 /* alrighty, percpu areas up and running */
16592 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16593 for_each_possible_cpu(cpu) {
16594 +#ifdef CONFIG_CC_STACKPROTECTOR
16595 +#ifdef CONFIG_X86_32
16596 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16597 +#endif
16598 +#endif
16599 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16600 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16601 per_cpu(cpu_number, cpu) = cpu;
16602 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16603 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16604 #endif
16605 #endif
16606 +#ifdef CONFIG_CC_STACKPROTECTOR
16607 +#ifdef CONFIG_X86_32
16608 + if (!cpu)
16609 + per_cpu(stack_canary.canary, cpu) = canary;
16610 +#endif
16611 +#endif
16612 /*
16613 * Up to this point, the boot CPU has been using .data.init
16614 * area. Reload any changed state for the boot CPU.
16615 diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16616 --- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16617 +++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16618 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16619 * Align the stack pointer according to the i386 ABI,
16620 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16621 */
16622 - sp = ((sp + 4) & -16ul) - 4;
16623 + sp = ((sp - 12) & -16ul) - 4;
16624 #else /* !CONFIG_X86_32 */
16625 sp = round_down(sp, 16) - 8;
16626 #endif
16627 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16628 * Return an always-bogus address instead so we will die with SIGSEGV.
16629 */
16630 if (onsigstack && !likely(on_sig_stack(sp)))
16631 - return (void __user *)-1L;
16632 + return (__force void __user *)-1L;
16633
16634 /* save i387 state */
16635 if (used_math() && save_i387_xstate(*fpstate) < 0)
16636 - return (void __user *)-1L;
16637 + return (__force void __user *)-1L;
16638
16639 return (void __user *)sp;
16640 }
16641 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16642 }
16643
16644 if (current->mm->context.vdso)
16645 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16646 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16647 else
16648 - restorer = &frame->retcode;
16649 + restorer = (void __user *)&frame->retcode;
16650 if (ka->sa.sa_flags & SA_RESTORER)
16651 restorer = ka->sa.sa_restorer;
16652
16653 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16654 * reasons and because gdb uses it as a signature to notice
16655 * signal handler stack frames.
16656 */
16657 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16658 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16659
16660 if (err)
16661 return -EFAULT;
16662 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16663 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16664
16665 /* Set up to return from userspace. */
16666 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16667 + if (current->mm->context.vdso)
16668 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16669 + else
16670 + restorer = (void __user *)&frame->retcode;
16671 if (ka->sa.sa_flags & SA_RESTORER)
16672 restorer = ka->sa.sa_restorer;
16673 put_user_ex(restorer, &frame->pretcode);
16674 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16675 * reasons and because gdb uses it as a signature to notice
16676 * signal handler stack frames.
16677 */
16678 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16679 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16680 } put_user_catch(err);
16681
16682 if (err)
16683 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16684 int signr;
16685 sigset_t *oldset;
16686
16687 + pax_track_stack();
16688 +
16689 /*
16690 * We want the common case to go fast, which is why we may in certain
16691 * cases get here from kernel mode. Just return without doing anything
16692 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16693 * X86_32: vm86 regs switched out by assembly code before reaching
16694 * here, so testing against kernel CS suffices.
16695 */
16696 - if (!user_mode(regs))
16697 + if (!user_mode_novm(regs))
16698 return;
16699
16700 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
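
The align_sigframe() change near the top of this file swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4. Both keep the i386 ABI rule that ((sp + 4) & 15) == 0 at handler entry, but the new form always lands at least 16 bytes below the incoming sp instead of possibly reusing it exactly. A throwaway check of that claim:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 0x1000; sp < 0x1040; sp++) {
                unsigned long sp_old = ((sp + 4) & -16UL) - 4;   /* mainline  */
                unsigned long sp_new = ((sp - 12) & -16UL) - 4;  /* the patch */

                assert(((sp_old + 4) & 15) == 0);   /* same ABI alignment ...   */
                assert(((sp_new + 4) & 15) == 0);
                assert(sp_new <= sp - 16);          /* ... but always a gap now */
        }
        printf("alignment preserved; new frame is always >= 16 bytes lower\n");
        return 0;
}
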
16701 diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16702 --- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16703 +++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16704 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16705 */
16706 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16707
16708 -void cpu_hotplug_driver_lock()
16709 +void cpu_hotplug_driver_lock(void)
16710 {
16711 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16712 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16713 }
16714
16715 -void cpu_hotplug_driver_unlock()
16716 +void cpu_hotplug_driver_unlock(void)
16717 {
16718 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16719 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16720 }
16721
16722 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16723 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16724 * target processor state.
16725 */
16726 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16727 - (unsigned long)stack_start.sp);
16728 + stack_start);
16729
16730 /*
16731 * Run STARTUP IPI loop.
16732 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16733 set_idle_for_cpu(cpu, c_idle.idle);
16734 do_rest:
16735 per_cpu(current_task, cpu) = c_idle.idle;
16736 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16737 #ifdef CONFIG_X86_32
16738 /* Stack for startup_32 can be just as for start_secondary onwards */
16739 irq_ctx_init(cpu);
16740 @@ -750,13 +751,15 @@ do_rest:
16741 #else
16742 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16743 initial_gs = per_cpu_offset(cpu);
16744 - per_cpu(kernel_stack, cpu) =
16745 - (unsigned long)task_stack_page(c_idle.idle) -
16746 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16747 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16748 #endif
16749 +
16750 + pax_open_kernel();
16751 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16752 + pax_close_kernel();
16753 +
16754 initial_code = (unsigned long)start_secondary;
16755 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16756 + stack_start = c_idle.idle->thread.sp;
16757
16758 /* start_ip had better be page-aligned! */
16759 start_ip = setup_trampoline();
16760 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16761
16762 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16763
16764 +#ifdef CONFIG_PAX_PER_CPU_PGD
16765 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16766 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16767 + KERNEL_PGD_PTRS);
16768 +#endif
16769 +
16770 err = do_boot_cpu(apicid, cpu);
16771
16772 if (err) {
16773 diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16774 --- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16775 +++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16776 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16777 struct desc_struct *desc;
16778 unsigned long base;
16779
16780 - seg &= ~7UL;
16781 + seg >>= 3;
16782
16783 mutex_lock(&child->mm->context.lock);
16784 - if (unlikely((seg >> 3) >= child->mm->context.size))
16785 + if (unlikely(seg >= child->mm->context.size))
16786 addr = -1L; /* bogus selector, access would fault */
16787 else {
16788 desc = child->mm->context.ldt + seg;
16789 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16790 addr += base;
16791 }
16792 mutex_unlock(&child->mm->context.lock);
16793 - }
16794 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16795 + addr = ktla_ktva(addr);
16796
16797 return addr;
16798 }
16799 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16800 unsigned char opcode[15];
16801 unsigned long addr = convert_ip_to_linear(child, regs);
16802
16803 + if (addr == -EINVAL)
16804 + return 0;
16805 +
16806 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16807 for (i = 0; i < copied; i++) {
16808 switch (opcode[i]) {
16809 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16810
16811 #ifdef CONFIG_X86_64
16812 case 0x40 ... 0x4f:
16813 - if (regs->cs != __USER_CS)
16814 + if ((regs->cs & 0xffff) != __USER_CS)
16815 /* 32-bit mode: register increment */
16816 return 0;
16817 /* 64-bit mode: REX prefix */
16818 diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
16819 --- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16820 +++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16821 @@ -1,3 +1,4 @@
16822 +.section .rodata,"a",@progbits
16823 ENTRY(sys_call_table)
16824 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16825 .long sys_exit
16826 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
16827 --- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16828 +++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16829 @@ -24,6 +24,21 @@
16830
16831 #include <asm/syscalls.h>
16832
16833 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16834 +{
16835 + unsigned long pax_task_size = TASK_SIZE;
16836 +
16837 +#ifdef CONFIG_PAX_SEGMEXEC
16838 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16839 + pax_task_size = SEGMEXEC_TASK_SIZE;
16840 +#endif
16841 +
16842 + if (len > pax_task_size || addr > pax_task_size - len)
16843 + return -EINVAL;
16844 +
16845 + return 0;
16846 +}
16847 +
16848 /*
16849 * Perform the select(nd, in, out, ex, tv) and mmap() system
16850 * calls. Linux/i386 didn't use to be able to handle more than
16851 @@ -58,6 +73,212 @@ out:
16852 return err;
16853 }
16854
16855 +unsigned long
16856 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16857 + unsigned long len, unsigned long pgoff, unsigned long flags)
16858 +{
16859 + struct mm_struct *mm = current->mm;
16860 + struct vm_area_struct *vma;
16861 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16862 +
16863 +#ifdef CONFIG_PAX_SEGMEXEC
16864 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16865 + pax_task_size = SEGMEXEC_TASK_SIZE;
16866 +#endif
16867 +
16868 + pax_task_size -= PAGE_SIZE;
16869 +
16870 + if (len > pax_task_size)
16871 + return -ENOMEM;
16872 +
16873 + if (flags & MAP_FIXED)
16874 + return addr;
16875 +
16876 +#ifdef CONFIG_PAX_RANDMMAP
16877 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16878 +#endif
16879 +
16880 + if (addr) {
16881 + addr = PAGE_ALIGN(addr);
16882 + if (pax_task_size - len >= addr) {
16883 + vma = find_vma(mm, addr);
16884 + if (check_heap_stack_gap(vma, addr, len))
16885 + return addr;
16886 + }
16887 + }
16888 + if (len > mm->cached_hole_size) {
16889 + start_addr = addr = mm->free_area_cache;
16890 + } else {
16891 + start_addr = addr = mm->mmap_base;
16892 + mm->cached_hole_size = 0;
16893 + }
16894 +
16895 +#ifdef CONFIG_PAX_PAGEEXEC
16896 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16897 + start_addr = 0x00110000UL;
16898 +
16899 +#ifdef CONFIG_PAX_RANDMMAP
16900 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16901 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16902 +#endif
16903 +
16904 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16905 + start_addr = addr = mm->mmap_base;
16906 + else
16907 + addr = start_addr;
16908 + }
16909 +#endif
16910 +
16911 +full_search:
16912 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16913 + /* At this point: (!vma || addr < vma->vm_end). */
16914 + if (pax_task_size - len < addr) {
16915 + /*
16916 + * Start a new search - just in case we missed
16917 + * some holes.
16918 + */
16919 + if (start_addr != mm->mmap_base) {
16920 + start_addr = addr = mm->mmap_base;
16921 + mm->cached_hole_size = 0;
16922 + goto full_search;
16923 + }
16924 + return -ENOMEM;
16925 + }
16926 + if (check_heap_stack_gap(vma, addr, len))
16927 + break;
16928 + if (addr + mm->cached_hole_size < vma->vm_start)
16929 + mm->cached_hole_size = vma->vm_start - addr;
16930 + addr = vma->vm_end;
16931 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16932 + start_addr = addr = mm->mmap_base;
16933 + mm->cached_hole_size = 0;
16934 + goto full_search;
16935 + }
16936 + }
16937 +
16938 + /*
16939 + * Remember the place where we stopped the search:
16940 + */
16941 + mm->free_area_cache = addr + len;
16942 + return addr;
16943 +}
16944 +
16945 +unsigned long
16946 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16947 + const unsigned long len, const unsigned long pgoff,
16948 + const unsigned long flags)
16949 +{
16950 + struct vm_area_struct *vma;
16951 + struct mm_struct *mm = current->mm;
16952 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16953 +
16954 +#ifdef CONFIG_PAX_SEGMEXEC
16955 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16956 + pax_task_size = SEGMEXEC_TASK_SIZE;
16957 +#endif
16958 +
16959 + pax_task_size -= PAGE_SIZE;
16960 +
16961 + /* requested length too big for entire address space */
16962 + if (len > pax_task_size)
16963 + return -ENOMEM;
16964 +
16965 + if (flags & MAP_FIXED)
16966 + return addr;
16967 +
16968 +#ifdef CONFIG_PAX_PAGEEXEC
16969 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16970 + goto bottomup;
16971 +#endif
16972 +
16973 +#ifdef CONFIG_PAX_RANDMMAP
16974 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16975 +#endif
16976 +
16977 + /* requesting a specific address */
16978 + if (addr) {
16979 + addr = PAGE_ALIGN(addr);
16980 + if (pax_task_size - len >= addr) {
16981 + vma = find_vma(mm, addr);
16982 + if (check_heap_stack_gap(vma, addr, len))
16983 + return addr;
16984 + }
16985 + }
16986 +
16987 + /* check if free_area_cache is useful for us */
16988 + if (len <= mm->cached_hole_size) {
16989 + mm->cached_hole_size = 0;
16990 + mm->free_area_cache = mm->mmap_base;
16991 + }
16992 +
16993 + /* either no address requested or can't fit in requested address hole */
16994 + addr = mm->free_area_cache;
16995 +
16996 + /* make sure it can fit in the remaining address space */
16997 + if (addr > len) {
16998 + vma = find_vma(mm, addr-len);
16999 + if (check_heap_stack_gap(vma, addr - len, len))
17000 + /* remember the address as a hint for next time */
17001 + return (mm->free_area_cache = addr-len);
17002 + }
17003 +
17004 + if (mm->mmap_base < len)
17005 + goto bottomup;
17006 +
17007 + addr = mm->mmap_base-len;
17008 +
17009 + do {
17010 + /*
17011 + * Lookup failure means no vma is above this address,
17012 + * else if new region fits below vma->vm_start,
17013 + * return with success:
17014 + */
17015 + vma = find_vma(mm, addr);
17016 + if (check_heap_stack_gap(vma, addr, len))
17017 + /* remember the address as a hint for next time */
17018 + return (mm->free_area_cache = addr);
17019 +
17020 + /* remember the largest hole we saw so far */
17021 + if (addr + mm->cached_hole_size < vma->vm_start)
17022 + mm->cached_hole_size = vma->vm_start - addr;
17023 +
17024 + /* try just below the current vma->vm_start */
17025 + addr = skip_heap_stack_gap(vma, len);
17026 + } while (!IS_ERR_VALUE(addr));
17027 +
17028 +bottomup:
17029 + /*
17030 + * A failed mmap() very likely causes application failure,
17031 + * so fall back to the bottom-up function here. This scenario
17032 + * can happen with large stack limits and large mmap()
17033 + * allocations.
17034 + */
17035 +
17036 +#ifdef CONFIG_PAX_SEGMEXEC
17037 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17038 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17039 + else
17040 +#endif
17041 +
17042 + mm->mmap_base = TASK_UNMAPPED_BASE;
17043 +
17044 +#ifdef CONFIG_PAX_RANDMMAP
17045 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17046 + mm->mmap_base += mm->delta_mmap;
17047 +#endif
17048 +
17049 + mm->free_area_cache = mm->mmap_base;
17050 + mm->cached_hole_size = ~0UL;
17051 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17052 + /*
17053 + * Restore the topdown base:
17054 + */
17055 + mm->mmap_base = base;
17056 + mm->free_area_cache = base;
17057 + mm->cached_hole_size = ~0UL;
17058 +
17059 + return addr;
17060 +}
17061
17062 struct sel_arg_struct {
17063 unsigned long n;
17064 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17065 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17066 case SEMTIMEDOP:
17067 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17068 - (const struct timespec __user *)fifth);
17069 + (__force const struct timespec __user *)fifth);
17070
17071 case SEMGET:
17072 return sys_semget(first, second, third);
17073 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17074 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17075 if (ret)
17076 return ret;
17077 - return put_user(raddr, (ulong __user *) third);
17078 + return put_user(raddr, (__force ulong __user *) third);
17079 }
17080 case 1: /* iBCS2 emulator entry point */
17081 if (!segment_eq(get_fs(), get_ds()))
17082 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17083
17084 return error;
17085 }
17086 -
17087 -
17088 -/*
17089 - * Do a system call from kernel instead of calling sys_execve so we
17090 - * end up with proper pt_regs.
17091 - */
17092 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17093 -{
17094 - long __res;
17095 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17096 - : "=a" (__res)
17097 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17098 - return __res;
17099 -}
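
The new arch_get_unmapped_area()/arch_get_unmapped_area_topdown() above replace the usual "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), whose point is to also refuse candidate ranges that would butt up against the bottom of a grows-down stack VMA. The real helper (and skip_heap_stack_gap()) is defined elsewhere in this patch; the fragment below is only a simplified sketch of that intent, with a made-up vma type and a fixed gap in place of the tunable one:

#include <stdbool.h>

#define DEMO_VM_GROWSDOWN   0x0100UL
#define DEMO_HEAP_STACK_GAP (64UL * 1024)   /* stand-in for the tunable gap */

struct demo_vma {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
};

/* true if [addr, addr+len) fits below the next vma, keeping an extra gap
 * whenever that vma is a grows-down stack */
static bool demo_check_heap_stack_gap(const struct demo_vma *vma,
                                      unsigned long addr, unsigned long len)
{
        unsigned long gap = 0;

        if (!vma)                               /* nothing above: always fits */
                return true;
        if (vma->vm_flags & DEMO_VM_GROWSDOWN)  /* keep clear of the stack    */
                gap = DEMO_HEAP_STACK_GAP;
        return addr + len + gap <= vma->vm_start;
}

int main(void)
{
        struct demo_vma stack = { 0xbf800000UL, 0xc0000000UL, DEMO_VM_GROWSDOWN };

        /* 64 KiB request that ends exactly one gap below the stack: accepted */
        return demo_check_heap_stack_gap(&stack, 0xbf7e0000UL, 0x10000UL) ? 0 : 1;
}
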
17100 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17101 --- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17102 +++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17103 @@ -32,8 +32,8 @@ out:
17104 return error;
17105 }
17106
17107 -static void find_start_end(unsigned long flags, unsigned long *begin,
17108 - unsigned long *end)
17109 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17110 + unsigned long *begin, unsigned long *end)
17111 {
17112 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17113 unsigned long new_begin;
17114 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17115 *begin = new_begin;
17116 }
17117 } else {
17118 - *begin = TASK_UNMAPPED_BASE;
17119 + *begin = mm->mmap_base;
17120 *end = TASK_SIZE;
17121 }
17122 }
17123 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17124 if (flags & MAP_FIXED)
17125 return addr;
17126
17127 - find_start_end(flags, &begin, &end);
17128 + find_start_end(mm, flags, &begin, &end);
17129
17130 if (len > end)
17131 return -ENOMEM;
17132
17133 +#ifdef CONFIG_PAX_RANDMMAP
17134 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17135 +#endif
17136 +
17137 if (addr) {
17138 addr = PAGE_ALIGN(addr);
17139 vma = find_vma(mm, addr);
17140 - if (end - len >= addr &&
17141 - (!vma || addr + len <= vma->vm_start))
17142 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17143 return addr;
17144 }
17145 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17146 @@ -106,7 +109,7 @@ full_search:
17147 }
17148 return -ENOMEM;
17149 }
17150 - if (!vma || addr + len <= vma->vm_start) {
17151 + if (check_heap_stack_gap(vma, addr, len)) {
17152 /*
17153 * Remember the place where we stopped the search:
17154 */
17155 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17156 {
17157 struct vm_area_struct *vma;
17158 struct mm_struct *mm = current->mm;
17159 - unsigned long addr = addr0;
17160 + unsigned long base = mm->mmap_base, addr = addr0;
17161
17162 /* requested length too big for entire address space */
17163 if (len > TASK_SIZE)
17164 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17165 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17166 goto bottomup;
17167
17168 +#ifdef CONFIG_PAX_RANDMMAP
17169 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17170 +#endif
17171 +
17172 /* requesting a specific address */
17173 if (addr) {
17174 addr = PAGE_ALIGN(addr);
17175 - vma = find_vma(mm, addr);
17176 - if (TASK_SIZE - len >= addr &&
17177 - (!vma || addr + len <= vma->vm_start))
17178 - return addr;
17179 + if (TASK_SIZE - len >= addr) {
17180 + vma = find_vma(mm, addr);
17181 + if (check_heap_stack_gap(vma, addr, len))
17182 + return addr;
17183 + }
17184 }
17185
17186 /* check if free_area_cache is useful for us */
17187 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17188 /* make sure it can fit in the remaining address space */
17189 if (addr > len) {
17190 vma = find_vma(mm, addr-len);
17191 - if (!vma || addr <= vma->vm_start)
17192 + if (check_heap_stack_gap(vma, addr - len, len))
17193 /* remember the address as a hint for next time */
17194 return mm->free_area_cache = addr-len;
17195 }
17196 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17197 * return with success:
17198 */
17199 vma = find_vma(mm, addr);
17200 - if (!vma || addr+len <= vma->vm_start)
17201 + if (check_heap_stack_gap(vma, addr, len))
17202 /* remember the address as a hint for next time */
17203 return mm->free_area_cache = addr;
17204
17205 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17206 mm->cached_hole_size = vma->vm_start - addr;
17207
17208 /* try just below the current vma->vm_start */
17209 - addr = vma->vm_start-len;
17210 - } while (len < vma->vm_start);
17211 + addr = skip_heap_stack_gap(vma, len);
17212 + } while (!IS_ERR_VALUE(addr));
17213
17214 bottomup:
17215 /*
17216 @@ -198,13 +206,21 @@ bottomup:
17217 * can happen with large stack limits and large mmap()
17218 * allocations.
17219 */
17220 + mm->mmap_base = TASK_UNMAPPED_BASE;
17221 +
17222 +#ifdef CONFIG_PAX_RANDMMAP
17223 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17224 + mm->mmap_base += mm->delta_mmap;
17225 +#endif
17226 +
17227 + mm->free_area_cache = mm->mmap_base;
17228 mm->cached_hole_size = ~0UL;
17229 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17230 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17231 /*
17232 * Restore the topdown base:
17233 */
17234 - mm->free_area_cache = mm->mmap_base;
17235 + mm->mmap_base = base;
17236 + mm->free_area_cache = base;
17237 mm->cached_hole_size = ~0UL;
17238
17239 return addr;
17240 diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17241 --- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17242 +++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17243 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17244
17245 void tboot_shutdown(u32 shutdown_type)
17246 {
17247 - void (*shutdown)(void);
17248 + void (* __noreturn shutdown)(void);
17249
17250 if (!tboot_enabled())
17251 return;
17252 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17253
17254 switch_to_tboot_pt();
17255
17256 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17257 + shutdown = (void *)tboot->shutdown_entry;
17258 shutdown();
17259
17260 /* should not reach here */
17261 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17262 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17263 }
17264
17265 -static atomic_t ap_wfs_count;
17266 +static atomic_unchecked_t ap_wfs_count;
17267
17268 static int tboot_wait_for_aps(int num_aps)
17269 {
17270 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17271 {
17272 switch (action) {
17273 case CPU_DYING:
17274 - atomic_inc(&ap_wfs_count);
17275 + atomic_inc_unchecked(&ap_wfs_count);
17276 if (num_online_cpus() == 1)
17277 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17278 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17279 return NOTIFY_BAD;
17280 break;
17281 }
17282 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17283
17284 tboot_create_trampoline();
17285
17286 - atomic_set(&ap_wfs_count, 0);
17287 + atomic_set_unchecked(&ap_wfs_count, 0);
17288 register_hotcpu_notifier(&tboot_cpu_notifier);
17289 return 0;
17290 }
17291 diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17292 --- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17293 +++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17294 @@ -26,17 +26,13 @@
17295 int timer_ack;
17296 #endif
17297
17298 -#ifdef CONFIG_X86_64
17299 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17300 -#endif
17301 -
17302 unsigned long profile_pc(struct pt_regs *regs)
17303 {
17304 unsigned long pc = instruction_pointer(regs);
17305
17306 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17307 + if (!user_mode(regs) && in_lock_functions(pc)) {
17308 #ifdef CONFIG_FRAME_POINTER
17309 - return *(unsigned long *)(regs->bp + sizeof(long));
17310 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17311 #else
17312 unsigned long *sp =
17313 (unsigned long *)kernel_stack_pointer(regs);
17314 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17315 * or above a saved flags. Eflags has bits 22-31 zero,
17316 * kernel addresses don't.
17317 */
17318 +
17319 +#ifdef CONFIG_PAX_KERNEXEC
17320 + return ktla_ktva(sp[0]);
17321 +#else
17322 if (sp[0] >> 22)
17323 return sp[0];
17324 if (sp[1] >> 22)
17325 return sp[1];
17326 #endif
17327 +
17328 +#endif
17329 }
17330 return pc;
17331 }
17332 diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17333 --- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17334 +++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17335 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17336 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17337 return -EINVAL;
17338
17339 +#ifdef CONFIG_PAX_SEGMEXEC
17340 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17341 + return -EINVAL;
17342 +#endif
17343 +
17344 set_tls_desc(p, idx, &info, 1);
17345
17346 return 0;
17347 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17348 --- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17349 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17350 @@ -32,6 +32,12 @@
17351 #include <asm/segment.h>
17352 #include <asm/page_types.h>
17353
17354 +#ifdef CONFIG_PAX_KERNEXEC
17355 +#define ta(X) (X)
17356 +#else
17357 +#define ta(X) ((X) - __PAGE_OFFSET)
17358 +#endif
17359 +
17360 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17361 __CPUINITRODATA
17362 .code16
17363 @@ -60,7 +66,7 @@ r_base = .
17364 inc %ax # protected mode (PE) bit
17365 lmsw %ax # into protected mode
17366 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17367 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17368 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17369
17370 # These need to be in the same 64K segment as the above;
17371 # hence we don't use the boot_gdt_descr defined in head.S
17372 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17373 --- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17374 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17375 @@ -91,7 +91,7 @@ startup_32:
17376 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17377 movl %eax, %ds
17378
17379 - movl $X86_CR4_PAE, %eax
17380 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17381 movl %eax, %cr4 # Enable PAE mode
17382
17383 # Setup trampoline 4 level pagetables
17384 @@ -127,7 +127,7 @@ startup_64:
17385 no_longmode:
17386 hlt
17387 jmp no_longmode
17388 -#include "verify_cpu_64.S"
17389 +#include "verify_cpu.S"
17390
17391 # Careful these need to be in the same 64K segment as the above;
17392 tidt:
17393 @@ -138,7 +138,7 @@ tidt:
17394 # so the kernel can live anywhere
17395 .balign 4
17396 tgdt:
17397 - .short tgdt_end - tgdt # gdt limit
17398 + .short tgdt_end - tgdt - 1 # gdt limit
17399 .long tgdt - r_base
17400 .short 0
17401 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17402 diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17403 --- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17404 +++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17405 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17406
17407 /* Do we ignore FPU interrupts ? */
17408 char ignore_fpu_irq;
17409 -
17410 -/*
17411 - * The IDT has to be page-aligned to simplify the Pentium
17412 - * F0 0F bug workaround.
17413 - */
17414 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17415 #endif
17416
17417 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17418 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17419 static inline void
17420 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17421 {
17422 - if (!user_mode_vm(regs))
17423 + if (!user_mode(regs))
17424 die(str, regs, err);
17425 }
17426 #endif
17427
17428 static void __kprobes
17429 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17430 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17431 long error_code, siginfo_t *info)
17432 {
17433 struct task_struct *tsk = current;
17434
17435 #ifdef CONFIG_X86_32
17436 - if (regs->flags & X86_VM_MASK) {
17437 + if (v8086_mode(regs)) {
17438 /*
17439 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17440 * On nmi (interrupt 2), do_trap should not be called.
17441 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17442 }
17443 #endif
17444
17445 - if (!user_mode(regs))
17446 + if (!user_mode_novm(regs))
17447 goto kernel_trap;
17448
17449 #ifdef CONFIG_X86_32
17450 @@ -158,7 +152,7 @@ trap_signal:
17451 printk_ratelimit()) {
17452 printk(KERN_INFO
17453 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17454 - tsk->comm, tsk->pid, str,
17455 + tsk->comm, task_pid_nr(tsk), str,
17456 regs->ip, regs->sp, error_code);
17457 print_vma_addr(" in ", regs->ip);
17458 printk("\n");
17459 @@ -175,8 +169,20 @@ kernel_trap:
17460 if (!fixup_exception(regs)) {
17461 tsk->thread.error_code = error_code;
17462 tsk->thread.trap_no = trapnr;
17463 +
17464 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17465 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17466 + str = "PAX: suspicious stack segment fault";
17467 +#endif
17468 +
17469 die(str, regs, error_code);
17470 }
17471 +
17472 +#ifdef CONFIG_PAX_REFCOUNT
17473 + if (trapnr == 4)
17474 + pax_report_refcount_overflow(regs);
17475 +#endif
17476 +
17477 return;
17478
17479 #ifdef CONFIG_X86_32
17480 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17481 conditional_sti(regs);
17482
17483 #ifdef CONFIG_X86_32
17484 - if (regs->flags & X86_VM_MASK)
17485 + if (v8086_mode(regs))
17486 goto gp_in_vm86;
17487 #endif
17488
17489 tsk = current;
17490 - if (!user_mode(regs))
17491 + if (!user_mode_novm(regs))
17492 goto gp_in_kernel;
17493
17494 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17495 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17496 + struct mm_struct *mm = tsk->mm;
17497 + unsigned long limit;
17498 +
17499 + down_write(&mm->mmap_sem);
17500 + limit = mm->context.user_cs_limit;
17501 + if (limit < TASK_SIZE) {
17502 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17503 + up_write(&mm->mmap_sem);
17504 + return;
17505 + }
17506 + up_write(&mm->mmap_sem);
17507 + }
17508 +#endif
17509 +
17510 tsk->thread.error_code = error_code;
17511 tsk->thread.trap_no = 13;
17512
17513 @@ -305,6 +327,13 @@ gp_in_kernel:
17514 if (notify_die(DIE_GPF, "general protection fault", regs,
17515 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17516 return;
17517 +
17518 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17519 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17520 + die("PAX: suspicious general protection fault", regs, error_code);
17521 + else
17522 +#endif
17523 +
17524 die("general protection fault", regs, error_code);
17525 }
17526
17527 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17528 dotraplinkage notrace __kprobes void
17529 do_nmi(struct pt_regs *regs, long error_code)
17530 {
17531 +
17532 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17533 + if (!user_mode(regs)) {
17534 + unsigned long cs = regs->cs & 0xFFFF;
17535 + unsigned long ip = ktva_ktla(regs->ip);
17536 +
17537 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17538 + regs->ip = ip;
17539 + }
17540 +#endif
17541 +
17542 nmi_enter();
17543
17544 inc_irq_stat(__nmi_count);
17545 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17546 }
17547
17548 #ifdef CONFIG_X86_32
17549 - if (regs->flags & X86_VM_MASK)
17550 + if (v8086_mode(regs))
17551 goto debug_vm86;
17552 #endif
17553
17554 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17555 * kernel space (but re-enable TF when returning to user mode).
17556 */
17557 if (condition & DR_STEP) {
17558 - if (!user_mode(regs))
17559 + if (!user_mode_novm(regs))
17560 goto clear_TF_reenable;
17561 }
17562
17563 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17564 * Handle strange cache flush from user space exception
17565 * in all other cases. This is undocumented behaviour.
17566 */
17567 - if (regs->flags & X86_VM_MASK) {
17568 + if (v8086_mode(regs)) {
17569 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17570 return;
17571 }
17572 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17573 void __math_state_restore(void)
17574 {
17575 struct thread_info *thread = current_thread_info();
17576 - struct task_struct *tsk = thread->task;
17577 + struct task_struct *tsk = current;
17578
17579 /*
17580 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17581 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17582 */
17583 asmlinkage void math_state_restore(void)
17584 {
17585 - struct thread_info *thread = current_thread_info();
17586 - struct task_struct *tsk = thread->task;
17587 + struct task_struct *tsk = current;
17588
17589 if (!tsk_used_math(tsk)) {
17590 local_irq_enable();
17591 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17592 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17593 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17594 @@ -1,105 +0,0 @@
17595 -/*
17596 - *
17597 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17598 - * code has been borrowed from boot/setup.S and was introduced by
17599 - * Andi Kleen.
17600 - *
17601 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17602 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17603 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17604 - *
17605 - * This source code is licensed under the GNU General Public License,
17606 - * Version 2. See the file COPYING for more details.
17607 - *
17608 - * This is a common code for verification whether CPU supports
17609 - * long mode and SSE or not. It is not called directly instead this
17610 - * file is included at various places and compiled in that context.
17611 - * Following are the current usage.
17612 - *
17613 - * This file is included by both 16bit and 32bit code.
17614 - *
17615 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17616 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17617 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17618 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17619 - *
17620 - * verify_cpu, returns the status of cpu check in register %eax.
17621 - * 0: Success 1: Failure
17622 - *
17623 - * The caller needs to check for the error code and take the action
17624 - * appropriately. Either display a message or halt.
17625 - */
17626 -
17627 -#include <asm/cpufeature.h>
17628 -
17629 -verify_cpu:
17630 - pushfl # Save caller passed flags
17631 - pushl $0 # Kill any dangerous flags
17632 - popfl
17633 -
17634 - pushfl # standard way to check for cpuid
17635 - popl %eax
17636 - movl %eax,%ebx
17637 - xorl $0x200000,%eax
17638 - pushl %eax
17639 - popfl
17640 - pushfl
17641 - popl %eax
17642 - cmpl %eax,%ebx
17643 - jz verify_cpu_no_longmode # cpu has no cpuid
17644 -
17645 - movl $0x0,%eax # See if cpuid 1 is implemented
17646 - cpuid
17647 - cmpl $0x1,%eax
17648 - jb verify_cpu_no_longmode # no cpuid 1
17649 -
17650 - xor %di,%di
17651 - cmpl $0x68747541,%ebx # AuthenticAMD
17652 - jnz verify_cpu_noamd
17653 - cmpl $0x69746e65,%edx
17654 - jnz verify_cpu_noamd
17655 - cmpl $0x444d4163,%ecx
17656 - jnz verify_cpu_noamd
17657 - mov $1,%di # cpu is from AMD
17658 -
17659 -verify_cpu_noamd:
17660 - movl $0x1,%eax # Does the cpu have what it takes
17661 - cpuid
17662 - andl $REQUIRED_MASK0,%edx
17663 - xorl $REQUIRED_MASK0,%edx
17664 - jnz verify_cpu_no_longmode
17665 -
17666 - movl $0x80000000,%eax # See if extended cpuid is implemented
17667 - cpuid
17668 - cmpl $0x80000001,%eax
17669 - jb verify_cpu_no_longmode # no extended cpuid
17670 -
17671 - movl $0x80000001,%eax # Does the cpu have what it takes
17672 - cpuid
17673 - andl $REQUIRED_MASK1,%edx
17674 - xorl $REQUIRED_MASK1,%edx
17675 - jnz verify_cpu_no_longmode
17676 -
17677 -verify_cpu_sse_test:
17678 - movl $1,%eax
17679 - cpuid
17680 - andl $SSE_MASK,%edx
17681 - cmpl $SSE_MASK,%edx
17682 - je verify_cpu_sse_ok
17683 - test %di,%di
17684 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17685 - movl $0xc0010015,%ecx # HWCR
17686 - rdmsr
17687 - btr $15,%eax # enable SSE
17688 - wrmsr
17689 - xor %di,%di # don't loop
17690 - jmp verify_cpu_sse_test # try again
17691 -
17692 -verify_cpu_no_longmode:
17693 - popfl # Restore caller passed flags
17694 - movl $1,%eax
17695 - ret
17696 -verify_cpu_sse_ok:
17697 - popfl # Restore caller passed flags
17698 - xorl %eax, %eax
17699 - ret
17700 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17701 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17702 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17703 @@ -0,0 +1,140 @@
17704 +/*
17705 + *
17706 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17707 + * code has been borrowed from boot/setup.S and was introduced by
17708 + * Andi Kleen.
17709 + *
17710 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17711 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17712 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17713 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17714 + *
17715 + * This source code is licensed under the GNU General Public License,
17716 + * Version 2. See the file COPYING for more details.
17717 + *
17718 + * This is a common code for verification whether CPU supports
17719 + * long mode and SSE or not. It is not called directly instead this
17720 + * file is included at various places and compiled in that context.
17721 + * This file is expected to run in 32bit code. Currently:
17722 + *
17723 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17724 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17725 + * arch/x86/kernel/head_32.S: processor startup
17726 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17727 + *
17728 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17729 + * 0: Success 1: Failure
17730 + *
17731 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17732 + *
17733 + * The caller needs to check for the error code and take the action
17734 + * appropriately. Either display a message or halt.
17735 + */
17736 +
17737 +#include <asm/cpufeature.h>
17738 +#include <asm/msr-index.h>
17739 +
17740 +verify_cpu:
17741 + pushfl # Save caller passed flags
17742 + pushl $0 # Kill any dangerous flags
17743 + popfl
17744 +
17745 + pushfl # standard way to check for cpuid
17746 + popl %eax
17747 + movl %eax,%ebx
17748 + xorl $0x200000,%eax
17749 + pushl %eax
17750 + popfl
17751 + pushfl
17752 + popl %eax
17753 + cmpl %eax,%ebx
17754 + jz verify_cpu_no_longmode # cpu has no cpuid
17755 +
17756 + movl $0x0,%eax # See if cpuid 1 is implemented
17757 + cpuid
17758 + cmpl $0x1,%eax
17759 + jb verify_cpu_no_longmode # no cpuid 1
17760 +
17761 + xor %di,%di
17762 + cmpl $0x68747541,%ebx # AuthenticAMD
17763 + jnz verify_cpu_noamd
17764 + cmpl $0x69746e65,%edx
17765 + jnz verify_cpu_noamd
17766 + cmpl $0x444d4163,%ecx
17767 + jnz verify_cpu_noamd
17768 + mov $1,%di # cpu is from AMD
17769 + jmp verify_cpu_check
17770 +
17771 +verify_cpu_noamd:
17772 + cmpl $0x756e6547,%ebx # GenuineIntel?
17773 + jnz verify_cpu_check
17774 + cmpl $0x49656e69,%edx
17775 + jnz verify_cpu_check
17776 + cmpl $0x6c65746e,%ecx
17777 + jnz verify_cpu_check
17778 +
17779 + # only call IA32_MISC_ENABLE when:
17780 + # family > 6 || (family == 6 && model >= 0xd)
17781 + movl $0x1, %eax # check CPU family and model
17782 + cpuid
17783 + movl %eax, %ecx
17784 +
17785 + andl $0x0ff00f00, %eax # mask family and extended family
17786 + shrl $8, %eax
17787 + cmpl $6, %eax
17788 + ja verify_cpu_clear_xd # family > 6, ok
17789 + jb verify_cpu_check # family < 6, skip
17790 +
17791 + andl $0x000f00f0, %ecx # mask model and extended model
17792 + shrl $4, %ecx
17793 + cmpl $0xd, %ecx
17794 + jb verify_cpu_check # family == 6, model < 0xd, skip
17795 +
17796 +verify_cpu_clear_xd:
17797 + movl $MSR_IA32_MISC_ENABLE, %ecx
17798 + rdmsr
17799 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17800 + jnc verify_cpu_check # only write MSR if bit was changed
17801 + wrmsr
17802 +
17803 +verify_cpu_check:
17804 + movl $0x1,%eax # Does the cpu have what it takes
17805 + cpuid
17806 + andl $REQUIRED_MASK0,%edx
17807 + xorl $REQUIRED_MASK0,%edx
17808 + jnz verify_cpu_no_longmode
17809 +
17810 + movl $0x80000000,%eax # See if extended cpuid is implemented
17811 + cpuid
17812 + cmpl $0x80000001,%eax
17813 + jb verify_cpu_no_longmode # no extended cpuid
17814 +
17815 + movl $0x80000001,%eax # Does the cpu have what it takes
17816 + cpuid
17817 + andl $REQUIRED_MASK1,%edx
17818 + xorl $REQUIRED_MASK1,%edx
17819 + jnz verify_cpu_no_longmode
17820 +
17821 +verify_cpu_sse_test:
17822 + movl $1,%eax
17823 + cpuid
17824 + andl $SSE_MASK,%edx
17825 + cmpl $SSE_MASK,%edx
17826 + je verify_cpu_sse_ok
17827 + test %di,%di
17828 + jz verify_cpu_no_longmode # only try to force SSE on AMD
17829 + movl $MSR_K7_HWCR,%ecx
17830 + rdmsr
17831 + btr $15,%eax # enable SSE
17832 + wrmsr
17833 + xor %di,%di # don't loop
17834 + jmp verify_cpu_sse_test # try again
17835 +
17836 +verify_cpu_no_longmode:
17837 + popfl # Restore caller passed flags
17838 + movl $1,%eax
17839 + ret
17840 +verify_cpu_sse_ok:
17841 + popfl # Restore caller passed flags
17842 + xorl %eax, %eax
17843 + ret
17844 diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
17845 --- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17846 +++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17847 @@ -41,6 +41,7 @@
17848 #include <linux/ptrace.h>
17849 #include <linux/audit.h>
17850 #include <linux/stddef.h>
17851 +#include <linux/grsecurity.h>
17852
17853 #include <asm/uaccess.h>
17854 #include <asm/io.h>
17855 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17856 do_exit(SIGSEGV);
17857 }
17858
17859 - tss = &per_cpu(init_tss, get_cpu());
17860 + tss = init_tss + get_cpu();
17861 current->thread.sp0 = current->thread.saved_sp0;
17862 current->thread.sysenter_cs = __KERNEL_CS;
17863 load_sp0(tss, &current->thread);
17864 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17865 struct task_struct *tsk;
17866 int tmp, ret = -EPERM;
17867
17868 +#ifdef CONFIG_GRKERNSEC_VM86
17869 + if (!capable(CAP_SYS_RAWIO)) {
17870 + gr_handle_vm86();
17871 + goto out;
17872 + }
17873 +#endif
17874 +
17875 tsk = current;
17876 if (tsk->thread.saved_sp0)
17877 goto out;
17878 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17879 int tmp, ret;
17880 struct vm86plus_struct __user *v86;
17881
17882 +#ifdef CONFIG_GRKERNSEC_VM86
17883 + if (!capable(CAP_SYS_RAWIO)) {
17884 + gr_handle_vm86();
17885 + ret = -EPERM;
17886 + goto out;
17887 + }
17888 +#endif
17889 +
17890 tsk = current;
17891 switch (regs->bx) {
17892 case VM86_REQUEST_IRQ:
17893 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17894 tsk->thread.saved_fs = info->regs32->fs;
17895 tsk->thread.saved_gs = get_user_gs(info->regs32);
17896
17897 - tss = &per_cpu(init_tss, get_cpu());
17898 + tss = init_tss + get_cpu();
17899 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17900 if (cpu_has_sep)
17901 tsk->thread.sysenter_cs = 0;
17902 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17903 goto cannot_handle;
17904 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17905 goto cannot_handle;
17906 - intr_ptr = (unsigned long __user *) (i << 2);
17907 + intr_ptr = (__force unsigned long __user *) (i << 2);
17908 if (get_user(segoffs, intr_ptr))
17909 goto cannot_handle;
17910 if ((segoffs >> 16) == BIOSSEG)
17911 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
17912 --- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17913 +++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
17914 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17915 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17916
17917 #define call_vrom_func(rom,func) \
17918 - (((VROMFUNC *)(rom->func))())
17919 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17920
17921 #define call_vrom_long_func(rom,func,arg) \
17922 - (((VROMLONGFUNC *)(rom->func)) (arg))
17923 +({\
17924 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17925 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17926 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17927 + __reloc;\
17928 +})
17929
17930 -static struct vrom_header *vmi_rom;
17931 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17932 static int disable_pge;
17933 static int disable_pse;
17934 static int disable_sep;
17935 @@ -76,10 +81,10 @@ static struct {
17936 void (*set_initial_ap_state)(int, int);
17937 void (*halt)(void);
17938 void (*set_lazy_mode)(int mode);
17939 -} vmi_ops;
17940 +} __no_const vmi_ops __read_only;
17941
17942 /* Cached VMI operations */
17943 -struct vmi_timer_ops vmi_timer_ops;
17944 +struct vmi_timer_ops vmi_timer_ops __read_only;
17945
17946 /*
17947 * VMI patching routines.
17948 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17949 static inline void patch_offset(void *insnbuf,
17950 unsigned long ip, unsigned long dest)
17951 {
17952 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17953 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17954 }
17955
17956 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17957 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17958 {
17959 u64 reloc;
17960 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17961 +
17962 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17963 switch(rel->type) {
17964 case VMI_RELOCATION_CALL_REL:
17965 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17966
17967 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17968 {
17969 - const pte_t pte = { .pte = 0 };
17970 + const pte_t pte = __pte(0ULL);
17971 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17972 }
17973
17974 static void vmi_pmd_clear(pmd_t *pmd)
17975 {
17976 - const pte_t pte = { .pte = 0 };
17977 + const pte_t pte = __pte(0ULL);
17978 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17979 }
17980 #endif
17981 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17982 ap.ss = __KERNEL_DS;
17983 ap.esp = (unsigned long) start_esp;
17984
17985 - ap.ds = __USER_DS;
17986 - ap.es = __USER_DS;
17987 + ap.ds = __KERNEL_DS;
17988 + ap.es = __KERNEL_DS;
17989 ap.fs = __KERNEL_PERCPU;
17990 - ap.gs = __KERNEL_STACK_CANARY;
17991 + savesegment(gs, ap.gs);
17992
17993 ap.eflags = 0;
17994
17995 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17996 paravirt_leave_lazy_mmu();
17997 }
17998
17999 +#ifdef CONFIG_PAX_KERNEXEC
18000 +static unsigned long vmi_pax_open_kernel(void)
18001 +{
18002 + return 0;
18003 +}
18004 +
18005 +static unsigned long vmi_pax_close_kernel(void)
18006 +{
18007 + return 0;
18008 +}
18009 +#endif
18010 +
18011 static inline int __init check_vmi_rom(struct vrom_header *rom)
18012 {
18013 struct pci_header *pci;
18014 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18015 return 0;
18016 if (rom->vrom_signature != VMI_SIGNATURE)
18017 return 0;
18018 + if (rom->rom_length * 512 > sizeof(*rom)) {
18019 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18020 + return 0;
18021 + }
18022 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18023 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18024 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18025 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18026 struct vrom_header *romstart;
18027 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18028 if (check_vmi_rom(romstart)) {
18029 - vmi_rom = romstart;
18030 + vmi_rom = *romstart;
18031 return 1;
18032 }
18033 }
18034 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18035
18036 para_fill(pv_irq_ops.safe_halt, Halt);
18037
18038 +#ifdef CONFIG_PAX_KERNEXEC
18039 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18040 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18041 +#endif
18042 +
18043 /*
18044 * Alternative instruction rewriting doesn't happen soon enough
18045 * to convert VMI_IRET to a call instead of a jump; so we have
18046 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18047
18048 void __init vmi_init(void)
18049 {
18050 - if (!vmi_rom)
18051 + if (!vmi_rom.rom_signature)
18052 probe_vmi_rom();
18053 else
18054 - check_vmi_rom(vmi_rom);
18055 + check_vmi_rom(&vmi_rom);
18056
18057 /* In case probing for or validating the ROM failed, basil */
18058 - if (!vmi_rom)
18059 + if (!vmi_rom.rom_signature)
18060 return;
18061
18062 - reserve_top_address(-vmi_rom->virtual_top);
18063 + reserve_top_address(-vmi_rom.virtual_top);
18064
18065 #ifdef CONFIG_X86_IO_APIC
18066 /* This is virtual hardware; timer routing is wired correctly */
18067 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
18068 {
18069 unsigned long flags;
18070
18071 - if (!vmi_rom)
18072 + if (!vmi_rom.rom_signature)
18073 return;
18074
18075 local_irq_save(flags);
18076 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18077 --- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18078 +++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18079 @@ -26,6 +26,13 @@
18080 #include <asm/page_types.h>
18081 #include <asm/cache.h>
18082 #include <asm/boot.h>
18083 +#include <asm/segment.h>
18084 +
18085 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18086 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18087 +#else
18088 +#define __KERNEL_TEXT_OFFSET 0
18089 +#endif
18090
18091 #undef i386 /* in case the preprocessor is a 32bit one */
18092
18093 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18094 #ifdef CONFIG_X86_32
18095 OUTPUT_ARCH(i386)
18096 ENTRY(phys_startup_32)
18097 -jiffies = jiffies_64;
18098 #else
18099 OUTPUT_ARCH(i386:x86-64)
18100 ENTRY(phys_startup_64)
18101 -jiffies_64 = jiffies;
18102 #endif
18103
18104 PHDRS {
18105 text PT_LOAD FLAGS(5); /* R_E */
18106 - data PT_LOAD FLAGS(7); /* RWE */
18107 +#ifdef CONFIG_X86_32
18108 + module PT_LOAD FLAGS(5); /* R_E */
18109 +#endif
18110 +#ifdef CONFIG_XEN
18111 + rodata PT_LOAD FLAGS(5); /* R_E */
18112 +#else
18113 + rodata PT_LOAD FLAGS(4); /* R__ */
18114 +#endif
18115 + data PT_LOAD FLAGS(6); /* RW_ */
18116 #ifdef CONFIG_X86_64
18117 user PT_LOAD FLAGS(5); /* R_E */
18118 +#endif
18119 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18120 #ifdef CONFIG_SMP
18121 percpu PT_LOAD FLAGS(6); /* RW_ */
18122 #endif
18123 + text.init PT_LOAD FLAGS(5); /* R_E */
18124 + text.exit PT_LOAD FLAGS(5); /* R_E */
18125 init PT_LOAD FLAGS(7); /* RWE */
18126 -#endif
18127 note PT_NOTE FLAGS(0); /* ___ */
18128 }
18129
18130 SECTIONS
18131 {
18132 #ifdef CONFIG_X86_32
18133 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18134 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18135 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18136 #else
18137 - . = __START_KERNEL;
18138 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18139 + . = __START_KERNEL;
18140 #endif
18141
18142 /* Text and read-only data */
18143 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18144 - _text = .;
18145 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18146 /* bootstrapping code */
18147 +#ifdef CONFIG_X86_32
18148 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18149 +#else
18150 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18151 +#endif
18152 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18153 + _text = .;
18154 HEAD_TEXT
18155 #ifdef CONFIG_X86_32
18156 . = ALIGN(PAGE_SIZE);
18157 @@ -82,28 +102,71 @@ SECTIONS
18158 IRQENTRY_TEXT
18159 *(.fixup)
18160 *(.gnu.warning)
18161 - /* End of text section */
18162 - _etext = .;
18163 } :text = 0x9090
18164
18165 - NOTES :text :note
18166 + . += __KERNEL_TEXT_OFFSET;
18167 +
18168 +#ifdef CONFIG_X86_32
18169 + . = ALIGN(PAGE_SIZE);
18170 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18171 + *(.vmi.rom)
18172 + } :module
18173 +
18174 + . = ALIGN(PAGE_SIZE);
18175 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18176 +
18177 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18178 + MODULES_EXEC_VADDR = .;
18179 + BYTE(0)
18180 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18181 + . = ALIGN(HPAGE_SIZE);
18182 + MODULES_EXEC_END = . - 1;
18183 +#endif
18184 +
18185 + } :module
18186 +#endif
18187
18188 - EXCEPTION_TABLE(16) :text = 0x9090
18189 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18190 + /* End of text section */
18191 + _etext = . - __KERNEL_TEXT_OFFSET;
18192 + }
18193 +
18194 +#ifdef CONFIG_X86_32
18195 + . = ALIGN(PAGE_SIZE);
18196 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18197 + *(.idt)
18198 + . = ALIGN(PAGE_SIZE);
18199 + *(.empty_zero_page)
18200 + *(.swapper_pg_fixmap)
18201 + *(.swapper_pg_pmd)
18202 + *(.swapper_pg_dir)
18203 + *(.trampoline_pg_dir)
18204 + } :rodata
18205 +#endif
18206 +
18207 + . = ALIGN(PAGE_SIZE);
18208 + NOTES :rodata :note
18209 +
18210 + EXCEPTION_TABLE(16) :rodata
18211
18212 RO_DATA(PAGE_SIZE)
18213
18214 /* Data */
18215 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18216 +
18217 +#ifdef CONFIG_PAX_KERNEXEC
18218 + . = ALIGN(HPAGE_SIZE);
18219 +#else
18220 + . = ALIGN(PAGE_SIZE);
18221 +#endif
18222 +
18223 /* Start of data section */
18224 _sdata = .;
18225
18226 /* init_task */
18227 INIT_TASK_DATA(THREAD_SIZE)
18228
18229 -#ifdef CONFIG_X86_32
18230 - /* 32 bit has nosave before _edata */
18231 NOSAVE_DATA
18232 -#endif
18233
18234 PAGE_ALIGNED_DATA(PAGE_SIZE)
18235
18236 @@ -112,6 +175,8 @@ SECTIONS
18237 DATA_DATA
18238 CONSTRUCTORS
18239
18240 + jiffies = jiffies_64;
18241 +
18242 /* rarely changed data like cpu maps */
18243 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18244
18245 @@ -166,12 +231,6 @@ SECTIONS
18246 }
18247 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18248
18249 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18250 - .jiffies : AT(VLOAD(.jiffies)) {
18251 - *(.jiffies)
18252 - }
18253 - jiffies = VVIRT(.jiffies);
18254 -
18255 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18256 *(.vsyscall_3)
18257 }
18258 @@ -187,12 +246,19 @@ SECTIONS
18259 #endif /* CONFIG_X86_64 */
18260
18261 /* Init code and data - will be freed after init */
18262 - . = ALIGN(PAGE_SIZE);
18263 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18264 + BYTE(0)
18265 +
18266 +#ifdef CONFIG_PAX_KERNEXEC
18267 + . = ALIGN(HPAGE_SIZE);
18268 +#else
18269 + . = ALIGN(PAGE_SIZE);
18270 +#endif
18271 +
18272 __init_begin = .; /* paired with __init_end */
18273 - }
18274 + } :init.begin
18275
18276 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18277 +#ifdef CONFIG_SMP
18278 /*
18279 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18280 * output PHDR, so the next output section - .init.text - should
18281 @@ -201,12 +267,27 @@ SECTIONS
18282 PERCPU_VADDR(0, :percpu)
18283 #endif
18284
18285 - INIT_TEXT_SECTION(PAGE_SIZE)
18286 -#ifdef CONFIG_X86_64
18287 - :init
18288 -#endif
18289 + . = ALIGN(PAGE_SIZE);
18290 + init_begin = .;
18291 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18292 + VMLINUX_SYMBOL(_sinittext) = .;
18293 + INIT_TEXT
18294 + VMLINUX_SYMBOL(_einittext) = .;
18295 + . = ALIGN(PAGE_SIZE);
18296 + } :text.init
18297
18298 - INIT_DATA_SECTION(16)
18299 + /*
18300 + * .exit.text is discard at runtime, not link time, to deal with
18301 + * references from .altinstructions and .eh_frame
18302 + */
18303 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18304 + EXIT_TEXT
18305 + . = ALIGN(16);
18306 + } :text.exit
18307 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18308 +
18309 + . = ALIGN(PAGE_SIZE);
18310 + INIT_DATA_SECTION(16) :init
18311
18312 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18313 __x86_cpu_dev_start = .;
18314 @@ -232,19 +313,11 @@ SECTIONS
18315 *(.altinstr_replacement)
18316 }
18317
18318 - /*
18319 - * .exit.text is discard at runtime, not link time, to deal with
18320 - * references from .altinstructions and .eh_frame
18321 - */
18322 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18323 - EXIT_TEXT
18324 - }
18325 -
18326 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18327 EXIT_DATA
18328 }
18329
18330 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18331 +#ifndef CONFIG_SMP
18332 PERCPU(PAGE_SIZE)
18333 #endif
18334
18335 @@ -267,12 +340,6 @@ SECTIONS
18336 . = ALIGN(PAGE_SIZE);
18337 }
18338
18339 -#ifdef CONFIG_X86_64
18340 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18341 - NOSAVE_DATA
18342 - }
18343 -#endif
18344 -
18345 /* BSS */
18346 . = ALIGN(PAGE_SIZE);
18347 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18348 @@ -288,6 +355,7 @@ SECTIONS
18349 __brk_base = .;
18350 . += 64 * 1024; /* 64k alignment slop space */
18351 *(.brk_reservation) /* areas brk users have reserved */
18352 + . = ALIGN(HPAGE_SIZE);
18353 __brk_limit = .;
18354 }
18355
18356 @@ -316,13 +384,12 @@ SECTIONS
18357 * for the boot processor.
18358 */
18359 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18360 -INIT_PER_CPU(gdt_page);
18361 INIT_PER_CPU(irq_stack_union);
18362
18363 /*
18364 * Build-time check on the image size:
18365 */
18366 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18367 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18368 "kernel image bigger than KERNEL_IMAGE_SIZE");
18369
18370 #ifdef CONFIG_SMP
18371 diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18372 --- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18373 +++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18374 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18375
18376 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18377 /* copy vsyscall data */
18378 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18379 vsyscall_gtod_data.clock.vread = clock->vread;
18380 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18381 vsyscall_gtod_data.clock.mask = clock->mask;
18382 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18383 We do this here because otherwise user space would do it on
18384 its own in a likely inferior way (no access to jiffies).
18385 If you don't like it pass NULL. */
18386 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18387 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18388 p = tcache->blob[1];
18389 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18390 /* Load per CPU data from RDTSCP */
18391 diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18392 --- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18393 +++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18394 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18395
18396 EXPORT_SYMBOL(copy_user_generic);
18397 EXPORT_SYMBOL(__copy_user_nocache);
18398 -EXPORT_SYMBOL(copy_from_user);
18399 -EXPORT_SYMBOL(copy_to_user);
18400 EXPORT_SYMBOL(__copy_from_user_inatomic);
18401
18402 EXPORT_SYMBOL(copy_page);
18403 diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18404 --- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18405 +++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18406 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18407 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18408 return -1;
18409
18410 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18411 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18412 fx_sw_user->extended_size -
18413 FP_XSTATE_MAGIC2_SIZE));
18414 /*
18415 @@ -196,7 +196,7 @@ fx_only:
18416 * the other extended state.
18417 */
18418 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18419 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18420 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18421 }
18422
18423 /*
18424 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18425 if (task_thread_info(tsk)->status & TS_XSAVE)
18426 err = restore_user_xstate(buf);
18427 else
18428 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18429 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18430 buf);
18431 if (unlikely(err)) {
18432 /*
18433 diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18434 --- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18435 +++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18436 @@ -81,8 +81,8 @@
18437 #define Src2CL (1<<29)
18438 #define Src2ImmByte (2<<29)
18439 #define Src2One (3<<29)
18440 -#define Src2Imm16 (4<<29)
18441 -#define Src2Mask (7<<29)
18442 +#define Src2Imm16 (4U<<29)
18443 +#define Src2Mask (7U<<29)
18444
18445 enum {
18446 Group1_80, Group1_81, Group1_82, Group1_83,
18447 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18448
18449 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18450 do { \
18451 + unsigned long _tmp; \
18452 __asm__ __volatile__ ( \
18453 _PRE_EFLAGS("0", "4", "2") \
18454 _op _suffix " %"_x"3,%1; " \
18455 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18456 /* Raw emulation: instruction has two explicit operands. */
18457 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18458 do { \
18459 - unsigned long _tmp; \
18460 - \
18461 switch ((_dst).bytes) { \
18462 case 2: \
18463 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18464 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18465
18466 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18467 do { \
18468 - unsigned long _tmp; \
18469 switch ((_dst).bytes) { \
18470 case 1: \
18471 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18472 diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18473 --- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18474 +++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18475 @@ -52,7 +52,7 @@
18476 #define APIC_BUS_CYCLE_NS 1
18477
18478 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18479 -#define apic_debug(fmt, arg...)
18480 +#define apic_debug(fmt, arg...) do {} while (0)
18481
18482 #define APIC_LVT_NUM 6
18483 /* 14 is the version for Xeon and Pentium 8.4.8*/
18484 diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18485 --- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18486 +++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18487 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18488 int level = PT_PAGE_TABLE_LEVEL;
18489 unsigned long mmu_seq;
18490
18491 + pax_track_stack();
18492 +
18493 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18494 kvm_mmu_audit(vcpu, "pre page fault");
18495
18496 diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18497 --- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18498 +++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18499 @@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18500 int cpu = raw_smp_processor_id();
18501
18502 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18503 +
18504 + pax_open_kernel();
18505 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18506 + pax_close_kernel();
18507 +
18508 load_TR_desc();
18509 }
18510
18511 @@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18512 return true;
18513 }
18514
18515 -static struct kvm_x86_ops svm_x86_ops = {
18516 +static const struct kvm_x86_ops svm_x86_ops = {
18517 .cpu_has_kvm_support = has_svm,
18518 .disabled_by_bios = is_disabled,
18519 .hardware_setup = svm_hardware_setup,
18520 diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18521 --- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18522 +++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18523 @@ -570,7 +570,11 @@ static void reload_tss(void)
18524
18525 kvm_get_gdt(&gdt);
18526 descs = (void *)gdt.base;
18527 +
18528 + pax_open_kernel();
18529 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18530 + pax_close_kernel();
18531 +
18532 load_TR_desc();
18533 }
18534
18535 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18536 if (!cpu_has_vmx_flexpriority())
18537 flexpriority_enabled = 0;
18538
18539 - if (!cpu_has_vmx_tpr_shadow())
18540 - kvm_x86_ops->update_cr8_intercept = NULL;
18541 + if (!cpu_has_vmx_tpr_shadow()) {
18542 + pax_open_kernel();
18543 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18544 + pax_close_kernel();
18545 + }
18546
18547 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18548 kvm_disable_largepages();
18549 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18550 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18551
18552 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18553 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18554 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18555 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18556 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18557 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18558 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18559 "jmp .Lkvm_vmx_return \n\t"
18560 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18561 ".Lkvm_vmx_return: "
18562 +
18563 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18564 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18565 + ".Lkvm_vmx_return2: "
18566 +#endif
18567 +
18568 /* Save guest registers, load host registers, keep flags */
18569 "xchg %0, (%%"R"sp) \n\t"
18570 "mov %%"R"ax, %c[rax](%0) \n\t"
18571 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18572 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18573 #endif
18574 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18575 +
18576 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18577 + ,[cs]"i"(__KERNEL_CS)
18578 +#endif
18579 +
18580 : "cc", "memory"
18581 - , R"bx", R"di", R"si"
18582 + , R"ax", R"bx", R"di", R"si"
18583 #ifdef CONFIG_X86_64
18584 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18585 #endif
18586 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18587 if (vmx->rmode.irq.pending)
18588 fixup_rmode_irq(vmx);
18589
18590 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18591 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18592 +
18593 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18594 + loadsegment(fs, __KERNEL_PERCPU);
18595 +#endif
18596 +
18597 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18598 + __set_fs(current_thread_info()->addr_limit);
18599 +#endif
18600 +
18601 vmx->launched = 1;
18602
18603 vmx_complete_interrupts(vmx);
18604 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18605 return false;
18606 }
18607
18608 -static struct kvm_x86_ops vmx_x86_ops = {
18609 +static const struct kvm_x86_ops vmx_x86_ops = {
18610 .cpu_has_kvm_support = cpu_has_kvm_support,
18611 .disabled_by_bios = vmx_disabled_by_bios,
18612 .hardware_setup = hardware_setup,
18613 diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18614 --- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18615 +++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18616 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18617 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18618 struct kvm_cpuid_entry2 __user *entries);
18619
18620 -struct kvm_x86_ops *kvm_x86_ops;
18621 +const struct kvm_x86_ops *kvm_x86_ops;
18622 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18623
18624 int ignore_msrs = 0;
18625 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18626 struct kvm_cpuid2 *cpuid,
18627 struct kvm_cpuid_entry2 __user *entries)
18628 {
18629 - int r;
18630 + int r, i;
18631
18632 r = -E2BIG;
18633 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18634 goto out;
18635 r = -EFAULT;
18636 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18637 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18638 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18639 goto out;
18640 + for (i = 0; i < cpuid->nent; ++i) {
18641 + struct kvm_cpuid_entry2 cpuid_entry;
18642 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18643 + goto out;
18644 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18645 + }
18646 vcpu->arch.cpuid_nent = cpuid->nent;
18647 kvm_apic_set_version(vcpu);
18648 return 0;
18649 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18650 struct kvm_cpuid2 *cpuid,
18651 struct kvm_cpuid_entry2 __user *entries)
18652 {
18653 - int r;
18654 + int r, i;
18655
18656 vcpu_load(vcpu);
18657 r = -E2BIG;
18658 if (cpuid->nent < vcpu->arch.cpuid_nent)
18659 goto out;
18660 r = -EFAULT;
18661 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18662 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18663 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18664 goto out;
18665 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18666 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18667 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18668 + goto out;
18669 + }
18670 return 0;
18671
18672 out:
18673 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18674 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18675 struct kvm_interrupt *irq)
18676 {
18677 - if (irq->irq < 0 || irq->irq >= 256)
18678 + if (irq->irq >= 256)
18679 return -EINVAL;
18680 if (irqchip_in_kernel(vcpu->kvm))
18681 return -ENXIO;
18682 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18683 .notifier_call = kvmclock_cpufreq_notifier
18684 };
18685
18686 -int kvm_arch_init(void *opaque)
18687 +int kvm_arch_init(const void *opaque)
18688 {
18689 int r, cpu;
18690 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18691 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18692
18693 if (kvm_x86_ops) {
18694 printk(KERN_ERR "kvm: already loaded the other module\n");
18695 diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18696 --- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18697 +++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18698 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18699 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18700 * Launcher to reboot us.
18701 */
18702 -static void lguest_restart(char *reason)
18703 +static __noreturn void lguest_restart(char *reason)
18704 {
18705 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18706 + BUG();
18707 }
18708
18709 /*G:050
18710 diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18711 --- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18712 +++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18713 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18714 }
18715 EXPORT_SYMBOL(atomic64_cmpxchg);
18716
18717 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18718 +{
18719 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18720 +}
18721 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18722 +
18723 /**
18724 * atomic64_xchg - xchg atomic64 variable
18725 * @ptr: pointer to type atomic64_t
18726 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18727 EXPORT_SYMBOL(atomic64_xchg);
18728
18729 /**
18730 + * atomic64_xchg_unchecked - xchg atomic64 variable
18731 + * @ptr: pointer to type atomic64_unchecked_t
18732 + * @new_val: value to assign
18733 + *
18734 + * Atomically xchgs the value of @ptr to @new_val and returns
18735 + * the old value.
18736 + */
18737 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18738 +{
18739 + /*
18740 + * Try first with a (possibly incorrect) assumption about
18741 + * what we have there. We'll do two loops most likely,
18742 + * but we'll get an ownership MESI transaction straight away
18743 + * instead of a read transaction followed by a
18744 + * flush-for-ownership transaction:
18745 + */
18746 + u64 old_val, real_val = 0;
18747 +
18748 + do {
18749 + old_val = real_val;
18750 +
18751 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18752 +
18753 + } while (real_val != old_val);
18754 +
18755 + return old_val;
18756 +}
18757 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18758 +
18759 +/**
18760 * atomic64_set - set atomic64 variable
18761 * @ptr: pointer to type atomic64_t
18762 * @new_val: value to assign
18763 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18764 EXPORT_SYMBOL(atomic64_set);
18765
18766 /**
18767 -EXPORT_SYMBOL(atomic64_read);
18768 + * atomic64_unchecked_set - set atomic64 variable
18769 + * @ptr: pointer to type atomic64_unchecked_t
18770 + * @new_val: value to assign
18771 + *
18772 + * Atomically sets the value of @ptr to @new_val.
18773 + */
18774 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18775 +{
18776 + atomic64_xchg_unchecked(ptr, new_val);
18777 +}
18778 +EXPORT_SYMBOL(atomic64_set_unchecked);
18779 +
18780 +/**
18781 * atomic64_add_return - add and return
18782 * @delta: integer value to add
18783 * @ptr: pointer to type atomic64_t
18784 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18785 }
18786 EXPORT_SYMBOL(atomic64_add_return);
18787
18788 +/**
18789 + * atomic64_add_return_unchecked - add and return
18790 + * @delta: integer value to add
18791 + * @ptr: pointer to type atomic64_unchecked_t
18792 + *
18793 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18794 + */
18795 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18796 +{
18797 + /*
18798 + * Try first with a (possibly incorrect) assumption about
18799 + * what we have there. We'll do two loops most likely,
18800 + * but we'll get an ownership MESI transaction straight away
18801 + * instead of a read transaction followed by a
18802 + * flush-for-ownership transaction:
18803 + */
18804 + u64 old_val, new_val, real_val = 0;
18805 +
18806 + do {
18807 + old_val = real_val;
18808 + new_val = old_val + delta;
18809 +
18810 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18811 +
18812 + } while (real_val != old_val);
18813 +
18814 + return new_val;
18815 +}
18816 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18817 +
18818 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18819 {
18820 return atomic64_add_return(-delta, ptr);
18821 }
18822 EXPORT_SYMBOL(atomic64_sub_return);
18823
18824 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18825 +{
18826 + return atomic64_add_return_unchecked(-delta, ptr);
18827 +}
18828 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18829 +
18830 u64 atomic64_inc_return(atomic64_t *ptr)
18831 {
18832 return atomic64_add_return(1, ptr);
18833 }
18834 EXPORT_SYMBOL(atomic64_inc_return);
18835
18836 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18837 +{
18838 + return atomic64_add_return_unchecked(1, ptr);
18839 +}
18840 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18841 +
18842 u64 atomic64_dec_return(atomic64_t *ptr)
18843 {
18844 return atomic64_sub_return(1, ptr);
18845 }
18846 EXPORT_SYMBOL(atomic64_dec_return);
18847
18848 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18849 +{
18850 + return atomic64_sub_return_unchecked(1, ptr);
18851 +}
18852 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18853 +
18854 /**
18855 * atomic64_add - add integer to atomic64 variable
18856 * @delta: integer value to add
18857 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18858 EXPORT_SYMBOL(atomic64_add);
18859
18860 /**
18861 + * atomic64_add_unchecked - add integer to atomic64 variable
18862 + * @delta: integer value to add
18863 + * @ptr: pointer to type atomic64_unchecked_t
18864 + *
18865 + * Atomically adds @delta to @ptr.
18866 + */
18867 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18868 +{
18869 + atomic64_add_return_unchecked(delta, ptr);
18870 +}
18871 +EXPORT_SYMBOL(atomic64_add_unchecked);
18872 +
18873 +/**
18874 * atomic64_sub - subtract the atomic64 variable
18875 * @delta: integer value to subtract
18876 * @ptr: pointer to type atomic64_t
18877 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18878 EXPORT_SYMBOL(atomic64_sub);
18879
18880 /**
18881 + * atomic64_sub_unchecked - subtract the atomic64 variable
18882 + * @delta: integer value to subtract
18883 + * @ptr: pointer to type atomic64_unchecked_t
18884 + *
18885 + * Atomically subtracts @delta from @ptr.
18886 + */
18887 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18888 +{
18889 + atomic64_add_unchecked(-delta, ptr);
18890 +}
18891 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18892 +
18893 +/**
18894 * atomic64_sub_and_test - subtract value from variable and test result
18895 * @delta: integer value to subtract
18896 * @ptr: pointer to type atomic64_t
18897 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18898 EXPORT_SYMBOL(atomic64_inc);
18899
18900 /**
18901 + * atomic64_inc_unchecked - increment atomic64 variable
18902 + * @ptr: pointer to type atomic64_unchecked_t
18903 + *
18904 + * Atomically increments @ptr by 1.
18905 + */
18906 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18907 +{
18908 + atomic64_add_unchecked(1, ptr);
18909 +}
18910 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18911 +
18912 +/**
18913 * atomic64_dec - decrement atomic64 variable
18914 * @ptr: pointer to type atomic64_t
18915 *
18916 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18917 EXPORT_SYMBOL(atomic64_dec);
18918
18919 /**
18920 + * atomic64_dec_unchecked - decrement atomic64 variable
18921 + * @ptr: pointer to type atomic64_unchecked_t
18922 + *
18923 + * Atomically decrements @ptr by 1.
18924 + */
18925 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18926 +{
18927 + atomic64_sub_unchecked(1, ptr);
18928 +}
18929 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18930 +
18931 +/**
18932 * atomic64_dec_and_test - decrement and test
18933 * @ptr: pointer to type atomic64_t
18934 *
18935 diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
18936 --- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18937 +++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18938 @@ -28,7 +28,8 @@
18939 #include <linux/linkage.h>
18940 #include <asm/dwarf2.h>
18941 #include <asm/errno.h>
18942 -
18943 +#include <asm/segment.h>
18944 +
18945 /*
18946 * computes a partial checksum, e.g. for TCP/UDP fragments
18947 */
18948 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18949
18950 #define ARGBASE 16
18951 #define FP 12
18952 -
18953 -ENTRY(csum_partial_copy_generic)
18954 +
18955 +ENTRY(csum_partial_copy_generic_to_user)
18956 CFI_STARTPROC
18957 +
18958 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18959 + pushl %gs
18960 + CFI_ADJUST_CFA_OFFSET 4
18961 + popl %es
18962 + CFI_ADJUST_CFA_OFFSET -4
18963 + jmp csum_partial_copy_generic
18964 +#endif
18965 +
18966 +ENTRY(csum_partial_copy_generic_from_user)
18967 +
18968 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18969 + pushl %gs
18970 + CFI_ADJUST_CFA_OFFSET 4
18971 + popl %ds
18972 + CFI_ADJUST_CFA_OFFSET -4
18973 +#endif
18974 +
18975 +ENTRY(csum_partial_copy_generic)
18976 subl $4,%esp
18977 CFI_ADJUST_CFA_OFFSET 4
18978 pushl %edi
18979 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18980 jmp 4f
18981 SRC(1: movw (%esi), %bx )
18982 addl $2, %esi
18983 -DST( movw %bx, (%edi) )
18984 +DST( movw %bx, %es:(%edi) )
18985 addl $2, %edi
18986 addw %bx, %ax
18987 adcl $0, %eax
18988 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18989 SRC(1: movl (%esi), %ebx )
18990 SRC( movl 4(%esi), %edx )
18991 adcl %ebx, %eax
18992 -DST( movl %ebx, (%edi) )
18993 +DST( movl %ebx, %es:(%edi) )
18994 adcl %edx, %eax
18995 -DST( movl %edx, 4(%edi) )
18996 +DST( movl %edx, %es:4(%edi) )
18997
18998 SRC( movl 8(%esi), %ebx )
18999 SRC( movl 12(%esi), %edx )
19000 adcl %ebx, %eax
19001 -DST( movl %ebx, 8(%edi) )
19002 +DST( movl %ebx, %es:8(%edi) )
19003 adcl %edx, %eax
19004 -DST( movl %edx, 12(%edi) )
19005 +DST( movl %edx, %es:12(%edi) )
19006
19007 SRC( movl 16(%esi), %ebx )
19008 SRC( movl 20(%esi), %edx )
19009 adcl %ebx, %eax
19010 -DST( movl %ebx, 16(%edi) )
19011 +DST( movl %ebx, %es:16(%edi) )
19012 adcl %edx, %eax
19013 -DST( movl %edx, 20(%edi) )
19014 +DST( movl %edx, %es:20(%edi) )
19015
19016 SRC( movl 24(%esi), %ebx )
19017 SRC( movl 28(%esi), %edx )
19018 adcl %ebx, %eax
19019 -DST( movl %ebx, 24(%edi) )
19020 +DST( movl %ebx, %es:24(%edi) )
19021 adcl %edx, %eax
19022 -DST( movl %edx, 28(%edi) )
19023 +DST( movl %edx, %es:28(%edi) )
19024
19025 lea 32(%esi), %esi
19026 lea 32(%edi), %edi
19027 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19028 shrl $2, %edx # This clears CF
19029 SRC(3: movl (%esi), %ebx )
19030 adcl %ebx, %eax
19031 -DST( movl %ebx, (%edi) )
19032 +DST( movl %ebx, %es:(%edi) )
19033 lea 4(%esi), %esi
19034 lea 4(%edi), %edi
19035 dec %edx
19036 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19037 jb 5f
19038 SRC( movw (%esi), %cx )
19039 leal 2(%esi), %esi
19040 -DST( movw %cx, (%edi) )
19041 +DST( movw %cx, %es:(%edi) )
19042 leal 2(%edi), %edi
19043 je 6f
19044 shll $16,%ecx
19045 SRC(5: movb (%esi), %cl )
19046 -DST( movb %cl, (%edi) )
19047 +DST( movb %cl, %es:(%edi) )
19048 6: addl %ecx, %eax
19049 adcl $0, %eax
19050 7:
19051 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19052
19053 6001:
19054 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19055 - movl $-EFAULT, (%ebx)
19056 + movl $-EFAULT, %ss:(%ebx)
19057
19058 # zero the complete destination - computing the rest
19059 # is too much work
19060 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19061
19062 6002:
19063 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19064 - movl $-EFAULT,(%ebx)
19065 + movl $-EFAULT,%ss:(%ebx)
19066 jmp 5000b
19067
19068 .previous
19069
19070 + pushl %ss
19071 + CFI_ADJUST_CFA_OFFSET 4
19072 + popl %ds
19073 + CFI_ADJUST_CFA_OFFSET -4
19074 + pushl %ss
19075 + CFI_ADJUST_CFA_OFFSET 4
19076 + popl %es
19077 + CFI_ADJUST_CFA_OFFSET -4
19078 popl %ebx
19079 CFI_ADJUST_CFA_OFFSET -4
19080 CFI_RESTORE ebx
19081 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19082 CFI_ADJUST_CFA_OFFSET -4
19083 ret
19084 CFI_ENDPROC
19085 -ENDPROC(csum_partial_copy_generic)
19086 +ENDPROC(csum_partial_copy_generic_to_user)
19087
19088 #else
19089
19090 /* Version for PentiumII/PPro */
19091
19092 #define ROUND1(x) \
19093 + nop; nop; nop; \
19094 SRC(movl x(%esi), %ebx ) ; \
19095 addl %ebx, %eax ; \
19096 - DST(movl %ebx, x(%edi) ) ;
19097 + DST(movl %ebx, %es:x(%edi)) ;
19098
19099 #define ROUND(x) \
19100 + nop; nop; nop; \
19101 SRC(movl x(%esi), %ebx ) ; \
19102 adcl %ebx, %eax ; \
19103 - DST(movl %ebx, x(%edi) ) ;
19104 + DST(movl %ebx, %es:x(%edi)) ;
19105
19106 #define ARGBASE 12
19107 -
19108 -ENTRY(csum_partial_copy_generic)
19109 +
19110 +ENTRY(csum_partial_copy_generic_to_user)
19111 CFI_STARTPROC
19112 +
19113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19114 + pushl %gs
19115 + CFI_ADJUST_CFA_OFFSET 4
19116 + popl %es
19117 + CFI_ADJUST_CFA_OFFSET -4
19118 + jmp csum_partial_copy_generic
19119 +#endif
19120 +
19121 +ENTRY(csum_partial_copy_generic_from_user)
19122 +
19123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19124 + pushl %gs
19125 + CFI_ADJUST_CFA_OFFSET 4
19126 + popl %ds
19127 + CFI_ADJUST_CFA_OFFSET -4
19128 +#endif
19129 +
19130 +ENTRY(csum_partial_copy_generic)
19131 pushl %ebx
19132 CFI_ADJUST_CFA_OFFSET 4
19133 CFI_REL_OFFSET ebx, 0
19134 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19135 subl %ebx, %edi
19136 lea -1(%esi),%edx
19137 andl $-32,%edx
19138 - lea 3f(%ebx,%ebx), %ebx
19139 + lea 3f(%ebx,%ebx,2), %ebx
19140 testl %esi, %esi
19141 jmp *%ebx
19142 1: addl $64,%esi
19143 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19144 jb 5f
19145 SRC( movw (%esi), %dx )
19146 leal 2(%esi), %esi
19147 -DST( movw %dx, (%edi) )
19148 +DST( movw %dx, %es:(%edi) )
19149 leal 2(%edi), %edi
19150 je 6f
19151 shll $16,%edx
19152 5:
19153 SRC( movb (%esi), %dl )
19154 -DST( movb %dl, (%edi) )
19155 +DST( movb %dl, %es:(%edi) )
19156 6: addl %edx, %eax
19157 adcl $0, %eax
19158 7:
19159 .section .fixup, "ax"
19160 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19161 - movl $-EFAULT, (%ebx)
19162 + movl $-EFAULT, %ss:(%ebx)
19163 # zero the complete destination (computing the rest is too much work)
19164 movl ARGBASE+8(%esp),%edi # dst
19165 movl ARGBASE+12(%esp),%ecx # len
19166 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19167 rep; stosb
19168 jmp 7b
19169 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19170 - movl $-EFAULT, (%ebx)
19171 + movl $-EFAULT, %ss:(%ebx)
19172 jmp 7b
19173 .previous
19174
19175 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19176 + pushl %ss
19177 + CFI_ADJUST_CFA_OFFSET 4
19178 + popl %ds
19179 + CFI_ADJUST_CFA_OFFSET -4
19180 + pushl %ss
19181 + CFI_ADJUST_CFA_OFFSET 4
19182 + popl %es
19183 + CFI_ADJUST_CFA_OFFSET -4
19184 +#endif
19185 +
19186 popl %esi
19187 CFI_ADJUST_CFA_OFFSET -4
19188 CFI_RESTORE esi
19189 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19190 CFI_RESTORE ebx
19191 ret
19192 CFI_ENDPROC
19193 -ENDPROC(csum_partial_copy_generic)
19194 +ENDPROC(csum_partial_copy_generic_to_user)
19195
19196 #undef ROUND
19197 #undef ROUND1
19198 diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19199 --- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19200 +++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19201 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
19202
19203 #include <asm/cpufeature.h>
19204
19205 - .section .altinstr_replacement,"ax"
19206 + .section .altinstr_replacement,"a"
19207 1: .byte 0xeb /* jmp <disp8> */
19208 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19209 2:
19210 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19211 --- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19212 +++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19213 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
19214
19215 #include <asm/cpufeature.h>
19216
19217 - .section .altinstr_replacement,"ax"
19218 + .section .altinstr_replacement,"a"
19219 1: .byte 0xeb /* jmp <disp8> */
19220 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19221 2:
19222 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19223 --- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19224 +++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19225 @@ -15,13 +15,14 @@
19226 #include <asm/asm-offsets.h>
19227 #include <asm/thread_info.h>
19228 #include <asm/cpufeature.h>
19229 +#include <asm/pgtable.h>
19230
19231 .macro ALTERNATIVE_JUMP feature,orig,alt
19232 0:
19233 .byte 0xe9 /* 32bit jump */
19234 .long \orig-1f /* by default jump to orig */
19235 1:
19236 - .section .altinstr_replacement,"ax"
19237 + .section .altinstr_replacement,"a"
19238 2: .byte 0xe9 /* near jump with 32bit immediate */
19239 .long \alt-1b /* offset */ /* or alternatively to alt */
19240 .previous
19241 @@ -64,49 +65,19 @@
19242 #endif
19243 .endm
19244
19245 -/* Standard copy_to_user with segment limit checking */
19246 -ENTRY(copy_to_user)
19247 - CFI_STARTPROC
19248 - GET_THREAD_INFO(%rax)
19249 - movq %rdi,%rcx
19250 - addq %rdx,%rcx
19251 - jc bad_to_user
19252 - cmpq TI_addr_limit(%rax),%rcx
19253 - ja bad_to_user
19254 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19255 - CFI_ENDPROC
19256 -ENDPROC(copy_to_user)
19257 -
19258 -/* Standard copy_from_user with segment limit checking */
19259 -ENTRY(copy_from_user)
19260 - CFI_STARTPROC
19261 - GET_THREAD_INFO(%rax)
19262 - movq %rsi,%rcx
19263 - addq %rdx,%rcx
19264 - jc bad_from_user
19265 - cmpq TI_addr_limit(%rax),%rcx
19266 - ja bad_from_user
19267 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19268 - CFI_ENDPROC
19269 -ENDPROC(copy_from_user)
19270 -
19271 ENTRY(copy_user_generic)
19272 CFI_STARTPROC
19273 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19274 CFI_ENDPROC
19275 ENDPROC(copy_user_generic)
19276
19277 -ENTRY(__copy_from_user_inatomic)
19278 - CFI_STARTPROC
19279 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19280 - CFI_ENDPROC
19281 -ENDPROC(__copy_from_user_inatomic)
19282 -
19283 .section .fixup,"ax"
19284 /* must zero dest */
19285 ENTRY(bad_from_user)
19286 bad_from_user:
19287 CFI_STARTPROC
19288 + testl %edx,%edx
19289 + js bad_to_user
19290 movl %edx,%ecx
19291 xorl %eax,%eax
19292 rep
19293 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19294 --- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19295 +++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19296 @@ -14,6 +14,7 @@
19297 #include <asm/current.h>
19298 #include <asm/asm-offsets.h>
19299 #include <asm/thread_info.h>
19300 +#include <asm/pgtable.h>
19301
19302 .macro ALIGN_DESTINATION
19303 #ifdef FIX_ALIGNMENT
19304 @@ -50,6 +51,15 @@
19305 */
19306 ENTRY(__copy_user_nocache)
19307 CFI_STARTPROC
19308 +
19309 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19310 + mov $PAX_USER_SHADOW_BASE,%rcx
19311 + cmp %rcx,%rsi
19312 + jae 1f
19313 + add %rcx,%rsi
19314 +1:
19315 +#endif
19316 +
19317 cmpl $8,%edx
19318 jb 20f /* less then 8 bytes, go to byte copy loop */
19319 ALIGN_DESTINATION
19320 diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19321 --- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19322 +++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19323 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19324 len -= 2;
19325 }
19326 }
19327 +
19328 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19329 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19330 + src += PAX_USER_SHADOW_BASE;
19331 +#endif
19332 +
19333 isum = csum_partial_copy_generic((__force const void *)src,
19334 dst, len, isum, errp, NULL);
19335 if (unlikely(*errp))
19336 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19337 }
19338
19339 *errp = 0;
19340 +
19341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19342 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19343 + dst += PAX_USER_SHADOW_BASE;
19344 +#endif
19345 +
19346 return csum_partial_copy_generic(src, (void __force *)dst,
19347 len, isum, NULL, errp);
19348 }
19349 diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19350 --- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19351 +++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19352 @@ -33,14 +33,35 @@
19353 #include <asm/asm-offsets.h>
19354 #include <asm/thread_info.h>
19355 #include <asm/asm.h>
19356 +#include <asm/segment.h>
19357 +#include <asm/pgtable.h>
19358 +
19359 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19360 +#define __copyuser_seg gs;
19361 +#else
19362 +#define __copyuser_seg
19363 +#endif
19364
19365 .text
19366 ENTRY(__get_user_1)
19367 CFI_STARTPROC
19368 +
19369 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19370 GET_THREAD_INFO(%_ASM_DX)
19371 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19372 jae bad_get_user
19373 -1: movzb (%_ASM_AX),%edx
19374 +
19375 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19376 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19377 + cmp %_ASM_DX,%_ASM_AX
19378 + jae 1234f
19379 + add %_ASM_DX,%_ASM_AX
19380 +1234:
19381 +#endif
19382 +
19383 +#endif
19384 +
19385 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19386 xor %eax,%eax
19387 ret
19388 CFI_ENDPROC
19389 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19390 ENTRY(__get_user_2)
19391 CFI_STARTPROC
19392 add $1,%_ASM_AX
19393 +
19394 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19395 jc bad_get_user
19396 GET_THREAD_INFO(%_ASM_DX)
19397 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19398 jae bad_get_user
19399 -2: movzwl -1(%_ASM_AX),%edx
19400 +
19401 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19402 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19403 + cmp %_ASM_DX,%_ASM_AX
19404 + jae 1234f
19405 + add %_ASM_DX,%_ASM_AX
19406 +1234:
19407 +#endif
19408 +
19409 +#endif
19410 +
19411 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19412 xor %eax,%eax
19413 ret
19414 CFI_ENDPROC
19415 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19416 ENTRY(__get_user_4)
19417 CFI_STARTPROC
19418 add $3,%_ASM_AX
19419 +
19420 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19421 jc bad_get_user
19422 GET_THREAD_INFO(%_ASM_DX)
19423 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19424 jae bad_get_user
19425 -3: mov -3(%_ASM_AX),%edx
19426 +
19427 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19428 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19429 + cmp %_ASM_DX,%_ASM_AX
19430 + jae 1234f
19431 + add %_ASM_DX,%_ASM_AX
19432 +1234:
19433 +#endif
19434 +
19435 +#endif
19436 +
19437 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19438 xor %eax,%eax
19439 ret
19440 CFI_ENDPROC
19441 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19442 GET_THREAD_INFO(%_ASM_DX)
19443 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19444 jae bad_get_user
19445 +
19446 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19447 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19448 + cmp %_ASM_DX,%_ASM_AX
19449 + jae 1234f
19450 + add %_ASM_DX,%_ASM_AX
19451 +1234:
19452 +#endif
19453 +
19454 4: movq -7(%_ASM_AX),%_ASM_DX
19455 xor %eax,%eax
19456 ret
19457 diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19458 --- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19459 +++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19460 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19461 * It is also a lot simpler. Use this when possible:
19462 */
19463
19464 - .section .altinstr_replacement, "ax"
19465 + .section .altinstr_replacement, "a"
19466 1: .byte 0xeb /* jmp <disp8> */
19467 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19468 2:
19469 diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19470 --- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19471 +++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19472 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19473
19474 #include <asm/cpufeature.h>
19475
19476 - .section .altinstr_replacement,"ax"
19477 + .section .altinstr_replacement,"a"
19478 1: .byte 0xeb /* jmp <disp8> */
19479 .byte (memset_c - memset) - (2f - 1b) /* offset */
19480 2:
19481 diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19482 --- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19483 +++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19484 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19485 {
19486 void *p;
19487 int i;
19488 + unsigned long cr0;
19489
19490 if (unlikely(in_interrupt()))
19491 return __memcpy(to, from, len);
19492 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19493 kernel_fpu_begin();
19494
19495 __asm__ __volatile__ (
19496 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19497 - " prefetch 64(%0)\n"
19498 - " prefetch 128(%0)\n"
19499 - " prefetch 192(%0)\n"
19500 - " prefetch 256(%0)\n"
19501 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19502 + " prefetch 64(%1)\n"
19503 + " prefetch 128(%1)\n"
19504 + " prefetch 192(%1)\n"
19505 + " prefetch 256(%1)\n"
19506 "2: \n"
19507 ".section .fixup, \"ax\"\n"
19508 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19509 + "3: \n"
19510 +
19511 +#ifdef CONFIG_PAX_KERNEXEC
19512 + " movl %%cr0, %0\n"
19513 + " movl %0, %%eax\n"
19514 + " andl $0xFFFEFFFF, %%eax\n"
19515 + " movl %%eax, %%cr0\n"
19516 +#endif
19517 +
19518 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19519 +
19520 +#ifdef CONFIG_PAX_KERNEXEC
19521 + " movl %0, %%cr0\n"
19522 +#endif
19523 +
19524 " jmp 2b\n"
19525 ".previous\n"
19526 _ASM_EXTABLE(1b, 3b)
19527 - : : "r" (from));
19528 + : "=&r" (cr0) : "r" (from) : "ax");
19529
19530 for ( ; i > 5; i--) {
19531 __asm__ __volatile__ (
19532 - "1: prefetch 320(%0)\n"
19533 - "2: movq (%0), %%mm0\n"
19534 - " movq 8(%0), %%mm1\n"
19535 - " movq 16(%0), %%mm2\n"
19536 - " movq 24(%0), %%mm3\n"
19537 - " movq %%mm0, (%1)\n"
19538 - " movq %%mm1, 8(%1)\n"
19539 - " movq %%mm2, 16(%1)\n"
19540 - " movq %%mm3, 24(%1)\n"
19541 - " movq 32(%0), %%mm0\n"
19542 - " movq 40(%0), %%mm1\n"
19543 - " movq 48(%0), %%mm2\n"
19544 - " movq 56(%0), %%mm3\n"
19545 - " movq %%mm0, 32(%1)\n"
19546 - " movq %%mm1, 40(%1)\n"
19547 - " movq %%mm2, 48(%1)\n"
19548 - " movq %%mm3, 56(%1)\n"
19549 + "1: prefetch 320(%1)\n"
19550 + "2: movq (%1), %%mm0\n"
19551 + " movq 8(%1), %%mm1\n"
19552 + " movq 16(%1), %%mm2\n"
19553 + " movq 24(%1), %%mm3\n"
19554 + " movq %%mm0, (%2)\n"
19555 + " movq %%mm1, 8(%2)\n"
19556 + " movq %%mm2, 16(%2)\n"
19557 + " movq %%mm3, 24(%2)\n"
19558 + " movq 32(%1), %%mm0\n"
19559 + " movq 40(%1), %%mm1\n"
19560 + " movq 48(%1), %%mm2\n"
19561 + " movq 56(%1), %%mm3\n"
19562 + " movq %%mm0, 32(%2)\n"
19563 + " movq %%mm1, 40(%2)\n"
19564 + " movq %%mm2, 48(%2)\n"
19565 + " movq %%mm3, 56(%2)\n"
19566 ".section .fixup, \"ax\"\n"
19567 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19568 + "3:\n"
19569 +
19570 +#ifdef CONFIG_PAX_KERNEXEC
19571 + " movl %%cr0, %0\n"
19572 + " movl %0, %%eax\n"
19573 + " andl $0xFFFEFFFF, %%eax\n"
19574 + " movl %%eax, %%cr0\n"
19575 +#endif
19576 +
19577 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19578 +
19579 +#ifdef CONFIG_PAX_KERNEXEC
19580 + " movl %0, %%cr0\n"
19581 +#endif
19582 +
19583 " jmp 2b\n"
19584 ".previous\n"
19585 _ASM_EXTABLE(1b, 3b)
19586 - : : "r" (from), "r" (to) : "memory");
19587 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19588
19589 from += 64;
19590 to += 64;
19591 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19592 static void fast_copy_page(void *to, void *from)
19593 {
19594 int i;
19595 + unsigned long cr0;
19596
19597 kernel_fpu_begin();
19598
19599 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19600 * but that is for later. -AV
19601 */
19602 __asm__ __volatile__(
19603 - "1: prefetch (%0)\n"
19604 - " prefetch 64(%0)\n"
19605 - " prefetch 128(%0)\n"
19606 - " prefetch 192(%0)\n"
19607 - " prefetch 256(%0)\n"
19608 + "1: prefetch (%1)\n"
19609 + " prefetch 64(%1)\n"
19610 + " prefetch 128(%1)\n"
19611 + " prefetch 192(%1)\n"
19612 + " prefetch 256(%1)\n"
19613 "2: \n"
19614 ".section .fixup, \"ax\"\n"
19615 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19616 + "3: \n"
19617 +
19618 +#ifdef CONFIG_PAX_KERNEXEC
19619 + " movl %%cr0, %0\n"
19620 + " movl %0, %%eax\n"
19621 + " andl $0xFFFEFFFF, %%eax\n"
19622 + " movl %%eax, %%cr0\n"
19623 +#endif
19624 +
19625 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19626 +
19627 +#ifdef CONFIG_PAX_KERNEXEC
19628 + " movl %0, %%cr0\n"
19629 +#endif
19630 +
19631 " jmp 2b\n"
19632 ".previous\n"
19633 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19634 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19635
19636 for (i = 0; i < (4096-320)/64; i++) {
19637 __asm__ __volatile__ (
19638 - "1: prefetch 320(%0)\n"
19639 - "2: movq (%0), %%mm0\n"
19640 - " movntq %%mm0, (%1)\n"
19641 - " movq 8(%0), %%mm1\n"
19642 - " movntq %%mm1, 8(%1)\n"
19643 - " movq 16(%0), %%mm2\n"
19644 - " movntq %%mm2, 16(%1)\n"
19645 - " movq 24(%0), %%mm3\n"
19646 - " movntq %%mm3, 24(%1)\n"
19647 - " movq 32(%0), %%mm4\n"
19648 - " movntq %%mm4, 32(%1)\n"
19649 - " movq 40(%0), %%mm5\n"
19650 - " movntq %%mm5, 40(%1)\n"
19651 - " movq 48(%0), %%mm6\n"
19652 - " movntq %%mm6, 48(%1)\n"
19653 - " movq 56(%0), %%mm7\n"
19654 - " movntq %%mm7, 56(%1)\n"
19655 + "1: prefetch 320(%1)\n"
19656 + "2: movq (%1), %%mm0\n"
19657 + " movntq %%mm0, (%2)\n"
19658 + " movq 8(%1), %%mm1\n"
19659 + " movntq %%mm1, 8(%2)\n"
19660 + " movq 16(%1), %%mm2\n"
19661 + " movntq %%mm2, 16(%2)\n"
19662 + " movq 24(%1), %%mm3\n"
19663 + " movntq %%mm3, 24(%2)\n"
19664 + " movq 32(%1), %%mm4\n"
19665 + " movntq %%mm4, 32(%2)\n"
19666 + " movq 40(%1), %%mm5\n"
19667 + " movntq %%mm5, 40(%2)\n"
19668 + " movq 48(%1), %%mm6\n"
19669 + " movntq %%mm6, 48(%2)\n"
19670 + " movq 56(%1), %%mm7\n"
19671 + " movntq %%mm7, 56(%2)\n"
19672 ".section .fixup, \"ax\"\n"
19673 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19674 + "3:\n"
19675 +
19676 +#ifdef CONFIG_PAX_KERNEXEC
19677 + " movl %%cr0, %0\n"
19678 + " movl %0, %%eax\n"
19679 + " andl $0xFFFEFFFF, %%eax\n"
19680 + " movl %%eax, %%cr0\n"
19681 +#endif
19682 +
19683 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19684 +
19685 +#ifdef CONFIG_PAX_KERNEXEC
19686 + " movl %0, %%cr0\n"
19687 +#endif
19688 +
19689 " jmp 2b\n"
19690 ".previous\n"
19691 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19692 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19693
19694 from += 64;
19695 to += 64;
19696 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19697 static void fast_copy_page(void *to, void *from)
19698 {
19699 int i;
19700 + unsigned long cr0;
19701
19702 kernel_fpu_begin();
19703
19704 __asm__ __volatile__ (
19705 - "1: prefetch (%0)\n"
19706 - " prefetch 64(%0)\n"
19707 - " prefetch 128(%0)\n"
19708 - " prefetch 192(%0)\n"
19709 - " prefetch 256(%0)\n"
19710 + "1: prefetch (%1)\n"
19711 + " prefetch 64(%1)\n"
19712 + " prefetch 128(%1)\n"
19713 + " prefetch 192(%1)\n"
19714 + " prefetch 256(%1)\n"
19715 "2: \n"
19716 ".section .fixup, \"ax\"\n"
19717 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19718 + "3: \n"
19719 +
19720 +#ifdef CONFIG_PAX_KERNEXEC
19721 + " movl %%cr0, %0\n"
19722 + " movl %0, %%eax\n"
19723 + " andl $0xFFFEFFFF, %%eax\n"
19724 + " movl %%eax, %%cr0\n"
19725 +#endif
19726 +
19727 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19728 +
19729 +#ifdef CONFIG_PAX_KERNEXEC
19730 + " movl %0, %%cr0\n"
19731 +#endif
19732 +
19733 " jmp 2b\n"
19734 ".previous\n"
19735 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19736 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19737
19738 for (i = 0; i < 4096/64; i++) {
19739 __asm__ __volatile__ (
19740 - "1: prefetch 320(%0)\n"
19741 - "2: movq (%0), %%mm0\n"
19742 - " movq 8(%0), %%mm1\n"
19743 - " movq 16(%0), %%mm2\n"
19744 - " movq 24(%0), %%mm3\n"
19745 - " movq %%mm0, (%1)\n"
19746 - " movq %%mm1, 8(%1)\n"
19747 - " movq %%mm2, 16(%1)\n"
19748 - " movq %%mm3, 24(%1)\n"
19749 - " movq 32(%0), %%mm0\n"
19750 - " movq 40(%0), %%mm1\n"
19751 - " movq 48(%0), %%mm2\n"
19752 - " movq 56(%0), %%mm3\n"
19753 - " movq %%mm0, 32(%1)\n"
19754 - " movq %%mm1, 40(%1)\n"
19755 - " movq %%mm2, 48(%1)\n"
19756 - " movq %%mm3, 56(%1)\n"
19757 + "1: prefetch 320(%1)\n"
19758 + "2: movq (%1), %%mm0\n"
19759 + " movq 8(%1), %%mm1\n"
19760 + " movq 16(%1), %%mm2\n"
19761 + " movq 24(%1), %%mm3\n"
19762 + " movq %%mm0, (%2)\n"
19763 + " movq %%mm1, 8(%2)\n"
19764 + " movq %%mm2, 16(%2)\n"
19765 + " movq %%mm3, 24(%2)\n"
19766 + " movq 32(%1), %%mm0\n"
19767 + " movq 40(%1), %%mm1\n"
19768 + " movq 48(%1), %%mm2\n"
19769 + " movq 56(%1), %%mm3\n"
19770 + " movq %%mm0, 32(%2)\n"
19771 + " movq %%mm1, 40(%2)\n"
19772 + " movq %%mm2, 48(%2)\n"
19773 + " movq %%mm3, 56(%2)\n"
19774 ".section .fixup, \"ax\"\n"
19775 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19776 + "3:\n"
19777 +
19778 +#ifdef CONFIG_PAX_KERNEXEC
19779 + " movl %%cr0, %0\n"
19780 + " movl %0, %%eax\n"
19781 + " andl $0xFFFEFFFF, %%eax\n"
19782 + " movl %%eax, %%cr0\n"
19783 +#endif
19784 +
19785 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19786 +
19787 +#ifdef CONFIG_PAX_KERNEXEC
19788 + " movl %0, %%cr0\n"
19789 +#endif
19790 +
19791 " jmp 2b\n"
19792 ".previous\n"
19793 _ASM_EXTABLE(1b, 3b)
19794 - : : "r" (from), "r" (to) : "memory");
19795 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19796
19797 from += 64;
19798 to += 64;
19799 diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19800 --- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19801 +++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19802 @@ -15,7 +15,8 @@
19803 #include <asm/thread_info.h>
19804 #include <asm/errno.h>
19805 #include <asm/asm.h>
19806 -
19807 +#include <asm/segment.h>
19808 +#include <asm/pgtable.h>
19809
19810 /*
19811 * __put_user_X
19812 @@ -29,52 +30,119 @@
19813 * as they get called from within inline assembly.
19814 */
19815
19816 -#define ENTER CFI_STARTPROC ; \
19817 - GET_THREAD_INFO(%_ASM_BX)
19818 +#define ENTER CFI_STARTPROC
19819 #define EXIT ret ; \
19820 CFI_ENDPROC
19821
19822 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19823 +#define _DEST %_ASM_CX,%_ASM_BX
19824 +#else
19825 +#define _DEST %_ASM_CX
19826 +#endif
19827 +
19828 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19829 +#define __copyuser_seg gs;
19830 +#else
19831 +#define __copyuser_seg
19832 +#endif
19833 +
19834 .text
19835 ENTRY(__put_user_1)
19836 ENTER
19837 +
19838 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19839 + GET_THREAD_INFO(%_ASM_BX)
19840 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19841 jae bad_put_user
19842 -1: movb %al,(%_ASM_CX)
19843 +
19844 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19845 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19846 + cmp %_ASM_BX,%_ASM_CX
19847 + jb 1234f
19848 + xor %ebx,%ebx
19849 +1234:
19850 +#endif
19851 +
19852 +#endif
19853 +
19854 +1: __copyuser_seg movb %al,(_DEST)
19855 xor %eax,%eax
19856 EXIT
19857 ENDPROC(__put_user_1)
19858
19859 ENTRY(__put_user_2)
19860 ENTER
19861 +
19862 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19863 + GET_THREAD_INFO(%_ASM_BX)
19864 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19865 sub $1,%_ASM_BX
19866 cmp %_ASM_BX,%_ASM_CX
19867 jae bad_put_user
19868 -2: movw %ax,(%_ASM_CX)
19869 +
19870 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19871 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19872 + cmp %_ASM_BX,%_ASM_CX
19873 + jb 1234f
19874 + xor %ebx,%ebx
19875 +1234:
19876 +#endif
19877 +
19878 +#endif
19879 +
19880 +2: __copyuser_seg movw %ax,(_DEST)
19881 xor %eax,%eax
19882 EXIT
19883 ENDPROC(__put_user_2)
19884
19885 ENTRY(__put_user_4)
19886 ENTER
19887 +
19888 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19889 + GET_THREAD_INFO(%_ASM_BX)
19890 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19891 sub $3,%_ASM_BX
19892 cmp %_ASM_BX,%_ASM_CX
19893 jae bad_put_user
19894 -3: movl %eax,(%_ASM_CX)
19895 +
19896 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19897 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19898 + cmp %_ASM_BX,%_ASM_CX
19899 + jb 1234f
19900 + xor %ebx,%ebx
19901 +1234:
19902 +#endif
19903 +
19904 +#endif
19905 +
19906 +3: __copyuser_seg movl %eax,(_DEST)
19907 xor %eax,%eax
19908 EXIT
19909 ENDPROC(__put_user_4)
19910
19911 ENTRY(__put_user_8)
19912 ENTER
19913 +
19914 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19915 + GET_THREAD_INFO(%_ASM_BX)
19916 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19917 sub $7,%_ASM_BX
19918 cmp %_ASM_BX,%_ASM_CX
19919 jae bad_put_user
19920 -4: mov %_ASM_AX,(%_ASM_CX)
19921 +
19922 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19923 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19924 + cmp %_ASM_BX,%_ASM_CX
19925 + jb 1234f
19926 + xor %ebx,%ebx
19927 +1234:
19928 +#endif
19929 +
19930 +#endif
19931 +
19932 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19933 #ifdef CONFIG_X86_32
19934 -5: movl %edx,4(%_ASM_CX)
19935 +5: __copyuser_seg movl %edx,4(_DEST)
19936 #endif
19937 xor %eax,%eax
19938 EXIT
19939 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
19940 --- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19941 +++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19942 @@ -43,7 +43,7 @@ do { \
19943 __asm__ __volatile__( \
19944 " testl %1,%1\n" \
19945 " jz 2f\n" \
19946 - "0: lodsb\n" \
19947 + "0: "__copyuser_seg"lodsb\n" \
19948 " stosb\n" \
19949 " testb %%al,%%al\n" \
19950 " jz 1f\n" \
19951 @@ -128,10 +128,12 @@ do { \
19952 int __d0; \
19953 might_fault(); \
19954 __asm__ __volatile__( \
19955 + __COPYUSER_SET_ES \
19956 "0: rep; stosl\n" \
19957 " movl %2,%0\n" \
19958 "1: rep; stosb\n" \
19959 "2:\n" \
19960 + __COPYUSER_RESTORE_ES \
19961 ".section .fixup,\"ax\"\n" \
19962 "3: lea 0(%2,%0,4),%0\n" \
19963 " jmp 2b\n" \
19964 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19965 might_fault();
19966
19967 __asm__ __volatile__(
19968 + __COPYUSER_SET_ES
19969 " testl %0, %0\n"
19970 " jz 3f\n"
19971 " andl %0,%%ecx\n"
19972 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19973 " subl %%ecx,%0\n"
19974 " addl %0,%%eax\n"
19975 "1:\n"
19976 + __COPYUSER_RESTORE_ES
19977 ".section .fixup,\"ax\"\n"
19978 "2: xorl %%eax,%%eax\n"
19979 " jmp 1b\n"
19980 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19981
19982 #ifdef CONFIG_X86_INTEL_USERCOPY
19983 static unsigned long
19984 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19985 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19986 {
19987 int d0, d1;
19988 __asm__ __volatile__(
19989 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19990 " .align 2,0x90\n"
19991 "3: movl 0(%4), %%eax\n"
19992 "4: movl 4(%4), %%edx\n"
19993 - "5: movl %%eax, 0(%3)\n"
19994 - "6: movl %%edx, 4(%3)\n"
19995 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19996 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19997 "7: movl 8(%4), %%eax\n"
19998 "8: movl 12(%4),%%edx\n"
19999 - "9: movl %%eax, 8(%3)\n"
20000 - "10: movl %%edx, 12(%3)\n"
20001 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20002 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20003 "11: movl 16(%4), %%eax\n"
20004 "12: movl 20(%4), %%edx\n"
20005 - "13: movl %%eax, 16(%3)\n"
20006 - "14: movl %%edx, 20(%3)\n"
20007 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20008 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20009 "15: movl 24(%4), %%eax\n"
20010 "16: movl 28(%4), %%edx\n"
20011 - "17: movl %%eax, 24(%3)\n"
20012 - "18: movl %%edx, 28(%3)\n"
20013 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20014 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20015 "19: movl 32(%4), %%eax\n"
20016 "20: movl 36(%4), %%edx\n"
20017 - "21: movl %%eax, 32(%3)\n"
20018 - "22: movl %%edx, 36(%3)\n"
20019 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20020 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20021 "23: movl 40(%4), %%eax\n"
20022 "24: movl 44(%4), %%edx\n"
20023 - "25: movl %%eax, 40(%3)\n"
20024 - "26: movl %%edx, 44(%3)\n"
20025 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20026 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20027 "27: movl 48(%4), %%eax\n"
20028 "28: movl 52(%4), %%edx\n"
20029 - "29: movl %%eax, 48(%3)\n"
20030 - "30: movl %%edx, 52(%3)\n"
20031 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20032 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20033 "31: movl 56(%4), %%eax\n"
20034 "32: movl 60(%4), %%edx\n"
20035 - "33: movl %%eax, 56(%3)\n"
20036 - "34: movl %%edx, 60(%3)\n"
20037 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20038 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20039 " addl $-64, %0\n"
20040 " addl $64, %4\n"
20041 " addl $64, %3\n"
20042 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20043 " shrl $2, %0\n"
20044 " andl $3, %%eax\n"
20045 " cld\n"
20046 + __COPYUSER_SET_ES
20047 "99: rep; movsl\n"
20048 "36: movl %%eax, %0\n"
20049 "37: rep; movsb\n"
20050 "100:\n"
20051 + __COPYUSER_RESTORE_ES
20052 + ".section .fixup,\"ax\"\n"
20053 + "101: lea 0(%%eax,%0,4),%0\n"
20054 + " jmp 100b\n"
20055 + ".previous\n"
20056 + ".section __ex_table,\"a\"\n"
20057 + " .align 4\n"
20058 + " .long 1b,100b\n"
20059 + " .long 2b,100b\n"
20060 + " .long 3b,100b\n"
20061 + " .long 4b,100b\n"
20062 + " .long 5b,100b\n"
20063 + " .long 6b,100b\n"
20064 + " .long 7b,100b\n"
20065 + " .long 8b,100b\n"
20066 + " .long 9b,100b\n"
20067 + " .long 10b,100b\n"
20068 + " .long 11b,100b\n"
20069 + " .long 12b,100b\n"
20070 + " .long 13b,100b\n"
20071 + " .long 14b,100b\n"
20072 + " .long 15b,100b\n"
20073 + " .long 16b,100b\n"
20074 + " .long 17b,100b\n"
20075 + " .long 18b,100b\n"
20076 + " .long 19b,100b\n"
20077 + " .long 20b,100b\n"
20078 + " .long 21b,100b\n"
20079 + " .long 22b,100b\n"
20080 + " .long 23b,100b\n"
20081 + " .long 24b,100b\n"
20082 + " .long 25b,100b\n"
20083 + " .long 26b,100b\n"
20084 + " .long 27b,100b\n"
20085 + " .long 28b,100b\n"
20086 + " .long 29b,100b\n"
20087 + " .long 30b,100b\n"
20088 + " .long 31b,100b\n"
20089 + " .long 32b,100b\n"
20090 + " .long 33b,100b\n"
20091 + " .long 34b,100b\n"
20092 + " .long 35b,100b\n"
20093 + " .long 36b,100b\n"
20094 + " .long 37b,100b\n"
20095 + " .long 99b,101b\n"
20096 + ".previous"
20097 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20098 + : "1"(to), "2"(from), "0"(size)
20099 + : "eax", "edx", "memory");
20100 + return size;
20101 +}
20102 +
20103 +static unsigned long
20104 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20105 +{
20106 + int d0, d1;
20107 + __asm__ __volatile__(
20108 + " .align 2,0x90\n"
20109 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20110 + " cmpl $67, %0\n"
20111 + " jbe 3f\n"
20112 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20113 + " .align 2,0x90\n"
20114 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20115 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20116 + "5: movl %%eax, 0(%3)\n"
20117 + "6: movl %%edx, 4(%3)\n"
20118 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20119 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20120 + "9: movl %%eax, 8(%3)\n"
20121 + "10: movl %%edx, 12(%3)\n"
20122 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20123 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20124 + "13: movl %%eax, 16(%3)\n"
20125 + "14: movl %%edx, 20(%3)\n"
20126 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20127 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20128 + "17: movl %%eax, 24(%3)\n"
20129 + "18: movl %%edx, 28(%3)\n"
20130 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20131 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20132 + "21: movl %%eax, 32(%3)\n"
20133 + "22: movl %%edx, 36(%3)\n"
20134 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20135 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20136 + "25: movl %%eax, 40(%3)\n"
20137 + "26: movl %%edx, 44(%3)\n"
20138 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20139 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20140 + "29: movl %%eax, 48(%3)\n"
20141 + "30: movl %%edx, 52(%3)\n"
20142 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20143 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20144 + "33: movl %%eax, 56(%3)\n"
20145 + "34: movl %%edx, 60(%3)\n"
20146 + " addl $-64, %0\n"
20147 + " addl $64, %4\n"
20148 + " addl $64, %3\n"
20149 + " cmpl $63, %0\n"
20150 + " ja 1b\n"
20151 + "35: movl %0, %%eax\n"
20152 + " shrl $2, %0\n"
20153 + " andl $3, %%eax\n"
20154 + " cld\n"
20155 + "99: rep; "__copyuser_seg" movsl\n"
20156 + "36: movl %%eax, %0\n"
20157 + "37: rep; "__copyuser_seg" movsb\n"
20158 + "100:\n"
20159 ".section .fixup,\"ax\"\n"
20160 "101: lea 0(%%eax,%0,4),%0\n"
20161 " jmp 100b\n"
20162 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20163 int d0, d1;
20164 __asm__ __volatile__(
20165 " .align 2,0x90\n"
20166 - "0: movl 32(%4), %%eax\n"
20167 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20168 " cmpl $67, %0\n"
20169 " jbe 2f\n"
20170 - "1: movl 64(%4), %%eax\n"
20171 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20172 " .align 2,0x90\n"
20173 - "2: movl 0(%4), %%eax\n"
20174 - "21: movl 4(%4), %%edx\n"
20175 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20176 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20177 " movl %%eax, 0(%3)\n"
20178 " movl %%edx, 4(%3)\n"
20179 - "3: movl 8(%4), %%eax\n"
20180 - "31: movl 12(%4),%%edx\n"
20181 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20182 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20183 " movl %%eax, 8(%3)\n"
20184 " movl %%edx, 12(%3)\n"
20185 - "4: movl 16(%4), %%eax\n"
20186 - "41: movl 20(%4), %%edx\n"
20187 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20188 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20189 " movl %%eax, 16(%3)\n"
20190 " movl %%edx, 20(%3)\n"
20191 - "10: movl 24(%4), %%eax\n"
20192 - "51: movl 28(%4), %%edx\n"
20193 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20194 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20195 " movl %%eax, 24(%3)\n"
20196 " movl %%edx, 28(%3)\n"
20197 - "11: movl 32(%4), %%eax\n"
20198 - "61: movl 36(%4), %%edx\n"
20199 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20200 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20201 " movl %%eax, 32(%3)\n"
20202 " movl %%edx, 36(%3)\n"
20203 - "12: movl 40(%4), %%eax\n"
20204 - "71: movl 44(%4), %%edx\n"
20205 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20206 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20207 " movl %%eax, 40(%3)\n"
20208 " movl %%edx, 44(%3)\n"
20209 - "13: movl 48(%4), %%eax\n"
20210 - "81: movl 52(%4), %%edx\n"
20211 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20212 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20213 " movl %%eax, 48(%3)\n"
20214 " movl %%edx, 52(%3)\n"
20215 - "14: movl 56(%4), %%eax\n"
20216 - "91: movl 60(%4), %%edx\n"
20217 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20218 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20219 " movl %%eax, 56(%3)\n"
20220 " movl %%edx, 60(%3)\n"
20221 " addl $-64, %0\n"
20222 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20223 " shrl $2, %0\n"
20224 " andl $3, %%eax\n"
20225 " cld\n"
20226 - "6: rep; movsl\n"
20227 + "6: rep; "__copyuser_seg" movsl\n"
20228 " movl %%eax,%0\n"
20229 - "7: rep; movsb\n"
20230 + "7: rep; "__copyuser_seg" movsb\n"
20231 "8:\n"
20232 ".section .fixup,\"ax\"\n"
20233 "9: lea 0(%%eax,%0,4),%0\n"
20234 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20235
20236 __asm__ __volatile__(
20237 " .align 2,0x90\n"
20238 - "0: movl 32(%4), %%eax\n"
20239 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20240 " cmpl $67, %0\n"
20241 " jbe 2f\n"
20242 - "1: movl 64(%4), %%eax\n"
20243 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20244 " .align 2,0x90\n"
20245 - "2: movl 0(%4), %%eax\n"
20246 - "21: movl 4(%4), %%edx\n"
20247 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20248 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20249 " movnti %%eax, 0(%3)\n"
20250 " movnti %%edx, 4(%3)\n"
20251 - "3: movl 8(%4), %%eax\n"
20252 - "31: movl 12(%4),%%edx\n"
20253 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20254 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20255 " movnti %%eax, 8(%3)\n"
20256 " movnti %%edx, 12(%3)\n"
20257 - "4: movl 16(%4), %%eax\n"
20258 - "41: movl 20(%4), %%edx\n"
20259 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20260 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20261 " movnti %%eax, 16(%3)\n"
20262 " movnti %%edx, 20(%3)\n"
20263 - "10: movl 24(%4), %%eax\n"
20264 - "51: movl 28(%4), %%edx\n"
20265 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20266 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20267 " movnti %%eax, 24(%3)\n"
20268 " movnti %%edx, 28(%3)\n"
20269 - "11: movl 32(%4), %%eax\n"
20270 - "61: movl 36(%4), %%edx\n"
20271 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20272 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20273 " movnti %%eax, 32(%3)\n"
20274 " movnti %%edx, 36(%3)\n"
20275 - "12: movl 40(%4), %%eax\n"
20276 - "71: movl 44(%4), %%edx\n"
20277 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20278 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20279 " movnti %%eax, 40(%3)\n"
20280 " movnti %%edx, 44(%3)\n"
20281 - "13: movl 48(%4), %%eax\n"
20282 - "81: movl 52(%4), %%edx\n"
20283 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20284 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20285 " movnti %%eax, 48(%3)\n"
20286 " movnti %%edx, 52(%3)\n"
20287 - "14: movl 56(%4), %%eax\n"
20288 - "91: movl 60(%4), %%edx\n"
20289 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20290 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20291 " movnti %%eax, 56(%3)\n"
20292 " movnti %%edx, 60(%3)\n"
20293 " addl $-64, %0\n"
20294 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20295 " shrl $2, %0\n"
20296 " andl $3, %%eax\n"
20297 " cld\n"
20298 - "6: rep; movsl\n"
20299 + "6: rep; "__copyuser_seg" movsl\n"
20300 " movl %%eax,%0\n"
20301 - "7: rep; movsb\n"
20302 + "7: rep; "__copyuser_seg" movsb\n"
20303 "8:\n"
20304 ".section .fixup,\"ax\"\n"
20305 "9: lea 0(%%eax,%0,4),%0\n"
20306 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20307
20308 __asm__ __volatile__(
20309 " .align 2,0x90\n"
20310 - "0: movl 32(%4), %%eax\n"
20311 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20312 " cmpl $67, %0\n"
20313 " jbe 2f\n"
20314 - "1: movl 64(%4), %%eax\n"
20315 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20316 " .align 2,0x90\n"
20317 - "2: movl 0(%4), %%eax\n"
20318 - "21: movl 4(%4), %%edx\n"
20319 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20320 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20321 " movnti %%eax, 0(%3)\n"
20322 " movnti %%edx, 4(%3)\n"
20323 - "3: movl 8(%4), %%eax\n"
20324 - "31: movl 12(%4),%%edx\n"
20325 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20326 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20327 " movnti %%eax, 8(%3)\n"
20328 " movnti %%edx, 12(%3)\n"
20329 - "4: movl 16(%4), %%eax\n"
20330 - "41: movl 20(%4), %%edx\n"
20331 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20332 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20333 " movnti %%eax, 16(%3)\n"
20334 " movnti %%edx, 20(%3)\n"
20335 - "10: movl 24(%4), %%eax\n"
20336 - "51: movl 28(%4), %%edx\n"
20337 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20338 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20339 " movnti %%eax, 24(%3)\n"
20340 " movnti %%edx, 28(%3)\n"
20341 - "11: movl 32(%4), %%eax\n"
20342 - "61: movl 36(%4), %%edx\n"
20343 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20344 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20345 " movnti %%eax, 32(%3)\n"
20346 " movnti %%edx, 36(%3)\n"
20347 - "12: movl 40(%4), %%eax\n"
20348 - "71: movl 44(%4), %%edx\n"
20349 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20350 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20351 " movnti %%eax, 40(%3)\n"
20352 " movnti %%edx, 44(%3)\n"
20353 - "13: movl 48(%4), %%eax\n"
20354 - "81: movl 52(%4), %%edx\n"
20355 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20356 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20357 " movnti %%eax, 48(%3)\n"
20358 " movnti %%edx, 52(%3)\n"
20359 - "14: movl 56(%4), %%eax\n"
20360 - "91: movl 60(%4), %%edx\n"
20361 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20362 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20363 " movnti %%eax, 56(%3)\n"
20364 " movnti %%edx, 60(%3)\n"
20365 " addl $-64, %0\n"
20366 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20367 " shrl $2, %0\n"
20368 " andl $3, %%eax\n"
20369 " cld\n"
20370 - "6: rep; movsl\n"
20371 + "6: rep; "__copyuser_seg" movsl\n"
20372 " movl %%eax,%0\n"
20373 - "7: rep; movsb\n"
20374 + "7: rep; "__copyuser_seg" movsb\n"
20375 "8:\n"
20376 ".section .fixup,\"ax\"\n"
20377 "9: lea 0(%%eax,%0,4),%0\n"
20378 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20379 */
20380 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20381 unsigned long size);
20382 -unsigned long __copy_user_intel(void __user *to, const void *from,
20383 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20384 + unsigned long size);
20385 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20386 unsigned long size);
20387 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20388 const void __user *from, unsigned long size);
20389 #endif /* CONFIG_X86_INTEL_USERCOPY */
20390
20391 /* Generic arbitrary sized copy. */
20392 -#define __copy_user(to, from, size) \
20393 +#define __copy_user(to, from, size, prefix, set, restore) \
20394 do { \
20395 int __d0, __d1, __d2; \
20396 __asm__ __volatile__( \
20397 + set \
20398 " cmp $7,%0\n" \
20399 " jbe 1f\n" \
20400 " movl %1,%0\n" \
20401 " negl %0\n" \
20402 " andl $7,%0\n" \
20403 " subl %0,%3\n" \
20404 - "4: rep; movsb\n" \
20405 + "4: rep; "prefix"movsb\n" \
20406 " movl %3,%0\n" \
20407 " shrl $2,%0\n" \
20408 " andl $3,%3\n" \
20409 " .align 2,0x90\n" \
20410 - "0: rep; movsl\n" \
20411 + "0: rep; "prefix"movsl\n" \
20412 " movl %3,%0\n" \
20413 - "1: rep; movsb\n" \
20414 + "1: rep; "prefix"movsb\n" \
20415 "2:\n" \
20416 + restore \
20417 ".section .fixup,\"ax\"\n" \
20418 "5: addl %3,%0\n" \
20419 " jmp 2b\n" \
20420 @@ -682,14 +799,14 @@ do { \
20421 " negl %0\n" \
20422 " andl $7,%0\n" \
20423 " subl %0,%3\n" \
20424 - "4: rep; movsb\n" \
20425 + "4: rep; "__copyuser_seg"movsb\n" \
20426 " movl %3,%0\n" \
20427 " shrl $2,%0\n" \
20428 " andl $3,%3\n" \
20429 " .align 2,0x90\n" \
20430 - "0: rep; movsl\n" \
20431 + "0: rep; "__copyuser_seg"movsl\n" \
20432 " movl %3,%0\n" \
20433 - "1: rep; movsb\n" \
20434 + "1: rep; "__copyuser_seg"movsb\n" \
20435 "2:\n" \
20436 ".section .fixup,\"ax\"\n" \
20437 "5: addl %3,%0\n" \
20438 @@ -775,9 +892,9 @@ survive:
20439 }
20440 #endif
20441 if (movsl_is_ok(to, from, n))
20442 - __copy_user(to, from, n);
20443 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20444 else
20445 - n = __copy_user_intel(to, from, n);
20446 + n = __generic_copy_to_user_intel(to, from, n);
20447 return n;
20448 }
20449 EXPORT_SYMBOL(__copy_to_user_ll);
20450 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20451 unsigned long n)
20452 {
20453 if (movsl_is_ok(to, from, n))
20454 - __copy_user(to, from, n);
20455 + __copy_user(to, from, n, __copyuser_seg, "", "");
20456 else
20457 - n = __copy_user_intel((void __user *)to,
20458 - (const void *)from, n);
20459 + n = __generic_copy_from_user_intel(to, from, n);
20460 return n;
20461 }
20462 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20463 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20464 if (n > 64 && cpu_has_xmm2)
20465 n = __copy_user_intel_nocache(to, from, n);
20466 else
20467 - __copy_user(to, from, n);
20468 + __copy_user(to, from, n, __copyuser_seg, "", "");
20469 #else
20470 - __copy_user(to, from, n);
20471 + __copy_user(to, from, n, __copyuser_seg, "", "");
20472 #endif
20473 return n;
20474 }
20475 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20476
20477 -/**
20478 - * copy_to_user: - Copy a block of data into user space.
20479 - * @to: Destination address, in user space.
20480 - * @from: Source address, in kernel space.
20481 - * @n: Number of bytes to copy.
20482 - *
20483 - * Context: User context only. This function may sleep.
20484 - *
20485 - * Copy data from kernel space to user space.
20486 - *
20487 - * Returns number of bytes that could not be copied.
20488 - * On success, this will be zero.
20489 - */
20490 -unsigned long
20491 -copy_to_user(void __user *to, const void *from, unsigned long n)
20492 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20493 +void __set_fs(mm_segment_t x)
20494 {
20495 - if (access_ok(VERIFY_WRITE, to, n))
20496 - n = __copy_to_user(to, from, n);
20497 - return n;
20498 + switch (x.seg) {
20499 + case 0:
20500 + loadsegment(gs, 0);
20501 + break;
20502 + case TASK_SIZE_MAX:
20503 + loadsegment(gs, __USER_DS);
20504 + break;
20505 + case -1UL:
20506 + loadsegment(gs, __KERNEL_DS);
20507 + break;
20508 + default:
20509 + BUG();
20510 + }
20511 + return;
20512 }
20513 -EXPORT_SYMBOL(copy_to_user);
20514 +EXPORT_SYMBOL(__set_fs);
20515
20516 -/**
20517 - * copy_from_user: - Copy a block of data from user space.
20518 - * @to: Destination address, in kernel space.
20519 - * @from: Source address, in user space.
20520 - * @n: Number of bytes to copy.
20521 - *
20522 - * Context: User context only. This function may sleep.
20523 - *
20524 - * Copy data from user space to kernel space.
20525 - *
20526 - * Returns number of bytes that could not be copied.
20527 - * On success, this will be zero.
20528 - *
20529 - * If some data could not be copied, this function will pad the copied
20530 - * data to the requested size using zero bytes.
20531 - */
20532 -unsigned long
20533 -copy_from_user(void *to, const void __user *from, unsigned long n)
20534 +void set_fs(mm_segment_t x)
20535 {
20536 - if (access_ok(VERIFY_READ, from, n))
20537 - n = __copy_from_user(to, from, n);
20538 - else
20539 - memset(to, 0, n);
20540 - return n;
20541 + current_thread_info()->addr_limit = x;
20542 + __set_fs(x);
20543 }
20544 -EXPORT_SYMBOL(copy_from_user);
20545 +EXPORT_SYMBOL(set_fs);
20546 +#endif
20547 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20548 --- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20549 +++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20550 @@ -42,6 +42,12 @@ long
20551 __strncpy_from_user(char *dst, const char __user *src, long count)
20552 {
20553 long res;
20554 +
20555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20556 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20557 + src += PAX_USER_SHADOW_BASE;
20558 +#endif
20559 +
20560 __do_strncpy_from_user(dst, src, count, res);
20561 return res;
20562 }
20563 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20564 {
20565 long __d0;
20566 might_fault();
20567 +
20568 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20569 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20570 + addr += PAX_USER_SHADOW_BASE;
20571 +#endif
20572 +
20573 /* no memory constraint because it doesn't change any memory gcc knows
20574 about */
20575 asm volatile(
20576 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20577
20578 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20579 {
20580 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20581 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20582 +
20583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20584 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20585 + to += PAX_USER_SHADOW_BASE;
20586 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20587 + from += PAX_USER_SHADOW_BASE;
20588 +#endif
20589 +
20590 return copy_user_generic((__force void *)to, (__force void *)from, len);
20591 - }
20592 - return len;
20593 + }
20594 + return len;
20595 }
20596 EXPORT_SYMBOL(copy_in_user);
20597
20598 diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20599 --- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20600 +++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20601 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20602 else
20603 BITS := 64
20604 UTS_MACHINE := x86_64
20605 + biarch := $(call cc-option,-m64)
20606 CHECKFLAGS += -D__x86_64__ -m64
20607
20608 KBUILD_AFLAGS += -m64
20609 @@ -189,3 +190,12 @@ define archhelp
20610 echo ' FDARGS="..." arguments for the booted kernel'
20611 echo ' FDINITRD=file initrd for the booted kernel'
20612 endef
20613 +
20614 +define OLD_LD
20615 +
20616 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20617 +*** Please upgrade your binutils to 2.18 or newer
20618 +endef
20619 +
20620 +archprepare:
20621 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20622 diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20623 --- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20624 +++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20625 @@ -1,14 +1,71 @@
20626 #include <linux/module.h>
20627 #include <linux/spinlock.h>
20628 +#include <linux/sort.h>
20629 #include <asm/uaccess.h>
20630 +#include <asm/pgtable.h>
20631
20632 +/*
20633 + * The exception table needs to be sorted so that the binary
20634 + * search that we use to find entries in it works properly.
20635 + * This is used both for the kernel exception table and for
20636 + * the exception tables of modules that get loaded.
20637 + */
20638 +static int cmp_ex(const void *a, const void *b)
20639 +{
20640 + const struct exception_table_entry *x = a, *y = b;
20641 +
20642 + /* avoid overflow */
20643 + if (x->insn > y->insn)
20644 + return 1;
20645 + if (x->insn < y->insn)
20646 + return -1;
20647 + return 0;
20648 +}
20649 +
20650 +static void swap_ex(void *a, void *b, int size)
20651 +{
20652 + struct exception_table_entry t, *x = a, *y = b;
20653 +
20654 + t = *x;
20655 +
20656 + pax_open_kernel();
20657 + *x = *y;
20658 + *y = t;
20659 + pax_close_kernel();
20660 +}
20661 +
20662 +void sort_extable(struct exception_table_entry *start,
20663 + struct exception_table_entry *finish)
20664 +{
20665 + sort(start, finish - start, sizeof(struct exception_table_entry),
20666 + cmp_ex, swap_ex);
20667 +}
20668 +
20669 +#ifdef CONFIG_MODULES
20670 +/*
20671 + * If the exception table is sorted, any referring to the module init
20672 + * will be at the beginning or the end.
20673 + */
20674 +void trim_init_extable(struct module *m)
20675 +{
20676 + /*trim the beginning*/
20677 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20678 + m->extable++;
20679 + m->num_exentries--;
20680 + }
20681 + /*trim the end*/
20682 + while (m->num_exentries &&
20683 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20684 + m->num_exentries--;
20685 +}
20686 +#endif /* CONFIG_MODULES */
20687
20688 int fixup_exception(struct pt_regs *regs)
20689 {
20690 const struct exception_table_entry *fixup;
20691
20692 #ifdef CONFIG_PNPBIOS
20693 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20694 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20695 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20696 extern u32 pnp_bios_is_utter_crap;
20697 pnp_bios_is_utter_crap = 1;
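
The extable.c hunk above adds an x86-local sort_extable()/trim_init_extable() so the binary search over the exception table stays valid, with the element swap bracketed by pax_open_kernel()/pax_close_kernel(), presumably because the table may sit in read-only memory under KERNEXEC. Its comparator deliberately compares rather than returning x->insn - y->insn, since subtracting two large addresses would not fit the int return value. A standalone sketch of the same idiom, using libc qsort() in place of the kernel sort helper and made-up entries:

#include <stdio.h>
#include <stdlib.h>

struct ex_entry { unsigned long insn, fixup; };

/* Compare instead of subtract: the difference of two addresses can overflow int. */
static int cmp_ex(const void *a, const void *b)
{
        const struct ex_entry *x = a, *y = b;

        if (x->insn > y->insn)
                return 1;
        if (x->insn < y->insn)
                return -1;
        return 0;
}

int main(void)
{
        struct ex_entry table[] = {
                { 0xffffffff81030000UL, 3 },
                { 0xffffffff81010000UL, 1 },
                { 0xffffffff81020000UL, 2 },
        };
        size_t i, n = sizeof(table) / sizeof(table[0]);

        qsort(table, n, sizeof(table[0]), cmp_ex);
        for (i = 0; i < n; i++)
                printf("%#lx -> %lu\n", table[i].insn, table[i].fixup);
        return 0;
}
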
20698 diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20699 --- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20700 +++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20701 @@ -11,10 +11,19 @@
20702 #include <linux/kprobes.h> /* __kprobes, ... */
20703 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20704 #include <linux/perf_event.h> /* perf_sw_event */
20705 +#include <linux/unistd.h>
20706 +#include <linux/compiler.h>
20707
20708 #include <asm/traps.h> /* dotraplinkage, ... */
20709 #include <asm/pgalloc.h> /* pgd_*(), ... */
20710 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20711 +#include <asm/vsyscall.h>
20712 +#include <asm/tlbflush.h>
20713 +
20714 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20715 +#include <asm/stacktrace.h>
20716 +#include "../kernel/dumpstack.h"
20717 +#endif
20718
20719 /*
20720 * Page fault error code bits:
20721 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20722 int ret = 0;
20723
20724 /* kprobe_running() needs smp_processor_id() */
20725 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20726 + if (kprobes_built_in() && !user_mode(regs)) {
20727 preempt_disable();
20728 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20729 ret = 1;
20730 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20731 return !instr_lo || (instr_lo>>1) == 1;
20732 case 0x00:
20733 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20734 - if (probe_kernel_address(instr, opcode))
20735 + if (user_mode(regs)) {
20736 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20737 + return 0;
20738 + } else if (probe_kernel_address(instr, opcode))
20739 return 0;
20740
20741 *prefetch = (instr_lo == 0xF) &&
20742 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20743 while (instr < max_instr) {
20744 unsigned char opcode;
20745
20746 - if (probe_kernel_address(instr, opcode))
20747 + if (user_mode(regs)) {
20748 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20749 + break;
20750 + } else if (probe_kernel_address(instr, opcode))
20751 break;
20752
20753 instr++;
20754 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20755 force_sig_info(si_signo, &info, tsk);
20756 }
20757
20758 +#ifdef CONFIG_PAX_EMUTRAMP
20759 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20760 +#endif
20761 +
20762 +#ifdef CONFIG_PAX_PAGEEXEC
20763 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20764 +{
20765 + pgd_t *pgd;
20766 + pud_t *pud;
20767 + pmd_t *pmd;
20768 +
20769 + pgd = pgd_offset(mm, address);
20770 + if (!pgd_present(*pgd))
20771 + return NULL;
20772 + pud = pud_offset(pgd, address);
20773 + if (!pud_present(*pud))
20774 + return NULL;
20775 + pmd = pmd_offset(pud, address);
20776 + if (!pmd_present(*pmd))
20777 + return NULL;
20778 + return pmd;
20779 +}
20780 +#endif
20781 +
20782 DEFINE_SPINLOCK(pgd_lock);
20783 LIST_HEAD(pgd_list);
20784
20785 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20786 address += PMD_SIZE) {
20787
20788 unsigned long flags;
20789 +
20790 +#ifdef CONFIG_PAX_PER_CPU_PGD
20791 + unsigned long cpu;
20792 +#else
20793 struct page *page;
20794 +#endif
20795
20796 spin_lock_irqsave(&pgd_lock, flags);
20797 +
20798 +#ifdef CONFIG_PAX_PER_CPU_PGD
20799 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20800 + pgd_t *pgd = get_cpu_pgd(cpu);
20801 +#else
20802 list_for_each_entry(page, &pgd_list, lru) {
20803 - if (!vmalloc_sync_one(page_address(page), address))
20804 + pgd_t *pgd = page_address(page);
20805 +#endif
20806 +
20807 + if (!vmalloc_sync_one(pgd, address))
20808 break;
20809 }
20810 spin_unlock_irqrestore(&pgd_lock, flags);
20811 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20812 * an interrupt in the middle of a task switch..
20813 */
20814 pgd_paddr = read_cr3();
20815 +
20816 +#ifdef CONFIG_PAX_PER_CPU_PGD
20817 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20818 +#endif
20819 +
20820 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20821 if (!pmd_k)
20822 return -1;
20823 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20824
20825 const pgd_t *pgd_ref = pgd_offset_k(address);
20826 unsigned long flags;
20827 +
20828 +#ifdef CONFIG_PAX_PER_CPU_PGD
20829 + unsigned long cpu;
20830 +#else
20831 struct page *page;
20832 +#endif
20833
20834 if (pgd_none(*pgd_ref))
20835 continue;
20836
20837 spin_lock_irqsave(&pgd_lock, flags);
20838 +
20839 +#ifdef CONFIG_PAX_PER_CPU_PGD
20840 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20841 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20842 +#else
20843 list_for_each_entry(page, &pgd_list, lru) {
20844 pgd_t *pgd;
20845 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20846 +#endif
20847 +
20848 if (pgd_none(*pgd))
20849 set_pgd(pgd, *pgd_ref);
20850 else
20851 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20852 * happen within a race in page table update. In the later
20853 * case just flush:
20854 */
20855 +
20856 +#ifdef CONFIG_PAX_PER_CPU_PGD
20857 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20858 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20859 +#else
20860 pgd = pgd_offset(current->active_mm, address);
20861 +#endif
20862 +
20863 pgd_ref = pgd_offset_k(address);
20864 if (pgd_none(*pgd_ref))
20865 return -1;
20866 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20867 static int is_errata100(struct pt_regs *regs, unsigned long address)
20868 {
20869 #ifdef CONFIG_X86_64
20870 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20871 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20872 return 1;
20873 #endif
20874 return 0;
20875 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20876 }
20877
20878 static const char nx_warning[] = KERN_CRIT
20879 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20880 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20881
20882 static void
20883 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20884 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20885 if (!oops_may_print())
20886 return;
20887
20888 - if (error_code & PF_INSTR) {
20889 + if (nx_enabled && (error_code & PF_INSTR)) {
20890 unsigned int level;
20891
20892 pte_t *pte = lookup_address(address, &level);
20893
20894 if (pte && pte_present(*pte) && !pte_exec(*pte))
20895 - printk(nx_warning, current_uid());
20896 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20897 }
20898
20899 +#ifdef CONFIG_PAX_KERNEXEC
20900 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20901 + if (current->signal->curr_ip)
20902 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20903 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20904 + else
20905 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20906 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20907 + }
20908 +#endif
20909 +
20910 printk(KERN_ALERT "BUG: unable to handle kernel ");
20911 if (address < PAGE_SIZE)
20912 printk(KERN_CONT "NULL pointer dereference");
20913 @@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
20914 unsigned long address, int si_code)
20915 {
20916 struct task_struct *tsk = current;
20917 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20918 + struct mm_struct *mm = tsk->mm;
20919 +#endif
20920 +
20921 +#ifdef CONFIG_X86_64
20922 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20923 + if (regs->ip == (unsigned long)vgettimeofday) {
20924 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20925 + return;
20926 + } else if (regs->ip == (unsigned long)vtime) {
20927 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20928 + return;
20929 + } else if (regs->ip == (unsigned long)vgetcpu) {
20930 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20931 + return;
20932 + }
20933 + }
20934 +#endif
20935 +
20936 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20937 + if (mm && (error_code & PF_USER)) {
20938 + unsigned long ip = regs->ip;
20939 +
20940 + if (v8086_mode(regs))
20941 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20942 +
20943 + /*
20944 + * It's possible to have interrupts off here:
20945 + */
20946 + local_irq_enable();
20947 +
20948 +#ifdef CONFIG_PAX_PAGEEXEC
20949 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20950 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20951 +
20952 +#ifdef CONFIG_PAX_EMUTRAMP
20953 + switch (pax_handle_fetch_fault(regs)) {
20954 + case 2:
20955 + return;
20956 + }
20957 +#endif
20958 +
20959 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20960 + do_group_exit(SIGKILL);
20961 + }
20962 +#endif
20963 +
20964 +#ifdef CONFIG_PAX_SEGMEXEC
20965 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20966 +
20967 +#ifdef CONFIG_PAX_EMUTRAMP
20968 + switch (pax_handle_fetch_fault(regs)) {
20969 + case 2:
20970 + return;
20971 + }
20972 +#endif
20973 +
20974 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20975 + do_group_exit(SIGKILL);
20976 + }
20977 +#endif
20978 +
20979 + }
20980 +#endif
20981
20982 /* User mode accesses just cause a SIGSEGV */
20983 if (error_code & PF_USER) {
20984 @@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
20985 return 1;
20986 }
20987
20988 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20989 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20990 +{
20991 + pte_t *pte;
20992 + pmd_t *pmd;
20993 + spinlock_t *ptl;
20994 + unsigned char pte_mask;
20995 +
20996 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20997 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20998 + return 0;
20999 +
21000 + /* PaX: it's our fault, let's handle it if we can */
21001 +
21002 + /* PaX: take a look at read faults before acquiring any locks */
21003 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21004 + /* instruction fetch attempt from a protected page in user mode */
21005 + up_read(&mm->mmap_sem);
21006 +
21007 +#ifdef CONFIG_PAX_EMUTRAMP
21008 + switch (pax_handle_fetch_fault(regs)) {
21009 + case 2:
21010 + return 1;
21011 + }
21012 +#endif
21013 +
21014 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21015 + do_group_exit(SIGKILL);
21016 + }
21017 +
21018 + pmd = pax_get_pmd(mm, address);
21019 + if (unlikely(!pmd))
21020 + return 0;
21021 +
21022 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21023 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21024 + pte_unmap_unlock(pte, ptl);
21025 + return 0;
21026 + }
21027 +
21028 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21029 + /* write attempt to a protected page in user mode */
21030 + pte_unmap_unlock(pte, ptl);
21031 + return 0;
21032 + }
21033 +
21034 +#ifdef CONFIG_SMP
21035 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21036 +#else
21037 + if (likely(address > get_limit(regs->cs)))
21038 +#endif
21039 + {
21040 + set_pte(pte, pte_mkread(*pte));
21041 + __flush_tlb_one(address);
21042 + pte_unmap_unlock(pte, ptl);
21043 + up_read(&mm->mmap_sem);
21044 + return 1;
21045 + }
21046 +
21047 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21048 +
21049 + /*
21050 + * PaX: fill DTLB with user rights and retry
21051 + */
21052 + __asm__ __volatile__ (
21053 + "orb %2,(%1)\n"
21054 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21055 +/*
21056 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21057 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21058 + * page fault when examined during a TLB load attempt. this is true not only
21059 + * for PTEs holding a non-present entry but also present entries that will
21060 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21061 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21062 + * for our target pages since their PTEs are simply not in the TLBs at all.
21063 +
21064 + * the best thing in omitting it is that we gain around 15-20% speed in the
21065 + * fast path of the page fault handler and can get rid of tracing since we
21066 + * can no longer flush unintended entries.
21067 + */
21068 + "invlpg (%0)\n"
21069 +#endif
21070 + __copyuser_seg"testb $0,(%0)\n"
21071 + "xorb %3,(%1)\n"
21072 + :
21073 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21074 + : "memory", "cc");
21075 + pte_unmap_unlock(pte, ptl);
21076 + up_read(&mm->mmap_sem);
21077 + return 1;
21078 +}
21079 +#endif
21080 +
21081 /*
21082 * Handle a spurious fault caused by a stale TLB entry.
21083 *
21084 @@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21085 static inline int
21086 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21087 {
21088 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21089 + return 1;
21090 +
21091 if (write) {
21092 /* write, present and write, not present: */
21093 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21094 @@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21095 {
21096 struct vm_area_struct *vma;
21097 struct task_struct *tsk;
21098 - unsigned long address;
21099 struct mm_struct *mm;
21100 int write;
21101 int fault;
21102
21103 + /* Get the faulting address: */
21104 + unsigned long address = read_cr2();
21105 +
21106 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21107 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21108 + if (!search_exception_tables(regs->ip)) {
21109 + bad_area_nosemaphore(regs, error_code, address);
21110 + return;
21111 + }
21112 + if (address < PAX_USER_SHADOW_BASE) {
21113 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21114 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21115 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21116 + } else
21117 + address -= PAX_USER_SHADOW_BASE;
21118 + }
21119 +#endif
21120 +
21121 tsk = current;
21122 mm = tsk->mm;
21123
21124 - /* Get the faulting address: */
21125 - address = read_cr2();
21126 -
21127 /*
21128 * Detect and handle instructions that would cause a page fault for
21129 * both a tracked kernel page and a userspace page.
21130 @@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21131 * User-mode registers count as a user access even for any
21132 * potential system fault or CPU buglet:
21133 */
21134 - if (user_mode_vm(regs)) {
21135 + if (user_mode(regs)) {
21136 local_irq_enable();
21137 error_code |= PF_USER;
21138 } else {
21139 @@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21140 might_sleep();
21141 }
21142
21143 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21144 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21145 + return;
21146 +#endif
21147 +
21148 vma = find_vma(mm, address);
21149 if (unlikely(!vma)) {
21150 bad_area(regs, error_code, address);
21151 @@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21152 bad_area(regs, error_code, address);
21153 return;
21154 }
21155 - if (error_code & PF_USER) {
21156 - /*
21157 - * Accessing the stack below %sp is always a bug.
21158 - * The large cushion allows instructions like enter
21159 - * and pusha to work. ("enter $65535, $31" pushes
21160 - * 32 pointers and then decrements %sp by 65535.)
21161 - */
21162 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21163 - bad_area(regs, error_code, address);
21164 - return;
21165 - }
21166 + /*
21167 + * Accessing the stack below %sp is always a bug.
21168 + * The large cushion allows instructions like enter
21169 + * and pusha to work. ("enter $65535, $31" pushes
21170 + * 32 pointers and then decrements %sp by 65535.)
21171 + */
21172 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21173 + bad_area(regs, error_code, address);
21174 + return;
21175 }
21176 +
21177 +#ifdef CONFIG_PAX_SEGMEXEC
21178 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21179 + bad_area(regs, error_code, address);
21180 + return;
21181 + }
21182 +#endif
21183 +
21184 if (unlikely(expand_stack(vma, address))) {
21185 bad_area(regs, error_code, address);
21186 return;
21187 @@ -1146,3 +1418,199 @@ good_area:
21188
21189 up_read(&mm->mmap_sem);
21190 }
21191 +
21192 +#ifdef CONFIG_PAX_EMUTRAMP
21193 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21194 +{
21195 + int err;
21196 +
21197 + do { /* PaX: gcc trampoline emulation #1 */
21198 + unsigned char mov1, mov2;
21199 + unsigned short jmp;
21200 + unsigned int addr1, addr2;
21201 +
21202 +#ifdef CONFIG_X86_64
21203 + if ((regs->ip + 11) >> 32)
21204 + break;
21205 +#endif
21206 +
21207 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21208 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21209 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21210 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21211 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21212 +
21213 + if (err)
21214 + break;
21215 +
21216 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21217 + regs->cx = addr1;
21218 + regs->ax = addr2;
21219 + regs->ip = addr2;
21220 + return 2;
21221 + }
21222 + } while (0);
21223 +
21224 + do { /* PaX: gcc trampoline emulation #2 */
21225 + unsigned char mov, jmp;
21226 + unsigned int addr1, addr2;
21227 +
21228 +#ifdef CONFIG_X86_64
21229 + if ((regs->ip + 9) >> 32)
21230 + break;
21231 +#endif
21232 +
21233 + err = get_user(mov, (unsigned char __user *)regs->ip);
21234 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21235 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21236 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21237 +
21238 + if (err)
21239 + break;
21240 +
21241 + if (mov == 0xB9 && jmp == 0xE9) {
21242 + regs->cx = addr1;
21243 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21244 + return 2;
21245 + }
21246 + } while (0);
21247 +
21248 + return 1; /* PaX in action */
21249 +}
21250 +
21251 +#ifdef CONFIG_X86_64
21252 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21253 +{
21254 + int err;
21255 +
21256 + do { /* PaX: gcc trampoline emulation #1 */
21257 + unsigned short mov1, mov2, jmp1;
21258 + unsigned char jmp2;
21259 + unsigned int addr1;
21260 + unsigned long addr2;
21261 +
21262 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21263 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21264 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21265 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21266 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21267 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21268 +
21269 + if (err)
21270 + break;
21271 +
21272 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21273 + regs->r11 = addr1;
21274 + regs->r10 = addr2;
21275 + regs->ip = addr1;
21276 + return 2;
21277 + }
21278 + } while (0);
21279 +
21280 + do { /* PaX: gcc trampoline emulation #2 */
21281 + unsigned short mov1, mov2, jmp1;
21282 + unsigned char jmp2;
21283 + unsigned long addr1, addr2;
21284 +
21285 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21286 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21287 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21288 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21289 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21290 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21291 +
21292 + if (err)
21293 + break;
21294 +
21295 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21296 + regs->r11 = addr1;
21297 + regs->r10 = addr2;
21298 + regs->ip = addr1;
21299 + return 2;
21300 + }
21301 + } while (0);
21302 +
21303 + return 1; /* PaX in action */
21304 +}
21305 +#endif
21306 +
21307 +/*
21308 + * PaX: decide what to do with offenders (regs->ip = fault address)
21309 + *
21310 + * returns 1 when task should be killed
21311 + * 2 when gcc trampoline was detected
21312 + */
21313 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21314 +{
21315 + if (v8086_mode(regs))
21316 + return 1;
21317 +
21318 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21319 + return 1;
21320 +
21321 +#ifdef CONFIG_X86_32
21322 + return pax_handle_fetch_fault_32(regs);
21323 +#else
21324 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21325 + return pax_handle_fetch_fault_32(regs);
21326 + else
21327 + return pax_handle_fetch_fault_64(regs);
21328 +#endif
21329 +}
21330 +#endif
21331 +
21332 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21333 +void pax_report_insns(void *pc, void *sp)
21334 +{
21335 + long i;
21336 +
21337 + printk(KERN_ERR "PAX: bytes at PC: ");
21338 + for (i = 0; i < 20; i++) {
21339 + unsigned char c;
21340 + if (get_user(c, (__force unsigned char __user *)pc+i))
21341 + printk(KERN_CONT "?? ");
21342 + else
21343 + printk(KERN_CONT "%02x ", c);
21344 + }
21345 + printk("\n");
21346 +
21347 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21348 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21349 + unsigned long c;
21350 + if (get_user(c, (__force unsigned long __user *)sp+i))
21351 +#ifdef CONFIG_X86_32
21352 + printk(KERN_CONT "???????? ");
21353 +#else
21354 + printk(KERN_CONT "???????????????? ");
21355 +#endif
21356 + else
21357 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21358 + }
21359 + printk("\n");
21360 +}
21361 +#endif
21362 +
21363 +/**
21364 + * probe_kernel_write(): safely attempt to write to a location
21365 + * @dst: address to write to
21366 + * @src: pointer to the data that shall be written
21367 + * @size: size of the data chunk
21368 + *
21369 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21370 + * happens, handle that and return -EFAULT.
21371 + */
21372 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21373 +{
21374 + long ret;
21375 + mm_segment_t old_fs = get_fs();
21376 +
21377 + set_fs(KERNEL_DS);
21378 + pagefault_disable();
21379 + pax_open_kernel();
21380 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21381 + pax_close_kernel();
21382 + pagefault_enable();
21383 + set_fs(old_fs);
21384 +
21385 + return ret ? -EFAULT : 0;
21386 +}
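
Most of the fault.c additions above are PaX policy: PAGEEXEC/SEGMEXEC violation reporting, per-CPU-PGD variants of the vmalloc syncing, the UDEREF shadow-address handling in do_page_fault(), and CONFIG_PAX_EMUTRAMP, which inspects the bytes at the faulting instruction pointer and emulates known gcc nested-function trampolines instead of killing the task. The first 32-bit pattern it accepts is mov $imm32,%ecx; mov $imm32,%eax; jmp *%eax (bytes B9 .. B8 .. FF E0). A userspace sketch of just that pattern match; decode_tramp() is a hypothetical helper over a plain byte buffer, and the unaligned little-endian reads match x86:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int decode_tramp(const uint8_t *ip, uint32_t *ecx, uint32_t *eax)
{
        uint16_t jmp;

        if (ip[0] != 0xB9 || ip[5] != 0xB8)     /* mov imm32,%ecx ; mov imm32,%eax */
                return 0;
        memcpy(&jmp, ip + 10, sizeof(jmp));     /* little-endian, as on x86 */
        if (jmp != 0xE0FF)                      /* bytes FF E0 == jmp *%eax */
                return 0;
        memcpy(ecx, ip + 1, sizeof(*ecx));
        memcpy(eax, ip + 6, sizeof(*eax));
        return 1;                               /* emulate: load regs, resume at eax */
}

int main(void)
{
        uint8_t tramp[12] = { 0xB9, 0x78, 0x56, 0x34, 0x12,
                              0xB8, 0xEF, 0xBE, 0xAD, 0xDE,
                              0xFF, 0xE0 };
        uint32_t ecx, eax;

        if (decode_tramp(tramp, &ecx, &eax))
                printf("ecx=%#x  new ip=%#x\n", (unsigned)ecx, (unsigned)eax);
        return 0;
}
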
21387 diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21388 --- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21389 +++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21390 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21391 addr = start;
21392 len = (unsigned long) nr_pages << PAGE_SHIFT;
21393 end = start + len;
21394 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21395 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21396 (void __user *)start, len)))
21397 return 0;
21398
21399 diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21400 --- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21401 +++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21402 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21403 idx = type + KM_TYPE_NR*smp_processor_id();
21404 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21405 BUG_ON(!pte_none(*(kmap_pte-idx)));
21406 +
21407 + pax_open_kernel();
21408 set_pte(kmap_pte-idx, mk_pte(page, prot));
21409 + pax_close_kernel();
21410
21411 return (void *)vaddr;
21412 }
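
Several hunks in this section (highmem_32.c just above, and iomap_32.c and init_64.c further down) wrap an individual page-table write in pax_open_kernel()/pax_close_kernel(), i.e. briefly permit the write and immediately re-lock. The kernel-side mechanism is PaX-specific and not shown in this section; the userspace analogy below, using mprotect() on an otherwise read-only page, only illustrates the unlock-write-relock bracket:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "initial");
        mprotect(p, len, PROT_READ);               /* normal state: read-only   */

        mprotect(p, len, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()"       */
        strcpy(p, "updated");                      /* the one permitted write   */
        mprotect(p, len, PROT_READ);               /* "pax_close_kernel()"      */

        puts(p);
        munmap(p, len);
        return 0;
}
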
21413 diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21414 --- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21415 +++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21416 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21417 struct hstate *h = hstate_file(file);
21418 struct mm_struct *mm = current->mm;
21419 struct vm_area_struct *vma;
21420 - unsigned long start_addr;
21421 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21422 +
21423 +#ifdef CONFIG_PAX_SEGMEXEC
21424 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21425 + pax_task_size = SEGMEXEC_TASK_SIZE;
21426 +#endif
21427 +
21428 + pax_task_size -= PAGE_SIZE;
21429
21430 if (len > mm->cached_hole_size) {
21431 - start_addr = mm->free_area_cache;
21432 + start_addr = mm->free_area_cache;
21433 } else {
21434 - start_addr = TASK_UNMAPPED_BASE;
21435 - mm->cached_hole_size = 0;
21436 + start_addr = mm->mmap_base;
21437 + mm->cached_hole_size = 0;
21438 }
21439
21440 full_search:
21441 @@ -281,26 +288,27 @@ full_search:
21442
21443 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21444 /* At this point: (!vma || addr < vma->vm_end). */
21445 - if (TASK_SIZE - len < addr) {
21446 + if (pax_task_size - len < addr) {
21447 /*
21448 * Start a new search - just in case we missed
21449 * some holes.
21450 */
21451 - if (start_addr != TASK_UNMAPPED_BASE) {
21452 - start_addr = TASK_UNMAPPED_BASE;
21453 + if (start_addr != mm->mmap_base) {
21454 + start_addr = mm->mmap_base;
21455 mm->cached_hole_size = 0;
21456 goto full_search;
21457 }
21458 return -ENOMEM;
21459 }
21460 - if (!vma || addr + len <= vma->vm_start) {
21461 - mm->free_area_cache = addr + len;
21462 - return addr;
21463 - }
21464 + if (check_heap_stack_gap(vma, addr, len))
21465 + break;
21466 if (addr + mm->cached_hole_size < vma->vm_start)
21467 mm->cached_hole_size = vma->vm_start - addr;
21468 addr = ALIGN(vma->vm_end, huge_page_size(h));
21469 }
21470 +
21471 + mm->free_area_cache = addr + len;
21472 + return addr;
21473 }
21474
21475 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21476 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21477 {
21478 struct hstate *h = hstate_file(file);
21479 struct mm_struct *mm = current->mm;
21480 - struct vm_area_struct *vma, *prev_vma;
21481 - unsigned long base = mm->mmap_base, addr = addr0;
21482 + struct vm_area_struct *vma;
21483 + unsigned long base = mm->mmap_base, addr;
21484 unsigned long largest_hole = mm->cached_hole_size;
21485 - int first_time = 1;
21486
21487 /* don't allow allocations above current base */
21488 if (mm->free_area_cache > base)
21489 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21490 largest_hole = 0;
21491 mm->free_area_cache = base;
21492 }
21493 -try_again:
21494 +
21495 /* make sure it can fit in the remaining address space */
21496 if (mm->free_area_cache < len)
21497 goto fail;
21498
21499 /* either no address requested or cant fit in requested address hole */
21500 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21501 + addr = (mm->free_area_cache - len);
21502 do {
21503 + addr &= huge_page_mask(h);
21504 + vma = find_vma(mm, addr);
21505 /*
21506 * Lookup failure means no vma is above this address,
21507 * i.e. return with success:
21508 - */
21509 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21510 - return addr;
21511 -
21512 - /*
21513 * new region fits between prev_vma->vm_end and
21514 * vma->vm_start, use it:
21515 */
21516 - if (addr + len <= vma->vm_start &&
21517 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21518 + if (check_heap_stack_gap(vma, addr, len)) {
21519 /* remember the address as a hint for next time */
21520 - mm->cached_hole_size = largest_hole;
21521 - return (mm->free_area_cache = addr);
21522 - } else {
21523 - /* pull free_area_cache down to the first hole */
21524 - if (mm->free_area_cache == vma->vm_end) {
21525 - mm->free_area_cache = vma->vm_start;
21526 - mm->cached_hole_size = largest_hole;
21527 - }
21528 + mm->cached_hole_size = largest_hole;
21529 + return (mm->free_area_cache = addr);
21530 + }
21531 + /* pull free_area_cache down to the first hole */
21532 + if (mm->free_area_cache == vma->vm_end) {
21533 + mm->free_area_cache = vma->vm_start;
21534 + mm->cached_hole_size = largest_hole;
21535 }
21536
21537 /* remember the largest hole we saw so far */
21538 if (addr + largest_hole < vma->vm_start)
21539 - largest_hole = vma->vm_start - addr;
21540 + largest_hole = vma->vm_start - addr;
21541
21542 /* try just below the current vma->vm_start */
21543 - addr = (vma->vm_start - len) & huge_page_mask(h);
21544 - } while (len <= vma->vm_start);
21545 + addr = skip_heap_stack_gap(vma, len);
21546 + } while (!IS_ERR_VALUE(addr));
21547
21548 fail:
21549 /*
21550 - * if hint left us with no space for the requested
21551 - * mapping then try again:
21552 - */
21553 - if (first_time) {
21554 - mm->free_area_cache = base;
21555 - largest_hole = 0;
21556 - first_time = 0;
21557 - goto try_again;
21558 - }
21559 - /*
21560 * A failed mmap() very likely causes application failure,
21561 * so fall back to the bottom-up function here. This scenario
21562 * can happen with large stack limits and large mmap()
21563 * allocations.
21564 */
21565 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21566 +
21567 +#ifdef CONFIG_PAX_SEGMEXEC
21568 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21569 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21570 + else
21571 +#endif
21572 +
21573 + mm->mmap_base = TASK_UNMAPPED_BASE;
21574 +
21575 +#ifdef CONFIG_PAX_RANDMMAP
21576 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21577 + mm->mmap_base += mm->delta_mmap;
21578 +#endif
21579 +
21580 + mm->free_area_cache = mm->mmap_base;
21581 mm->cached_hole_size = ~0UL;
21582 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21583 len, pgoff, flags);
21584 @@ -387,6 +393,7 @@ fail:
21585 /*
21586 * Restore the topdown base:
21587 */
21588 + mm->mmap_base = base;
21589 mm->free_area_cache = base;
21590 mm->cached_hole_size = ~0UL;
21591
21592 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21593 struct hstate *h = hstate_file(file);
21594 struct mm_struct *mm = current->mm;
21595 struct vm_area_struct *vma;
21596 + unsigned long pax_task_size = TASK_SIZE;
21597
21598 if (len & ~huge_page_mask(h))
21599 return -EINVAL;
21600 - if (len > TASK_SIZE)
21601 +
21602 +#ifdef CONFIG_PAX_SEGMEXEC
21603 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21604 + pax_task_size = SEGMEXEC_TASK_SIZE;
21605 +#endif
21606 +
21607 + pax_task_size -= PAGE_SIZE;
21608 +
21609 + if (len > pax_task_size)
21610 return -ENOMEM;
21611
21612 if (flags & MAP_FIXED) {
21613 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21614 if (addr) {
21615 addr = ALIGN(addr, huge_page_size(h));
21616 vma = find_vma(mm, addr);
21617 - if (TASK_SIZE - len >= addr &&
21618 - (!vma || addr + len <= vma->vm_start))
21619 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21620 return addr;
21621 }
21622 if (mm->get_unmapped_area == arch_get_unmapped_area)
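
Throughout the hugetlb allocator changes above, the open-coded test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap(), defined elsewhere in this patch, which, as its name suggests, is meant to enforce a guard gap below stack-style (VM_GROWSDOWN) mappings on top of the plain fit test. A simplified, userspace model of that kind of check; struct vma, gap_ok() and GAP_PAGES here are illustrative only, not the patch's helper:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define GAP_PAGES    64UL              /* placeholder gap below a stack vma */
#define VM_GROWSDOWN 0x0100UL

struct vma { unsigned long vm_start, vm_end, vm_flags; };

/* Does [addr, addr+len) fit below the next vma, with extra room under a stack? */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
        unsigned long gap = 0;

        if (!next)                              /* nothing above: fits */
                return true;
        if (next->vm_flags & VM_GROWSDOWN)      /* keep room below a stack */
                gap = GAP_PAGES * PAGE_SIZE;
        return addr + len + gap <= next->vm_start;
}

int main(void)
{
        struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

        printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, PAGE_SIZE)); /* 0: too close   */
        printf("%d\n", gap_ok(&stack, 0x7eff00000000UL, PAGE_SIZE)); /* 1: far enough  */
        return 0;
}
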
21623 diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21624 --- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21625 +++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21626 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21627 }
21628
21629 /*
21630 - * Creates a middle page table and puts a pointer to it in the
21631 - * given global directory entry. This only returns the gd entry
21632 - * in non-PAE compilation mode, since the middle layer is folded.
21633 - */
21634 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21635 -{
21636 - pud_t *pud;
21637 - pmd_t *pmd_table;
21638 -
21639 -#ifdef CONFIG_X86_PAE
21640 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21641 - if (after_bootmem)
21642 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21643 - else
21644 - pmd_table = (pmd_t *)alloc_low_page();
21645 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21646 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21647 - pud = pud_offset(pgd, 0);
21648 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21649 -
21650 - return pmd_table;
21651 - }
21652 -#endif
21653 - pud = pud_offset(pgd, 0);
21654 - pmd_table = pmd_offset(pud, 0);
21655 -
21656 - return pmd_table;
21657 -}
21658 -
21659 -/*
21660 * Create a page table and place a pointer to it in a middle page
21661 * directory entry:
21662 */
21663 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21664 page_table = (pte_t *)alloc_low_page();
21665
21666 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21667 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21668 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21669 +#else
21670 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21671 +#endif
21672 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21673 }
21674
21675 return pte_offset_kernel(pmd, 0);
21676 }
21677
21678 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21679 +{
21680 + pud_t *pud;
21681 + pmd_t *pmd_table;
21682 +
21683 + pud = pud_offset(pgd, 0);
21684 + pmd_table = pmd_offset(pud, 0);
21685 +
21686 + return pmd_table;
21687 +}
21688 +
21689 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21690 {
21691 int pgd_idx = pgd_index(vaddr);
21692 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21693 int pgd_idx, pmd_idx;
21694 unsigned long vaddr;
21695 pgd_t *pgd;
21696 + pud_t *pud;
21697 pmd_t *pmd;
21698 pte_t *pte = NULL;
21699
21700 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21701 pgd = pgd_base + pgd_idx;
21702
21703 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21704 - pmd = one_md_table_init(pgd);
21705 - pmd = pmd + pmd_index(vaddr);
21706 + pud = pud_offset(pgd, vaddr);
21707 + pmd = pmd_offset(pud, vaddr);
21708 +
21709 +#ifdef CONFIG_X86_PAE
21710 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21711 +#endif
21712 +
21713 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21714 pmd++, pmd_idx++) {
21715 pte = page_table_kmap_check(one_page_table_init(pmd),
21716 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21717 }
21718 }
21719
21720 -static inline int is_kernel_text(unsigned long addr)
21721 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21722 {
21723 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21724 - return 1;
21725 - return 0;
21726 + if ((start > ktla_ktva((unsigned long)_etext) ||
21727 + end <= ktla_ktva((unsigned long)_stext)) &&
21728 + (start > ktla_ktva((unsigned long)_einittext) ||
21729 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21730 +
21731 +#ifdef CONFIG_ACPI_SLEEP
21732 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21733 +#endif
21734 +
21735 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21736 + return 0;
21737 + return 1;
21738 }
21739
21740 /*
21741 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21742 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21743 unsigned long start_pfn, end_pfn;
21744 pgd_t *pgd_base = swapper_pg_dir;
21745 - int pgd_idx, pmd_idx, pte_ofs;
21746 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21747 unsigned long pfn;
21748 pgd_t *pgd;
21749 + pud_t *pud;
21750 pmd_t *pmd;
21751 pte_t *pte;
21752 unsigned pages_2m, pages_4k;
21753 @@ -278,8 +279,13 @@ repeat:
21754 pfn = start_pfn;
21755 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21756 pgd = pgd_base + pgd_idx;
21757 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21758 - pmd = one_md_table_init(pgd);
21759 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21760 + pud = pud_offset(pgd, 0);
21761 + pmd = pmd_offset(pud, 0);
21762 +
21763 +#ifdef CONFIG_X86_PAE
21764 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21765 +#endif
21766
21767 if (pfn >= end_pfn)
21768 continue;
21769 @@ -291,14 +297,13 @@ repeat:
21770 #endif
21771 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21772 pmd++, pmd_idx++) {
21773 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21774 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21775
21776 /*
21777 * Map with big pages if possible, otherwise
21778 * create normal page tables:
21779 */
21780 if (use_pse) {
21781 - unsigned int addr2;
21782 pgprot_t prot = PAGE_KERNEL_LARGE;
21783 /*
21784 * first pass will use the same initial
21785 @@ -308,11 +313,7 @@ repeat:
21786 __pgprot(PTE_IDENT_ATTR |
21787 _PAGE_PSE);
21788
21789 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21790 - PAGE_OFFSET + PAGE_SIZE-1;
21791 -
21792 - if (is_kernel_text(addr) ||
21793 - is_kernel_text(addr2))
21794 + if (is_kernel_text(address, address + PMD_SIZE))
21795 prot = PAGE_KERNEL_LARGE_EXEC;
21796
21797 pages_2m++;
21798 @@ -329,7 +330,7 @@ repeat:
21799 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21800 pte += pte_ofs;
21801 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21802 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21803 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21804 pgprot_t prot = PAGE_KERNEL;
21805 /*
21806 * first pass will use the same initial
21807 @@ -337,7 +338,7 @@ repeat:
21808 */
21809 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21810
21811 - if (is_kernel_text(addr))
21812 + if (is_kernel_text(address, address + PAGE_SIZE))
21813 prot = PAGE_KERNEL_EXEC;
21814
21815 pages_4k++;
21816 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21817
21818 pud = pud_offset(pgd, va);
21819 pmd = pmd_offset(pud, va);
21820 - if (!pmd_present(*pmd))
21821 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21822 break;
21823
21824 pte = pte_offset_kernel(pmd, va);
21825 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21826
21827 static void __init pagetable_init(void)
21828 {
21829 - pgd_t *pgd_base = swapper_pg_dir;
21830 -
21831 - permanent_kmaps_init(pgd_base);
21832 + permanent_kmaps_init(swapper_pg_dir);
21833 }
21834
21835 #ifdef CONFIG_ACPI_SLEEP
21836 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21837 * ACPI suspend needs this for resume, because things like the intel-agp
21838 * driver might have split up a kernel 4MB mapping.
21839 */
21840 -char swsusp_pg_dir[PAGE_SIZE]
21841 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21842 __attribute__ ((aligned(PAGE_SIZE)));
21843
21844 static inline void save_pg_dir(void)
21845 {
21846 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21847 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21848 }
21849 #else /* !CONFIG_ACPI_SLEEP */
21850 static inline void save_pg_dir(void)
21851 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21852 flush_tlb_all();
21853 }
21854
21855 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21856 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21857 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21858
21859 /* user-defined highmem size */
21860 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21861 * Initialize the boot-time allocator (with low memory only):
21862 */
21863 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21864 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21865 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21866 PAGE_SIZE);
21867 if (bootmap == -1L)
21868 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21869 @@ -864,6 +863,12 @@ void __init mem_init(void)
21870
21871 pci_iommu_alloc();
21872
21873 +#ifdef CONFIG_PAX_PER_CPU_PGD
21874 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21875 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21876 + KERNEL_PGD_PTRS);
21877 +#endif
21878 +
21879 #ifdef CONFIG_FLATMEM
21880 BUG_ON(!mem_map);
21881 #endif
21882 @@ -881,7 +886,7 @@ void __init mem_init(void)
21883 set_highmem_pages_init();
21884
21885 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21886 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21887 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21888 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21889
21890 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21891 @@ -923,10 +928,10 @@ void __init mem_init(void)
21892 ((unsigned long)&__init_end -
21893 (unsigned long)&__init_begin) >> 10,
21894
21895 - (unsigned long)&_etext, (unsigned long)&_edata,
21896 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21897 + (unsigned long)&_sdata, (unsigned long)&_edata,
21898 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21899
21900 - (unsigned long)&_text, (unsigned long)&_etext,
21901 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21902 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21903
21904 /*
21905 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21906 if (!kernel_set_to_readonly)
21907 return;
21908
21909 + start = ktla_ktva(start);
21910 pr_debug("Set kernel text: %lx - %lx for read write\n",
21911 start, start+size);
21912
21913 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21914 if (!kernel_set_to_readonly)
21915 return;
21916
21917 + start = ktla_ktva(start);
21918 pr_debug("Set kernel text: %lx - %lx for read only\n",
21919 start, start+size);
21920
21921 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21922 unsigned long start = PFN_ALIGN(_text);
21923 unsigned long size = PFN_ALIGN(_etext) - start;
21924
21925 + start = ktla_ktva(start);
21926 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21927 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21928 size >> 10);
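
The init_32.c rework above turns is_kernel_text() from a single-address test into a range test: a candidate mapping [start, end) is treated as kernel text (and mapped executable) only if it overlaps .text/.init.text, the ACPI wakeup area, or the 0xc0000-0xfffff BIOS range. The core of that is ordinary half-open interval overlap, sketched standalone below with made-up region bounds:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };     /* half-open [start, end) */

static bool overlaps(struct range a, struct range b)
{
        return a.start < b.end && b.start < a.end;
}

int main(void)
{
        struct range text = { 0xc1000000UL, 0xc1400000UL }; /* fake _stext.._etext  */
        struct range pmd1 = { 0xc1200000UL, 0xc1400000UL }; /* overlaps: map exec   */
        struct range pmd2 = { 0xc2000000UL, 0xc2200000UL }; /* data only: map NX    */

        printf("%d %d\n", overlaps(pmd1, text), overlaps(pmd2, text));
        return 0;
}
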
21929 diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
21930 --- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21931 +++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21932 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21933 pmd = fill_pmd(pud, vaddr);
21934 pte = fill_pte(pmd, vaddr);
21935
21936 + pax_open_kernel();
21937 set_pte(pte, new_pte);
21938 + pax_close_kernel();
21939
21940 /*
21941 * It's enough to flush this one mapping.
21942 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21943 pgd = pgd_offset_k((unsigned long)__va(phys));
21944 if (pgd_none(*pgd)) {
21945 pud = (pud_t *) spp_getpage();
21946 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21947 - _PAGE_USER));
21948 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21949 }
21950 pud = pud_offset(pgd, (unsigned long)__va(phys));
21951 if (pud_none(*pud)) {
21952 pmd = (pmd_t *) spp_getpage();
21953 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21954 - _PAGE_USER));
21955 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21956 }
21957 pmd = pmd_offset(pud, phys);
21958 BUG_ON(!pmd_none(*pmd));
21959 @@ -675,6 +675,12 @@ void __init mem_init(void)
21960
21961 pci_iommu_alloc();
21962
21963 +#ifdef CONFIG_PAX_PER_CPU_PGD
21964 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21965 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21966 + KERNEL_PGD_PTRS);
21967 +#endif
21968 +
21969 /* clear_bss() already clear the empty_zero_page */
21970
21971 reservedpages = 0;
21972 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21973 static struct vm_area_struct gate_vma = {
21974 .vm_start = VSYSCALL_START,
21975 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21976 - .vm_page_prot = PAGE_READONLY_EXEC,
21977 - .vm_flags = VM_READ | VM_EXEC
21978 + .vm_page_prot = PAGE_READONLY,
21979 + .vm_flags = VM_READ
21980 };
21981
21982 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21983 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21984
21985 const char *arch_vma_name(struct vm_area_struct *vma)
21986 {
21987 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21988 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21989 return "[vdso]";
21990 if (vma == &gate_vma)
21991 return "[vsyscall]";
21992 diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
21993 --- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21994 +++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21995 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21996 * cause a hotspot and fill up ZONE_DMA. The page tables
21997 * need roughly 0.5KB per GB.
21998 */
21999 -#ifdef CONFIG_X86_32
22000 - start = 0x7000;
22001 -#else
22002 - start = 0x8000;
22003 -#endif
22004 + start = 0x100000;
22005 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22006 tables, PAGE_SIZE);
22007 if (e820_table_start == -1UL)
22008 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22009 #endif
22010
22011 set_nx();
22012 - if (nx_enabled)
22013 + if (nx_enabled && cpu_has_nx)
22014 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22015
22016 /* Enable PSE if available */
22017 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22018 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22019 * mmio resources as well as potential bios/acpi data regions.
22020 */
22021 +
22022 int devmem_is_allowed(unsigned long pagenr)
22023 {
22024 +#ifdef CONFIG_GRKERNSEC_KMEM
22025 + /* allow BDA */
22026 + if (!pagenr)
22027 + return 1;
22028 + /* allow EBDA */
22029 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22030 + return 1;
22031 + /* allow ISA/video mem */
22032 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22033 + return 1;
22034 + /* throw out everything else below 1MB */
22035 + if (pagenr <= 256)
22036 + return 0;
22037 +#else
22038 if (pagenr <= 256)
22039 return 1;
22040 +#endif
22041 +
22042 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22043 return 0;
22044 if (!page_is_ram(pagenr))
22045 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22046
22047 void free_initmem(void)
22048 {
22049 +
22050 +#ifdef CONFIG_PAX_KERNEXEC
22051 +#ifdef CONFIG_X86_32
22052 + /* PaX: limit KERNEL_CS to actual size */
22053 + unsigned long addr, limit;
22054 + struct desc_struct d;
22055 + int cpu;
22056 +
22057 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22058 + limit = (limit - 1UL) >> PAGE_SHIFT;
22059 +
22060 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22061 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22062 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22063 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22064 + }
22065 +
22066 + /* PaX: make KERNEL_CS read-only */
22067 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22068 + if (!paravirt_enabled())
22069 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22070 +/*
22071 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22072 + pgd = pgd_offset_k(addr);
22073 + pud = pud_offset(pgd, addr);
22074 + pmd = pmd_offset(pud, addr);
22075 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22076 + }
22077 +*/
22078 +#ifdef CONFIG_X86_PAE
22079 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22080 +/*
22081 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22082 + pgd = pgd_offset_k(addr);
22083 + pud = pud_offset(pgd, addr);
22084 + pmd = pmd_offset(pud, addr);
22085 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22086 + }
22087 +*/
22088 +#endif
22089 +
22090 +#ifdef CONFIG_MODULES
22091 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22092 +#endif
22093 +
22094 +#else
22095 + pgd_t *pgd;
22096 + pud_t *pud;
22097 + pmd_t *pmd;
22098 + unsigned long addr, end;
22099 +
22100 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22101 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22102 + pgd = pgd_offset_k(addr);
22103 + pud = pud_offset(pgd, addr);
22104 + pmd = pmd_offset(pud, addr);
22105 + if (!pmd_present(*pmd))
22106 + continue;
22107 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22108 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22109 + else
22110 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22111 + }
22112 +
22113 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22114 + end = addr + KERNEL_IMAGE_SIZE;
22115 + for (; addr < end; addr += PMD_SIZE) {
22116 + pgd = pgd_offset_k(addr);
22117 + pud = pud_offset(pgd, addr);
22118 + pmd = pmd_offset(pud, addr);
22119 + if (!pmd_present(*pmd))
22120 + continue;
22121 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22122 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22123 + }
22124 +#endif
22125 +
22126 + flush_tlb_all();
22127 +#endif
22128 +
22129 free_init_pages("unused kernel memory",
22130 (unsigned long)(&__init_begin),
22131 (unsigned long)(&__init_end));
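
The CONFIG_GRKERNSEC_KMEM branch added to devmem_is_allowed() above tightens /dev/mem below 1MB: instead of allowing the first 256 pages outright, it only admits the BIOS data area, the EBDA page and the ISA/video window, and rejects everything else under 1MB. A userspace sketch of just that low-memory policy, with the later RAM and exclusive-region checks of the real function left out:

#include <stdio.h>

#define PAGE_SHIFT        12
#define ISA_START_ADDRESS 0xa0000UL
#define ISA_END_ADDRESS   0x100000UL

static int low_mem_allowed(unsigned long pagenr)
{
        if (!pagenr)                                    /* BDA              */
                return 1;
        if (pagenr == (0x9f000UL >> PAGE_SHIFT))        /* EBDA             */
                return 1;
        if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
            pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))  /* ISA/video window */
                return 1;
        if (pagenr <= 256)                              /* rest of low 1MB  */
                return 0;
        return 1;                                       /* above 1MB: further checks omitted */
}

int main(void)
{
        printf("%d %d %d\n",
               low_mem_allowed(0x00),                        /* 1 */
               low_mem_allowed(0x10),                        /* 0 */
               low_mem_allowed(ISA_START_ADDRESS >> PAGE_SHIFT)); /* 1 */
        return 0;
}
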
22132 diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22133 --- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22134 +++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22135 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22136 debug_kmap_atomic(type);
22137 idx = type + KM_TYPE_NR * smp_processor_id();
22138 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22139 +
22140 + pax_open_kernel();
22141 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22142 + pax_close_kernel();
22143 +
22144 arch_flush_lazy_mmu_mode();
22145
22146 return (void *)vaddr;
22147 diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22148 --- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22149 +++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22150 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22151 * Second special case: Some BIOSen report the PC BIOS
22152 * area (640->1Mb) as ram even though it is not.
22153 */
22154 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22155 - pagenr < (BIOS_END >> PAGE_SHIFT))
22156 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22157 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22158 return 0;
22159
22160 for (i = 0; i < e820.nr_map; i++) {
22161 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22162 /*
22163 * Don't allow anybody to remap normal RAM that we're using..
22164 */
22165 - for (pfn = phys_addr >> PAGE_SHIFT;
22166 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22167 - pfn++) {
22168 -
22169 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22170 int is_ram = page_is_ram(pfn);
22171
22172 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22173 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22174 return NULL;
22175 WARN_ON_ONCE(is_ram);
22176 }
22177 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22178 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22179
22180 static __initdata int after_paging_init;
22181 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22182 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22183
22184 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22185 {
22186 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22187 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22188
22189 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22190 - memset(bm_pte, 0, sizeof(bm_pte));
22191 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22192 + pmd_populate_user(&init_mm, pmd, bm_pte);
22193
22194 /*
22195 * The boot-ioremap range spans multiple pmds, for which
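
In the reworked __ioremap_caller() loop above, the bound is computed as ((resource_size_t)pfn << PAGE_SHIFT) rather than (pfn << PAGE_SHIFT). The likely reason: pfn is an unsigned long, so on 32-bit kernels with PAE a frame number past the 4GB mark silently wraps when shifted; widening first keeps the comparison in 64 bits. A small demonstration of the wrapped versus widened shift:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t pfn = 0x100001;                          /* a page just above 4GB */
        uint32_t wrapped = pfn << PAGE_SHIFT;             /* truncated: 0x1000     */
        uint64_t widened = (uint64_t)pfn << PAGE_SHIFT;   /* correct: 0x100001000  */

        printf("%#x vs %#llx\n", wrapped, (unsigned long long)widened);
        return 0;
}
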
22196 diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22197 --- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22198 +++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22199 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22200 * memory (e.g. tracked pages)? For now, we need this to avoid
22201 * invoking kmemcheck for PnP BIOS calls.
22202 */
22203 - if (regs->flags & X86_VM_MASK)
22204 + if (v8086_mode(regs))
22205 return false;
22206 - if (regs->cs != __KERNEL_CS)
22207 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22208 return false;
22209
22210 pte = kmemcheck_pte_lookup(address);
22211 diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22212 --- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22213 +++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22214 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22215 * Leave an at least ~128 MB hole with possible stack randomization.
22216 */
22217 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22218 -#define MAX_GAP (TASK_SIZE/6*5)
22219 +#define MAX_GAP (pax_task_size/6*5)
22220
22221 /*
22222 * True on X86_32 or when emulating IA32 on X86_64
22223 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22224 return rnd << PAGE_SHIFT;
22225 }
22226
22227 -static unsigned long mmap_base(void)
22228 +static unsigned long mmap_base(struct mm_struct *mm)
22229 {
22230 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22231 + unsigned long pax_task_size = TASK_SIZE;
22232 +
22233 +#ifdef CONFIG_PAX_SEGMEXEC
22234 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22235 + pax_task_size = SEGMEXEC_TASK_SIZE;
22236 +#endif
22237
22238 if (gap < MIN_GAP)
22239 gap = MIN_GAP;
22240 else if (gap > MAX_GAP)
22241 gap = MAX_GAP;
22242
22243 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22244 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22245 }
22246
22247 /*
22248 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22249 * does, but not when emulating X86_32
22250 */
22251 -static unsigned long mmap_legacy_base(void)
22252 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22253 {
22254 - if (mmap_is_ia32())
22255 + if (mmap_is_ia32()) {
22256 +
22257 +#ifdef CONFIG_PAX_SEGMEXEC
22258 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22259 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22260 + else
22261 +#endif
22262 +
22263 return TASK_UNMAPPED_BASE;
22264 - else
22265 + } else
22266 return TASK_UNMAPPED_BASE + mmap_rnd();
22267 }
22268
22269 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22270 void arch_pick_mmap_layout(struct mm_struct *mm)
22271 {
22272 if (mmap_is_legacy()) {
22273 - mm->mmap_base = mmap_legacy_base();
22274 + mm->mmap_base = mmap_legacy_base(mm);
22275 +
22276 +#ifdef CONFIG_PAX_RANDMMAP
22277 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22278 + mm->mmap_base += mm->delta_mmap;
22279 +#endif
22280 +
22281 mm->get_unmapped_area = arch_get_unmapped_area;
22282 mm->unmap_area = arch_unmap_area;
22283 } else {
22284 - mm->mmap_base = mmap_base();
22285 + mm->mmap_base = mmap_base(mm);
22286 +
22287 +#ifdef CONFIG_PAX_RANDMMAP
22288 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22289 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22290 +#endif
22291 +
22292 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22293 mm->unmap_area = arch_unmap_area_topdown;
22294 }
22295 diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22296 --- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22297 +++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22298 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22299 break;
22300 default:
22301 {
22302 - unsigned char *ip = (unsigned char *)instptr;
22303 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22304 my_trace->opcode = MMIO_UNKNOWN_OP;
22305 my_trace->width = 0;
22306 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22307 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22308 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22309 void __iomem *addr)
22310 {
22311 - static atomic_t next_id;
22312 + static atomic_unchecked_t next_id;
22313 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22314 /* These are page-unaligned. */
22315 struct mmiotrace_map map = {
22316 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22317 .private = trace
22318 },
22319 .phys = offset,
22320 - .id = atomic_inc_return(&next_id)
22321 + .id = atomic_inc_return_unchecked(&next_id)
22322 };
22323 map.map_id = trace->id;
22324
22325 diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22326 --- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22327 +++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22328 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22329 }
22330 #endif
22331
22332 -extern unsigned long find_max_low_pfn(void);
22333 extern unsigned long highend_pfn, highstart_pfn;
22334
22335 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22336 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22337 --- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22338 +++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22339 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22340 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22341 */
22342 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22343 - pgprot_val(forbidden) |= _PAGE_NX;
22344 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22345
22346 /*
22347 * The kernel text needs to be executable for obvious reasons
22348 * Does not cover __inittext since that is gone later on. On
22349 * 64bit we do not enforce !NX on the low mapping
22350 */
22351 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22352 - pgprot_val(forbidden) |= _PAGE_NX;
22353 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22354 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22355
22356 +#ifdef CONFIG_DEBUG_RODATA
22357 /*
22358 * The .rodata section needs to be read-only. Using the pfn
22359 * catches all aliases.
22360 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22361 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22362 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22363 pgprot_val(forbidden) |= _PAGE_RW;
22364 +#endif
22365 +
22366 +#ifdef CONFIG_PAX_KERNEXEC
22367 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22368 + pgprot_val(forbidden) |= _PAGE_RW;
22369 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22370 + }
22371 +#endif
22372
22373 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22374
22375 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22376 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22377 {
22378 /* change init_mm */
22379 + pax_open_kernel();
22380 set_pte_atomic(kpte, pte);
22381 +
22382 #ifdef CONFIG_X86_32
22383 if (!SHARED_KERNEL_PMD) {
22384 +
22385 +#ifdef CONFIG_PAX_PER_CPU_PGD
22386 + unsigned long cpu;
22387 +#else
22388 struct page *page;
22389 +#endif
22390
22391 +#ifdef CONFIG_PAX_PER_CPU_PGD
22392 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22393 + pgd_t *pgd = get_cpu_pgd(cpu);
22394 +#else
22395 list_for_each_entry(page, &pgd_list, lru) {
22396 - pgd_t *pgd;
22397 + pgd_t *pgd = (pgd_t *)page_address(page);
22398 +#endif
22399 +
22400 pud_t *pud;
22401 pmd_t *pmd;
22402
22403 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22404 + pgd += pgd_index(address);
22405 pud = pud_offset(pgd, address);
22406 pmd = pmd_offset(pud, address);
22407 set_pte_atomic((pte_t *)pmd, pte);
22408 }
22409 }
22410 #endif
22411 + pax_close_kernel();
22412 }
22413
22414 static int
22415 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22416 --- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22417 +++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22418 @@ -36,7 +36,7 @@ enum {
22419
22420 static int pte_testbit(pte_t pte)
22421 {
22422 - return pte_flags(pte) & _PAGE_UNUSED1;
22423 + return pte_flags(pte) & _PAGE_CPA_TEST;
22424 }
22425
22426 struct split_state {
22427 diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22428 --- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22429 +++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22430 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22431
22432 conflict:
22433 printk(KERN_INFO "%s:%d conflicting memory types "
22434 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22435 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22436 new->end, cattr_name(new->type), cattr_name(entry->type));
22437 return -EBUSY;
22438 }
22439 @@ -559,7 +559,7 @@ unlock_ret:
22440
22441 if (err) {
22442 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22443 - current->comm, current->pid, start, end);
22444 + current->comm, task_pid_nr(current), start, end);
22445 }
22446
22447 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22448 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22449 while (cursor < to) {
22450 if (!devmem_is_allowed(pfn)) {
22451 printk(KERN_INFO
22452 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22453 - current->comm, from, to);
22454 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22455 + current->comm, from, to, cursor);
22456 return 0;
22457 }
22458 cursor += PAGE_SIZE;
22459 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22460 printk(KERN_INFO
22461 "%s:%d ioremap_change_attr failed %s "
22462 "for %Lx-%Lx\n",
22463 - current->comm, current->pid,
22464 + current->comm, task_pid_nr(current),
22465 cattr_name(flags),
22466 base, (unsigned long long)(base + size));
22467 return -EINVAL;
22468 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22469 free_memtype(paddr, paddr + size);
22470 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22471 " for %Lx-%Lx, got %s\n",
22472 - current->comm, current->pid,
22473 + current->comm, task_pid_nr(current),
22474 cattr_name(want_flags),
22475 (unsigned long long)paddr,
22476 (unsigned long long)(paddr + size),
22477 diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22478 --- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22479 +++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22480 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22481 int i;
22482 enum reason_type rv = OTHERS;
22483
22484 - p = (unsigned char *)ins_addr;
22485 + p = (unsigned char *)ktla_ktva(ins_addr);
22486 p += skip_prefix(p, &prf);
22487 p += get_opcode(p, &opcode);
22488
22489 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22490 struct prefix_bits prf;
22491 int i;
22492
22493 - p = (unsigned char *)ins_addr;
22494 + p = (unsigned char *)ktla_ktva(ins_addr);
22495 p += skip_prefix(p, &prf);
22496 p += get_opcode(p, &opcode);
22497
22498 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22499 struct prefix_bits prf;
22500 int i;
22501
22502 - p = (unsigned char *)ins_addr;
22503 + p = (unsigned char *)ktla_ktva(ins_addr);
22504 p += skip_prefix(p, &prf);
22505 p += get_opcode(p, &opcode);
22506
22507 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22508 int i;
22509 unsigned long rv;
22510
22511 - p = (unsigned char *)ins_addr;
22512 + p = (unsigned char *)ktla_ktva(ins_addr);
22513 p += skip_prefix(p, &prf);
22514 p += get_opcode(p, &opcode);
22515 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22516 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22517 int i;
22518 unsigned long rv;
22519
22520 - p = (unsigned char *)ins_addr;
22521 + p = (unsigned char *)ktla_ktva(ins_addr);
22522 p += skip_prefix(p, &prf);
22523 p += get_opcode(p, &opcode);
22524 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22525 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22526 --- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22527 +++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22528 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22529 return;
22530 }
22531 pte = pte_offset_kernel(pmd, vaddr);
22532 +
22533 + pax_open_kernel();
22534 if (pte_val(pteval))
22535 set_pte_at(&init_mm, vaddr, pte, pteval);
22536 else
22537 pte_clear(&init_mm, vaddr, pte);
22538 + pax_close_kernel();
22539
22540 /*
22541 * It's enough to flush this one mapping.
22542 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22543 --- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22544 +++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22545 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22546 list_del(&page->lru);
22547 }
22548
22549 -#define UNSHARED_PTRS_PER_PGD \
22550 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22551 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22552 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22553
22554 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22555 +{
22556 + while (count--)
22557 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22558 +}
22559 +#endif
22560 +
22561 +#ifdef CONFIG_PAX_PER_CPU_PGD
22562 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22563 +{
22564 + while (count--)
22565 +
22566 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22567 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22568 +#else
22569 + *dst++ = *src++;
22570 +#endif
22571 +
22572 +}
22573 +#endif
22574 +
22575 +#ifdef CONFIG_X86_64
22576 +#define pxd_t pud_t
22577 +#define pyd_t pgd_t
22578 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22579 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22580 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22581 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22582 +#define PYD_SIZE PGDIR_SIZE
22583 +#else
22584 +#define pxd_t pmd_t
22585 +#define pyd_t pud_t
22586 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22587 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22588 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22589 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22590 +#define PYD_SIZE PUD_SIZE
22591 +#endif
22592 +
22593 +#ifdef CONFIG_PAX_PER_CPU_PGD
22594 +static inline void pgd_ctor(pgd_t *pgd) {}
22595 +static inline void pgd_dtor(pgd_t *pgd) {}
22596 +#else
22597 static void pgd_ctor(pgd_t *pgd)
22598 {
22599 /* If the pgd points to a shared pagetable level (either the
22600 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22601 pgd_list_del(pgd);
22602 spin_unlock_irqrestore(&pgd_lock, flags);
22603 }
22604 +#endif
22605
22606 /*
22607 * List of all pgd's needed for non-PAE so it can invalidate entries
22608 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22609 * -- wli
22610 */
22611
22612 -#ifdef CONFIG_X86_PAE
22613 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22614 /*
22615 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22616 * updating the top-level pagetable entries to guarantee the
22617 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22618 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22619 * and initialize the kernel pmds here.
22620 */
22621 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22622 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22623
22624 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22625 {
22626 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22627 */
22628 flush_tlb_mm(mm);
22629 }
22630 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22631 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22632 #else /* !CONFIG_X86_PAE */
22633
22634 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22635 -#define PREALLOCATED_PMDS 0
22636 +#define PREALLOCATED_PXDS 0
22637
22638 #endif /* CONFIG_X86_PAE */
22639
22640 -static void free_pmds(pmd_t *pmds[])
22641 +static void free_pxds(pxd_t *pxds[])
22642 {
22643 int i;
22644
22645 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22646 - if (pmds[i])
22647 - free_page((unsigned long)pmds[i]);
22648 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22649 + if (pxds[i])
22650 + free_page((unsigned long)pxds[i]);
22651 }
22652
22653 -static int preallocate_pmds(pmd_t *pmds[])
22654 +static int preallocate_pxds(pxd_t *pxds[])
22655 {
22656 int i;
22657 bool failed = false;
22658
22659 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22660 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22661 - if (pmd == NULL)
22662 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22663 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22664 + if (pxd == NULL)
22665 failed = true;
22666 - pmds[i] = pmd;
22667 + pxds[i] = pxd;
22668 }
22669
22670 if (failed) {
22671 - free_pmds(pmds);
22672 + free_pxds(pxds);
22673 return -ENOMEM;
22674 }
22675
22676 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22677 * preallocate which never got a corresponding vma will need to be
22678 * freed manually.
22679 */
22680 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22681 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22682 {
22683 int i;
22684
22685 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22686 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22687 pgd_t pgd = pgdp[i];
22688
22689 if (pgd_val(pgd) != 0) {
22690 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22691 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22692
22693 - pgdp[i] = native_make_pgd(0);
22694 + set_pgd(pgdp + i, native_make_pgd(0));
22695
22696 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22697 - pmd_free(mm, pmd);
22698 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22699 + pxd_free(mm, pxd);
22700 }
22701 }
22702 }
22703
22704 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22705 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22706 {
22707 - pud_t *pud;
22708 + pyd_t *pyd;
22709 unsigned long addr;
22710 int i;
22711
22712 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22713 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22714 return;
22715
22716 - pud = pud_offset(pgd, 0);
22717 +#ifdef CONFIG_X86_64
22718 + pyd = pyd_offset(mm, 0L);
22719 +#else
22720 + pyd = pyd_offset(pgd, 0L);
22721 +#endif
22722
22723 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22724 - i++, pud++, addr += PUD_SIZE) {
22725 - pmd_t *pmd = pmds[i];
22726 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22727 + i++, pyd++, addr += PYD_SIZE) {
22728 + pxd_t *pxd = pxds[i];
22729
22730 if (i >= KERNEL_PGD_BOUNDARY)
22731 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22732 - sizeof(pmd_t) * PTRS_PER_PMD);
22733 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22734 + sizeof(pxd_t) * PTRS_PER_PMD);
22735
22736 - pud_populate(mm, pud, pmd);
22737 + pyd_populate(mm, pyd, pxd);
22738 }
22739 }
22740
22741 pgd_t *pgd_alloc(struct mm_struct *mm)
22742 {
22743 pgd_t *pgd;
22744 - pmd_t *pmds[PREALLOCATED_PMDS];
22745 + pxd_t *pxds[PREALLOCATED_PXDS];
22746 +
22747 unsigned long flags;
22748
22749 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22750 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22751
22752 mm->pgd = pgd;
22753
22754 - if (preallocate_pmds(pmds) != 0)
22755 + if (preallocate_pxds(pxds) != 0)
22756 goto out_free_pgd;
22757
22758 if (paravirt_pgd_alloc(mm) != 0)
22759 - goto out_free_pmds;
22760 + goto out_free_pxds;
22761
22762 /*
22763 * Make sure that pre-populating the pmds is atomic with
22764 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22765 spin_lock_irqsave(&pgd_lock, flags);
22766
22767 pgd_ctor(pgd);
22768 - pgd_prepopulate_pmd(mm, pgd, pmds);
22769 + pgd_prepopulate_pxd(mm, pgd, pxds);
22770
22771 spin_unlock_irqrestore(&pgd_lock, flags);
22772
22773 return pgd;
22774
22775 -out_free_pmds:
22776 - free_pmds(pmds);
22777 +out_free_pxds:
22778 + free_pxds(pxds);
22779 out_free_pgd:
22780 free_page((unsigned long)pgd);
22781 out:
22782 @@ -287,7 +338,7 @@ out:
22783
22784 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22785 {
22786 - pgd_mop_up_pmds(mm, pgd);
22787 + pgd_mop_up_pxds(mm, pgd);
22788 pgd_dtor(pgd);
22789 paravirt_pgd_free(mm, pgd);
22790 free_page((unsigned long)pgd);
22791 diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22792 --- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22793 +++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22794 @@ -4,11 +4,10 @@
22795
22796 #include <asm/pgtable.h>
22797
22798 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22799 int nx_enabled;
22800
22801 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22802 -static int disable_nx __cpuinitdata;
22803 -
22804 +#ifndef CONFIG_PAX_PAGEEXEC
22805 /*
22806 * noexec = on|off
22807 *
22808 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22809 if (!str)
22810 return -EINVAL;
22811 if (!strncmp(str, "on", 2)) {
22812 - __supported_pte_mask |= _PAGE_NX;
22813 - disable_nx = 0;
22814 + nx_enabled = 1;
22815 } else if (!strncmp(str, "off", 3)) {
22816 - disable_nx = 1;
22817 - __supported_pte_mask &= ~_PAGE_NX;
22818 + nx_enabled = 0;
22819 }
22820 return 0;
22821 }
22822 early_param("noexec", noexec_setup);
22823 #endif
22824 +#endif
22825
22826 #ifdef CONFIG_X86_PAE
22827 void __init set_nx(void)
22828 {
22829 - unsigned int v[4], l, h;
22830 + if (!nx_enabled && cpu_has_nx) {
22831 + unsigned l, h;
22832
22833 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22834 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22835 -
22836 - if ((v[3] & (1 << 20)) && !disable_nx) {
22837 - rdmsr(MSR_EFER, l, h);
22838 - l |= EFER_NX;
22839 - wrmsr(MSR_EFER, l, h);
22840 - nx_enabled = 1;
22841 - __supported_pte_mask |= _PAGE_NX;
22842 - }
22843 + __supported_pte_mask &= ~_PAGE_NX;
22844 + rdmsr(MSR_EFER, l, h);
22845 + l &= ~EFER_NX;
22846 + wrmsr(MSR_EFER, l, h);
22847 }
22848 }
22849 #else
22850 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22851 unsigned long efer;
22852
22853 rdmsrl(MSR_EFER, efer);
22854 - if (!(efer & EFER_NX) || disable_nx)
22855 + if (!(efer & EFER_NX) || !nx_enabled)
22856 __supported_pte_mask &= ~_PAGE_NX;
22857 }
22858 #endif
22859 diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
22860 --- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22861 +++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22862 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22863 BUG();
22864 cpumask_clear_cpu(cpu,
22865 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22866 +
22867 +#ifndef CONFIG_PAX_PER_CPU_PGD
22868 load_cr3(swapper_pg_dir);
22869 +#endif
22870 +
22871 }
22872 EXPORT_SYMBOL_GPL(leave_mm);
22873
22874 diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
22875 --- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22876 +++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22877 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22878 struct frame_head bufhead[2];
22879
22880 /* Also check accessibility of one struct frame_head beyond */
22881 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22882 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22883 return NULL;
22884 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22885 return NULL;
22886 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22887 {
22888 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22889
22890 - if (!user_mode_vm(regs)) {
22891 + if (!user_mode(regs)) {
22892 unsigned long stack = kernel_stack_pointer(regs);
22893 if (depth)
22894 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22895 diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
22896 --- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22897 +++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22898 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22899 #endif
22900 }
22901
22902 -static int inline addr_increment(void)
22903 +static inline int addr_increment(void)
22904 {
22905 #ifdef CONFIG_SMP
22906 return smp_num_siblings == 2 ? 2 : 1;
22907 diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
22908 --- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22909 +++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22910 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22911 int pcibios_last_bus = -1;
22912 unsigned long pirq_table_addr;
22913 struct pci_bus *pci_root_bus;
22914 -struct pci_raw_ops *raw_pci_ops;
22915 -struct pci_raw_ops *raw_pci_ext_ops;
22916 +const struct pci_raw_ops *raw_pci_ops;
22917 +const struct pci_raw_ops *raw_pci_ext_ops;
22918
22919 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22920 int reg, int len, u32 *val)
22921 diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
22922 --- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22923 +++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22924 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22925
22926 #undef PCI_CONF1_ADDRESS
22927
22928 -struct pci_raw_ops pci_direct_conf1 = {
22929 +const struct pci_raw_ops pci_direct_conf1 = {
22930 .read = pci_conf1_read,
22931 .write = pci_conf1_write,
22932 };
22933 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22934
22935 #undef PCI_CONF2_ADDRESS
22936
22937 -struct pci_raw_ops pci_direct_conf2 = {
22938 +const struct pci_raw_ops pci_direct_conf2 = {
22939 .read = pci_conf2_read,
22940 .write = pci_conf2_write,
22941 };
22942 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22943 * This should be close to trivial, but it isn't, because there are buggy
22944 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22945 */
22946 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22947 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22948 {
22949 u32 x = 0;
22950 int year, devfn;
22951 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
22952 --- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22953 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22954 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22955 return 0;
22956 }
22957
22958 -static struct pci_raw_ops pci_mmcfg = {
22959 +static const struct pci_raw_ops pci_mmcfg = {
22960 .read = pci_mmcfg_read,
22961 .write = pci_mmcfg_write,
22962 };
22963 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
22964 --- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22965 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22966 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22967 return 0;
22968 }
22969
22970 -static struct pci_raw_ops pci_mmcfg = {
22971 +static const struct pci_raw_ops pci_mmcfg = {
22972 .read = pci_mmcfg_read,
22973 .write = pci_mmcfg_write,
22974 };
22975 diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
22976 --- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22977 +++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22978 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22979
22980 #undef PCI_CONF1_MQ_ADDRESS
22981
22982 -static struct pci_raw_ops pci_direct_conf1_mq = {
22983 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22984 .read = pci_conf1_mq_read,
22985 .write = pci_conf1_mq_write
22986 };
22987 diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
22988 --- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22989 +++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22990 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22991 return 0;
22992 }
22993
22994 -static struct pci_raw_ops pci_olpc_conf = {
22995 +static const struct pci_raw_ops pci_olpc_conf = {
22996 .read = pci_olpc_read,
22997 .write = pci_olpc_write,
22998 };
22999 diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
23000 --- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23001 +++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23002 @@ -56,50 +56,93 @@ union bios32 {
23003 static struct {
23004 unsigned long address;
23005 unsigned short segment;
23006 -} bios32_indirect = { 0, __KERNEL_CS };
23007 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23008
23009 /*
23010 * Returns the entry point for the given service, NULL on error
23011 */
23012
23013 -static unsigned long bios32_service(unsigned long service)
23014 +static unsigned long __devinit bios32_service(unsigned long service)
23015 {
23016 unsigned char return_code; /* %al */
23017 unsigned long address; /* %ebx */
23018 unsigned long length; /* %ecx */
23019 unsigned long entry; /* %edx */
23020 unsigned long flags;
23021 + struct desc_struct d, *gdt;
23022
23023 local_irq_save(flags);
23024 - __asm__("lcall *(%%edi); cld"
23025 +
23026 + gdt = get_cpu_gdt_table(smp_processor_id());
23027 +
23028 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23029 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23030 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23031 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23032 +
23033 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23034 : "=a" (return_code),
23035 "=b" (address),
23036 "=c" (length),
23037 "=d" (entry)
23038 : "0" (service),
23039 "1" (0),
23040 - "D" (&bios32_indirect));
23041 + "D" (&bios32_indirect),
23042 + "r"(__PCIBIOS_DS)
23043 + : "memory");
23044 +
23045 + pax_open_kernel();
23046 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23047 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23048 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23049 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23050 + pax_close_kernel();
23051 +
23052 local_irq_restore(flags);
23053
23054 switch (return_code) {
23055 - case 0:
23056 - return address + entry;
23057 - case 0x80: /* Not present */
23058 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23059 - return 0;
23060 - default: /* Shouldn't happen */
23061 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23062 - service, return_code);
23063 + case 0: {
23064 + int cpu;
23065 + unsigned char flags;
23066 +
23067 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23068 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23069 + printk(KERN_WARNING "bios32_service: not valid\n");
23070 return 0;
23071 + }
23072 + address = address + PAGE_OFFSET;
23073 + length += 16UL; /* some BIOSs underreport this... */
23074 + flags = 4;
23075 + if (length >= 64*1024*1024) {
23076 + length >>= PAGE_SHIFT;
23077 + flags |= 8;
23078 + }
23079 +
23080 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23081 + gdt = get_cpu_gdt_table(cpu);
23082 + pack_descriptor(&d, address, length, 0x9b, flags);
23083 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23084 + pack_descriptor(&d, address, length, 0x93, flags);
23085 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23086 + }
23087 + return entry;
23088 + }
23089 + case 0x80: /* Not present */
23090 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23091 + return 0;
23092 + default: /* Shouldn't happen */
23093 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23094 + service, return_code);
23095 + return 0;
23096 }
23097 }
23098
23099 static struct {
23100 unsigned long address;
23101 unsigned short segment;
23102 -} pci_indirect = { 0, __KERNEL_CS };
23103 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23104
23105 -static int pci_bios_present;
23106 +static int pci_bios_present __read_only;
23107
23108 static int __devinit check_pcibios(void)
23109 {
23110 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23111 unsigned long flags, pcibios_entry;
23112
23113 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23114 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23115 + pci_indirect.address = pcibios_entry;
23116
23117 local_irq_save(flags);
23118 - __asm__(
23119 - "lcall *(%%edi); cld\n\t"
23120 + __asm__("movw %w6, %%ds\n\t"
23121 + "lcall *%%ss:(%%edi); cld\n\t"
23122 + "push %%ss\n\t"
23123 + "pop %%ds\n\t"
23124 "jc 1f\n\t"
23125 "xor %%ah, %%ah\n"
23126 "1:"
23127 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23128 "=b" (ebx),
23129 "=c" (ecx)
23130 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23131 - "D" (&pci_indirect)
23132 + "D" (&pci_indirect),
23133 + "r" (__PCIBIOS_DS)
23134 : "memory");
23135 local_irq_restore(flags);
23136
23137 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23138
23139 switch (len) {
23140 case 1:
23141 - __asm__("lcall *(%%esi); cld\n\t"
23142 + __asm__("movw %w6, %%ds\n\t"
23143 + "lcall *%%ss:(%%esi); cld\n\t"
23144 + "push %%ss\n\t"
23145 + "pop %%ds\n\t"
23146 "jc 1f\n\t"
23147 "xor %%ah, %%ah\n"
23148 "1:"
23149 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23150 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23151 "b" (bx),
23152 "D" ((long)reg),
23153 - "S" (&pci_indirect));
23154 + "S" (&pci_indirect),
23155 + "r" (__PCIBIOS_DS));
23156 /*
23157 * Zero-extend the result beyond 8 bits, do not trust the
23158 * BIOS having done it:
23159 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23160 *value &= 0xff;
23161 break;
23162 case 2:
23163 - __asm__("lcall *(%%esi); cld\n\t"
23164 + __asm__("movw %w6, %%ds\n\t"
23165 + "lcall *%%ss:(%%esi); cld\n\t"
23166 + "push %%ss\n\t"
23167 + "pop %%ds\n\t"
23168 "jc 1f\n\t"
23169 "xor %%ah, %%ah\n"
23170 "1:"
23171 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23172 : "1" (PCIBIOS_READ_CONFIG_WORD),
23173 "b" (bx),
23174 "D" ((long)reg),
23175 - "S" (&pci_indirect));
23176 + "S" (&pci_indirect),
23177 + "r" (__PCIBIOS_DS));
23178 /*
23179 * Zero-extend the result beyond 16 bits, do not trust the
23180 * BIOS having done it:
23181 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23182 *value &= 0xffff;
23183 break;
23184 case 4:
23185 - __asm__("lcall *(%%esi); cld\n\t"
23186 + __asm__("movw %w6, %%ds\n\t"
23187 + "lcall *%%ss:(%%esi); cld\n\t"
23188 + "push %%ss\n\t"
23189 + "pop %%ds\n\t"
23190 "jc 1f\n\t"
23191 "xor %%ah, %%ah\n"
23192 "1:"
23193 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23194 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23195 "b" (bx),
23196 "D" ((long)reg),
23197 - "S" (&pci_indirect));
23198 + "S" (&pci_indirect),
23199 + "r" (__PCIBIOS_DS));
23200 break;
23201 }
23202
23203 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23204
23205 switch (len) {
23206 case 1:
23207 - __asm__("lcall *(%%esi); cld\n\t"
23208 + __asm__("movw %w6, %%ds\n\t"
23209 + "lcall *%%ss:(%%esi); cld\n\t"
23210 + "push %%ss\n\t"
23211 + "pop %%ds\n\t"
23212 "jc 1f\n\t"
23213 "xor %%ah, %%ah\n"
23214 "1:"
23215 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23216 "c" (value),
23217 "b" (bx),
23218 "D" ((long)reg),
23219 - "S" (&pci_indirect));
23220 + "S" (&pci_indirect),
23221 + "r" (__PCIBIOS_DS));
23222 break;
23223 case 2:
23224 - __asm__("lcall *(%%esi); cld\n\t"
23225 + __asm__("movw %w6, %%ds\n\t"
23226 + "lcall *%%ss:(%%esi); cld\n\t"
23227 + "push %%ss\n\t"
23228 + "pop %%ds\n\t"
23229 "jc 1f\n\t"
23230 "xor %%ah, %%ah\n"
23231 "1:"
23232 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23233 "c" (value),
23234 "b" (bx),
23235 "D" ((long)reg),
23236 - "S" (&pci_indirect));
23237 + "S" (&pci_indirect),
23238 + "r" (__PCIBIOS_DS));
23239 break;
23240 case 4:
23241 - __asm__("lcall *(%%esi); cld\n\t"
23242 + __asm__("movw %w6, %%ds\n\t"
23243 + "lcall *%%ss:(%%esi); cld\n\t"
23244 + "push %%ss\n\t"
23245 + "pop %%ds\n\t"
23246 "jc 1f\n\t"
23247 "xor %%ah, %%ah\n"
23248 "1:"
23249 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23250 "c" (value),
23251 "b" (bx),
23252 "D" ((long)reg),
23253 - "S" (&pci_indirect));
23254 + "S" (&pci_indirect),
23255 + "r" (__PCIBIOS_DS));
23256 break;
23257 }
23258
23259 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23260 * Function table for BIOS32 access
23261 */
23262
23263 -static struct pci_raw_ops pci_bios_access = {
23264 +static const struct pci_raw_ops pci_bios_access = {
23265 .read = pci_bios_read,
23266 .write = pci_bios_write
23267 };
23268 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23269 * Try to find PCI BIOS.
23270 */
23271
23272 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23273 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23274 {
23275 union bios32 *check;
23276 unsigned char sum;
23277 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23278
23279 DBG("PCI: Fetching IRQ routing table... ");
23280 __asm__("push %%es\n\t"
23281 + "movw %w8, %%ds\n\t"
23282 "push %%ds\n\t"
23283 "pop %%es\n\t"
23284 - "lcall *(%%esi); cld\n\t"
23285 + "lcall *%%ss:(%%esi); cld\n\t"
23286 "pop %%es\n\t"
23287 + "push %%ss\n\t"
23288 + "pop %%ds\n"
23289 "jc 1f\n\t"
23290 "xor %%ah, %%ah\n"
23291 "1:"
23292 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23293 "1" (0),
23294 "D" ((long) &opt),
23295 "S" (&pci_indirect),
23296 - "m" (opt)
23297 + "m" (opt),
23298 + "r" (__PCIBIOS_DS)
23299 : "memory");
23300 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23301 if (ret & 0xff00)
23302 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23303 {
23304 int ret;
23305
23306 - __asm__("lcall *(%%esi); cld\n\t"
23307 + __asm__("movw %w5, %%ds\n\t"
23308 + "lcall *%%ss:(%%esi); cld\n\t"
23309 + "push %%ss\n\t"
23310 + "pop %%ds\n"
23311 "jc 1f\n\t"
23312 "xor %%ah, %%ah\n"
23313 "1:"
23314 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23315 : "0" (PCIBIOS_SET_PCI_HW_INT),
23316 "b" ((dev->bus->number << 8) | dev->devfn),
23317 "c" ((irq << 8) | (pin + 10)),
23318 - "S" (&pci_indirect));
23319 + "S" (&pci_indirect),
23320 + "r" (__PCIBIOS_DS));
23321 return !(ret & 0xff00);
23322 }
23323 EXPORT_SYMBOL(pcibios_set_irq_routing);
23324 diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23325 --- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23326 +++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23327 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23328 static void fix_processor_context(void)
23329 {
23330 int cpu = smp_processor_id();
23331 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23332 + struct tss_struct *t = init_tss + cpu;
23333
23334 set_tss_desc(cpu, t); /*
23335 * This just modifies memory; should not be
23336 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23337 */
23338
23339 #ifdef CONFIG_X86_64
23340 + pax_open_kernel();
23341 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23342 + pax_close_kernel();
23343
23344 syscall_init(); /* This sets MSR_*STAR and related */
23345 #endif
23346 diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23347 --- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23348 +++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23349 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23350 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23351 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23352
23353 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23354 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23355 GCOV_PROFILE := n
23356
23357 #
23358 diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23359 --- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23360 +++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23361 @@ -22,24 +22,48 @@
23362 #include <asm/hpet.h>
23363 #include <asm/unistd.h>
23364 #include <asm/io.h>
23365 +#include <asm/fixmap.h>
23366 #include "vextern.h"
23367
23368 #define gtod vdso_vsyscall_gtod_data
23369
23370 +notrace noinline long __vdso_fallback_time(long *t)
23371 +{
23372 + long secs;
23373 + asm volatile("syscall"
23374 + : "=a" (secs)
23375 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23376 + return secs;
23377 +}
23378 +
23379 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23380 {
23381 long ret;
23382 asm("syscall" : "=a" (ret) :
23383 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23384 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23385 return ret;
23386 }
23387
23388 +notrace static inline cycle_t __vdso_vread_hpet(void)
23389 +{
23390 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23391 +}
23392 +
23393 +notrace static inline cycle_t __vdso_vread_tsc(void)
23394 +{
23395 + cycle_t ret = (cycle_t)vget_cycles();
23396 +
23397 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23398 +}
23399 +
23400 notrace static inline long vgetns(void)
23401 {
23402 long v;
23403 - cycles_t (*vread)(void);
23404 - vread = gtod->clock.vread;
23405 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23406 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23407 + v = __vdso_vread_tsc();
23408 + else
23409 + v = __vdso_vread_hpet();
23410 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23411 return (v * gtod->clock.mult) >> gtod->clock.shift;
23412 }
23413
23414 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23415
23416 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23417 {
23418 - if (likely(gtod->sysctl_enabled))
23419 + if (likely(gtod->sysctl_enabled &&
23420 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23421 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23422 switch (clock) {
23423 case CLOCK_REALTIME:
23424 if (likely(gtod->clock.vread))
23425 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23426 int clock_gettime(clockid_t, struct timespec *)
23427 __attribute__((weak, alias("__vdso_clock_gettime")));
23428
23429 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23430 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23431 {
23432 long ret;
23433 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23434 + asm("syscall" : "=a" (ret) :
23435 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23436 + return ret;
23437 +}
23438 +
23439 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23440 +{
23441 + if (likely(gtod->sysctl_enabled &&
23442 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23443 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23444 + {
23445 if (likely(tv != NULL)) {
23446 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23447 offsetof(struct timespec, tv_nsec) ||
23448 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23449 }
23450 return 0;
23451 }
23452 - asm("syscall" : "=a" (ret) :
23453 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23454 - return ret;
23455 + return __vdso_fallback_gettimeofday(tv, tz);
23456 }
23457 int gettimeofday(struct timeval *, struct timezone *)
23458 __attribute__((weak, alias("__vdso_gettimeofday")));
23459 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23460 --- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23461 +++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23462 @@ -25,6 +25,7 @@
23463 #include <asm/tlbflush.h>
23464 #include <asm/vdso.h>
23465 #include <asm/proto.h>
23466 +#include <asm/mman.h>
23467
23468 enum {
23469 VDSO_DISABLED = 0,
23470 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23471 void enable_sep_cpu(void)
23472 {
23473 int cpu = get_cpu();
23474 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23475 + struct tss_struct *tss = init_tss + cpu;
23476
23477 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23478 put_cpu();
23479 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23480 gate_vma.vm_start = FIXADDR_USER_START;
23481 gate_vma.vm_end = FIXADDR_USER_END;
23482 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23483 - gate_vma.vm_page_prot = __P101;
23484 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23485 /*
23486 * Make sure the vDSO gets into every core dump.
23487 * Dumping its contents makes post-mortem fully interpretable later
23488 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23489 if (compat)
23490 addr = VDSO_HIGH_BASE;
23491 else {
23492 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23493 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23494 if (IS_ERR_VALUE(addr)) {
23495 ret = addr;
23496 goto up_fail;
23497 }
23498 }
23499
23500 - current->mm->context.vdso = (void *)addr;
23501 + current->mm->context.vdso = addr;
23502
23503 if (compat_uses_vma || !compat) {
23504 /*
23505 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23506 }
23507
23508 current_thread_info()->sysenter_return =
23509 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23510 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23511
23512 up_fail:
23513 if (ret)
23514 - current->mm->context.vdso = NULL;
23515 + current->mm->context.vdso = 0;
23516
23517 up_write(&mm->mmap_sem);
23518
23519 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23520
23521 const char *arch_vma_name(struct vm_area_struct *vma)
23522 {
23523 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23524 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23525 return "[vdso]";
23526 +
23527 +#ifdef CONFIG_PAX_SEGMEXEC
23528 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23529 + return "[vdso]";
23530 +#endif
23531 +
23532 return NULL;
23533 }
23534
23535 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23536 struct mm_struct *mm = tsk->mm;
23537
23538 /* Check to see if this task was created in compat vdso mode */
23539 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23540 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23541 return &gate_vma;
23542 return NULL;
23543 }
23544 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23545 --- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23546 +++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23547 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23548 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23549 #include "vextern.h"
23550 #undef VEXTERN
23551 +
23552 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23553 +VEXTERN(fallback_gettimeofday)
23554 +VEXTERN(fallback_time)
23555 +VEXTERN(getcpu)
23556 +#undef VEXTERN
23557 diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23558 --- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23559 +++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23560 @@ -11,6 +11,5 @@
23561 put into vextern.h and be referenced as a pointer with vdso prefix.
23562 The main kernel later fills in the values. */
23563
23564 -VEXTERN(jiffies)
23565 VEXTERN(vgetcpu_mode)
23566 VEXTERN(vsyscall_gtod_data)
23567 diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23568 --- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23569 +++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23570 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23571 if (!vbase)
23572 goto oom;
23573
23574 - if (memcmp(vbase, "\177ELF", 4)) {
23575 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
23576 printk("VDSO: I'm broken; not ELF\n");
23577 vdso_enabled = 0;
23578 }
23579 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23580 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23581 #include "vextern.h"
23582 #undef VEXTERN
23583 + vunmap(vbase);
23584 return 0;
23585
23586 oom:
23587 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23588 goto up_fail;
23589 }
23590
23591 - current->mm->context.vdso = (void *)addr;
23592 + current->mm->context.vdso = addr;
23593
23594 ret = install_special_mapping(mm, addr, vdso_size,
23595 VM_READ|VM_EXEC|
23596 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23597 VM_ALWAYSDUMP,
23598 vdso_pages);
23599 if (ret) {
23600 - current->mm->context.vdso = NULL;
23601 + current->mm->context.vdso = 0;
23602 goto up_fail;
23603 }
23604
23605 @@ -132,10 +133,3 @@ up_fail:
23606 up_write(&mm->mmap_sem);
23607 return ret;
23608 }
23609 -
23610 -static __init int vdso_setup(char *s)
23611 -{
23612 - vdso_enabled = simple_strtoul(s, NULL, 0);
23613 - return 0;
23614 -}
23615 -__setup("vdso=", vdso_setup);
23616 diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23617 --- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23618 +++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23619 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23620
23621 struct shared_info xen_dummy_shared_info;
23622
23623 -void *xen_initial_gdt;
23624 -
23625 /*
23626 * Point at some empty memory to start with. We map the real shared_info
23627 * page as soon as fixmap is up and running.
23628 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23629
23630 preempt_disable();
23631
23632 - start = __get_cpu_var(idt_desc).address;
23633 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23634 end = start + __get_cpu_var(idt_desc).size + 1;
23635
23636 xen_mc_flush();
23637 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23638 #endif
23639 };
23640
23641 -static void xen_reboot(int reason)
23642 +static __noreturn void xen_reboot(int reason)
23643 {
23644 struct sched_shutdown r = { .reason = reason };
23645
23646 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23647 BUG();
23648 }
23649
23650 -static void xen_restart(char *msg)
23651 +static __noreturn void xen_restart(char *msg)
23652 {
23653 xen_reboot(SHUTDOWN_reboot);
23654 }
23655
23656 -static void xen_emergency_restart(void)
23657 +static __noreturn void xen_emergency_restart(void)
23658 {
23659 xen_reboot(SHUTDOWN_reboot);
23660 }
23661
23662 -static void xen_machine_halt(void)
23663 +static __noreturn void xen_machine_halt(void)
23664 {
23665 xen_reboot(SHUTDOWN_poweroff);
23666 }
23667 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23668 */
23669 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23670
23671 -#ifdef CONFIG_X86_64
23672 /* Work out if we support NX */
23673 - check_efer();
23674 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23675 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23676 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23677 + unsigned l, h;
23678 +
23679 +#ifdef CONFIG_X86_PAE
23680 + nx_enabled = 1;
23681 +#endif
23682 + __supported_pte_mask |= _PAGE_NX;
23683 + rdmsr(MSR_EFER, l, h);
23684 + l |= EFER_NX;
23685 + wrmsr(MSR_EFER, l, h);
23686 + }
23687 #endif
23688
23689 xen_setup_features();
23690 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23691
23692 machine_ops = xen_machine_ops;
23693
23694 - /*
23695 - * The only reliable way to retain the initial address of the
23696 - * percpu gdt_page is to remember it here, so we can go and
23697 - * mark it RW later, when the initial percpu area is freed.
23698 - */
23699 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23700 -
23701 xen_smp_init();
23702
23703 pgd = (pgd_t *)xen_start_info->pt_base;
23704 diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23705 --- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23706 +++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23707 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23708 convert_pfn_mfn(init_level4_pgt);
23709 convert_pfn_mfn(level3_ident_pgt);
23710 convert_pfn_mfn(level3_kernel_pgt);
23711 + convert_pfn_mfn(level3_vmalloc_pgt);
23712 + convert_pfn_mfn(level3_vmemmap_pgt);
23713
23714 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23715 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23716 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23717 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23718 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23719 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23720 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23721 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23722 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23723 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23724 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23725 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23726
23727 diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23728 --- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23729 +++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23730 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23731 {
23732 BUG_ON(smp_processor_id() != 0);
23733 native_smp_prepare_boot_cpu();
23734 -
23735 - /* We've switched to the "real" per-cpu gdt, so make sure the
23736 - old memory can be recycled */
23737 - make_lowmem_page_readwrite(xen_initial_gdt);
23738 -
23739 xen_setup_vcpu_info_placement();
23740 }
23741
23742 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23743 gdt = get_cpu_gdt_table(cpu);
23744
23745 ctxt->flags = VGCF_IN_KERNEL;
23746 - ctxt->user_regs.ds = __USER_DS;
23747 - ctxt->user_regs.es = __USER_DS;
23748 + ctxt->user_regs.ds = __KERNEL_DS;
23749 + ctxt->user_regs.es = __KERNEL_DS;
23750 ctxt->user_regs.ss = __KERNEL_DS;
23751 #ifdef CONFIG_X86_32
23752 ctxt->user_regs.fs = __KERNEL_PERCPU;
23753 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23754 + savesegment(gs, ctxt->user_regs.gs);
23755 #else
23756 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23757 #endif
23758 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23759 int rc;
23760
23761 per_cpu(current_task, cpu) = idle;
23762 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23763 #ifdef CONFIG_X86_32
23764 irq_ctx_init(cpu);
23765 #else
23766 clear_tsk_thread_flag(idle, TIF_FORK);
23767 - per_cpu(kernel_stack, cpu) =
23768 - (unsigned long)task_stack_page(idle) -
23769 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23770 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23771 #endif
23772 xen_setup_runstate_info(cpu);
23773 xen_setup_timer(cpu);
23774 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
23775 --- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23776 +++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23777 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23778 ESP_OFFSET=4 # bytes pushed onto stack
23779
23780 /*
23781 - * Store vcpu_info pointer for easy access. Do it this way to
23782 - * avoid having to reload %fs
23783 + * Store vcpu_info pointer for easy access.
23784 */
23785 #ifdef CONFIG_SMP
23786 - GET_THREAD_INFO(%eax)
23787 - movl TI_cpu(%eax), %eax
23788 - movl __per_cpu_offset(,%eax,4), %eax
23789 - mov per_cpu__xen_vcpu(%eax), %eax
23790 + push %fs
23791 + mov $(__KERNEL_PERCPU), %eax
23792 + mov %eax, %fs
23793 + mov PER_CPU_VAR(xen_vcpu), %eax
23794 + pop %fs
23795 #else
23796 movl per_cpu__xen_vcpu, %eax
23797 #endif
23798 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
23799 --- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23800 +++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23801 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23802 #ifdef CONFIG_X86_32
23803 mov %esi,xen_start_info
23804 mov $init_thread_union+THREAD_SIZE,%esp
23805 +#ifdef CONFIG_SMP
23806 + movl $cpu_gdt_table,%edi
23807 + movl $__per_cpu_load,%eax
23808 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23809 + rorl $16,%eax
23810 + movb %al,__KERNEL_PERCPU + 4(%edi)
23811 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23812 + movl $__per_cpu_end - 1,%eax
23813 + subl $__per_cpu_start,%eax
23814 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23815 +#endif
23816 #else
23817 mov %rsi,xen_start_info
23818 mov $init_thread_union+THREAD_SIZE,%rsp
23819 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
23820 --- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23821 +++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23822 @@ -10,8 +10,6 @@
23823 extern const char xen_hypervisor_callback[];
23824 extern const char xen_failsafe_callback[];
23825
23826 -extern void *xen_initial_gdt;
23827 -
23828 struct trap_info;
23829 void xen_copy_trap_info(struct trap_info *traps);
23830
23831 diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
23832 --- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23833 +++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23834 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23835 NULL,
23836 };
23837
23838 -static struct sysfs_ops integrity_ops = {
23839 +static const struct sysfs_ops integrity_ops = {
23840 .show = &integrity_attr_show,
23841 .store = &integrity_attr_store,
23842 };
23843 diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
23844 --- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23845 +++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23846 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23847 }
23848 EXPORT_SYMBOL(blk_iopoll_complete);
23849
23850 -static void blk_iopoll_softirq(struct softirq_action *h)
23851 +static void blk_iopoll_softirq(void)
23852 {
23853 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23854 int rearm = 0, budget = blk_iopoll_budget;
23855 diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
23856 --- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23857 +++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23858 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23859 * direct dma. else, set up kernel bounce buffers
23860 */
23861 uaddr = (unsigned long) ubuf;
23862 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23863 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23864 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23865 else
23866 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23867 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23868 for (i = 0; i < iov_count; i++) {
23869 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23870
23871 + if (!iov[i].iov_len)
23872 + return -EINVAL;
23873 +
23874 if (uaddr & queue_dma_alignment(q)) {
23875 unaligned = 1;
23876 break;
23877 }
23878 - if (!iov[i].iov_len)
23879 - return -EINVAL;
23880 }
23881
23882 if (unaligned || (q->dma_pad_mask & len) || map_data)
23883 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23884 if (!len || !kbuf)
23885 return -EINVAL;
23886
23887 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23888 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23889 if (do_copy)
23890 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23891 else
23892 diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
23893 --- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23894 +++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23895 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23896 * Softirq action handler - move entries to local list and loop over them
23897 * while passing them to the queue registered handler.
23898 */
23899 -static void blk_done_softirq(struct softirq_action *h)
23900 +static void blk_done_softirq(void)
23901 {
23902 struct list_head *cpu_list, local_list;
23903
23904 diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
23905 --- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23906 +++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23907 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23908 kmem_cache_free(blk_requestq_cachep, q);
23909 }
23910
23911 -static struct sysfs_ops queue_sysfs_ops = {
23912 +static const struct sysfs_ops queue_sysfs_ops = {
23913 .show = queue_attr_show,
23914 .store = queue_attr_store,
23915 };
23916 diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
23917 --- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23918 +++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23919 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23920 struct sg_io_v4 *hdr, struct bsg_device *bd,
23921 fmode_t has_write_perm)
23922 {
23923 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23924 + unsigned char *cmdptr;
23925 +
23926 if (hdr->request_len > BLK_MAX_CDB) {
23927 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23928 if (!rq->cmd)
23929 return -ENOMEM;
23930 - }
23931 + cmdptr = rq->cmd;
23932 + } else
23933 + cmdptr = tmpcmd;
23934
23935 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23936 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23937 hdr->request_len))
23938 return -EFAULT;
23939
23940 + if (cmdptr != rq->cmd)
23941 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23942 +
23943 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23944 if (blk_verify_command(rq->cmd, has_write_perm))
23945 return -EPERM;
23946 diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
23947 --- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23948 +++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23949 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23950 return error;
23951 }
23952
23953 -static struct sysfs_ops elv_sysfs_ops = {
23954 +static const struct sysfs_ops elv_sysfs_ops = {
23955 .show = elv_attr_show,
23956 .store = elv_attr_store,
23957 };
23958 diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
23959 --- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23960 +++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23961 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23962 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23963 struct sg_io_hdr *hdr, fmode_t mode)
23964 {
23965 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23966 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23967 + unsigned char *cmdptr;
23968 +
23969 + if (rq->cmd != rq->__cmd)
23970 + cmdptr = rq->cmd;
23971 + else
23972 + cmdptr = tmpcmd;
23973 +
23974 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23975 return -EFAULT;
23976 +
23977 + if (cmdptr != rq->cmd)
23978 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23979 +
23980 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23981 return -EPERM;
23982
23983 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23984 int err;
23985 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23986 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23987 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23988 + unsigned char *cmdptr;
23989
23990 if (!sic)
23991 return -EINVAL;
23992 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23993 */
23994 err = -EFAULT;
23995 rq->cmd_len = cmdlen;
23996 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23997 +
23998 + if (rq->cmd != rq->__cmd)
23999 + cmdptr = rq->cmd;
24000 + else
24001 + cmdptr = tmpcmd;
24002 +
24003 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24004 goto error;
24005
24006 + if (rq->cmd != cmdptr)
24007 + memcpy(rq->cmd, cmdptr, cmdlen);
24008 +
24009 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24010 goto error;
24011
24012 diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24013 --- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24014 +++ linux-2.6.32.45/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
24015 @@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
24016 struct cryptd_queue *queue;
24017
24018 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
24019 - rctx->complete = req->base.complete;
24020 + *(void **)&rctx->complete = req->base.complete;
24021 req->base.complete = complete;
24022
24023 return cryptd_enqueue_request(queue, &req->base);
24024 diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24025 --- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24026 +++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24027 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24028 for (i = 0; i < 7; ++i)
24029 gf128mul_x_lle(&p[i + 1], &p[i]);
24030
24031 - memset(r, 0, sizeof(r));
24032 + memset(r, 0, sizeof(*r));
24033 for (i = 0;;) {
24034 u8 ch = ((u8 *)b)[15 - i];
24035
24036 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24037 for (i = 0; i < 7; ++i)
24038 gf128mul_x_bbe(&p[i + 1], &p[i]);
24039
24040 - memset(r, 0, sizeof(r));
24041 + memset(r, 0, sizeof(*r));
24042 for (i = 0;;) {
24043 u8 ch = ((u8 *)b)[i];
24044
24045 diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24046 --- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24047 +++ linux-2.6.32.45/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24048 @@ -21,6 +21,7 @@
24049 #include <asm/byteorder.h>
24050 #include <linux/crypto.h>
24051 #include <linux/types.h>
24052 +#include <linux/sched.h>
24053
24054 /* Key is padded to the maximum of 256 bits before round key generation.
24055 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24056 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24057 u32 r0,r1,r2,r3,r4;
24058 int i;
24059
24060 + pax_track_stack();
24061 +
24062 /* Copy key, add padding */
24063
24064 for (i = 0; i < keylen; ++i)
24065 diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24066 --- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24067 +++ linux-2.6.32.45/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
24068 @@ -1,13 +1,16 @@
24069 *.a
24070 *.aux
24071 *.bin
24072 +*.cis
24073 *.cpio
24074 *.csp
24075 +*.dbg
24076 *.dsp
24077 *.dvi
24078 *.elf
24079 *.eps
24080 *.fw
24081 +*.gcno
24082 *.gen.S
24083 *.gif
24084 *.grep
24085 @@ -38,8 +41,10 @@
24086 *.tab.h
24087 *.tex
24088 *.ver
24089 +*.vim
24090 *.xml
24091 *_MODULES
24092 +*_reg_safe.h
24093 *_vga16.c
24094 *~
24095 *.9
24096 @@ -49,11 +54,16 @@
24097 53c700_d.h
24098 CVS
24099 ChangeSet
24100 +GPATH
24101 +GRTAGS
24102 +GSYMS
24103 +GTAGS
24104 Image
24105 Kerntypes
24106 Module.markers
24107 Module.symvers
24108 PENDING
24109 +PERF*
24110 SCCS
24111 System.map*
24112 TAGS
24113 @@ -76,7 +86,11 @@ btfixupprep
24114 build
24115 bvmlinux
24116 bzImage*
24117 +capability_names.h
24118 +capflags.c
24119 classlist.h*
24120 +clut_vga16.c
24121 +common-cmds.h
24122 comp*.log
24123 compile.h*
24124 conf
24125 @@ -103,13 +117,14 @@ gen_crc32table
24126 gen_init_cpio
24127 genksyms
24128 *_gray256.c
24129 +hash
24130 ihex2fw
24131 ikconfig.h*
24132 initramfs_data.cpio
24133 +initramfs_data.cpio.bz2
24134 initramfs_data.cpio.gz
24135 initramfs_list
24136 kallsyms
24137 -kconfig
24138 keywords.c
24139 ksym.c*
24140 ksym.h*
24141 @@ -133,7 +148,9 @@ mkboot
24142 mkbugboot
24143 mkcpustr
24144 mkdep
24145 +mkpiggy
24146 mkprep
24147 +mkregtable
24148 mktables
24149 mktree
24150 modpost
24151 @@ -149,6 +166,7 @@ patches*
24152 pca200e.bin
24153 pca200e_ecd.bin2
24154 piggy.gz
24155 +piggy.S
24156 piggyback
24157 pnmtologo
24158 ppc_defs.h*
24159 @@ -157,12 +175,15 @@ qconf
24160 raid6altivec*.c
24161 raid6int*.c
24162 raid6tables.c
24163 +regdb.c
24164 relocs
24165 +rlim_names.h
24166 series
24167 setup
24168 setup.bin
24169 setup.elf
24170 sImage
24171 +slabinfo
24172 sm_tbl*
24173 split-include
24174 syscalltab.h
24175 @@ -186,14 +207,20 @@ version.h*
24176 vmlinux
24177 vmlinux-*
24178 vmlinux.aout
24179 +vmlinux.bin.all
24180 +vmlinux.bin.bz2
24181 vmlinux.lds
24182 +vmlinux.relocs
24183 +voffset.h
24184 vsyscall.lds
24185 vsyscall_32.lds
24186 wanxlfw.inc
24187 uImage
24188 unifdef
24189 +utsrelease.h
24190 wakeup.bin
24191 wakeup.elf
24192 wakeup.lds
24193 zImage*
24194 zconf.hash.c
24195 +zoffset.h
24196 diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24197 --- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24198 +++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24199 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24200 the specified number of seconds. This is to be used if
24201 your oopses keep scrolling off the screen.
24202
24203 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24204 + virtualization environments that don't cope well with the
24205 + expand down segment used by UDEREF on X86-32 or the frequent
24206 + page table updates on X86-64.
24207 +
24208 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24209 +
24210 pcbit= [HW,ISDN]
24211
24212 pcd. [PARIDE]
24213 diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24214 --- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24215 +++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24216 @@ -30,7 +30,7 @@
24217 #include <acpi/acpi_bus.h>
24218 #include <acpi/acpi_drivers.h>
24219
24220 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24221 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24222 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24223 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24224 static DEFINE_MUTEX(isolated_cpus_lock);
24225 diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24226 --- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24227 +++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24228 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24229 }
24230
24231 static struct battery_file {
24232 - struct file_operations ops;
24233 + const struct file_operations ops;
24234 mode_t mode;
24235 const char *name;
24236 } acpi_battery_file[] = {
24237 diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24238 --- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24239 +++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24240 @@ -77,7 +77,7 @@ struct dock_dependent_device {
24241 struct list_head list;
24242 struct list_head hotplug_list;
24243 acpi_handle handle;
24244 - struct acpi_dock_ops *ops;
24245 + const struct acpi_dock_ops *ops;
24246 void *context;
24247 };
24248
24249 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24250 * the dock driver after _DCK is executed.
24251 */
24252 int
24253 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24254 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24255 void *context)
24256 {
24257 struct dock_dependent_device *dd;
24258 diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24259 --- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24260 +++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24261 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24262 void __iomem *virt_addr;
24263
24264 virt_addr = ioremap(phys_addr, width);
24265 + if (!virt_addr)
24266 + return AE_NO_MEMORY;
24267 if (!value)
24268 value = &dummy;
24269
24270 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24271 void __iomem *virt_addr;
24272
24273 virt_addr = ioremap(phys_addr, width);
24274 + if (!virt_addr)
24275 + return AE_NO_MEMORY;
24276
24277 switch (width) {
24278 case 8:
24279 diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24280 --- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24281 +++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24282 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24283 return res;
24284
24285 temp /= 1000;
24286 - if (temp < 0)
24287 - return -EINVAL;
24288
24289 mutex_lock(&resource->lock);
24290 resource->trip[attr->index - 7] = temp;
24291 diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24292 --- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24293 +++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24294 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24295 size_t count, loff_t * ppos)
24296 {
24297 struct list_head *node, *next;
24298 - char strbuf[5];
24299 - char str[5] = "";
24300 - unsigned int len = count;
24301 + char strbuf[5] = {0};
24302 struct acpi_device *found_dev = NULL;
24303
24304 - if (len > 4)
24305 - len = 4;
24306 - if (len < 0)
24307 - return -EFAULT;
24308 + if (count > 4)
24309 + count = 4;
24310
24311 - if (copy_from_user(strbuf, buffer, len))
24312 + if (copy_from_user(strbuf, buffer, count))
24313 return -EFAULT;
24314 - strbuf[len] = '\0';
24315 - sscanf(strbuf, "%s", str);
24316 + strbuf[count] = '\0';
24317
24318 mutex_lock(&acpi_device_lock);
24319 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24320 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24321 if (!dev->wakeup.flags.valid)
24322 continue;
24323
24324 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24325 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24326 dev->wakeup.state.enabled =
24327 dev->wakeup.state.enabled ? 0 : 1;
24328 found_dev = dev;
24329 diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24330 --- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24331 +++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24332 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24333 return 0;
24334 }
24335
24336 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24337 + BUG_ON(pr->id >= nr_cpu_ids);
24338
24339 /*
24340 * Buggy BIOS check
24341 diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24342 --- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24343 +++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24344 @@ -17,7 +17,7 @@
24345
24346 #define PREFIX "ACPI: "
24347
24348 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24349 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24350 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24351
24352 struct acpi_smb_hc {
24353 diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24354 --- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24355 +++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24356 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24357 }
24358 }
24359
24360 -static struct platform_suspend_ops acpi_suspend_ops = {
24361 +static const struct platform_suspend_ops acpi_suspend_ops = {
24362 .valid = acpi_suspend_state_valid,
24363 .begin = acpi_suspend_begin,
24364 .prepare_late = acpi_pm_prepare,
24365 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24366 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24367 * been requested.
24368 */
24369 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24370 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24371 .valid = acpi_suspend_state_valid,
24372 .begin = acpi_suspend_begin_old,
24373 .prepare_late = acpi_pm_disable_gpes,
24374 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24375 acpi_enable_all_runtime_gpes();
24376 }
24377
24378 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24379 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24380 .begin = acpi_hibernation_begin,
24381 .end = acpi_pm_end,
24382 .pre_snapshot = acpi_hibernation_pre_snapshot,
24383 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24384 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24385 * been requested.
24386 */
24387 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24388 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24389 .begin = acpi_hibernation_begin_old,
24390 .end = acpi_pm_end,
24391 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24392 diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24393 --- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24394 +++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24395 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24396 vd->brightness->levels[request_level]);
24397 }
24398
24399 -static struct backlight_ops acpi_backlight_ops = {
24400 +static const struct backlight_ops acpi_backlight_ops = {
24401 .get_brightness = acpi_video_get_brightness,
24402 .update_status = acpi_video_set_brightness,
24403 };
24404 diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24405 --- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24406 +++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24407 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24408 .sdev_attrs = ahci_sdev_attrs,
24409 };
24410
24411 -static struct ata_port_operations ahci_ops = {
24412 +static const struct ata_port_operations ahci_ops = {
24413 .inherits = &sata_pmp_port_ops,
24414
24415 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24416 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24417 .port_stop = ahci_port_stop,
24418 };
24419
24420 -static struct ata_port_operations ahci_vt8251_ops = {
24421 +static const struct ata_port_operations ahci_vt8251_ops = {
24422 .inherits = &ahci_ops,
24423 .hardreset = ahci_vt8251_hardreset,
24424 };
24425
24426 -static struct ata_port_operations ahci_p5wdh_ops = {
24427 +static const struct ata_port_operations ahci_p5wdh_ops = {
24428 .inherits = &ahci_ops,
24429 .hardreset = ahci_p5wdh_hardreset,
24430 };
24431
24432 -static struct ata_port_operations ahci_sb600_ops = {
24433 +static const struct ata_port_operations ahci_sb600_ops = {
24434 .inherits = &ahci_ops,
24435 .softreset = ahci_sb600_softreset,
24436 .pmp_softreset = ahci_sb600_softreset,
24437 diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24438 --- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24439 +++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24440 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24441 ATA_BMDMA_SHT(DRV_NAME),
24442 };
24443
24444 -static struct ata_port_operations generic_port_ops = {
24445 +static const struct ata_port_operations generic_port_ops = {
24446 .inherits = &ata_bmdma_port_ops,
24447 .cable_detect = ata_cable_unknown,
24448 .set_mode = generic_set_mode,
24449 diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24450 --- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24451 +++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24452 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24453 ATA_BMDMA_SHT(DRV_NAME),
24454 };
24455
24456 -static struct ata_port_operations piix_pata_ops = {
24457 +static const struct ata_port_operations piix_pata_ops = {
24458 .inherits = &ata_bmdma32_port_ops,
24459 .cable_detect = ata_cable_40wire,
24460 .set_piomode = piix_set_piomode,
24461 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24462 .prereset = piix_pata_prereset,
24463 };
24464
24465 -static struct ata_port_operations piix_vmw_ops = {
24466 +static const struct ata_port_operations piix_vmw_ops = {
24467 .inherits = &piix_pata_ops,
24468 .bmdma_status = piix_vmw_bmdma_status,
24469 };
24470
24471 -static struct ata_port_operations ich_pata_ops = {
24472 +static const struct ata_port_operations ich_pata_ops = {
24473 .inherits = &piix_pata_ops,
24474 .cable_detect = ich_pata_cable_detect,
24475 .set_dmamode = ich_set_dmamode,
24476 };
24477
24478 -static struct ata_port_operations piix_sata_ops = {
24479 +static const struct ata_port_operations piix_sata_ops = {
24480 .inherits = &ata_bmdma_port_ops,
24481 };
24482
24483 -static struct ata_port_operations piix_sidpr_sata_ops = {
24484 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24485 .inherits = &piix_sata_ops,
24486 .hardreset = sata_std_hardreset,
24487 .scr_read = piix_sidpr_scr_read,
24488 diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24489 --- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24490 +++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24491 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24492 ata_acpi_uevent(dev->link->ap, dev, event);
24493 }
24494
24495 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24496 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24497 .handler = ata_acpi_dev_notify_dock,
24498 .uevent = ata_acpi_dev_uevent,
24499 };
24500
24501 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24502 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24503 .handler = ata_acpi_ap_notify_dock,
24504 .uevent = ata_acpi_ap_uevent,
24505 };
24506 diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24507 --- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24508 +++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24509 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24510 struct ata_port *ap;
24511 unsigned int tag;
24512
24513 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24514 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24515 ap = qc->ap;
24516
24517 qc->flags = 0;
24518 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24519 struct ata_port *ap;
24520 struct ata_link *link;
24521
24522 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24523 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24524 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24525 ap = qc->ap;
24526 link = qc->dev->link;
24527 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24528 * LOCKING:
24529 * None.
24530 */
24531 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24532 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24533 {
24534 static DEFINE_SPINLOCK(lock);
24535 const struct ata_port_operations *cur;
24536 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24537 return;
24538
24539 spin_lock(&lock);
24540 + pax_open_kernel();
24541
24542 for (cur = ops->inherits; cur; cur = cur->inherits) {
24543 void **inherit = (void **)cur;
24544 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24545 if (IS_ERR(*pp))
24546 *pp = NULL;
24547
24548 - ops->inherits = NULL;
24549 + *(struct ata_port_operations **)&ops->inherits = NULL;
24550
24551 + pax_close_kernel();
24552 spin_unlock(&lock);
24553 }
24554
24555 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24556 */
24557 /* KILLME - the only user left is ipr */
24558 void ata_host_init(struct ata_host *host, struct device *dev,
24559 - unsigned long flags, struct ata_port_operations *ops)
24560 + unsigned long flags, const struct ata_port_operations *ops)
24561 {
24562 spin_lock_init(&host->lock);
24563 host->dev = dev;
24564 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24565 /* truly dummy */
24566 }
24567
24568 -struct ata_port_operations ata_dummy_port_ops = {
24569 +const struct ata_port_operations ata_dummy_port_ops = {
24570 .qc_prep = ata_noop_qc_prep,
24571 .qc_issue = ata_dummy_qc_issue,
24572 .error_handler = ata_dummy_error_handler,
24573 diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24574 --- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24575 +++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24576 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24577 {
24578 struct ata_link *link;
24579
24580 + pax_track_stack();
24581 +
24582 ata_for_each_link(link, ap, HOST_FIRST)
24583 ata_eh_link_report(link);
24584 }
24585 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24586 */
24587 void ata_std_error_handler(struct ata_port *ap)
24588 {
24589 - struct ata_port_operations *ops = ap->ops;
24590 + const struct ata_port_operations *ops = ap->ops;
24591 ata_reset_fn_t hardreset = ops->hardreset;
24592
24593 /* ignore built-in hardreset if SCR access is not available */
24594 diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24595 --- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24596 +++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24597 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24598 */
24599 static int sata_pmp_eh_recover(struct ata_port *ap)
24600 {
24601 - struct ata_port_operations *ops = ap->ops;
24602 + const struct ata_port_operations *ops = ap->ops;
24603 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24604 struct ata_link *pmp_link = &ap->link;
24605 struct ata_device *pmp_dev = pmp_link->device;
24606 diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24607 --- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24608 +++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24609 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24610 ATA_BMDMA_SHT(DRV_NAME),
24611 };
24612
24613 -static struct ata_port_operations pacpi_ops = {
24614 +static const struct ata_port_operations pacpi_ops = {
24615 .inherits = &ata_bmdma_port_ops,
24616 .qc_issue = pacpi_qc_issue,
24617 .cable_detect = pacpi_cable_detect,
24618 diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24619 --- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24620 +++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24621 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24622 * Port operations for PIO only ALi
24623 */
24624
24625 -static struct ata_port_operations ali_early_port_ops = {
24626 +static const struct ata_port_operations ali_early_port_ops = {
24627 .inherits = &ata_sff_port_ops,
24628 .cable_detect = ata_cable_40wire,
24629 .set_piomode = ali_set_piomode,
24630 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24631 * Port operations for DMA capable ALi without cable
24632 * detect
24633 */
24634 -static struct ata_port_operations ali_20_port_ops = {
24635 +static const struct ata_port_operations ali_20_port_ops = {
24636 .inherits = &ali_dma_base_ops,
24637 .cable_detect = ata_cable_40wire,
24638 .mode_filter = ali_20_filter,
24639 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24640 /*
24641 * Port operations for DMA capable ALi with cable detect
24642 */
24643 -static struct ata_port_operations ali_c2_port_ops = {
24644 +static const struct ata_port_operations ali_c2_port_ops = {
24645 .inherits = &ali_dma_base_ops,
24646 .check_atapi_dma = ali_check_atapi_dma,
24647 .cable_detect = ali_c2_cable_detect,
24648 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24649 /*
24650 * Port operations for DMA capable ALi with cable detect
24651 */
24652 -static struct ata_port_operations ali_c4_port_ops = {
24653 +static const struct ata_port_operations ali_c4_port_ops = {
24654 .inherits = &ali_dma_base_ops,
24655 .check_atapi_dma = ali_check_atapi_dma,
24656 .cable_detect = ali_c2_cable_detect,
24657 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24658 /*
24659 * Port operations for DMA capable ALi with cable detect and LBA48
24660 */
24661 -static struct ata_port_operations ali_c5_port_ops = {
24662 +static const struct ata_port_operations ali_c5_port_ops = {
24663 .inherits = &ali_dma_base_ops,
24664 .check_atapi_dma = ali_check_atapi_dma,
24665 .dev_config = ali_warn_atapi_dma,
24666 diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24667 --- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24668 +++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24669 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24670 .prereset = amd_pre_reset,
24671 };
24672
24673 -static struct ata_port_operations amd33_port_ops = {
24674 +static const struct ata_port_operations amd33_port_ops = {
24675 .inherits = &amd_base_port_ops,
24676 .cable_detect = ata_cable_40wire,
24677 .set_piomode = amd33_set_piomode,
24678 .set_dmamode = amd33_set_dmamode,
24679 };
24680
24681 -static struct ata_port_operations amd66_port_ops = {
24682 +static const struct ata_port_operations amd66_port_ops = {
24683 .inherits = &amd_base_port_ops,
24684 .cable_detect = ata_cable_unknown,
24685 .set_piomode = amd66_set_piomode,
24686 .set_dmamode = amd66_set_dmamode,
24687 };
24688
24689 -static struct ata_port_operations amd100_port_ops = {
24690 +static const struct ata_port_operations amd100_port_ops = {
24691 .inherits = &amd_base_port_ops,
24692 .cable_detect = ata_cable_unknown,
24693 .set_piomode = amd100_set_piomode,
24694 .set_dmamode = amd100_set_dmamode,
24695 };
24696
24697 -static struct ata_port_operations amd133_port_ops = {
24698 +static const struct ata_port_operations amd133_port_ops = {
24699 .inherits = &amd_base_port_ops,
24700 .cable_detect = amd_cable_detect,
24701 .set_piomode = amd133_set_piomode,
24702 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24703 .host_stop = nv_host_stop,
24704 };
24705
24706 -static struct ata_port_operations nv100_port_ops = {
24707 +static const struct ata_port_operations nv100_port_ops = {
24708 .inherits = &nv_base_port_ops,
24709 .set_piomode = nv100_set_piomode,
24710 .set_dmamode = nv100_set_dmamode,
24711 };
24712
24713 -static struct ata_port_operations nv133_port_ops = {
24714 +static const struct ata_port_operations nv133_port_ops = {
24715 .inherits = &nv_base_port_ops,
24716 .set_piomode = nv133_set_piomode,
24717 .set_dmamode = nv133_set_dmamode,
24718 diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24719 --- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24720 +++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24721 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24722 ATA_BMDMA_SHT(DRV_NAME),
24723 };
24724
24725 -static struct ata_port_operations artop6210_ops = {
24726 +static const struct ata_port_operations artop6210_ops = {
24727 .inherits = &ata_bmdma_port_ops,
24728 .cable_detect = ata_cable_40wire,
24729 .set_piomode = artop6210_set_piomode,
24730 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24731 .qc_defer = artop6210_qc_defer,
24732 };
24733
24734 -static struct ata_port_operations artop6260_ops = {
24735 +static const struct ata_port_operations artop6260_ops = {
24736 .inherits = &ata_bmdma_port_ops,
24737 .cable_detect = artop6260_cable_detect,
24738 .set_piomode = artop6260_set_piomode,
24739 diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24740 --- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24741 +++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24742 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24743 ATA_PIO_SHT(DRV_NAME),
24744 };
24745
24746 -static struct ata_port_operations at32_port_ops = {
24747 +static const struct ata_port_operations at32_port_ops = {
24748 .inherits = &ata_sff_port_ops,
24749 .cable_detect = ata_cable_40wire,
24750 .set_piomode = pata_at32_set_piomode,
24751 diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24752 --- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24753 +++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24754 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24755 ATA_PIO_SHT(DRV_NAME),
24756 };
24757
24758 -static struct ata_port_operations pata_at91_port_ops = {
24759 +static const struct ata_port_operations pata_at91_port_ops = {
24760 .inherits = &ata_sff_port_ops,
24761
24762 .sff_data_xfer = pata_at91_data_xfer_noirq,
24763 diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
24764 --- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24765 +++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24766 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24767 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24768 };
24769
24770 -static struct ata_port_operations atiixp_port_ops = {
24771 +static const struct ata_port_operations atiixp_port_ops = {
24772 .inherits = &ata_bmdma_port_ops,
24773
24774 .qc_prep = ata_sff_dumb_qc_prep,
24775 diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
24776 --- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24777 +++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24778 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24779 ATA_BMDMA_SHT(DRV_NAME),
24780 };
24781
24782 -static struct ata_port_operations atp867x_ops = {
24783 +static const struct ata_port_operations atp867x_ops = {
24784 .inherits = &ata_bmdma_port_ops,
24785 .cable_detect = atp867x_cable_detect,
24786 .set_piomode = atp867x_set_piomode,
24787 diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
24788 --- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24789 +++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24790 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24791 .dma_boundary = ATA_DMA_BOUNDARY,
24792 };
24793
24794 -static struct ata_port_operations bfin_pata_ops = {
24795 +static const struct ata_port_operations bfin_pata_ops = {
24796 .inherits = &ata_sff_port_ops,
24797
24798 .set_piomode = bfin_set_piomode,
24799 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
24800 --- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24801 +++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24802 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24803 ATA_BMDMA_SHT(DRV_NAME),
24804 };
24805
24806 -static struct ata_port_operations cmd640_port_ops = {
24807 +static const struct ata_port_operations cmd640_port_ops = {
24808 .inherits = &ata_bmdma_port_ops,
24809 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24810 .sff_data_xfer = ata_sff_data_xfer_noirq,
24811 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
24812 --- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24813 +++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24814 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24815 .set_dmamode = cmd64x_set_dmamode,
24816 };
24817
24818 -static struct ata_port_operations cmd64x_port_ops = {
24819 +static const struct ata_port_operations cmd64x_port_ops = {
24820 .inherits = &cmd64x_base_ops,
24821 .cable_detect = ata_cable_40wire,
24822 };
24823
24824 -static struct ata_port_operations cmd646r1_port_ops = {
24825 +static const struct ata_port_operations cmd646r1_port_ops = {
24826 .inherits = &cmd64x_base_ops,
24827 .bmdma_stop = cmd646r1_bmdma_stop,
24828 .cable_detect = ata_cable_40wire,
24829 };
24830
24831 -static struct ata_port_operations cmd648_port_ops = {
24832 +static const struct ata_port_operations cmd648_port_ops = {
24833 .inherits = &cmd64x_base_ops,
24834 .bmdma_stop = cmd648_bmdma_stop,
24835 .cable_detect = cmd648_cable_detect,
24836 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
24837 --- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24838 +++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24839 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24840 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24841 };
24842
24843 -static struct ata_port_operations cs5520_port_ops = {
24844 +static const struct ata_port_operations cs5520_port_ops = {
24845 .inherits = &ata_bmdma_port_ops,
24846 .qc_prep = ata_sff_dumb_qc_prep,
24847 .cable_detect = ata_cable_40wire,
24848 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
24849 --- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24850 +++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24851 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24852 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24853 };
24854
24855 -static struct ata_port_operations cs5530_port_ops = {
24856 +static const struct ata_port_operations cs5530_port_ops = {
24857 .inherits = &ata_bmdma_port_ops,
24858
24859 .qc_prep = ata_sff_dumb_qc_prep,
24860 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
24861 --- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24862 +++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24863 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24864 ATA_BMDMA_SHT(DRV_NAME),
24865 };
24866
24867 -static struct ata_port_operations cs5535_port_ops = {
24868 +static const struct ata_port_operations cs5535_port_ops = {
24869 .inherits = &ata_bmdma_port_ops,
24870 .cable_detect = cs5535_cable_detect,
24871 .set_piomode = cs5535_set_piomode,
24872 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
24873 --- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24874 +++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24875 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24876 ATA_BMDMA_SHT(DRV_NAME),
24877 };
24878
24879 -static struct ata_port_operations cs5536_port_ops = {
24880 +static const struct ata_port_operations cs5536_port_ops = {
24881 .inherits = &ata_bmdma_port_ops,
24882 .cable_detect = cs5536_cable_detect,
24883 .set_piomode = cs5536_set_piomode,
24884 diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
24885 --- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24886 +++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24887 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24888 ATA_BMDMA_SHT(DRV_NAME),
24889 };
24890
24891 -static struct ata_port_operations cy82c693_port_ops = {
24892 +static const struct ata_port_operations cy82c693_port_ops = {
24893 .inherits = &ata_bmdma_port_ops,
24894 .cable_detect = ata_cable_40wire,
24895 .set_piomode = cy82c693_set_piomode,
24896 diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
24897 --- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24898 +++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24899 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24900 ATA_BMDMA_SHT(DRV_NAME),
24901 };
24902
24903 -static struct ata_port_operations efar_ops = {
24904 +static const struct ata_port_operations efar_ops = {
24905 .inherits = &ata_bmdma_port_ops,
24906 .cable_detect = efar_cable_detect,
24907 .set_piomode = efar_set_piomode,
24908 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
24909 --- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24910 +++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24911 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24912 * Configuration for HPT366/68
24913 */
24914
24915 -static struct ata_port_operations hpt366_port_ops = {
24916 +static const struct ata_port_operations hpt366_port_ops = {
24917 .inherits = &ata_bmdma_port_ops,
24918 .cable_detect = hpt36x_cable_detect,
24919 .mode_filter = hpt366_filter,
24920 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
24921 --- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24922 +++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24923 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24924 * Configuration for HPT370
24925 */
24926
24927 -static struct ata_port_operations hpt370_port_ops = {
24928 +static const struct ata_port_operations hpt370_port_ops = {
24929 .inherits = &ata_bmdma_port_ops,
24930
24931 .bmdma_stop = hpt370_bmdma_stop,
24932 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24933 * Configuration for HPT370A. Close to 370 but less filters
24934 */
24935
24936 -static struct ata_port_operations hpt370a_port_ops = {
24937 +static const struct ata_port_operations hpt370a_port_ops = {
24938 .inherits = &hpt370_port_ops,
24939 .mode_filter = hpt370a_filter,
24940 };
24941 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24942 * and DMA mode setting functionality.
24943 */
24944
24945 -static struct ata_port_operations hpt372_port_ops = {
24946 +static const struct ata_port_operations hpt372_port_ops = {
24947 .inherits = &ata_bmdma_port_ops,
24948
24949 .bmdma_stop = hpt37x_bmdma_stop,
24950 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24951 * but we have a different cable detection procedure for function 1.
24952 */
24953
24954 -static struct ata_port_operations hpt374_fn1_port_ops = {
24955 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24956 .inherits = &hpt372_port_ops,
24957 .prereset = hpt374_fn1_pre_reset,
24958 };
24959 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
24960 --- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24961 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24962 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24963 * Configuration for HPT3x2n.
24964 */
24965
24966 -static struct ata_port_operations hpt3x2n_port_ops = {
24967 +static const struct ata_port_operations hpt3x2n_port_ops = {
24968 .inherits = &ata_bmdma_port_ops,
24969
24970 .bmdma_stop = hpt3x2n_bmdma_stop,
24971 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
24972 --- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24973 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24974 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24975 ATA_BMDMA_SHT(DRV_NAME),
24976 };
24977
24978 -static struct ata_port_operations hpt3x3_port_ops = {
24979 +static const struct ata_port_operations hpt3x3_port_ops = {
24980 .inherits = &ata_bmdma_port_ops,
24981 .cable_detect = ata_cable_40wire,
24982 .set_piomode = hpt3x3_set_piomode,
24983 diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
24984 --- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24985 +++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24986 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24987 }
24988 }
24989
24990 -static struct ata_port_operations pata_icside_port_ops = {
24991 +static const struct ata_port_operations pata_icside_port_ops = {
24992 .inherits = &ata_sff_port_ops,
24993 /* no need to build any PRD tables for DMA */
24994 .qc_prep = ata_noop_qc_prep,
24995 diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
24996 --- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24997 +++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24998 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24999 ATA_PIO_SHT(DRV_NAME),
25000 };
25001
25002 -static struct ata_port_operations isapnp_port_ops = {
25003 +static const struct ata_port_operations isapnp_port_ops = {
25004 .inherits = &ata_sff_port_ops,
25005 .cable_detect = ata_cable_40wire,
25006 };
25007
25008 -static struct ata_port_operations isapnp_noalt_port_ops = {
25009 +static const struct ata_port_operations isapnp_noalt_port_ops = {
25010 .inherits = &ata_sff_port_ops,
25011 .cable_detect = ata_cable_40wire,
25012 /* No altstatus so we don't want to use the lost interrupt poll */
25013 diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25014 --- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25015 +++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25016 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25017 };
25018
25019
25020 -static struct ata_port_operations it8213_ops = {
25021 +static const struct ata_port_operations it8213_ops = {
25022 .inherits = &ata_bmdma_port_ops,
25023 .cable_detect = it8213_cable_detect,
25024 .set_piomode = it8213_set_piomode,
25025 diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25026 --- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25027 +++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25028 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25029 ATA_BMDMA_SHT(DRV_NAME),
25030 };
25031
25032 -static struct ata_port_operations it821x_smart_port_ops = {
25033 +static const struct ata_port_operations it821x_smart_port_ops = {
25034 .inherits = &ata_bmdma_port_ops,
25035
25036 .check_atapi_dma= it821x_check_atapi_dma,
25037 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25038 .port_start = it821x_port_start,
25039 };
25040
25041 -static struct ata_port_operations it821x_passthru_port_ops = {
25042 +static const struct ata_port_operations it821x_passthru_port_ops = {
25043 .inherits = &ata_bmdma_port_ops,
25044
25045 .check_atapi_dma= it821x_check_atapi_dma,
25046 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25047 .port_start = it821x_port_start,
25048 };
25049
25050 -static struct ata_port_operations it821x_rdc_port_ops = {
25051 +static const struct ata_port_operations it821x_rdc_port_ops = {
25052 .inherits = &ata_bmdma_port_ops,
25053
25054 .check_atapi_dma= it821x_check_atapi_dma,
25055 diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25056 --- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25057 +++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25058 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25059 ATA_PIO_SHT(DRV_NAME),
25060 };
25061
25062 -static struct ata_port_operations ixp4xx_port_ops = {
25063 +static const struct ata_port_operations ixp4xx_port_ops = {
25064 .inherits = &ata_sff_port_ops,
25065 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25066 .cable_detect = ata_cable_40wire,
25067 diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25068 --- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25069 +++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25070 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25071 ATA_BMDMA_SHT(DRV_NAME),
25072 };
25073
25074 -static struct ata_port_operations jmicron_ops = {
25075 +static const struct ata_port_operations jmicron_ops = {
25076 .inherits = &ata_bmdma_port_ops,
25077 .prereset = jmicron_pre_reset,
25078 };
25079 diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25080 --- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25081 +++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25082 @@ -106,7 +106,7 @@ struct legacy_probe {
25083
25084 struct legacy_controller {
25085 const char *name;
25086 - struct ata_port_operations *ops;
25087 + const struct ata_port_operations *ops;
25088 unsigned int pio_mask;
25089 unsigned int flags;
25090 unsigned int pflags;
25091 @@ -223,12 +223,12 @@ static const struct ata_port_operations
25092 * pio_mask as well.
25093 */
25094
25095 -static struct ata_port_operations simple_port_ops = {
25096 +static const struct ata_port_operations simple_port_ops = {
25097 .inherits = &legacy_base_port_ops,
25098 .sff_data_xfer = ata_sff_data_xfer_noirq,
25099 };
25100
25101 -static struct ata_port_operations legacy_port_ops = {
25102 +static const struct ata_port_operations legacy_port_ops = {
25103 .inherits = &legacy_base_port_ops,
25104 .sff_data_xfer = ata_sff_data_xfer_noirq,
25105 .set_mode = legacy_set_mode,
25106 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25107 return buflen;
25108 }
25109
25110 -static struct ata_port_operations pdc20230_port_ops = {
25111 +static const struct ata_port_operations pdc20230_port_ops = {
25112 .inherits = &legacy_base_port_ops,
25113 .set_piomode = pdc20230_set_piomode,
25114 .sff_data_xfer = pdc_data_xfer_vlb,
25115 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25116 ioread8(ap->ioaddr.status_addr);
25117 }
25118
25119 -static struct ata_port_operations ht6560a_port_ops = {
25120 +static const struct ata_port_operations ht6560a_port_ops = {
25121 .inherits = &legacy_base_port_ops,
25122 .set_piomode = ht6560a_set_piomode,
25123 };
25124 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25125 ioread8(ap->ioaddr.status_addr);
25126 }
25127
25128 -static struct ata_port_operations ht6560b_port_ops = {
25129 +static const struct ata_port_operations ht6560b_port_ops = {
25130 .inherits = &legacy_base_port_ops,
25131 .set_piomode = ht6560b_set_piomode,
25132 };
25133 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25134 }
25135
25136
25137 -static struct ata_port_operations opti82c611a_port_ops = {
25138 +static const struct ata_port_operations opti82c611a_port_ops = {
25139 .inherits = &legacy_base_port_ops,
25140 .set_piomode = opti82c611a_set_piomode,
25141 };
25142 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25143 return ata_sff_qc_issue(qc);
25144 }
25145
25146 -static struct ata_port_operations opti82c46x_port_ops = {
25147 +static const struct ata_port_operations opti82c46x_port_ops = {
25148 .inherits = &legacy_base_port_ops,
25149 .set_piomode = opti82c46x_set_piomode,
25150 .qc_issue = opti82c46x_qc_issue,
25151 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25152 return 0;
25153 }
25154
25155 -static struct ata_port_operations qdi6500_port_ops = {
25156 +static const struct ata_port_operations qdi6500_port_ops = {
25157 .inherits = &legacy_base_port_ops,
25158 .set_piomode = qdi6500_set_piomode,
25159 .qc_issue = qdi_qc_issue,
25160 .sff_data_xfer = vlb32_data_xfer,
25161 };
25162
25163 -static struct ata_port_operations qdi6580_port_ops = {
25164 +static const struct ata_port_operations qdi6580_port_ops = {
25165 .inherits = &legacy_base_port_ops,
25166 .set_piomode = qdi6580_set_piomode,
25167 .sff_data_xfer = vlb32_data_xfer,
25168 };
25169
25170 -static struct ata_port_operations qdi6580dp_port_ops = {
25171 +static const struct ata_port_operations qdi6580dp_port_ops = {
25172 .inherits = &legacy_base_port_ops,
25173 .set_piomode = qdi6580dp_set_piomode,
25174 .sff_data_xfer = vlb32_data_xfer,
25175 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25176 return 0;
25177 }
25178
25179 -static struct ata_port_operations winbond_port_ops = {
25180 +static const struct ata_port_operations winbond_port_ops = {
25181 .inherits = &legacy_base_port_ops,
25182 .set_piomode = winbond_set_piomode,
25183 .sff_data_xfer = vlb32_data_xfer,
25184 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25185 int pio_modes = controller->pio_mask;
25186 unsigned long io = probe->port;
25187 u32 mask = (1 << probe->slot);
25188 - struct ata_port_operations *ops = controller->ops;
25189 + const struct ata_port_operations *ops = controller->ops;
25190 struct legacy_data *ld = &legacy_data[probe->slot];
25191 struct ata_host *host = NULL;
25192 struct ata_port *ap;
25193 diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25194 --- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25195 +++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25196 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25197 ATA_BMDMA_SHT(DRV_NAME),
25198 };
25199
25200 -static struct ata_port_operations marvell_ops = {
25201 +static const struct ata_port_operations marvell_ops = {
25202 .inherits = &ata_bmdma_port_ops,
25203 .cable_detect = marvell_cable_detect,
25204 .prereset = marvell_pre_reset,
25205 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25206 --- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25207 +++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25208 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25209 ATA_PIO_SHT(DRV_NAME),
25210 };
25211
25212 -static struct ata_port_operations mpc52xx_ata_port_ops = {
25213 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
25214 .inherits = &ata_bmdma_port_ops,
25215 .sff_dev_select = mpc52xx_ata_dev_select,
25216 .set_piomode = mpc52xx_ata_set_piomode,
25217 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25218 --- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25219 +++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25220 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25221 ATA_PIO_SHT(DRV_NAME),
25222 };
25223
25224 -static struct ata_port_operations mpiix_port_ops = {
25225 +static const struct ata_port_operations mpiix_port_ops = {
25226 .inherits = &ata_sff_port_ops,
25227 .qc_issue = mpiix_qc_issue,
25228 .cable_detect = ata_cable_40wire,
25229 diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25230 --- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25231 +++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25232 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25233 ATA_BMDMA_SHT(DRV_NAME),
25234 };
25235
25236 -static struct ata_port_operations netcell_ops = {
25237 +static const struct ata_port_operations netcell_ops = {
25238 .inherits = &ata_bmdma_port_ops,
25239 .cable_detect = ata_cable_80wire,
25240 .read_id = netcell_read_id,
25241 diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25242 --- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25243 +++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25244 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25245 ATA_BMDMA_SHT(DRV_NAME),
25246 };
25247
25248 -static struct ata_port_operations ninja32_port_ops = {
25249 +static const struct ata_port_operations ninja32_port_ops = {
25250 .inherits = &ata_bmdma_port_ops,
25251 .sff_dev_select = ninja32_dev_select,
25252 .cable_detect = ata_cable_40wire,
25253 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25254 --- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25255 +++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25256 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25257 ATA_PIO_SHT(DRV_NAME),
25258 };
25259
25260 -static struct ata_port_operations ns87410_port_ops = {
25261 +static const struct ata_port_operations ns87410_port_ops = {
25262 .inherits = &ata_sff_port_ops,
25263 .qc_issue = ns87410_qc_issue,
25264 .cable_detect = ata_cable_40wire,
25265 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25266 --- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25267 +++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25268 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25269 }
25270 #endif /* 87560 SuperIO Support */
25271
25272 -static struct ata_port_operations ns87415_pata_ops = {
25273 +static const struct ata_port_operations ns87415_pata_ops = {
25274 .inherits = &ata_bmdma_port_ops,
25275
25276 .check_atapi_dma = ns87415_check_atapi_dma,
25277 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25278 };
25279
25280 #if defined(CONFIG_SUPERIO)
25281 -static struct ata_port_operations ns87560_pata_ops = {
25282 +static const struct ata_port_operations ns87560_pata_ops = {
25283 .inherits = &ns87415_pata_ops,
25284 .sff_tf_read = ns87560_tf_read,
25285 .sff_check_status = ns87560_check_status,
25286 diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25287 --- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25288 +++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25289 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25290 return 0;
25291 }
25292
25293 +/* cannot be const */
25294 static struct ata_port_operations octeon_cf_ops = {
25295 .inherits = &ata_sff_port_ops,
25296 .check_atapi_dma = octeon_cf_check_atapi_dma,
25297 diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25298 --- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25299 +++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25300 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25301 ATA_BMDMA_SHT(DRV_NAME),
25302 };
25303
25304 -static struct ata_port_operations oldpiix_pata_ops = {
25305 +static const struct ata_port_operations oldpiix_pata_ops = {
25306 .inherits = &ata_bmdma_port_ops,
25307 .qc_issue = oldpiix_qc_issue,
25308 .cable_detect = ata_cable_40wire,
25309 diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25310 --- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25311 +++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25312 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25313 ATA_PIO_SHT(DRV_NAME),
25314 };
25315
25316 -static struct ata_port_operations opti_port_ops = {
25317 +static const struct ata_port_operations opti_port_ops = {
25318 .inherits = &ata_sff_port_ops,
25319 .cable_detect = ata_cable_40wire,
25320 .set_piomode = opti_set_piomode,
25321 diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25322 --- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25323 +++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25324 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25325 ATA_BMDMA_SHT(DRV_NAME),
25326 };
25327
25328 -static struct ata_port_operations optidma_port_ops = {
25329 +static const struct ata_port_operations optidma_port_ops = {
25330 .inherits = &ata_bmdma_port_ops,
25331 .cable_detect = ata_cable_40wire,
25332 .set_piomode = optidma_set_pio_mode,
25333 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25334 .prereset = optidma_pre_reset,
25335 };
25336
25337 -static struct ata_port_operations optiplus_port_ops = {
25338 +static const struct ata_port_operations optiplus_port_ops = {
25339 .inherits = &optidma_port_ops,
25340 .set_piomode = optiplus_set_pio_mode,
25341 .set_dmamode = optiplus_set_dma_mode,
25342 diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25343 --- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25344 +++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25345 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25346 ATA_PIO_SHT(DRV_NAME),
25347 };
25348
25349 -static struct ata_port_operations palmld_port_ops = {
25350 +static const struct ata_port_operations palmld_port_ops = {
25351 .inherits = &ata_sff_port_ops,
25352 .sff_data_xfer = ata_sff_data_xfer_noirq,
25353 .cable_detect = ata_cable_40wire,
25354 diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25355 --- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25356 +++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25357 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25358 ATA_PIO_SHT(DRV_NAME),
25359 };
25360
25361 -static struct ata_port_operations pcmcia_port_ops = {
25362 +static const struct ata_port_operations pcmcia_port_ops = {
25363 .inherits = &ata_sff_port_ops,
25364 .sff_data_xfer = ata_sff_data_xfer_noirq,
25365 .cable_detect = ata_cable_40wire,
25366 .set_mode = pcmcia_set_mode,
25367 };
25368
25369 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25370 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25371 .inherits = &ata_sff_port_ops,
25372 .sff_data_xfer = ata_data_xfer_8bit,
25373 .cable_detect = ata_cable_40wire,
25374 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25375 unsigned long io_base, ctl_base;
25376 void __iomem *io_addr, *ctl_addr;
25377 int n_ports = 1;
25378 - struct ata_port_operations *ops = &pcmcia_port_ops;
25379 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25380
25381 info = kzalloc(sizeof(*info), GFP_KERNEL);
25382 if (info == NULL)
25383 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25384 --- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25385 +++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25386 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25387 ATA_BMDMA_SHT(DRV_NAME),
25388 };
25389
25390 -static struct ata_port_operations pdc2027x_pata100_ops = {
25391 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25392 .inherits = &ata_bmdma_port_ops,
25393 .check_atapi_dma = pdc2027x_check_atapi_dma,
25394 .cable_detect = pdc2027x_cable_detect,
25395 .prereset = pdc2027x_prereset,
25396 };
25397
25398 -static struct ata_port_operations pdc2027x_pata133_ops = {
25399 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25400 .inherits = &pdc2027x_pata100_ops,
25401 .mode_filter = pdc2027x_mode_filter,
25402 .set_piomode = pdc2027x_set_piomode,
25403 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25404 --- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25405 +++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25406 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25407 ATA_BMDMA_SHT(DRV_NAME),
25408 };
25409
25410 -static struct ata_port_operations pdc2024x_port_ops = {
25411 +static const struct ata_port_operations pdc2024x_port_ops = {
25412 .inherits = &ata_bmdma_port_ops,
25413
25414 .cable_detect = ata_cable_40wire,
25415 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25416 .sff_exec_command = pdc202xx_exec_command,
25417 };
25418
25419 -static struct ata_port_operations pdc2026x_port_ops = {
25420 +static const struct ata_port_operations pdc2026x_port_ops = {
25421 .inherits = &pdc2024x_port_ops,
25422
25423 .check_atapi_dma = pdc2026x_check_atapi_dma,
25424 diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25425 --- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25426 +++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25427 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25428 ATA_PIO_SHT(DRV_NAME),
25429 };
25430
25431 -static struct ata_port_operations pata_platform_port_ops = {
25432 +static const struct ata_port_operations pata_platform_port_ops = {
25433 .inherits = &ata_sff_port_ops,
25434 .sff_data_xfer = ata_sff_data_xfer_noirq,
25435 .cable_detect = ata_cable_unknown,
25436 diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25437 --- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25438 +++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25439 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25440 ATA_PIO_SHT(DRV_NAME),
25441 };
25442
25443 -static struct ata_port_operations qdi6500_port_ops = {
25444 +static const struct ata_port_operations qdi6500_port_ops = {
25445 .inherits = &ata_sff_port_ops,
25446 .qc_issue = qdi_qc_issue,
25447 .sff_data_xfer = qdi_data_xfer,
25448 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25449 .set_piomode = qdi6500_set_piomode,
25450 };
25451
25452 -static struct ata_port_operations qdi6580_port_ops = {
25453 +static const struct ata_port_operations qdi6580_port_ops = {
25454 .inherits = &qdi6500_port_ops,
25455 .set_piomode = qdi6580_set_piomode,
25456 };
25457 diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25458 --- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25459 +++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25460 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25461 ATA_BMDMA_SHT(DRV_NAME),
25462 };
25463
25464 -static struct ata_port_operations radisys_pata_ops = {
25465 +static const struct ata_port_operations radisys_pata_ops = {
25466 .inherits = &ata_bmdma_port_ops,
25467 .qc_issue = radisys_qc_issue,
25468 .cable_detect = ata_cable_unknown,
25469 diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25470 --- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25471 +++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25472 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25473 return IRQ_HANDLED;
25474 }
25475
25476 -static struct ata_port_operations rb532_pata_port_ops = {
25477 +static const struct ata_port_operations rb532_pata_port_ops = {
25478 .inherits = &ata_sff_port_ops,
25479 .sff_data_xfer = ata_sff_data_xfer32,
25480 };
25481 diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25482 --- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25483 +++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25484 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25485 pci_write_config_byte(dev, 0x48, udma_enable);
25486 }
25487
25488 -static struct ata_port_operations rdc_pata_ops = {
25489 +static const struct ata_port_operations rdc_pata_ops = {
25490 .inherits = &ata_bmdma32_port_ops,
25491 .cable_detect = rdc_pata_cable_detect,
25492 .set_piomode = rdc_set_piomode,
25493 diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25494 --- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25495 +++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25496 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25497 ATA_PIO_SHT(DRV_NAME),
25498 };
25499
25500 -static struct ata_port_operations rz1000_port_ops = {
25501 +static const struct ata_port_operations rz1000_port_ops = {
25502 .inherits = &ata_sff_port_ops,
25503 .cable_detect = ata_cable_40wire,
25504 .set_mode = rz1000_set_mode,
25505 diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25506 --- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25507 +++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25508 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25509 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25510 };
25511
25512 -static struct ata_port_operations sc1200_port_ops = {
25513 +static const struct ata_port_operations sc1200_port_ops = {
25514 .inherits = &ata_bmdma_port_ops,
25515 .qc_prep = ata_sff_dumb_qc_prep,
25516 .qc_issue = sc1200_qc_issue,
25517 diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25518 --- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25519 +++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25520 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25521 ATA_BMDMA_SHT(DRV_NAME),
25522 };
25523
25524 -static struct ata_port_operations scc_pata_ops = {
25525 +static const struct ata_port_operations scc_pata_ops = {
25526 .inherits = &ata_bmdma_port_ops,
25527
25528 .set_piomode = scc_set_piomode,
25529 diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25530 --- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25531 +++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25532 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25533 ATA_BMDMA_SHT(DRV_NAME),
25534 };
25535
25536 -static struct ata_port_operations sch_pata_ops = {
25537 +static const struct ata_port_operations sch_pata_ops = {
25538 .inherits = &ata_bmdma_port_ops,
25539 .cable_detect = ata_cable_unknown,
25540 .set_piomode = sch_set_piomode,
25541 diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25542 --- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25543 +++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25544 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25545 ATA_BMDMA_SHT(DRV_NAME),
25546 };
25547
25548 -static struct ata_port_operations serverworks_osb4_port_ops = {
25549 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25550 .inherits = &ata_bmdma_port_ops,
25551 .cable_detect = serverworks_cable_detect,
25552 .mode_filter = serverworks_osb4_filter,
25553 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25554 .set_dmamode = serverworks_set_dmamode,
25555 };
25556
25557 -static struct ata_port_operations serverworks_csb_port_ops = {
25558 +static const struct ata_port_operations serverworks_csb_port_ops = {
25559 .inherits = &serverworks_osb4_port_ops,
25560 .mode_filter = serverworks_csb_filter,
25561 };
25562 diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25563 --- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25564 +++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25565 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25566 ATA_BMDMA_SHT(DRV_NAME),
25567 };
25568
25569 -static struct ata_port_operations sil680_port_ops = {
25570 +static const struct ata_port_operations sil680_port_ops = {
25571 .inherits = &ata_bmdma32_port_ops,
25572 .cable_detect = sil680_cable_detect,
25573 .set_piomode = sil680_set_piomode,
25574 diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25575 --- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25576 +++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25577 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25578 ATA_BMDMA_SHT(DRV_NAME),
25579 };
25580
25581 -static struct ata_port_operations sis_133_for_sata_ops = {
25582 +static const struct ata_port_operations sis_133_for_sata_ops = {
25583 .inherits = &ata_bmdma_port_ops,
25584 .set_piomode = sis_133_set_piomode,
25585 .set_dmamode = sis_133_set_dmamode,
25586 .cable_detect = sis_133_cable_detect,
25587 };
25588
25589 -static struct ata_port_operations sis_base_ops = {
25590 +static const struct ata_port_operations sis_base_ops = {
25591 .inherits = &ata_bmdma_port_ops,
25592 .prereset = sis_pre_reset,
25593 };
25594
25595 -static struct ata_port_operations sis_133_ops = {
25596 +static const struct ata_port_operations sis_133_ops = {
25597 .inherits = &sis_base_ops,
25598 .set_piomode = sis_133_set_piomode,
25599 .set_dmamode = sis_133_set_dmamode,
25600 .cable_detect = sis_133_cable_detect,
25601 };
25602
25603 -static struct ata_port_operations sis_133_early_ops = {
25604 +static const struct ata_port_operations sis_133_early_ops = {
25605 .inherits = &sis_base_ops,
25606 .set_piomode = sis_100_set_piomode,
25607 .set_dmamode = sis_133_early_set_dmamode,
25608 .cable_detect = sis_66_cable_detect,
25609 };
25610
25611 -static struct ata_port_operations sis_100_ops = {
25612 +static const struct ata_port_operations sis_100_ops = {
25613 .inherits = &sis_base_ops,
25614 .set_piomode = sis_100_set_piomode,
25615 .set_dmamode = sis_100_set_dmamode,
25616 .cable_detect = sis_66_cable_detect,
25617 };
25618
25619 -static struct ata_port_operations sis_66_ops = {
25620 +static const struct ata_port_operations sis_66_ops = {
25621 .inherits = &sis_base_ops,
25622 .set_piomode = sis_old_set_piomode,
25623 .set_dmamode = sis_66_set_dmamode,
25624 .cable_detect = sis_66_cable_detect,
25625 };
25626
25627 -static struct ata_port_operations sis_old_ops = {
25628 +static const struct ata_port_operations sis_old_ops = {
25629 .inherits = &sis_base_ops,
25630 .set_piomode = sis_old_set_piomode,
25631 .set_dmamode = sis_old_set_dmamode,
25632 diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25633 --- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25634 +++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25635 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25636 ATA_BMDMA_SHT(DRV_NAME),
25637 };
25638
25639 -static struct ata_port_operations sl82c105_port_ops = {
25640 +static const struct ata_port_operations sl82c105_port_ops = {
25641 .inherits = &ata_bmdma_port_ops,
25642 .qc_defer = sl82c105_qc_defer,
25643 .bmdma_start = sl82c105_bmdma_start,
25644 diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25645 --- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25646 +++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25647 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25648 ATA_BMDMA_SHT(DRV_NAME),
25649 };
25650
25651 -static struct ata_port_operations triflex_port_ops = {
25652 +static const struct ata_port_operations triflex_port_ops = {
25653 .inherits = &ata_bmdma_port_ops,
25654 .bmdma_start = triflex_bmdma_start,
25655 .bmdma_stop = triflex_bmdma_stop,
25656 diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25657 --- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25658 +++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25659 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25660 ATA_BMDMA_SHT(DRV_NAME),
25661 };
25662
25663 -static struct ata_port_operations via_port_ops = {
25664 +static const struct ata_port_operations via_port_ops = {
25665 .inherits = &ata_bmdma_port_ops,
25666 .cable_detect = via_cable_detect,
25667 .set_piomode = via_set_piomode,
25668 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25669 .port_start = via_port_start,
25670 };
25671
25672 -static struct ata_port_operations via_port_ops_noirq = {
25673 +static const struct ata_port_operations via_port_ops_noirq = {
25674 .inherits = &via_port_ops,
25675 .sff_data_xfer = ata_sff_data_xfer_noirq,
25676 };
25677 diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25678 --- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25679 +++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25680 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25681 ATA_PIO_SHT(DRV_NAME),
25682 };
25683
25684 -static struct ata_port_operations winbond_port_ops = {
25685 +static const struct ata_port_operations winbond_port_ops = {
25686 .inherits = &ata_sff_port_ops,
25687 .sff_data_xfer = winbond_data_xfer,
25688 .cable_detect = ata_cable_40wire,
25689 diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25690 --- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25691 +++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25692 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25693 .dma_boundary = ADMA_DMA_BOUNDARY,
25694 };
25695
25696 -static struct ata_port_operations adma_ata_ops = {
25697 +static const struct ata_port_operations adma_ata_ops = {
25698 .inherits = &ata_sff_port_ops,
25699
25700 .lost_interrupt = ATA_OP_NULL,
25701 diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25702 --- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25703 +++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25704 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25705 .dma_boundary = ATA_DMA_BOUNDARY,
25706 };
25707
25708 -static struct ata_port_operations sata_fsl_ops = {
25709 +static const struct ata_port_operations sata_fsl_ops = {
25710 .inherits = &sata_pmp_port_ops,
25711
25712 .qc_defer = ata_std_qc_defer,
25713 diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25714 --- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25715 +++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25716 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25717 return 0;
25718 }
25719
25720 -static struct ata_port_operations inic_port_ops = {
25721 +static const struct ata_port_operations inic_port_ops = {
25722 .inherits = &sata_port_ops,
25723
25724 .check_atapi_dma = inic_check_atapi_dma,
25725 diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25726 --- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25727 +++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25728 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25729 .dma_boundary = MV_DMA_BOUNDARY,
25730 };
25731
25732 -static struct ata_port_operations mv5_ops = {
25733 +static const struct ata_port_operations mv5_ops = {
25734 .inherits = &ata_sff_port_ops,
25735
25736 .lost_interrupt = ATA_OP_NULL,
25737 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25738 .port_stop = mv_port_stop,
25739 };
25740
25741 -static struct ata_port_operations mv6_ops = {
25742 +static const struct ata_port_operations mv6_ops = {
25743 .inherits = &mv5_ops,
25744 .dev_config = mv6_dev_config,
25745 .scr_read = mv_scr_read,
25746 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25747 .bmdma_status = mv_bmdma_status,
25748 };
25749
25750 -static struct ata_port_operations mv_iie_ops = {
25751 +static const struct ata_port_operations mv_iie_ops = {
25752 .inherits = &mv6_ops,
25753 .dev_config = ATA_OP_NULL,
25754 .qc_prep = mv_qc_prep_iie,
25755 diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
25756 --- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25757 +++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25758 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25759 * cases. Define nv_hardreset() which only kicks in for post-boot
25760 * probing and use it for all variants.
25761 */
25762 -static struct ata_port_operations nv_generic_ops = {
25763 +static const struct ata_port_operations nv_generic_ops = {
25764 .inherits = &ata_bmdma_port_ops,
25765 .lost_interrupt = ATA_OP_NULL,
25766 .scr_read = nv_scr_read,
25767 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25768 .hardreset = nv_hardreset,
25769 };
25770
25771 -static struct ata_port_operations nv_nf2_ops = {
25772 +static const struct ata_port_operations nv_nf2_ops = {
25773 .inherits = &nv_generic_ops,
25774 .freeze = nv_nf2_freeze,
25775 .thaw = nv_nf2_thaw,
25776 };
25777
25778 -static struct ata_port_operations nv_ck804_ops = {
25779 +static const struct ata_port_operations nv_ck804_ops = {
25780 .inherits = &nv_generic_ops,
25781 .freeze = nv_ck804_freeze,
25782 .thaw = nv_ck804_thaw,
25783 .host_stop = nv_ck804_host_stop,
25784 };
25785
25786 -static struct ata_port_operations nv_adma_ops = {
25787 +static const struct ata_port_operations nv_adma_ops = {
25788 .inherits = &nv_ck804_ops,
25789
25790 .check_atapi_dma = nv_adma_check_atapi_dma,
25791 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25792 .host_stop = nv_adma_host_stop,
25793 };
25794
25795 -static struct ata_port_operations nv_swncq_ops = {
25796 +static const struct ata_port_operations nv_swncq_ops = {
25797 .inherits = &nv_generic_ops,
25798
25799 .qc_defer = ata_std_qc_defer,
25800 diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
25801 --- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25802 +++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25803 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25804 .error_handler = pdc_error_handler,
25805 };
25806
25807 -static struct ata_port_operations pdc_sata_ops = {
25808 +static const struct ata_port_operations pdc_sata_ops = {
25809 .inherits = &pdc_common_ops,
25810 .cable_detect = pdc_sata_cable_detect,
25811 .freeze = pdc_sata_freeze,
25812 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25813
25814 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25815 and ->freeze/thaw that ignore the hotplug controls. */
25816 -static struct ata_port_operations pdc_old_sata_ops = {
25817 +static const struct ata_port_operations pdc_old_sata_ops = {
25818 .inherits = &pdc_sata_ops,
25819 .freeze = pdc_freeze,
25820 .thaw = pdc_thaw,
25821 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25822 };
25823
25824 -static struct ata_port_operations pdc_pata_ops = {
25825 +static const struct ata_port_operations pdc_pata_ops = {
25826 .inherits = &pdc_common_ops,
25827 .cable_detect = pdc_pata_cable_detect,
25828 .freeze = pdc_freeze,
25829 diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
25830 --- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25831 +++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25832 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25833 .dma_boundary = QS_DMA_BOUNDARY,
25834 };
25835
25836 -static struct ata_port_operations qs_ata_ops = {
25837 +static const struct ata_port_operations qs_ata_ops = {
25838 .inherits = &ata_sff_port_ops,
25839
25840 .check_atapi_dma = qs_check_atapi_dma,
25841 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
25842 --- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25843 +++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25844 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25845 .dma_boundary = ATA_DMA_BOUNDARY,
25846 };
25847
25848 -static struct ata_port_operations sil24_ops = {
25849 +static const struct ata_port_operations sil24_ops = {
25850 .inherits = &sata_pmp_port_ops,
25851
25852 .qc_defer = sil24_qc_defer,
25853 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
25854 --- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25855 +++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25856 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25857 .sg_tablesize = ATA_MAX_PRD
25858 };
25859
25860 -static struct ata_port_operations sil_ops = {
25861 +static const struct ata_port_operations sil_ops = {
25862 .inherits = &ata_bmdma32_port_ops,
25863 .dev_config = sil_dev_config,
25864 .set_mode = sil_set_mode,
25865 diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
25866 --- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25867 +++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25868 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25869 ATA_BMDMA_SHT(DRV_NAME),
25870 };
25871
25872 -static struct ata_port_operations sis_ops = {
25873 +static const struct ata_port_operations sis_ops = {
25874 .inherits = &ata_bmdma_port_ops,
25875 .scr_read = sis_scr_read,
25876 .scr_write = sis_scr_write,
25877 diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
25878 --- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25879 +++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25880 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25881 };
25882
25883
25884 -static struct ata_port_operations k2_sata_ops = {
25885 +static const struct ata_port_operations k2_sata_ops = {
25886 .inherits = &ata_bmdma_port_ops,
25887 .sff_tf_load = k2_sata_tf_load,
25888 .sff_tf_read = k2_sata_tf_read,
25889 diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
25890 --- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25891 +++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25892 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25893 };
25894
25895 /* TODO: inherit from base port_ops after converting to new EH */
25896 -static struct ata_port_operations pdc_20621_ops = {
25897 +static const struct ata_port_operations pdc_20621_ops = {
25898 .inherits = &ata_sff_port_ops,
25899
25900 .check_atapi_dma = pdc_check_atapi_dma,
25901 diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
25902 --- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25903 +++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25904 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25905 ATA_BMDMA_SHT(DRV_NAME),
25906 };
25907
25908 -static struct ata_port_operations uli_ops = {
25909 +static const struct ata_port_operations uli_ops = {
25910 .inherits = &ata_bmdma_port_ops,
25911 .scr_read = uli_scr_read,
25912 .scr_write = uli_scr_write,
25913 diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
25914 --- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25915 +++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25916 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25917 ATA_BMDMA_SHT(DRV_NAME),
25918 };
25919
25920 -static struct ata_port_operations svia_base_ops = {
25921 +static const struct ata_port_operations svia_base_ops = {
25922 .inherits = &ata_bmdma_port_ops,
25923 .sff_tf_load = svia_tf_load,
25924 };
25925
25926 -static struct ata_port_operations vt6420_sata_ops = {
25927 +static const struct ata_port_operations vt6420_sata_ops = {
25928 .inherits = &svia_base_ops,
25929 .freeze = svia_noop_freeze,
25930 .prereset = vt6420_prereset,
25931 .bmdma_start = vt6420_bmdma_start,
25932 };
25933
25934 -static struct ata_port_operations vt6421_pata_ops = {
25935 +static const struct ata_port_operations vt6421_pata_ops = {
25936 .inherits = &svia_base_ops,
25937 .cable_detect = vt6421_pata_cable_detect,
25938 .set_piomode = vt6421_set_pio_mode,
25939 .set_dmamode = vt6421_set_dma_mode,
25940 };
25941
25942 -static struct ata_port_operations vt6421_sata_ops = {
25943 +static const struct ata_port_operations vt6421_sata_ops = {
25944 .inherits = &svia_base_ops,
25945 .scr_read = svia_scr_read,
25946 .scr_write = svia_scr_write,
25947 };
25948
25949 -static struct ata_port_operations vt8251_ops = {
25950 +static const struct ata_port_operations vt8251_ops = {
25951 .inherits = &svia_base_ops,
25952 .hardreset = sata_std_hardreset,
25953 .scr_read = vt8251_scr_read,
25954 diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
25955 --- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25956 +++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25957 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25958 };
25959
25960
25961 -static struct ata_port_operations vsc_sata_ops = {
25962 +static const struct ata_port_operations vsc_sata_ops = {
25963 .inherits = &ata_bmdma_port_ops,
25964 /* The IRQ handling is not quite standard SFF behaviour so we
25965 cannot use the default lost interrupt handler */
25966 diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
25967 --- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25968 +++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25969 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25970 vcc->pop(vcc, skb);
25971 else
25972 dev_kfree_skb_any(skb);
25973 - atomic_inc(&vcc->stats->tx);
25974 + atomic_inc_unchecked(&vcc->stats->tx);
25975
25976 return 0;
25977 }
25978 diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
25979 --- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25980 +++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25981 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25982 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25983
25984 // VC layer stats
25985 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25986 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25987
25988 // free the descriptor
25989 kfree (tx_descr);
25990 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25991 dump_skb ("<<<", vc, skb);
25992
25993 // VC layer stats
25994 - atomic_inc(&atm_vcc->stats->rx);
25995 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25996 __net_timestamp(skb);
25997 // end of our responsability
25998 atm_vcc->push (atm_vcc, skb);
25999 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26000 } else {
26001 PRINTK (KERN_INFO, "dropped over-size frame");
26002 // should we count this?
26003 - atomic_inc(&atm_vcc->stats->rx_drop);
26004 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26005 }
26006
26007 } else {
26008 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26009 }
26010
26011 if (check_area (skb->data, skb->len)) {
26012 - atomic_inc(&atm_vcc->stats->tx_err);
26013 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26014 return -ENOMEM; // ?
26015 }
26016
26017 diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26018 --- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26019 +++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26020 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26021 if (vcc->pop) vcc->pop(vcc,skb);
26022 else dev_kfree_skb(skb);
26023 if (dev_data) return 0;
26024 - atomic_inc(&vcc->stats->tx_err);
26025 + atomic_inc_unchecked(&vcc->stats->tx_err);
26026 return -ENOLINK;
26027 }
26028 size = skb->len+sizeof(struct atmtcp_hdr);
26029 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26030 if (!new_skb) {
26031 if (vcc->pop) vcc->pop(vcc,skb);
26032 else dev_kfree_skb(skb);
26033 - atomic_inc(&vcc->stats->tx_err);
26034 + atomic_inc_unchecked(&vcc->stats->tx_err);
26035 return -ENOBUFS;
26036 }
26037 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26038 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26039 if (vcc->pop) vcc->pop(vcc,skb);
26040 else dev_kfree_skb(skb);
26041 out_vcc->push(out_vcc,new_skb);
26042 - atomic_inc(&vcc->stats->tx);
26043 - atomic_inc(&out_vcc->stats->rx);
26044 + atomic_inc_unchecked(&vcc->stats->tx);
26045 + atomic_inc_unchecked(&out_vcc->stats->rx);
26046 return 0;
26047 }
26048
26049 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26050 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26051 read_unlock(&vcc_sklist_lock);
26052 if (!out_vcc) {
26053 - atomic_inc(&vcc->stats->tx_err);
26054 + atomic_inc_unchecked(&vcc->stats->tx_err);
26055 goto done;
26056 }
26057 skb_pull(skb,sizeof(struct atmtcp_hdr));
26058 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26059 __net_timestamp(new_skb);
26060 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26061 out_vcc->push(out_vcc,new_skb);
26062 - atomic_inc(&vcc->stats->tx);
26063 - atomic_inc(&out_vcc->stats->rx);
26064 + atomic_inc_unchecked(&vcc->stats->tx);
26065 + atomic_inc_unchecked(&out_vcc->stats->rx);
26066 done:
26067 if (vcc->pop) vcc->pop(vcc,skb);
26068 else dev_kfree_skb(skb);
26069 diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26070 --- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26071 +++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26072 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26073 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26074 vcc->dev->number);
26075 length = 0;
26076 - atomic_inc(&vcc->stats->rx_err);
26077 + atomic_inc_unchecked(&vcc->stats->rx_err);
26078 }
26079 else {
26080 length = ATM_CELL_SIZE-1; /* no HEC */
26081 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26082 size);
26083 }
26084 eff = length = 0;
26085 - atomic_inc(&vcc->stats->rx_err);
26086 + atomic_inc_unchecked(&vcc->stats->rx_err);
26087 }
26088 else {
26089 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26090 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26091 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26092 vcc->dev->number,vcc->vci,length,size << 2,descr);
26093 length = eff = 0;
26094 - atomic_inc(&vcc->stats->rx_err);
26095 + atomic_inc_unchecked(&vcc->stats->rx_err);
26096 }
26097 }
26098 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26099 @@ -770,7 +770,7 @@ rx_dequeued++;
26100 vcc->push(vcc,skb);
26101 pushed++;
26102 }
26103 - atomic_inc(&vcc->stats->rx);
26104 + atomic_inc_unchecked(&vcc->stats->rx);
26105 }
26106 wake_up(&eni_dev->rx_wait);
26107 }
26108 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26109 PCI_DMA_TODEVICE);
26110 if (vcc->pop) vcc->pop(vcc,skb);
26111 else dev_kfree_skb_irq(skb);
26112 - atomic_inc(&vcc->stats->tx);
26113 + atomic_inc_unchecked(&vcc->stats->tx);
26114 wake_up(&eni_dev->tx_wait);
26115 dma_complete++;
26116 }
26117 diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26118 --- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26119 +++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26120 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26121 }
26122 }
26123
26124 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26125 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26126
26127 fs_dprintk (FS_DEBUG_TXMEM, "i");
26128 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26129 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26130 #endif
26131 skb_put (skb, qe->p1 & 0xffff);
26132 ATM_SKB(skb)->vcc = atm_vcc;
26133 - atomic_inc(&atm_vcc->stats->rx);
26134 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26135 __net_timestamp(skb);
26136 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26137 atm_vcc->push (atm_vcc, skb);
26138 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26139 kfree (pe);
26140 }
26141 if (atm_vcc)
26142 - atomic_inc(&atm_vcc->stats->rx_drop);
26143 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26144 break;
26145 case 0x1f: /* Reassembly abort: no buffers. */
26146 /* Silently increment error counter. */
26147 if (atm_vcc)
26148 - atomic_inc(&atm_vcc->stats->rx_drop);
26149 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26150 break;
26151 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26152 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26153 diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26154 --- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26155 +++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26156 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26157 #endif
26158 /* check error condition */
26159 if (*entry->status & STATUS_ERROR)
26160 - atomic_inc(&vcc->stats->tx_err);
26161 + atomic_inc_unchecked(&vcc->stats->tx_err);
26162 else
26163 - atomic_inc(&vcc->stats->tx);
26164 + atomic_inc_unchecked(&vcc->stats->tx);
26165 }
26166 }
26167
26168 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26169 if (skb == NULL) {
26170 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26171
26172 - atomic_inc(&vcc->stats->rx_drop);
26173 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26174 return -ENOMEM;
26175 }
26176
26177 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26178
26179 dev_kfree_skb_any(skb);
26180
26181 - atomic_inc(&vcc->stats->rx_drop);
26182 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26183 return -ENOMEM;
26184 }
26185
26186 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26187
26188 vcc->push(vcc, skb);
26189 - atomic_inc(&vcc->stats->rx);
26190 + atomic_inc_unchecked(&vcc->stats->rx);
26191
26192 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26193
26194 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26195 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26196 fore200e->atm_dev->number,
26197 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26198 - atomic_inc(&vcc->stats->rx_err);
26199 + atomic_inc_unchecked(&vcc->stats->rx_err);
26200 }
26201 }
26202
26203 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26204 goto retry_here;
26205 }
26206
26207 - atomic_inc(&vcc->stats->tx_err);
26208 + atomic_inc_unchecked(&vcc->stats->tx_err);
26209
26210 fore200e->tx_sat++;
26211 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26212 diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26213 --- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26214 +++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26215 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26216
26217 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26218 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26219 - atomic_inc(&vcc->stats->rx_drop);
26220 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26221 goto return_host_buffers;
26222 }
26223
26224 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26225 RBRQ_LEN_ERR(he_dev->rbrq_head)
26226 ? "LEN_ERR" : "",
26227 vcc->vpi, vcc->vci);
26228 - atomic_inc(&vcc->stats->rx_err);
26229 + atomic_inc_unchecked(&vcc->stats->rx_err);
26230 goto return_host_buffers;
26231 }
26232
26233 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26234 vcc->push(vcc, skb);
26235 spin_lock(&he_dev->global_lock);
26236
26237 - atomic_inc(&vcc->stats->rx);
26238 + atomic_inc_unchecked(&vcc->stats->rx);
26239
26240 return_host_buffers:
26241 ++pdus_assembled;
26242 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26243 tpd->vcc->pop(tpd->vcc, tpd->skb);
26244 else
26245 dev_kfree_skb_any(tpd->skb);
26246 - atomic_inc(&tpd->vcc->stats->tx_err);
26247 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26248 }
26249 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26250 return;
26251 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26252 vcc->pop(vcc, skb);
26253 else
26254 dev_kfree_skb_any(skb);
26255 - atomic_inc(&vcc->stats->tx_err);
26256 + atomic_inc_unchecked(&vcc->stats->tx_err);
26257 return -EINVAL;
26258 }
26259
26260 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26261 vcc->pop(vcc, skb);
26262 else
26263 dev_kfree_skb_any(skb);
26264 - atomic_inc(&vcc->stats->tx_err);
26265 + atomic_inc_unchecked(&vcc->stats->tx_err);
26266 return -EINVAL;
26267 }
26268 #endif
26269 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26270 vcc->pop(vcc, skb);
26271 else
26272 dev_kfree_skb_any(skb);
26273 - atomic_inc(&vcc->stats->tx_err);
26274 + atomic_inc_unchecked(&vcc->stats->tx_err);
26275 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26276 return -ENOMEM;
26277 }
26278 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26279 vcc->pop(vcc, skb);
26280 else
26281 dev_kfree_skb_any(skb);
26282 - atomic_inc(&vcc->stats->tx_err);
26283 + atomic_inc_unchecked(&vcc->stats->tx_err);
26284 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26285 return -ENOMEM;
26286 }
26287 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26288 __enqueue_tpd(he_dev, tpd, cid);
26289 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26290
26291 - atomic_inc(&vcc->stats->tx);
26292 + atomic_inc_unchecked(&vcc->stats->tx);
26293
26294 return 0;
26295 }
26296 diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26297 --- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26298 +++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26299 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26300 {
26301 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26302 // VC layer stats
26303 - atomic_inc(&vcc->stats->rx);
26304 + atomic_inc_unchecked(&vcc->stats->rx);
26305 __net_timestamp(skb);
26306 // end of our responsability
26307 vcc->push (vcc, skb);
26308 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26309 dev->tx_iovec = NULL;
26310
26311 // VC layer stats
26312 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26313 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26314
26315 // free the skb
26316 hrz_kfree_skb (skb);
26317 diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26318 --- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26319 +++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26320 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26321 else
26322 dev_kfree_skb(skb);
26323
26324 - atomic_inc(&vcc->stats->tx);
26325 + atomic_inc_unchecked(&vcc->stats->tx);
26326 }
26327
26328 atomic_dec(&scq->used);
26329 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26330 if ((sb = dev_alloc_skb(64)) == NULL) {
26331 printk("%s: Can't allocate buffers for aal0.\n",
26332 card->name);
26333 - atomic_add(i, &vcc->stats->rx_drop);
26334 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26335 break;
26336 }
26337 if (!atm_charge(vcc, sb->truesize)) {
26338 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26339 card->name);
26340 - atomic_add(i - 1, &vcc->stats->rx_drop);
26341 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26342 dev_kfree_skb(sb);
26343 break;
26344 }
26345 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26346 ATM_SKB(sb)->vcc = vcc;
26347 __net_timestamp(sb);
26348 vcc->push(vcc, sb);
26349 - atomic_inc(&vcc->stats->rx);
26350 + atomic_inc_unchecked(&vcc->stats->rx);
26351
26352 cell += ATM_CELL_PAYLOAD;
26353 }
26354 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26355 "(CDC: %08x)\n",
26356 card->name, len, rpp->len, readl(SAR_REG_CDC));
26357 recycle_rx_pool_skb(card, rpp);
26358 - atomic_inc(&vcc->stats->rx_err);
26359 + atomic_inc_unchecked(&vcc->stats->rx_err);
26360 return;
26361 }
26362 if (stat & SAR_RSQE_CRC) {
26363 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26364 recycle_rx_pool_skb(card, rpp);
26365 - atomic_inc(&vcc->stats->rx_err);
26366 + atomic_inc_unchecked(&vcc->stats->rx_err);
26367 return;
26368 }
26369 if (skb_queue_len(&rpp->queue) > 1) {
26370 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26371 RXPRINTK("%s: Can't alloc RX skb.\n",
26372 card->name);
26373 recycle_rx_pool_skb(card, rpp);
26374 - atomic_inc(&vcc->stats->rx_err);
26375 + atomic_inc_unchecked(&vcc->stats->rx_err);
26376 return;
26377 }
26378 if (!atm_charge(vcc, skb->truesize)) {
26379 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26380 __net_timestamp(skb);
26381
26382 vcc->push(vcc, skb);
26383 - atomic_inc(&vcc->stats->rx);
26384 + atomic_inc_unchecked(&vcc->stats->rx);
26385
26386 return;
26387 }
26388 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26389 __net_timestamp(skb);
26390
26391 vcc->push(vcc, skb);
26392 - atomic_inc(&vcc->stats->rx);
26393 + atomic_inc_unchecked(&vcc->stats->rx);
26394
26395 if (skb->truesize > SAR_FB_SIZE_3)
26396 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26397 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26398 if (vcc->qos.aal != ATM_AAL0) {
26399 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26400 card->name, vpi, vci);
26401 - atomic_inc(&vcc->stats->rx_drop);
26402 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26403 goto drop;
26404 }
26405
26406 if ((sb = dev_alloc_skb(64)) == NULL) {
26407 printk("%s: Can't allocate buffers for AAL0.\n",
26408 card->name);
26409 - atomic_inc(&vcc->stats->rx_err);
26410 + atomic_inc_unchecked(&vcc->stats->rx_err);
26411 goto drop;
26412 }
26413
26414 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26415 ATM_SKB(sb)->vcc = vcc;
26416 __net_timestamp(sb);
26417 vcc->push(vcc, sb);
26418 - atomic_inc(&vcc->stats->rx);
26419 + atomic_inc_unchecked(&vcc->stats->rx);
26420
26421 drop:
26422 skb_pull(queue, 64);
26423 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26424
26425 if (vc == NULL) {
26426 printk("%s: NULL connection in send().\n", card->name);
26427 - atomic_inc(&vcc->stats->tx_err);
26428 + atomic_inc_unchecked(&vcc->stats->tx_err);
26429 dev_kfree_skb(skb);
26430 return -EINVAL;
26431 }
26432 if (!test_bit(VCF_TX, &vc->flags)) {
26433 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26434 - atomic_inc(&vcc->stats->tx_err);
26435 + atomic_inc_unchecked(&vcc->stats->tx_err);
26436 dev_kfree_skb(skb);
26437 return -EINVAL;
26438 }
26439 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26440 break;
26441 default:
26442 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26443 - atomic_inc(&vcc->stats->tx_err);
26444 + atomic_inc_unchecked(&vcc->stats->tx_err);
26445 dev_kfree_skb(skb);
26446 return -EINVAL;
26447 }
26448
26449 if (skb_shinfo(skb)->nr_frags != 0) {
26450 printk("%s: No scatter-gather yet.\n", card->name);
26451 - atomic_inc(&vcc->stats->tx_err);
26452 + atomic_inc_unchecked(&vcc->stats->tx_err);
26453 dev_kfree_skb(skb);
26454 return -EINVAL;
26455 }
26456 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26457
26458 err = queue_skb(card, vc, skb, oam);
26459 if (err) {
26460 - atomic_inc(&vcc->stats->tx_err);
26461 + atomic_inc_unchecked(&vcc->stats->tx_err);
26462 dev_kfree_skb(skb);
26463 return err;
26464 }
26465 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26466 skb = dev_alloc_skb(64);
26467 if (!skb) {
26468 printk("%s: Out of memory in send_oam().\n", card->name);
26469 - atomic_inc(&vcc->stats->tx_err);
26470 + atomic_inc_unchecked(&vcc->stats->tx_err);
26471 return -ENOMEM;
26472 }
26473 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26474 diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26475 --- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26476 +++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26477 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26478 status = (u_short) (buf_desc_ptr->desc_mode);
26479 if (status & (RX_CER | RX_PTE | RX_OFL))
26480 {
26481 - atomic_inc(&vcc->stats->rx_err);
26482 + atomic_inc_unchecked(&vcc->stats->rx_err);
26483 IF_ERR(printk("IA: bad packet, dropping it");)
26484 if (status & RX_CER) {
26485 IF_ERR(printk(" cause: packet CRC error\n");)
26486 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26487 len = dma_addr - buf_addr;
26488 if (len > iadev->rx_buf_sz) {
26489 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26490 - atomic_inc(&vcc->stats->rx_err);
26491 + atomic_inc_unchecked(&vcc->stats->rx_err);
26492 goto out_free_desc;
26493 }
26494
26495 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26496 ia_vcc = INPH_IA_VCC(vcc);
26497 if (ia_vcc == NULL)
26498 {
26499 - atomic_inc(&vcc->stats->rx_err);
26500 + atomic_inc_unchecked(&vcc->stats->rx_err);
26501 dev_kfree_skb_any(skb);
26502 atm_return(vcc, atm_guess_pdu2truesize(len));
26503 goto INCR_DLE;
26504 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26505 if ((length > iadev->rx_buf_sz) || (length >
26506 (skb->len - sizeof(struct cpcs_trailer))))
26507 {
26508 - atomic_inc(&vcc->stats->rx_err);
26509 + atomic_inc_unchecked(&vcc->stats->rx_err);
26510 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26511 length, skb->len);)
26512 dev_kfree_skb_any(skb);
26513 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26514
26515 IF_RX(printk("rx_dle_intr: skb push");)
26516 vcc->push(vcc,skb);
26517 - atomic_inc(&vcc->stats->rx);
26518 + atomic_inc_unchecked(&vcc->stats->rx);
26519 iadev->rx_pkt_cnt++;
26520 }
26521 INCR_DLE:
26522 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26523 {
26524 struct k_sonet_stats *stats;
26525 stats = &PRIV(_ia_dev[board])->sonet_stats;
26526 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26527 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26528 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26529 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26530 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26531 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26532 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26533 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26534 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26535 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26536 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26537 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26538 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26539 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26540 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26541 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26542 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26543 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26544 }
26545 ia_cmds.status = 0;
26546 break;
26547 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26548 if ((desc == 0) || (desc > iadev->num_tx_desc))
26549 {
26550 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26551 - atomic_inc(&vcc->stats->tx);
26552 + atomic_inc_unchecked(&vcc->stats->tx);
26553 if (vcc->pop)
26554 vcc->pop(vcc, skb);
26555 else
26556 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26557 ATM_DESC(skb) = vcc->vci;
26558 skb_queue_tail(&iadev->tx_dma_q, skb);
26559
26560 - atomic_inc(&vcc->stats->tx);
26561 + atomic_inc_unchecked(&vcc->stats->tx);
26562 iadev->tx_pkt_cnt++;
26563 /* Increment transaction counter */
26564 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26565
26566 #if 0
26567 /* add flow control logic */
26568 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26569 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26570 if (iavcc->vc_desc_cnt > 10) {
26571 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26572 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26573 diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26574 --- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26575 +++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26576 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26577 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26578 lanai_endtx(lanai, lvcc);
26579 lanai_free_skb(lvcc->tx.atmvcc, skb);
26580 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26581 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26582 }
26583
26584 /* Try to fill the buffer - don't call unless there is backlog */
26585 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26586 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26587 __net_timestamp(skb);
26588 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26589 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26590 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26591 out:
26592 lvcc->rx.buf.ptr = end;
26593 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26594 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26595 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26596 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26597 lanai->stats.service_rxnotaal5++;
26598 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26599 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26600 return 0;
26601 }
26602 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26603 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26604 int bytes;
26605 read_unlock(&vcc_sklist_lock);
26606 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26607 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26608 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26609 lvcc->stats.x.aal5.service_trash++;
26610 bytes = (SERVICE_GET_END(s) * 16) -
26611 (((unsigned long) lvcc->rx.buf.ptr) -
26612 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26613 }
26614 if (s & SERVICE_STREAM) {
26615 read_unlock(&vcc_sklist_lock);
26616 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26617 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26618 lvcc->stats.x.aal5.service_stream++;
26619 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26620 "PDU on VCI %d!\n", lanai->number, vci);
26621 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26622 return 0;
26623 }
26624 DPRINTK("got rx crc error on vci %d\n", vci);
26625 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26626 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26627 lvcc->stats.x.aal5.service_rxcrc++;
26628 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26629 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26630 diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26631 --- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26632 +++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26633 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26634 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26635 {
26636 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26637 - atomic_inc(&vcc->stats->tx_err);
26638 + atomic_inc_unchecked(&vcc->stats->tx_err);
26639 dev_kfree_skb_any(skb);
26640 return -EINVAL;
26641 }
26642 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26643 if (!vc->tx)
26644 {
26645 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26646 - atomic_inc(&vcc->stats->tx_err);
26647 + atomic_inc_unchecked(&vcc->stats->tx_err);
26648 dev_kfree_skb_any(skb);
26649 return -EINVAL;
26650 }
26651 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26652 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26653 {
26654 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26655 - atomic_inc(&vcc->stats->tx_err);
26656 + atomic_inc_unchecked(&vcc->stats->tx_err);
26657 dev_kfree_skb_any(skb);
26658 return -EINVAL;
26659 }
26660 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26661 if (skb_shinfo(skb)->nr_frags != 0)
26662 {
26663 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26664 - atomic_inc(&vcc->stats->tx_err);
26665 + atomic_inc_unchecked(&vcc->stats->tx_err);
26666 dev_kfree_skb_any(skb);
26667 return -EINVAL;
26668 }
26669 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26670
26671 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26672 {
26673 - atomic_inc(&vcc->stats->tx_err);
26674 + atomic_inc_unchecked(&vcc->stats->tx_err);
26675 dev_kfree_skb_any(skb);
26676 return -EIO;
26677 }
26678 - atomic_inc(&vcc->stats->tx);
26679 + atomic_inc_unchecked(&vcc->stats->tx);
26680
26681 return 0;
26682 }
26683 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26684 {
26685 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26686 card->index);
26687 - atomic_add(i,&vcc->stats->rx_drop);
26688 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26689 break;
26690 }
26691 if (!atm_charge(vcc, sb->truesize))
26692 {
26693 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26694 card->index);
26695 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26696 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26697 dev_kfree_skb_any(sb);
26698 break;
26699 }
26700 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26701 ATM_SKB(sb)->vcc = vcc;
26702 __net_timestamp(sb);
26703 vcc->push(vcc, sb);
26704 - atomic_inc(&vcc->stats->rx);
26705 + atomic_inc_unchecked(&vcc->stats->rx);
26706 cell += ATM_CELL_PAYLOAD;
26707 }
26708
26709 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26710 if (iovb == NULL)
26711 {
26712 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26713 - atomic_inc(&vcc->stats->rx_drop);
26714 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26715 recycle_rx_buf(card, skb);
26716 return;
26717 }
26718 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26719 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26720 {
26721 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26722 - atomic_inc(&vcc->stats->rx_err);
26723 + atomic_inc_unchecked(&vcc->stats->rx_err);
26724 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26725 NS_SKB(iovb)->iovcnt = 0;
26726 iovb->len = 0;
26727 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26728 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26729 card->index);
26730 which_list(card, skb);
26731 - atomic_inc(&vcc->stats->rx_err);
26732 + atomic_inc_unchecked(&vcc->stats->rx_err);
26733 recycle_rx_buf(card, skb);
26734 vc->rx_iov = NULL;
26735 recycle_iov_buf(card, iovb);
26736 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26737 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26738 card->index);
26739 which_list(card, skb);
26740 - atomic_inc(&vcc->stats->rx_err);
26741 + atomic_inc_unchecked(&vcc->stats->rx_err);
26742 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26743 NS_SKB(iovb)->iovcnt);
26744 vc->rx_iov = NULL;
26745 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26746 printk(" - PDU size mismatch.\n");
26747 else
26748 printk(".\n");
26749 - atomic_inc(&vcc->stats->rx_err);
26750 + atomic_inc_unchecked(&vcc->stats->rx_err);
26751 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26752 NS_SKB(iovb)->iovcnt);
26753 vc->rx_iov = NULL;
26754 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26755 if (!atm_charge(vcc, skb->truesize))
26756 {
26757 push_rxbufs(card, skb);
26758 - atomic_inc(&vcc->stats->rx_drop);
26759 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26760 }
26761 else
26762 {
26763 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26764 ATM_SKB(skb)->vcc = vcc;
26765 __net_timestamp(skb);
26766 vcc->push(vcc, skb);
26767 - atomic_inc(&vcc->stats->rx);
26768 + atomic_inc_unchecked(&vcc->stats->rx);
26769 }
26770 }
26771 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26772 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26773 if (!atm_charge(vcc, sb->truesize))
26774 {
26775 push_rxbufs(card, sb);
26776 - atomic_inc(&vcc->stats->rx_drop);
26777 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26778 }
26779 else
26780 {
26781 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26782 ATM_SKB(sb)->vcc = vcc;
26783 __net_timestamp(sb);
26784 vcc->push(vcc, sb);
26785 - atomic_inc(&vcc->stats->rx);
26786 + atomic_inc_unchecked(&vcc->stats->rx);
26787 }
26788
26789 push_rxbufs(card, skb);
26790 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26791 if (!atm_charge(vcc, skb->truesize))
26792 {
26793 push_rxbufs(card, skb);
26794 - atomic_inc(&vcc->stats->rx_drop);
26795 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26796 }
26797 else
26798 {
26799 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26800 ATM_SKB(skb)->vcc = vcc;
26801 __net_timestamp(skb);
26802 vcc->push(vcc, skb);
26803 - atomic_inc(&vcc->stats->rx);
26804 + atomic_inc_unchecked(&vcc->stats->rx);
26805 }
26806
26807 push_rxbufs(card, sb);
26808 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26809 if (hb == NULL)
26810 {
26811 printk("nicstar%d: Out of huge buffers.\n", card->index);
26812 - atomic_inc(&vcc->stats->rx_drop);
26813 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26814 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26815 NS_SKB(iovb)->iovcnt);
26816 vc->rx_iov = NULL;
26817 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26818 }
26819 else
26820 dev_kfree_skb_any(hb);
26821 - atomic_inc(&vcc->stats->rx_drop);
26822 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26823 }
26824 else
26825 {
26826 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26827 #endif /* NS_USE_DESTRUCTORS */
26828 __net_timestamp(hb);
26829 vcc->push(vcc, hb);
26830 - atomic_inc(&vcc->stats->rx);
26831 + atomic_inc_unchecked(&vcc->stats->rx);
26832 }
26833 }
26834
26835 diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
26836 --- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26837 +++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26838 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26839 }
26840 atm_charge(vcc, skb->truesize);
26841 vcc->push(vcc, skb);
26842 - atomic_inc(&vcc->stats->rx);
26843 + atomic_inc_unchecked(&vcc->stats->rx);
26844 break;
26845
26846 case PKT_STATUS:
26847 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26848 char msg[500];
26849 char item[10];
26850
26851 + pax_track_stack();
26852 +
26853 len = buf->len;
26854 for (i = 0; i < len; i++){
26855 if(i % 8 == 0)
26856 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26857 vcc = SKB_CB(oldskb)->vcc;
26858
26859 if (vcc) {
26860 - atomic_inc(&vcc->stats->tx);
26861 + atomic_inc_unchecked(&vcc->stats->tx);
26862 solos_pop(vcc, oldskb);
26863 } else
26864 dev_kfree_skb_irq(oldskb);
26865 diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
26866 --- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26867 +++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26868 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26869
26870
26871 #define ADD_LIMITED(s,v) \
26872 - atomic_add((v),&stats->s); \
26873 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26874 + atomic_add_unchecked((v),&stats->s); \
26875 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26876
26877
26878 static void suni_hz(unsigned long from_timer)
26879 diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
26880 --- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26881 +++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26882 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26883 struct sonet_stats tmp;
26884 int error = 0;
26885
26886 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26887 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26888 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26889 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26890 if (zero && !error) {
26891 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26892
26893
26894 #define ADD_LIMITED(s,v) \
26895 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26896 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26897 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26898 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26899 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26900 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26901
26902
26903 static void stat_event(struct atm_dev *dev)
26904 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26905 if (reason & uPD98402_INT_PFM) stat_event(dev);
26906 if (reason & uPD98402_INT_PCO) {
26907 (void) GET(PCOCR); /* clear interrupt cause */
26908 - atomic_add(GET(HECCT),
26909 + atomic_add_unchecked(GET(HECCT),
26910 &PRIV(dev)->sonet_stats.uncorr_hcs);
26911 }
26912 if ((reason & uPD98402_INT_RFO) &&
26913 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26914 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26915 uPD98402_INT_LOS),PIMR); /* enable them */
26916 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26917 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26918 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26919 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26920 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26921 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26922 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26923 return 0;
26924 }
26925
26926 diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
26927 --- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26928 +++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26929 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26930 }
26931 if (!size) {
26932 dev_kfree_skb_irq(skb);
26933 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26934 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26935 continue;
26936 }
26937 if (!atm_charge(vcc,skb->truesize)) {
26938 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26939 skb->len = size;
26940 ATM_SKB(skb)->vcc = vcc;
26941 vcc->push(vcc,skb);
26942 - atomic_inc(&vcc->stats->rx);
26943 + atomic_inc_unchecked(&vcc->stats->rx);
26944 }
26945 zout(pos & 0xffff,MTA(mbx));
26946 #if 0 /* probably a stupid idea */
26947 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26948 skb_queue_head(&zatm_vcc->backlog,skb);
26949 break;
26950 }
26951 - atomic_inc(&vcc->stats->tx);
26952 + atomic_inc_unchecked(&vcc->stats->tx);
26953 wake_up(&zatm_vcc->tx_wait);
26954 }
26955
26956 diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
26957 --- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26958 +++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26959 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26960 return ret;
26961 }
26962
26963 -static struct sysfs_ops driver_sysfs_ops = {
26964 +static const struct sysfs_ops driver_sysfs_ops = {
26965 .show = drv_attr_show,
26966 .store = drv_attr_store,
26967 };
26968 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26969 return ret;
26970 }
26971
26972 -static struct sysfs_ops bus_sysfs_ops = {
26973 +static const struct sysfs_ops bus_sysfs_ops = {
26974 .show = bus_attr_show,
26975 .store = bus_attr_store,
26976 };
26977 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26978 return 0;
26979 }
26980
26981 -static struct kset_uevent_ops bus_uevent_ops = {
26982 +static const struct kset_uevent_ops bus_uevent_ops = {
26983 .filter = bus_uevent_filter,
26984 };
26985
26986 diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
26987 --- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26988 +++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26989 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26990 kfree(cp);
26991 }
26992
26993 -static struct sysfs_ops class_sysfs_ops = {
26994 +static const struct sysfs_ops class_sysfs_ops = {
26995 .show = class_attr_show,
26996 .store = class_attr_store,
26997 };
26998 diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
26999 --- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27000 +++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27001 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27002 return ret;
27003 }
27004
27005 -static struct sysfs_ops dev_sysfs_ops = {
27006 +static const struct sysfs_ops dev_sysfs_ops = {
27007 .show = dev_attr_show,
27008 .store = dev_attr_store,
27009 };
27010 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27011 return retval;
27012 }
27013
27014 -static struct kset_uevent_ops device_uevent_ops = {
27015 +static const struct kset_uevent_ops device_uevent_ops = {
27016 .filter = dev_uevent_filter,
27017 .name = dev_uevent_name,
27018 .uevent = dev_uevent,
27019 diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27020 --- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27021 +++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27022 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27023 return retval;
27024 }
27025
27026 -static struct kset_uevent_ops memory_uevent_ops = {
27027 +static const struct kset_uevent_ops memory_uevent_ops = {
27028 .name = memory_uevent_name,
27029 .uevent = memory_uevent,
27030 };
27031 diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27032 --- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27033 +++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27034 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27035 return -EIO;
27036 }
27037
27038 -static struct sysfs_ops sysfs_ops = {
27039 +static const struct sysfs_ops sysfs_ops = {
27040 .show = sysdev_show,
27041 .store = sysdev_store,
27042 };
27043 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27044 return -EIO;
27045 }
27046
27047 -static struct sysfs_ops sysfs_class_ops = {
27048 +static const struct sysfs_ops sysfs_class_ops = {
27049 .show = sysdev_class_show,
27050 .store = sysdev_class_store,
27051 };
27052 diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27053 --- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27054 +++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27055 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27056 int err;
27057 u32 cp;
27058
27059 + memset(&arg64, 0, sizeof(arg64));
27060 +
27061 err = 0;
27062 err |=
27063 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27064 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27065 /* Wait (up to 20 seconds) for a command to complete */
27066
27067 for (i = 20 * HZ; i > 0; i--) {
27068 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27069 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27070 if (done == FIFO_EMPTY)
27071 schedule_timeout_uninterruptible(1);
27072 else
27073 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27074 resend_cmd1:
27075
27076 /* Disable interrupt on the board. */
27077 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27078 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27079
27080 /* Make sure there is room in the command FIFO */
27081 /* Actually it should be completely empty at this time */
27082 @@ -2884,13 +2886,13 @@ resend_cmd1:
27083 /* tape side of the driver. */
27084 for (i = 200000; i > 0; i--) {
27085 /* if fifo isn't full go */
27086 - if (!(h->access.fifo_full(h)))
27087 + if (!(h->access->fifo_full(h)))
27088 break;
27089 udelay(10);
27090 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27091 " waiting!\n", h->ctlr);
27092 }
27093 - h->access.submit_command(h, c); /* Send the cmd */
27094 + h->access->submit_command(h, c); /* Send the cmd */
27095 do {
27096 complete = pollcomplete(h->ctlr);
27097
27098 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27099 while (!hlist_empty(&h->reqQ)) {
27100 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27101 /* can't do anything if fifo is full */
27102 - if ((h->access.fifo_full(h))) {
27103 + if ((h->access->fifo_full(h))) {
27104 printk(KERN_WARNING "cciss: fifo full\n");
27105 break;
27106 }
27107 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27108 h->Qdepth--;
27109
27110 /* Tell the controller execute command */
27111 - h->access.submit_command(h, c);
27112 + h->access->submit_command(h, c);
27113
27114 /* Put job onto the completed Q */
27115 addQ(&h->cmpQ, c);
27116 @@ -3393,17 +3395,17 @@ startio:
27117
27118 static inline unsigned long get_next_completion(ctlr_info_t *h)
27119 {
27120 - return h->access.command_completed(h);
27121 + return h->access->command_completed(h);
27122 }
27123
27124 static inline int interrupt_pending(ctlr_info_t *h)
27125 {
27126 - return h->access.intr_pending(h);
27127 + return h->access->intr_pending(h);
27128 }
27129
27130 static inline long interrupt_not_for_us(ctlr_info_t *h)
27131 {
27132 - return (((h->access.intr_pending(h) == 0) ||
27133 + return (((h->access->intr_pending(h) == 0) ||
27134 (h->interrupts_enabled == 0)));
27135 }
27136
27137 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27138 */
27139 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27140 c->product_name = products[prod_index].product_name;
27141 - c->access = *(products[prod_index].access);
27142 + c->access = products[prod_index].access;
27143 c->nr_cmds = c->max_commands - 4;
27144 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27145 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27146 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27147 }
27148
27149 /* make sure the board interrupts are off */
27150 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27151 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27152 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27153 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27154 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27155 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27156 cciss_scsi_setup(i);
27157
27158 /* Turn the interrupts on so we can service requests */
27159 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27160 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27161
27162 /* Get the firmware version */
27163 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27164 diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27165 --- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27166 +++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27167 @@ -90,7 +90,7 @@ struct ctlr_info
27168 // information about each logical volume
27169 drive_info_struct *drv[CISS_MAX_LUN];
27170
27171 - struct access_method access;
27172 + struct access_method *access;
27173
27174 /* queue and queue Info */
27175 struct hlist_head reqQ;
27176 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27177 --- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27178 +++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27179 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27180 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27181 goto Enomem4;
27182 }
27183 - hba[i]->access.set_intr_mask(hba[i], 0);
27184 + hba[i]->access->set_intr_mask(hba[i], 0);
27185 if (request_irq(hba[i]->intr, do_ida_intr,
27186 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27187 {
27188 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27189 add_timer(&hba[i]->timer);
27190
27191 /* Enable IRQ now that spinlock and rate limit timer are set up */
27192 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27193 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27194
27195 for(j=0; j<NWD; j++) {
27196 struct gendisk *disk = ida_gendisk[i][j];
27197 @@ -695,7 +695,7 @@ DBGINFO(
27198 for(i=0; i<NR_PRODUCTS; i++) {
27199 if (board_id == products[i].board_id) {
27200 c->product_name = products[i].product_name;
27201 - c->access = *(products[i].access);
27202 + c->access = products[i].access;
27203 break;
27204 }
27205 }
27206 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27207 hba[ctlr]->intr = intr;
27208 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27209 hba[ctlr]->product_name = products[j].product_name;
27210 - hba[ctlr]->access = *(products[j].access);
27211 + hba[ctlr]->access = products[j].access;
27212 hba[ctlr]->ctlr = ctlr;
27213 hba[ctlr]->board_id = board_id;
27214 hba[ctlr]->pci_dev = NULL; /* not PCI */
27215 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27216 struct scatterlist tmp_sg[SG_MAX];
27217 int i, dir, seg;
27218
27219 + pax_track_stack();
27220 +
27221 if (blk_queue_plugged(q))
27222 goto startio;
27223
27224 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27225
27226 while((c = h->reqQ) != NULL) {
27227 /* Can't do anything if we're busy */
27228 - if (h->access.fifo_full(h) == 0)
27229 + if (h->access->fifo_full(h) == 0)
27230 return;
27231
27232 /* Get the first entry from the request Q */
27233 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27234 h->Qdepth--;
27235
27236 /* Tell the controller to do our bidding */
27237 - h->access.submit_command(h, c);
27238 + h->access->submit_command(h, c);
27239
27240 /* Get onto the completion Q */
27241 addQ(&h->cmpQ, c);
27242 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27243 unsigned long flags;
27244 __u32 a,a1;
27245
27246 - istat = h->access.intr_pending(h);
27247 + istat = h->access->intr_pending(h);
27248 /* Is this interrupt for us? */
27249 if (istat == 0)
27250 return IRQ_NONE;
27251 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27252 */
27253 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27254 if (istat & FIFO_NOT_EMPTY) {
27255 - while((a = h->access.command_completed(h))) {
27256 + while((a = h->access->command_completed(h))) {
27257 a1 = a; a &= ~3;
27258 if ((c = h->cmpQ) == NULL)
27259 {
27260 @@ -1434,11 +1436,11 @@ static int sendcmd(
27261 /*
27262 * Disable interrupt
27263 */
27264 - info_p->access.set_intr_mask(info_p, 0);
27265 + info_p->access->set_intr_mask(info_p, 0);
27266 /* Make sure there is room in the command FIFO */
27267 /* Actually it should be completely empty at this time. */
27268 for (i = 200000; i > 0; i--) {
27269 - temp = info_p->access.fifo_full(info_p);
27270 + temp = info_p->access->fifo_full(info_p);
27271 if (temp != 0) {
27272 break;
27273 }
27274 @@ -1451,7 +1453,7 @@ DBG(
27275 /*
27276 * Send the cmd
27277 */
27278 - info_p->access.submit_command(info_p, c);
27279 + info_p->access->submit_command(info_p, c);
27280 complete = pollcomplete(ctlr);
27281
27282 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27283 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27284 * we check the new geometry. Then turn interrupts back on when
27285 * we're done.
27286 */
27287 - host->access.set_intr_mask(host, 0);
27288 + host->access->set_intr_mask(host, 0);
27289 getgeometry(ctlr);
27290 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27291 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27292
27293 for(i=0; i<NWD; i++) {
27294 struct gendisk *disk = ida_gendisk[ctlr][i];
27295 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27296 /* Wait (up to 2 seconds) for a command to complete */
27297
27298 for (i = 200000; i > 0; i--) {
27299 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27300 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27301 if (done == 0) {
27302 udelay(10); /* a short fixed delay */
27303 } else
27304 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27305 --- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27306 +++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27307 @@ -99,7 +99,7 @@ struct ctlr_info {
27308 drv_info_t drv[NWD];
27309 struct proc_dir_entry *proc;
27310
27311 - struct access_method access;
27312 + struct access_method *access;
27313
27314 cmdlist_t *reqQ;
27315 cmdlist_t *cmpQ;
27316 diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27317 --- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27318 +++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27319 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27320 unsigned long flags;
27321 int Channel, TargetID;
27322
27323 + pax_track_stack();
27324 +
27325 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27326 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27327 sizeof(DAC960_SCSI_Inquiry_T) +
27328 diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27329 --- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27330 +++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27331 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27332 struct kvec iov;
27333 sigset_t blocked, oldset;
27334
27335 + pax_track_stack();
27336 +
27337 if (unlikely(!sock)) {
27338 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27339 lo->disk->disk_name, (send ? "send" : "recv"));
27340 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27341 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27342 unsigned int cmd, unsigned long arg)
27343 {
27344 + pax_track_stack();
27345 +
27346 switch (cmd) {
27347 case NBD_DISCONNECT: {
27348 struct request sreq;
27349 diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27350 --- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27351 +++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27352 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27353 return len;
27354 }
27355
27356 -static struct sysfs_ops kobj_pkt_ops = {
27357 +static const struct sysfs_ops kobj_pkt_ops = {
27358 .show = kobj_pkt_show,
27359 .store = kobj_pkt_store
27360 };
27361 diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27362 --- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27363 +++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27364 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27365 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27366 return -EFAULT;
27367
27368 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27369 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27370 return -EFAULT;
27371
27372 client = agp_find_client_by_pid(reserve.pid);
27373 diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27374 --- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27375 +++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27376 @@ -10,6 +10,7 @@
27377 #include <linux/types.h>
27378 #include <linux/errno.h>
27379 #include <linux/tty.h>
27380 +#include <linux/mutex.h>
27381 #include <linux/timer.h>
27382 #include <linux/kernel.h>
27383 #include <linux/wait.h>
27384 @@ -36,6 +37,7 @@ static int vfd_is_open;
27385 static unsigned char vfd[40];
27386 static int vfd_cursor;
27387 static unsigned char ledpb, led;
27388 +static DEFINE_MUTEX(vfd_mutex);
27389
27390 static void update_vfd(void)
27391 {
27392 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27393 if (!vfd_is_open)
27394 return -EBUSY;
27395
27396 + mutex_lock(&vfd_mutex);
27397 for (;;) {
27398 char c;
27399 if (!indx)
27400 break;
27401 - if (get_user(c, buf))
27402 + if (get_user(c, buf)) {
27403 + mutex_unlock(&vfd_mutex);
27404 return -EFAULT;
27405 + }
27406 if (esc) {
27407 set_led(c);
27408 esc = 0;
27409 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27410 buf++;
27411 }
27412 update_vfd();
27413 + mutex_unlock(&vfd_mutex);
27414
27415 return len;
27416 }
27417 diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27418 --- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27419 +++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27420 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27421 switch (cmd) {
27422
27423 case RTC_PLL_GET:
27424 + memset(&pll, 0, sizeof(pll));
27425 if (get_rtc_pll(&pll))
27426 return -EINVAL;
27427 else
27428 diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27429 --- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27430 +++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27431 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27432 return 0;
27433 }
27434
27435 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27436 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27437
27438 static int
27439 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27440 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27441 }
27442
27443 static int
27444 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27445 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27446 {
27447 struct hpet_timer __iomem *timer;
27448 struct hpet __iomem *hpet;
27449 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27450 {
27451 struct hpet_info info;
27452
27453 + memset(&info, 0, sizeof(info));
27454 +
27455 if (devp->hd_ireqfreq)
27456 info.hi_ireqfreq =
27457 hpet_time_div(hpetp, devp->hd_ireqfreq);
27458 - else
27459 - info.hi_ireqfreq = 0;
27460 info.hi_flags =
27461 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27462 info.hi_hpet = hpetp->hp_which;
27463 diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27464 --- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27465 +++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27466 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27467 return cnt;
27468 }
27469
27470 -static struct hv_ops hvc_beat_get_put_ops = {
27471 +static const struct hv_ops hvc_beat_get_put_ops = {
27472 .get_chars = hvc_beat_get_chars,
27473 .put_chars = hvc_beat_put_chars,
27474 };
27475 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27476 --- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27477 +++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27478 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27479 * console interfaces but can still be used as a tty device. This has to be
27480 * static because kmalloc will not work during early console init.
27481 */
27482 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27483 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27484 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27485 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27486
27487 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27488 * vty adapters do NOT get an hvc_instantiate() callback since they
27489 * appear after early console init.
27490 */
27491 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27492 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27493 {
27494 struct hvc_struct *hp;
27495
27496 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27497 };
27498
27499 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27500 - struct hv_ops *ops, int outbuf_size)
27501 + const struct hv_ops *ops, int outbuf_size)
27502 {
27503 struct hvc_struct *hp;
27504 int i;
27505 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27506 --- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27507 +++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27508 @@ -55,7 +55,7 @@ struct hvc_struct {
27509 int outbuf_size;
27510 int n_outbuf;
27511 uint32_t vtermno;
27512 - struct hv_ops *ops;
27513 + const struct hv_ops *ops;
27514 int irq_requested;
27515 int data;
27516 struct winsize ws;
27517 @@ -76,11 +76,11 @@ struct hv_ops {
27518 };
27519
27520 /* Register a vterm and a slot index for use as a console (console_init) */
27521 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27522 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27523
27524 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27525 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27526 - struct hv_ops *ops, int outbuf_size);
27527 + const struct hv_ops *ops, int outbuf_size);
27528 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27529 extern int hvc_remove(struct hvc_struct *hp);
27530
27531 diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27532 --- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27533 +++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27534 @@ -197,7 +197,7 @@ done:
27535 return sent;
27536 }
27537
27538 -static struct hv_ops hvc_get_put_ops = {
27539 +static const struct hv_ops hvc_get_put_ops = {
27540 .get_chars = get_chars,
27541 .put_chars = put_chars,
27542 .notifier_add = notifier_add_irq,
27543 diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27544 --- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27545 +++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27546 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27547
27548
27549 /* HVC operations */
27550 -static struct hv_ops hvc_iucv_ops = {
27551 +static const struct hv_ops hvc_iucv_ops = {
27552 .get_chars = hvc_iucv_get_chars,
27553 .put_chars = hvc_iucv_put_chars,
27554 .notifier_add = hvc_iucv_notifier_add,
27555 diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27556 --- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27557 +++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27558 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27559 return i;
27560 }
27561
27562 -static struct hv_ops hvc_rtas_get_put_ops = {
27563 +static const struct hv_ops hvc_rtas_get_put_ops = {
27564 .get_chars = hvc_rtas_read_console,
27565 .put_chars = hvc_rtas_write_console,
27566 };
27567 diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27568 --- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27569 +++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27570 @@ -82,6 +82,7 @@
27571 #include <asm/hvcserver.h>
27572 #include <asm/uaccess.h>
27573 #include <asm/vio.h>
27574 +#include <asm/local.h>
27575
27576 /*
27577 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27578 @@ -269,7 +270,7 @@ struct hvcs_struct {
27579 unsigned int index;
27580
27581 struct tty_struct *tty;
27582 - int open_count;
27583 + local_t open_count;
27584
27585 /*
27586 * Used to tell the driver kernel_thread what operations need to take
27587 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27588
27589 spin_lock_irqsave(&hvcsd->lock, flags);
27590
27591 - if (hvcsd->open_count > 0) {
27592 + if (local_read(&hvcsd->open_count) > 0) {
27593 spin_unlock_irqrestore(&hvcsd->lock, flags);
27594 printk(KERN_INFO "HVCS: vterm state unchanged. "
27595 "The hvcs device node is still in use.\n");
27596 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27597 if ((retval = hvcs_partner_connect(hvcsd)))
27598 goto error_release;
27599
27600 - hvcsd->open_count = 1;
27601 + local_set(&hvcsd->open_count, 1);
27602 hvcsd->tty = tty;
27603 tty->driver_data = hvcsd;
27604
27605 @@ -1169,7 +1170,7 @@ fast_open:
27606
27607 spin_lock_irqsave(&hvcsd->lock, flags);
27608 kref_get(&hvcsd->kref);
27609 - hvcsd->open_count++;
27610 + local_inc(&hvcsd->open_count);
27611 hvcsd->todo_mask |= HVCS_SCHED_READ;
27612 spin_unlock_irqrestore(&hvcsd->lock, flags);
27613
27614 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27615 hvcsd = tty->driver_data;
27616
27617 spin_lock_irqsave(&hvcsd->lock, flags);
27618 - if (--hvcsd->open_count == 0) {
27619 + if (local_dec_and_test(&hvcsd->open_count)) {
27620
27621 vio_disable_interrupts(hvcsd->vdev);
27622
27623 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27624 free_irq(irq, hvcsd);
27625 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27626 return;
27627 - } else if (hvcsd->open_count < 0) {
27628 + } else if (local_read(&hvcsd->open_count) < 0) {
27629 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27630 " is missmanaged.\n",
27631 - hvcsd->vdev->unit_address, hvcsd->open_count);
27632 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27633 }
27634
27635 spin_unlock_irqrestore(&hvcsd->lock, flags);
27636 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27637
27638 spin_lock_irqsave(&hvcsd->lock, flags);
27639 /* Preserve this so that we know how many kref refs to put */
27640 - temp_open_count = hvcsd->open_count;
27641 + temp_open_count = local_read(&hvcsd->open_count);
27642
27643 /*
27644 * Don't kref put inside the spinlock because the destruction
27645 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27646 hvcsd->tty->driver_data = NULL;
27647 hvcsd->tty = NULL;
27648
27649 - hvcsd->open_count = 0;
27650 + local_set(&hvcsd->open_count, 0);
27651
27652 /* This will drop any buffered data on the floor which is OK in a hangup
27653 * scenario. */
27654 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27655 * the middle of a write operation? This is a crummy place to do this
27656 * but we want to keep it all in the spinlock.
27657 */
27658 - if (hvcsd->open_count <= 0) {
27659 + if (local_read(&hvcsd->open_count) <= 0) {
27660 spin_unlock_irqrestore(&hvcsd->lock, flags);
27661 return -ENODEV;
27662 }
27663 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27664 {
27665 struct hvcs_struct *hvcsd = tty->driver_data;
27666
27667 - if (!hvcsd || hvcsd->open_count <= 0)
27668 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27669 return 0;
27670
27671 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27672 diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27673 --- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27674 +++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27675 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27676 return i;
27677 }
27678
27679 -static struct hv_ops hvc_udbg_ops = {
27680 +static const struct hv_ops hvc_udbg_ops = {
27681 .get_chars = hvc_udbg_get,
27682 .put_chars = hvc_udbg_put,
27683 };
27684 diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27685 --- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27686 +++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27687 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27688 return got;
27689 }
27690
27691 -static struct hv_ops hvc_get_put_ops = {
27692 +static const struct hv_ops hvc_get_put_ops = {
27693 .get_chars = filtered_get_chars,
27694 .put_chars = hvc_put_chars,
27695 .notifier_add = notifier_add_irq,
27696 diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27697 --- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27698 +++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27699 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27700 return recv;
27701 }
27702
27703 -static struct hv_ops hvc_ops = {
27704 +static const struct hv_ops hvc_ops = {
27705 .get_chars = read_console,
27706 .put_chars = write_console,
27707 .notifier_add = notifier_add_irq,
27708 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27709 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27710 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27711 @@ -414,7 +414,7 @@ struct ipmi_smi {
27712 struct proc_dir_entry *proc_dir;
27713 char proc_dir_name[10];
27714
27715 - atomic_t stats[IPMI_NUM_STATS];
27716 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27717
27718 /*
27719 * run_to_completion duplicate of smb_info, smi_info
27720 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27721
27722
27723 #define ipmi_inc_stat(intf, stat) \
27724 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27725 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27726 #define ipmi_get_stat(intf, stat) \
27727 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27728 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27729
27730 static int is_lan_addr(struct ipmi_addr *addr)
27731 {
27732 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27733 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27734 init_waitqueue_head(&intf->waitq);
27735 for (i = 0; i < IPMI_NUM_STATS; i++)
27736 - atomic_set(&intf->stats[i], 0);
27737 + atomic_set_unchecked(&intf->stats[i], 0);
27738
27739 intf->proc_dir = NULL;
27740
27741 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27742 struct ipmi_smi_msg smi_msg;
27743 struct ipmi_recv_msg recv_msg;
27744
27745 + pax_track_stack();
27746 +
27747 si = (struct ipmi_system_interface_addr *) &addr;
27748 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27749 si->channel = IPMI_BMC_CHANNEL;
27750 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27751 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27752 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27753 @@ -277,7 +277,7 @@ struct smi_info {
27754 unsigned char slave_addr;
27755
27756 /* Counters and things for the proc filesystem. */
27757 - atomic_t stats[SI_NUM_STATS];
27758 + atomic_unchecked_t stats[SI_NUM_STATS];
27759
27760 struct task_struct *thread;
27761
27762 @@ -285,9 +285,9 @@ struct smi_info {
27763 };
27764
27765 #define smi_inc_stat(smi, stat) \
27766 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27767 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27768 #define smi_get_stat(smi, stat) \
27769 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27770 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27771
27772 #define SI_MAX_PARMS 4
27773
27774 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27775 atomic_set(&new_smi->req_events, 0);
27776 new_smi->run_to_completion = 0;
27777 for (i = 0; i < SI_NUM_STATS; i++)
27778 - atomic_set(&new_smi->stats[i], 0);
27779 + atomic_set_unchecked(&new_smi->stats[i], 0);
27780
27781 new_smi->interrupt_disabled = 0;
27782 atomic_set(&new_smi->stop_operation, 0);
27783 diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
27784 --- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27785 +++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27786 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27787 * re-used for each stats call.
27788 */
27789 static comstats_t stli_comstats;
27790 -static combrd_t stli_brdstats;
27791 static struct asystats stli_cdkstats;
27792
27793 /*****************************************************************************/
27794 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27795 {
27796 struct stlibrd *brdp;
27797 unsigned int i;
27798 + combrd_t stli_brdstats;
27799
27800 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27801 return -EFAULT;
27802 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27803 struct stliport stli_dummyport;
27804 struct stliport *portp;
27805
27806 + pax_track_stack();
27807 +
27808 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27809 return -EFAULT;
27810 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27811 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27812 struct stlibrd stli_dummybrd;
27813 struct stlibrd *brdp;
27814
27815 + pax_track_stack();
27816 +
27817 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27818 return -EFAULT;
27819 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27820 diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
27821 --- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27822 +++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27823 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27824
27825 config DEVKMEM
27826 bool "/dev/kmem virtual device support"
27827 - default y
27828 + default n
27829 + depends on !GRKERNSEC_KMEM
27830 help
27831 Say Y here if you want to support the /dev/kmem device. The
27832 /dev/kmem device is rarely used, but can be used for certain
27833 @@ -1114,6 +1115,7 @@ config DEVPORT
27834 bool
27835 depends on !M68K
27836 depends on ISA || PCI
27837 + depends on !GRKERNSEC_KMEM
27838 default y
27839
27840 source "drivers/s390/char/Kconfig"
27841 diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
27842 --- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27843 +++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27844 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27845 kbd->kbdmode == VC_MEDIUMRAW) &&
27846 value != KVAL(K_SAK))
27847 return; /* SAK is allowed even in raw mode */
27848 +
27849 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27850 + {
27851 + void *func = fn_handler[value];
27852 + if (func == fn_show_state || func == fn_show_ptregs ||
27853 + func == fn_show_mem)
27854 + return;
27855 + }
27856 +#endif
27857 +
27858 fn_handler[value](vc);
27859 }
27860
27861 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27862 .evbit = { BIT_MASK(EV_SND) },
27863 },
27864
27865 - { }, /* Terminating entry */
27866 + { 0 }, /* Terminating entry */
27867 };
27868
27869 MODULE_DEVICE_TABLE(input, kbd_ids);
27870 diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
27871 --- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27872 +++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27873 @@ -18,6 +18,7 @@
27874 #include <linux/raw.h>
27875 #include <linux/tty.h>
27876 #include <linux/capability.h>
27877 +#include <linux/security.h>
27878 #include <linux/ptrace.h>
27879 #include <linux/device.h>
27880 #include <linux/highmem.h>
27881 @@ -35,6 +36,10 @@
27882 # include <linux/efi.h>
27883 #endif
27884
27885 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27886 +extern struct file_operations grsec_fops;
27887 +#endif
27888 +
27889 static inline unsigned long size_inside_page(unsigned long start,
27890 unsigned long size)
27891 {
27892 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27893
27894 while (cursor < to) {
27895 if (!devmem_is_allowed(pfn)) {
27896 +#ifdef CONFIG_GRKERNSEC_KMEM
27897 + gr_handle_mem_readwrite(from, to);
27898 +#else
27899 printk(KERN_INFO
27900 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27901 current->comm, from, to);
27902 +#endif
27903 return 0;
27904 }
27905 cursor += PAGE_SIZE;
27906 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27907 }
27908 return 1;
27909 }
27910 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27911 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27912 +{
27913 + return 0;
27914 +}
27915 #else
27916 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27917 {
27918 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27919 #endif
27920
27921 while (count > 0) {
27922 + char *temp;
27923 +
27924 /*
27925 * Handle first page in case it's not aligned
27926 */
27927 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27928 if (!ptr)
27929 return -EFAULT;
27930
27931 - if (copy_to_user(buf, ptr, sz)) {
27932 +#ifdef CONFIG_PAX_USERCOPY
27933 + temp = kmalloc(sz, GFP_KERNEL);
27934 + if (!temp) {
27935 + unxlate_dev_mem_ptr(p, ptr);
27936 + return -ENOMEM;
27937 + }
27938 + memcpy(temp, ptr, sz);
27939 +#else
27940 + temp = ptr;
27941 +#endif
27942 +
27943 + if (copy_to_user(buf, temp, sz)) {
27944 +
27945 +#ifdef CONFIG_PAX_USERCOPY
27946 + kfree(temp);
27947 +#endif
27948 +
27949 unxlate_dev_mem_ptr(p, ptr);
27950 return -EFAULT;
27951 }
27952
27953 +#ifdef CONFIG_PAX_USERCOPY
27954 + kfree(temp);
27955 +#endif
27956 +
27957 unxlate_dev_mem_ptr(p, ptr);
27958
27959 buf += sz;
27960 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27961 size_t count, loff_t *ppos)
27962 {
27963 unsigned long p = *ppos;
27964 - ssize_t low_count, read, sz;
27965 + ssize_t low_count, read, sz, err = 0;
27966 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27967 - int err = 0;
27968
27969 read = 0;
27970 if (p < (unsigned long) high_memory) {
27971 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27972 }
27973 #endif
27974 while (low_count > 0) {
27975 + char *temp;
27976 +
27977 sz = size_inside_page(p, low_count);
27978
27979 /*
27980 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27981 */
27982 kbuf = xlate_dev_kmem_ptr((char *)p);
27983
27984 - if (copy_to_user(buf, kbuf, sz))
27985 +#ifdef CONFIG_PAX_USERCOPY
27986 + temp = kmalloc(sz, GFP_KERNEL);
27987 + if (!temp)
27988 + return -ENOMEM;
27989 + memcpy(temp, kbuf, sz);
27990 +#else
27991 + temp = kbuf;
27992 +#endif
27993 +
27994 + err = copy_to_user(buf, temp, sz);
27995 +
27996 +#ifdef CONFIG_PAX_USERCOPY
27997 + kfree(temp);
27998 +#endif
27999 +
28000 + if (err)
28001 return -EFAULT;
28002 buf += sz;
28003 p += sz;
28004 @@ -889,6 +941,9 @@ static const struct memdev {
28005 #ifdef CONFIG_CRASH_DUMP
28006 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28007 #endif
28008 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28009 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28010 +#endif
28011 };
28012
28013 static int memory_open(struct inode *inode, struct file *filp)
28014 diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28015 --- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28016 +++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28017 @@ -29,6 +29,7 @@
28018 #include <linux/tty_driver.h>
28019 #include <linux/tty_flip.h>
28020 #include <linux/uaccess.h>
28021 +#include <asm/local.h>
28022
28023 #include "tty.h"
28024 #include "network.h"
28025 @@ -51,7 +52,7 @@ struct ipw_tty {
28026 int tty_type;
28027 struct ipw_network *network;
28028 struct tty_struct *linux_tty;
28029 - int open_count;
28030 + local_t open_count;
28031 unsigned int control_lines;
28032 struct mutex ipw_tty_mutex;
28033 int tx_bytes_queued;
28034 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28035 mutex_unlock(&tty->ipw_tty_mutex);
28036 return -ENODEV;
28037 }
28038 - if (tty->open_count == 0)
28039 + if (local_read(&tty->open_count) == 0)
28040 tty->tx_bytes_queued = 0;
28041
28042 - tty->open_count++;
28043 + local_inc(&tty->open_count);
28044
28045 tty->linux_tty = linux_tty;
28046 linux_tty->driver_data = tty;
28047 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28048
28049 static void do_ipw_close(struct ipw_tty *tty)
28050 {
28051 - tty->open_count--;
28052 -
28053 - if (tty->open_count == 0) {
28054 + if (local_dec_return(&tty->open_count) == 0) {
28055 struct tty_struct *linux_tty = tty->linux_tty;
28056
28057 if (linux_tty != NULL) {
28058 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28059 return;
28060
28061 mutex_lock(&tty->ipw_tty_mutex);
28062 - if (tty->open_count == 0) {
28063 + if (local_read(&tty->open_count) == 0) {
28064 mutex_unlock(&tty->ipw_tty_mutex);
28065 return;
28066 }
28067 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28068 return;
28069 }
28070
28071 - if (!tty->open_count) {
28072 + if (!local_read(&tty->open_count)) {
28073 mutex_unlock(&tty->ipw_tty_mutex);
28074 return;
28075 }
28076 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28077 return -ENODEV;
28078
28079 mutex_lock(&tty->ipw_tty_mutex);
28080 - if (!tty->open_count) {
28081 + if (!local_read(&tty->open_count)) {
28082 mutex_unlock(&tty->ipw_tty_mutex);
28083 return -EINVAL;
28084 }
28085 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28086 if (!tty)
28087 return -ENODEV;
28088
28089 - if (!tty->open_count)
28090 + if (!local_read(&tty->open_count))
28091 return -EINVAL;
28092
28093 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28094 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28095 if (!tty)
28096 return 0;
28097
28098 - if (!tty->open_count)
28099 + if (!local_read(&tty->open_count))
28100 return 0;
28101
28102 return tty->tx_bytes_queued;
28103 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28104 if (!tty)
28105 return -ENODEV;
28106
28107 - if (!tty->open_count)
28108 + if (!local_read(&tty->open_count))
28109 return -EINVAL;
28110
28111 return get_control_lines(tty);
28112 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28113 if (!tty)
28114 return -ENODEV;
28115
28116 - if (!tty->open_count)
28117 + if (!local_read(&tty->open_count))
28118 return -EINVAL;
28119
28120 return set_control_lines(tty, set, clear);
28121 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28122 if (!tty)
28123 return -ENODEV;
28124
28125 - if (!tty->open_count)
28126 + if (!local_read(&tty->open_count))
28127 return -EINVAL;
28128
28129 /* FIXME: Exactly how is the tty object locked here .. */
28130 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28131 against a parallel ioctl etc */
28132 mutex_lock(&ttyj->ipw_tty_mutex);
28133 }
28134 - while (ttyj->open_count)
28135 + while (local_read(&ttyj->open_count))
28136 do_ipw_close(ttyj);
28137 ipwireless_disassociate_network_ttys(network,
28138 ttyj->channel_idx);
28139 diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28140 --- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28141 +++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28142 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28143 register_sysctl_table(pty_root_table);
28144
28145 /* Now create the /dev/ptmx special device */
28146 + pax_open_kernel();
28147 tty_default_fops(&ptmx_fops);
28148 - ptmx_fops.open = ptmx_open;
28149 + *(void **)&ptmx_fops.open = ptmx_open;
28150 + pax_close_kernel();
28151
28152 cdev_init(&ptmx_cdev, &ptmx_fops);
28153 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28154 diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28155 --- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28156 +++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28157 @@ -254,8 +254,13 @@
28158 /*
28159 * Configuration information
28160 */
28161 +#ifdef CONFIG_GRKERNSEC_RANDNET
28162 +#define INPUT_POOL_WORDS 512
28163 +#define OUTPUT_POOL_WORDS 128
28164 +#else
28165 #define INPUT_POOL_WORDS 128
28166 #define OUTPUT_POOL_WORDS 32
28167 +#endif
28168 #define SEC_XFER_SIZE 512
28169
28170 /*
28171 @@ -292,10 +297,17 @@ static struct poolinfo {
28172 int poolwords;
28173 int tap1, tap2, tap3, tap4, tap5;
28174 } poolinfo_table[] = {
28175 +#ifdef CONFIG_GRKERNSEC_RANDNET
28176 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28177 + { 512, 411, 308, 208, 104, 1 },
28178 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28179 + { 128, 103, 76, 51, 25, 1 },
28180 +#else
28181 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28182 { 128, 103, 76, 51, 25, 1 },
28183 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28184 { 32, 26, 20, 14, 7, 1 },
28185 +#endif
28186 #if 0
28187 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28188 { 2048, 1638, 1231, 819, 411, 1 },
28189 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28190 #include <linux/sysctl.h>
28191
28192 static int min_read_thresh = 8, min_write_thresh;
28193 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28194 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28195 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28196 static char sysctl_bootid[16];
28197
28198 diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28199 --- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28200 +++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28201 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28202 struct rocket_ports tmp;
28203 int board;
28204
28205 + pax_track_stack();
28206 +
28207 if (!retports)
28208 return -EFAULT;
28209 memset(&tmp, 0, sizeof (tmp));
28210 diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28211 --- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28212 +++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28213 @@ -55,6 +55,7 @@
28214 #include <asm/uaccess.h>
28215 #include <asm/io.h>
28216 #include <asm/system.h>
28217 +#include <asm/local.h>
28218
28219 #include <linux/sonypi.h>
28220
28221 @@ -491,7 +492,7 @@ static struct sonypi_device {
28222 spinlock_t fifo_lock;
28223 wait_queue_head_t fifo_proc_list;
28224 struct fasync_struct *fifo_async;
28225 - int open_count;
28226 + local_t open_count;
28227 int model;
28228 struct input_dev *input_jog_dev;
28229 struct input_dev *input_key_dev;
28230 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28231 static int sonypi_misc_release(struct inode *inode, struct file *file)
28232 {
28233 mutex_lock(&sonypi_device.lock);
28234 - sonypi_device.open_count--;
28235 + local_dec(&sonypi_device.open_count);
28236 mutex_unlock(&sonypi_device.lock);
28237 return 0;
28238 }
28239 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28240 lock_kernel();
28241 mutex_lock(&sonypi_device.lock);
28242 /* Flush input queue on first open */
28243 - if (!sonypi_device.open_count)
28244 + if (!local_read(&sonypi_device.open_count))
28245 kfifo_reset(sonypi_device.fifo);
28246 - sonypi_device.open_count++;
28247 + local_inc(&sonypi_device.open_count);
28248 mutex_unlock(&sonypi_device.lock);
28249 unlock_kernel();
28250 return 0;
28251 diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28252 --- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28253 +++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28254 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28255 struct stlport stl_dummyport;
28256 struct stlport *portp;
28257
28258 + pax_track_stack();
28259 +
28260 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28261 return -EFAULT;
28262 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28263 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28264 --- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28265 +++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28266 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28267 event = addr;
28268
28269 if ((event->event_type == 0 && event->event_size == 0) ||
28270 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28271 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28272 return NULL;
28273
28274 return addr;
28275 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28276 return NULL;
28277
28278 if ((event->event_type == 0 && event->event_size == 0) ||
28279 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28280 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28281 return NULL;
28282
28283 (*pos)++;
28284 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28285 int i;
28286
28287 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28288 - seq_putc(m, data[i]);
28289 + if (!seq_putc(m, data[i]))
28290 + return -EFAULT;
28291
28292 return 0;
28293 }
28294 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28295 log->bios_event_log_end = log->bios_event_log + len;
28296
28297 virt = acpi_os_map_memory(start, len);
28298 + if (!virt) {
28299 + kfree(log->bios_event_log);
28300 + log->bios_event_log = NULL;
28301 + return -EFAULT;
28302 + }
28303
28304 memcpy(log->bios_event_log, virt, len);
28305
28306 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28307 --- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28308 +++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28309 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28310 chip->vendor.req_complete_val)
28311 goto out_recv;
28312
28313 - if ((status == chip->vendor.req_canceled)) {
28314 + if (status == chip->vendor.req_canceled) {
28315 dev_err(chip->dev, "Operation Canceled\n");
28316 rc = -ECANCELED;
28317 goto out;
28318 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28319
28320 struct tpm_chip *chip = dev_get_drvdata(dev);
28321
28322 + pax_track_stack();
28323 +
28324 tpm_cmd.header.in = tpm_readpubek_header;
28325 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28326 "attempting to read the PUBEK");
28327 diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28328 --- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28329 +++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28330 @@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28331 return retval;
28332 }
28333
28334 +EXPORT_SYMBOL(tty_ioctl);
28335 +
28336 #ifdef CONFIG_COMPAT
28337 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28338 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
28339 unsigned long arg)
28340 {
28341 struct inode *inode = file->f_dentry->d_inode;
28342 @@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28343
28344 return retval;
28345 }
28346 +
28347 +EXPORT_SYMBOL(tty_compat_ioctl);
28348 #endif
28349
28350 /*
28351 @@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28352
28353 void tty_default_fops(struct file_operations *fops)
28354 {
28355 - *fops = tty_fops;
28356 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28357 }
28358
28359 /*
28360 diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28361 --- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28362 +++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28363 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28364 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28365 struct tty_ldisc_ops *ldo = ld->ops;
28366
28367 - ldo->refcount--;
28368 + atomic_dec(&ldo->refcount);
28369 module_put(ldo->owner);
28370 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28371
28372 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28373 spin_lock_irqsave(&tty_ldisc_lock, flags);
28374 tty_ldiscs[disc] = new_ldisc;
28375 new_ldisc->num = disc;
28376 - new_ldisc->refcount = 0;
28377 + atomic_set(&new_ldisc->refcount, 0);
28378 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28379
28380 return ret;
28381 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28382 return -EINVAL;
28383
28384 spin_lock_irqsave(&tty_ldisc_lock, flags);
28385 - if (tty_ldiscs[disc]->refcount)
28386 + if (atomic_read(&tty_ldiscs[disc]->refcount))
28387 ret = -EBUSY;
28388 else
28389 tty_ldiscs[disc] = NULL;
28390 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28391 if (ldops) {
28392 ret = ERR_PTR(-EAGAIN);
28393 if (try_module_get(ldops->owner)) {
28394 - ldops->refcount++;
28395 + atomic_inc(&ldops->refcount);
28396 ret = ldops;
28397 }
28398 }
28399 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28400 unsigned long flags;
28401
28402 spin_lock_irqsave(&tty_ldisc_lock, flags);
28403 - ldops->refcount--;
28404 + atomic_dec(&ldops->refcount);
28405 module_put(ldops->owner);
28406 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28407 }
28408 diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28409 --- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28410 +++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28411 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28412 * virtqueue, so we let the drivers do some boutique early-output thing. */
28413 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28414 {
28415 - virtio_cons.put_chars = put_chars;
28416 + pax_open_kernel();
28417 + *(void **)&virtio_cons.put_chars = put_chars;
28418 + pax_close_kernel();
28419 return hvc_instantiate(0, 0, &virtio_cons);
28420 }
28421
28422 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28423 out_vq = vqs[1];
28424
28425 /* Start using the new console output. */
28426 - virtio_cons.get_chars = get_chars;
28427 - virtio_cons.put_chars = put_chars;
28428 - virtio_cons.notifier_add = notifier_add_vio;
28429 - virtio_cons.notifier_del = notifier_del_vio;
28430 - virtio_cons.notifier_hangup = notifier_del_vio;
28431 + pax_open_kernel();
28432 + *(void **)&virtio_cons.get_chars = get_chars;
28433 + *(void **)&virtio_cons.put_chars = put_chars;
28434 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28435 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28436 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28437 + pax_close_kernel();
28438
28439 /* The first argument of hvc_alloc() is the virtual console number, so
28440 * we use zero. The second argument is the parameter for the
28441 diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28442 --- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28443 +++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28444 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28445
28446 static void notify_write(struct vc_data *vc, unsigned int unicode)
28447 {
28448 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28449 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28450 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28451 }
28452
28453 diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28454 --- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28455 +++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28456 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28457 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28458 return -EFAULT;
28459
28460 - if (!capable(CAP_SYS_TTY_CONFIG))
28461 - perm = 0;
28462 -
28463 switch (cmd) {
28464 case KDGKBENT:
28465 key_map = key_maps[s];
28466 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28467 val = (i ? K_HOLE : K_NOSUCHMAP);
28468 return put_user(val, &user_kbe->kb_value);
28469 case KDSKBENT:
28470 + if (!capable(CAP_SYS_TTY_CONFIG))
28471 + perm = 0;
28472 +
28473 if (!perm)
28474 return -EPERM;
28475 +
28476 if (!i && v == K_NOSUCHMAP) {
28477 /* deallocate map */
28478 key_map = key_maps[s];
28479 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28480 int i, j, k;
28481 int ret;
28482
28483 - if (!capable(CAP_SYS_TTY_CONFIG))
28484 - perm = 0;
28485 -
28486 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28487 if (!kbs) {
28488 ret = -ENOMEM;
28489 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28490 kfree(kbs);
28491 return ((p && *p) ? -EOVERFLOW : 0);
28492 case KDSKBSENT:
28493 + if (!capable(CAP_SYS_TTY_CONFIG))
28494 + perm = 0;
28495 +
28496 if (!perm) {
28497 ret = -EPERM;
28498 goto reterr;
28499 diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28500 --- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28501 +++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28502 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28503 complete(&policy->kobj_unregister);
28504 }
28505
28506 -static struct sysfs_ops sysfs_ops = {
28507 +static const struct sysfs_ops sysfs_ops = {
28508 .show = show,
28509 .store = store,
28510 };
28511 diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28512 --- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28513 +++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28514 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28515 return ret;
28516 }
28517
28518 -static struct sysfs_ops cpuidle_sysfs_ops = {
28519 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28520 .show = cpuidle_show,
28521 .store = cpuidle_store,
28522 };
28523 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28524 return ret;
28525 }
28526
28527 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28528 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28529 .show = cpuidle_state_show,
28530 };
28531
28532 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28533 .release = cpuidle_state_sysfs_release,
28534 };
28535
28536 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28537 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28538 {
28539 kobject_put(&device->kobjs[i]->kobj);
28540 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28541 diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28542 --- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28543 +++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28544 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28545 0xCA, 0x34, 0x2B, 0x2E};
28546 struct scatterlist sg;
28547
28548 + pax_track_stack();
28549 +
28550 memset(src, 0, sizeof(src));
28551 memset(ctx.key, 0, sizeof(ctx.key));
28552
28553 diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28554 --- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28555 +++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28556 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28557 struct crypto_aes_ctx gen_aes;
28558 int cpu;
28559
28560 + pax_track_stack();
28561 +
28562 if (key_len % 8) {
28563 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28564 return -EINVAL;
28565 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28566 --- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28567 +++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28568 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28569 return entry->show(&chan->common, page);
28570 }
28571
28572 -struct sysfs_ops ioat_sysfs_ops = {
28573 +const struct sysfs_ops ioat_sysfs_ops = {
28574 .show = ioat_attr_show,
28575 };
28576
28577 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28578 --- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28579 +++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28580 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28581 unsigned long *phys_complete);
28582 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28583 void ioat_kobject_del(struct ioatdma_device *device);
28584 -extern struct sysfs_ops ioat_sysfs_ops;
28585 +extern const struct sysfs_ops ioat_sysfs_ops;
28586 extern struct ioat_sysfs_entry ioat_version_attr;
28587 extern struct ioat_sysfs_entry ioat_cap_attr;
28588 #endif /* IOATDMA_H */
28589 diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28590 --- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28591 +++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28592 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28593 }
28594
28595 /* edac_dev file operations for an 'ctl_info' */
28596 -static struct sysfs_ops device_ctl_info_ops = {
28597 +static const struct sysfs_ops device_ctl_info_ops = {
28598 .show = edac_dev_ctl_info_show,
28599 .store = edac_dev_ctl_info_store
28600 };
28601 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28602 }
28603
28604 /* edac_dev file operations for an 'instance' */
28605 -static struct sysfs_ops device_instance_ops = {
28606 +static const struct sysfs_ops device_instance_ops = {
28607 .show = edac_dev_instance_show,
28608 .store = edac_dev_instance_store
28609 };
28610 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28611 }
28612
28613 /* edac_dev file operations for a 'block' */
28614 -static struct sysfs_ops device_block_ops = {
28615 +static const struct sysfs_ops device_block_ops = {
28616 .show = edac_dev_block_show,
28617 .store = edac_dev_block_store
28618 };
28619 diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28620 --- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28621 +++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28622 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28623 return -EIO;
28624 }
28625
28626 -static struct sysfs_ops csrowfs_ops = {
28627 +static const struct sysfs_ops csrowfs_ops = {
28628 .show = csrowdev_show,
28629 .store = csrowdev_store
28630 };
28631 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28632 }
28633
28634 /* Intermediate show/store table */
28635 -static struct sysfs_ops mci_ops = {
28636 +static const struct sysfs_ops mci_ops = {
28637 .show = mcidev_show,
28638 .store = mcidev_store
28639 };
28640 diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28641 --- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28642 +++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28643 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28644 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28645 static int edac_pci_poll_msec = 1000; /* one second workq period */
28646
28647 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28648 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28649 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28650 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28651
28652 static struct kobject *edac_pci_top_main_kobj;
28653 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28654 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28655 }
28656
28657 /* fs_ops table */
28658 -static struct sysfs_ops pci_instance_ops = {
28659 +static const struct sysfs_ops pci_instance_ops = {
28660 .show = edac_pci_instance_show,
28661 .store = edac_pci_instance_store
28662 };
28663 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28664 return -EIO;
28665 }
28666
28667 -static struct sysfs_ops edac_pci_sysfs_ops = {
28668 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28669 .show = edac_pci_dev_show,
28670 .store = edac_pci_dev_store
28671 };
28672 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28673 edac_printk(KERN_CRIT, EDAC_PCI,
28674 "Signaled System Error on %s\n",
28675 pci_name(dev));
28676 - atomic_inc(&pci_nonparity_count);
28677 + atomic_inc_unchecked(&pci_nonparity_count);
28678 }
28679
28680 if (status & (PCI_STATUS_PARITY)) {
28681 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28682 "Master Data Parity Error on %s\n",
28683 pci_name(dev));
28684
28685 - atomic_inc(&pci_parity_count);
28686 + atomic_inc_unchecked(&pci_parity_count);
28687 }
28688
28689 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28690 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28691 "Detected Parity Error on %s\n",
28692 pci_name(dev));
28693
28694 - atomic_inc(&pci_parity_count);
28695 + atomic_inc_unchecked(&pci_parity_count);
28696 }
28697 }
28698
28699 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28700 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28701 "Signaled System Error on %s\n",
28702 pci_name(dev));
28703 - atomic_inc(&pci_nonparity_count);
28704 + atomic_inc_unchecked(&pci_nonparity_count);
28705 }
28706
28707 if (status & (PCI_STATUS_PARITY)) {
28708 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28709 "Master Data Parity Error on "
28710 "%s\n", pci_name(dev));
28711
28712 - atomic_inc(&pci_parity_count);
28713 + atomic_inc_unchecked(&pci_parity_count);
28714 }
28715
28716 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28717 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28718 "Detected Parity Error on %s\n",
28719 pci_name(dev));
28720
28721 - atomic_inc(&pci_parity_count);
28722 + atomic_inc_unchecked(&pci_parity_count);
28723 }
28724 }
28725 }
28726 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28727 if (!check_pci_errors)
28728 return;
28729
28730 - before_count = atomic_read(&pci_parity_count);
28731 + before_count = atomic_read_unchecked(&pci_parity_count);
28732
28733 /* scan all PCI devices looking for a Parity Error on devices and
28734 * bridges.
28735 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28736 /* Only if operator has selected panic on PCI Error */
28737 if (edac_pci_get_panic_on_pe()) {
28738 /* If the count is different 'after' from 'before' */
28739 - if (before_count != atomic_read(&pci_parity_count))
28740 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28741 panic("EDAC: PCI Parity Error");
28742 }
28743 }
28744 diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28745 --- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28746 +++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
28747 @@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
28748 mutex_unlock(&card_mutex);
28749
28750 /* Switch off most of the card driver interface. */
28751 - dummy_driver.free_iso_context = card->driver->free_iso_context;
28752 - dummy_driver.stop_iso = card->driver->stop_iso;
28753 + pax_open_kernel();
28754 + *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
28755 + *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
28756 + pax_close_kernel();
28757 card->driver = &dummy_driver;
28758
28759 fw_destroy_nodes(card);
28760 diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
28761 --- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28762 +++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28763 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28764 int ret;
28765
28766 if ((request->channels == 0 && request->bandwidth == 0) ||
28767 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28768 - request->bandwidth < 0)
28769 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28770 return -EINVAL;
28771
28772 r = kmalloc(sizeof(*r), GFP_KERNEL);
28773 diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
28774 --- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28775 +++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28776 @@ -36,6 +36,7 @@
28777 #include <linux/string.h>
28778 #include <linux/timer.h>
28779 #include <linux/types.h>
28780 +#include <linux/sched.h>
28781
28782 #include <asm/byteorder.h>
28783
28784 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28785 struct transaction_callback_data d;
28786 struct fw_transaction t;
28787
28788 + pax_track_stack();
28789 +
28790 init_completion(&d.done);
28791 d.payload = payload;
28792 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28793 diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
28794 --- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28795 +++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28796 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28797 }
28798 }
28799 else {
28800 - /*
28801 - * no iounmap() for that ioremap(); it would be a no-op, but
28802 - * it's so early in setup that sucker gets confused into doing
28803 - * what it shouldn't if we actually call it.
28804 - */
28805 p = dmi_ioremap(0xF0000, 0x10000);
28806 if (p == NULL)
28807 goto error;
28808 diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
28809 --- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28810 +++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28811 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28812 return ret;
28813 }
28814
28815 -static struct sysfs_ops edd_attr_ops = {
28816 +static const struct sysfs_ops edd_attr_ops = {
28817 .show = edd_attr_show,
28818 };
28819
28820 diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
28821 --- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28822 +++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28823 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28824 return ret;
28825 }
28826
28827 -static struct sysfs_ops efivar_attr_ops = {
28828 +static const struct sysfs_ops efivar_attr_ops = {
28829 .show = efivar_attr_show,
28830 .store = efivar_attr_store,
28831 };
28832 diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
28833 --- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28834 +++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28835 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28836 return ret;
28837 }
28838
28839 -static struct sysfs_ops ibft_attr_ops = {
28840 +static const struct sysfs_ops ibft_attr_ops = {
28841 .show = ibft_show_attribute,
28842 };
28843
28844 diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
28845 --- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28846 +++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28847 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28848 NULL
28849 };
28850
28851 -static struct sysfs_ops memmap_attr_ops = {
28852 +static const struct sysfs_ops memmap_attr_ops = {
28853 .show = memmap_attr_show,
28854 };
28855
28856 diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
28857 --- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28858 +++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28859 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28860 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28861 maskl, pendl, maskh, pendh);
28862
28863 - atomic_inc(&irq_err_count);
28864 + atomic_inc_unchecked(&irq_err_count);
28865
28866 return -EINVAL;
28867 }
28868 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
28869 --- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28870 +++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28871 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28872 struct drm_crtc *tmp;
28873 int crtc_mask = 1;
28874
28875 - WARN(!crtc, "checking null crtc?");
28876 + BUG_ON(!crtc);
28877
28878 dev = crtc->dev;
28879
28880 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28881
28882 adjusted_mode = drm_mode_duplicate(dev, mode);
28883
28884 + pax_track_stack();
28885 +
28886 crtc->enabled = drm_helper_crtc_in_use(crtc);
28887
28888 if (!crtc->enabled)
28889 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
28890 --- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28891 +++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28892 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28893 char *kdata = NULL;
28894
28895 atomic_inc(&dev->ioctl_count);
28896 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28897 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28898 ++file_priv->ioctl_count;
28899
28900 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28901 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
28902 --- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28903 +++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28904 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28905 }
28906
28907 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28908 - atomic_set(&dev->counts[i], 0);
28909 + atomic_set_unchecked(&dev->counts[i], 0);
28910
28911 dev->sigdata.lock = NULL;
28912
28913 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28914
28915 retcode = drm_open_helper(inode, filp, dev);
28916 if (!retcode) {
28917 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28918 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28919 spin_lock(&dev->count_lock);
28920 - if (!dev->open_count++) {
28921 + if (local_inc_return(&dev->open_count) == 1) {
28922 spin_unlock(&dev->count_lock);
28923 retcode = drm_setup(dev);
28924 goto out;
28925 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28926
28927 lock_kernel();
28928
28929 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28930 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28931
28932 if (dev->driver->preclose)
28933 dev->driver->preclose(dev, file_priv);
28934 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28935 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28936 task_pid_nr(current),
28937 (long)old_encode_dev(file_priv->minor->device),
28938 - dev->open_count);
28939 + local_read(&dev->open_count));
28940
28941 /* if the master has gone away we can't do anything with the lock */
28942 if (file_priv->minor->master)
28943 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28944 * End inline drm_release
28945 */
28946
28947 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28948 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28949 spin_lock(&dev->count_lock);
28950 - if (!--dev->open_count) {
28951 + if (local_dec_and_test(&dev->open_count)) {
28952 if (atomic_read(&dev->ioctl_count)) {
28953 DRM_ERROR("Device busy: %d\n",
28954 atomic_read(&dev->ioctl_count));
28955 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
28956 --- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28957 +++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28958 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28959 spin_lock_init(&dev->object_name_lock);
28960 idr_init(&dev->object_name_idr);
28961 atomic_set(&dev->object_count, 0);
28962 - atomic_set(&dev->object_memory, 0);
28963 + atomic_set_unchecked(&dev->object_memory, 0);
28964 atomic_set(&dev->pin_count, 0);
28965 - atomic_set(&dev->pin_memory, 0);
28966 + atomic_set_unchecked(&dev->pin_memory, 0);
28967 atomic_set(&dev->gtt_count, 0);
28968 - atomic_set(&dev->gtt_memory, 0);
28969 + atomic_set_unchecked(&dev->gtt_memory, 0);
28970
28971 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28972 if (!mm) {
28973 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28974 goto fput;
28975 }
28976 atomic_inc(&dev->object_count);
28977 - atomic_add(obj->size, &dev->object_memory);
28978 + atomic_add_unchecked(obj->size, &dev->object_memory);
28979 return obj;
28980 fput:
28981 fput(obj->filp);
28982 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28983
28984 fput(obj->filp);
28985 atomic_dec(&dev->object_count);
28986 - atomic_sub(obj->size, &dev->object_memory);
28987 + atomic_sub_unchecked(obj->size, &dev->object_memory);
28988 kfree(obj);
28989 }
28990 EXPORT_SYMBOL(drm_gem_object_free);
28991 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
28992 --- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28993 +++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28994 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28995 struct drm_local_map *map;
28996 struct drm_map_list *r_list;
28997
28998 - /* Hardcoded from _DRM_FRAME_BUFFER,
28999 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29000 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29001 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29002 + static const char * const types[] = {
29003 + [_DRM_FRAME_BUFFER] = "FB",
29004 + [_DRM_REGISTERS] = "REG",
29005 + [_DRM_SHM] = "SHM",
29006 + [_DRM_AGP] = "AGP",
29007 + [_DRM_SCATTER_GATHER] = "SG",
29008 + [_DRM_CONSISTENT] = "PCI",
29009 + [_DRM_GEM] = "GEM" };
29010 const char *type;
29011 int i;
29012
29013 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29014 map = r_list->map;
29015 if (!map)
29016 continue;
29017 - if (map->type < 0 || map->type > 5)
29018 + if (map->type >= ARRAY_SIZE(types))
29019 type = "??";
29020 else
29021 type = types[map->type];
29022 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29023 struct drm_device *dev = node->minor->dev;
29024
29025 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29026 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29027 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29028 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29029 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29030 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29031 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29032 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29033 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29034 return 0;
29035 }
29036 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29037 mutex_lock(&dev->struct_mutex);
29038 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29039 atomic_read(&dev->vma_count),
29040 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29041 + NULL, 0);
29042 +#else
29043 high_memory, (u64)virt_to_phys(high_memory));
29044 +#endif
29045
29046 list_for_each_entry(pt, &dev->vmalist, head) {
29047 vma = pt->vma;
29048 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29049 continue;
29050 seq_printf(m,
29051 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29052 - pt->pid, vma->vm_start, vma->vm_end,
29053 + pt->pid,
29054 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29055 + 0, 0,
29056 +#else
29057 + vma->vm_start, vma->vm_end,
29058 +#endif
29059 vma->vm_flags & VM_READ ? 'r' : '-',
29060 vma->vm_flags & VM_WRITE ? 'w' : '-',
29061 vma->vm_flags & VM_EXEC ? 'x' : '-',
29062 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29063 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29064 vma->vm_flags & VM_IO ? 'i' : '-',
29065 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29066 + 0);
29067 +#else
29068 vma->vm_pgoff);
29069 +#endif
29070
29071 #if defined(__i386__)
29072 pgprot = pgprot_val(vma->vm_page_prot);
29073 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29074 --- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29075 +++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29076 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29077 stats->data[i].value =
29078 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29079 else
29080 - stats->data[i].value = atomic_read(&dev->counts[i]);
29081 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29082 stats->data[i].type = dev->types[i];
29083 }
29084
29085 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29086 --- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29087 +++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29088 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29089 if (drm_lock_take(&master->lock, lock->context)) {
29090 master->lock.file_priv = file_priv;
29091 master->lock.lock_time = jiffies;
29092 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29093 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29094 break; /* Got lock */
29095 }
29096
29097 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29098 return -EINVAL;
29099 }
29100
29101 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29102 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29103
29104 /* kernel_context_switch isn't used by any of the x86 drm
29105 * modules but is required by the Sparc driver.
29106 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29107 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29108 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29109 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29110 dma->buflist[vertex->idx],
29111 vertex->discard, vertex->used);
29112
29113 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29114 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29115 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29116 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29117 sarea_priv->last_enqueue = dev_priv->counter - 1;
29118 sarea_priv->last_dispatch = (int)hw_status[5];
29119
29120 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29121 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29122 mc->last_render);
29123
29124 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29125 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29126 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29127 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29128 sarea_priv->last_enqueue = dev_priv->counter - 1;
29129 sarea_priv->last_dispatch = (int)hw_status[5];
29130
29131 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29132 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29133 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29134 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29135 int page_flipping;
29136
29137 wait_queue_head_t irq_queue;
29138 - atomic_t irq_received;
29139 - atomic_t irq_emitted;
29140 + atomic_unchecked_t irq_received;
29141 + atomic_unchecked_t irq_emitted;
29142
29143 int front_offset;
29144 } drm_i810_private_t;
29145 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29146 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29147 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29148 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29149 int page_flipping;
29150
29151 wait_queue_head_t irq_queue;
29152 - atomic_t irq_received;
29153 - atomic_t irq_emitted;
29154 + atomic_unchecked_t irq_received;
29155 + atomic_unchecked_t irq_emitted;
29156
29157 int use_mi_batchbuffer_start;
29158
29159 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29160 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29161 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29162 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29163
29164 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29165
29166 - atomic_inc(&dev_priv->irq_received);
29167 + atomic_inc_unchecked(&dev_priv->irq_received);
29168 wake_up_interruptible(&dev_priv->irq_queue);
29169
29170 return IRQ_HANDLED;
29171 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29172
29173 DRM_DEBUG("%s\n", __func__);
29174
29175 - atomic_inc(&dev_priv->irq_emitted);
29176 + atomic_inc_unchecked(&dev_priv->irq_emitted);
29177
29178 BEGIN_LP_RING(2);
29179 OUT_RING(0);
29180 OUT_RING(GFX_OP_USER_INTERRUPT);
29181 ADVANCE_LP_RING();
29182
29183 - return atomic_read(&dev_priv->irq_emitted);
29184 + return atomic_read_unchecked(&dev_priv->irq_emitted);
29185 }
29186
29187 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29188 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29189
29190 DRM_DEBUG("%s\n", __func__);
29191
29192 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29193 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29194 return 0;
29195
29196 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29197 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29198
29199 for (;;) {
29200 __set_current_state(TASK_INTERRUPTIBLE);
29201 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29202 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29203 break;
29204 if ((signed)(end - jiffies) <= 0) {
29205 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29206 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29207 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29208 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29209 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29210 - atomic_set(&dev_priv->irq_received, 0);
29211 - atomic_set(&dev_priv->irq_emitted, 0);
29212 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29213 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29214 init_waitqueue_head(&dev_priv->irq_queue);
29215 }
29216
29217 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29218 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29219 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29220 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29221 }
29222 }
29223
29224 -struct intel_dvo_dev_ops ch7017_ops = {
29225 +const struct intel_dvo_dev_ops ch7017_ops = {
29226 .init = ch7017_init,
29227 .detect = ch7017_detect,
29228 .mode_valid = ch7017_mode_valid,
29229 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29230 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29231 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29232 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29233 }
29234 }
29235
29236 -struct intel_dvo_dev_ops ch7xxx_ops = {
29237 +const struct intel_dvo_dev_ops ch7xxx_ops = {
29238 .init = ch7xxx_init,
29239 .detect = ch7xxx_detect,
29240 .mode_valid = ch7xxx_mode_valid,
29241 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29242 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29243 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29244 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29245 *
29246 * \return singly-linked list of modes or NULL if no modes found.
29247 */
29248 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29249 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29250
29251 /**
29252 * Clean up driver-specific bits of the output
29253 */
29254 - void (*destroy) (struct intel_dvo_device *dvo);
29255 + void (* const destroy) (struct intel_dvo_device *dvo);
29256
29257 /**
29258 * Debugging hook to dump device registers to log file
29259 */
29260 - void (*dump_regs)(struct intel_dvo_device *dvo);
29261 + void (* const dump_regs)(struct intel_dvo_device *dvo);
29262 };
29263
29264 -extern struct intel_dvo_dev_ops sil164_ops;
29265 -extern struct intel_dvo_dev_ops ch7xxx_ops;
29266 -extern struct intel_dvo_dev_ops ivch_ops;
29267 -extern struct intel_dvo_dev_ops tfp410_ops;
29268 -extern struct intel_dvo_dev_ops ch7017_ops;
29269 +extern const struct intel_dvo_dev_ops sil164_ops;
29270 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
29271 +extern const struct intel_dvo_dev_ops ivch_ops;
29272 +extern const struct intel_dvo_dev_ops tfp410_ops;
29273 +extern const struct intel_dvo_dev_ops ch7017_ops;
29274
29275 #endif /* _INTEL_DVO_H */
29276 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29277 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29278 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29279 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29280 }
29281 }
29282
29283 -struct intel_dvo_dev_ops ivch_ops= {
29284 +const struct intel_dvo_dev_ops ivch_ops= {
29285 .init = ivch_init,
29286 .dpms = ivch_dpms,
29287 .save = ivch_save,
29288 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29289 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29290 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29291 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29292 }
29293 }
29294
29295 -struct intel_dvo_dev_ops sil164_ops = {
29296 +const struct intel_dvo_dev_ops sil164_ops = {
29297 .init = sil164_init,
29298 .detect = sil164_detect,
29299 .mode_valid = sil164_mode_valid,
29300 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29301 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29302 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29303 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29304 }
29305 }
29306
29307 -struct intel_dvo_dev_ops tfp410_ops = {
29308 +const struct intel_dvo_dev_ops tfp410_ops = {
29309 .init = tfp410_init,
29310 .detect = tfp410_detect,
29311 .mode_valid = tfp410_mode_valid,
29312 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29313 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29314 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29315 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29316 I915_READ(GTIMR));
29317 }
29318 seq_printf(m, "Interrupts received: %d\n",
29319 - atomic_read(&dev_priv->irq_received));
29320 + atomic_read_unchecked(&dev_priv->irq_received));
29321 if (dev_priv->hw_status_page != NULL) {
29322 seq_printf(m, "Current sequence: %d\n",
29323 i915_get_gem_seqno(dev));
29324 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29325 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29326 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29327 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29328 return i915_resume(dev);
29329 }
29330
29331 -static struct vm_operations_struct i915_gem_vm_ops = {
29332 +static const struct vm_operations_struct i915_gem_vm_ops = {
29333 .fault = i915_gem_fault,
29334 .open = drm_gem_vm_open,
29335 .close = drm_gem_vm_close,
29336 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29337 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29338 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29339 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29340 /* display clock increase/decrease */
29341 /* pll clock increase/decrease */
29342 /* clock gating init */
29343 -};
29344 +} __no_const;
29345
29346 typedef struct drm_i915_private {
29347 struct drm_device *dev;
29348 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29349 int page_flipping;
29350
29351 wait_queue_head_t irq_queue;
29352 - atomic_t irq_received;
29353 + atomic_unchecked_t irq_received;
29354 /** Protects user_irq_refcount and irq_mask_reg */
29355 spinlock_t user_irq_lock;
29356 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29357 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29358 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29359 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29360 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29361
29362 args->aper_size = dev->gtt_total;
29363 args->aper_available_size = (args->aper_size -
29364 - atomic_read(&dev->pin_memory));
29365 + atomic_read_unchecked(&dev->pin_memory));
29366
29367 return 0;
29368 }
29369 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29370 return -EINVAL;
29371 }
29372
29373 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29374 + drm_gem_object_unreference(obj);
29375 + return -EFAULT;
29376 + }
29377 +
29378 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29379 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29380 } else {
29381 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29382 return -EINVAL;
29383 }
29384
29385 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29386 + drm_gem_object_unreference(obj);
29387 + return -EFAULT;
29388 + }
29389 +
29390 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29391 * it would end up going through the fenced access, and we'll get
29392 * different detiling behavior between reading and writing.
29393 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29394
29395 if (obj_priv->gtt_space) {
29396 atomic_dec(&dev->gtt_count);
29397 - atomic_sub(obj->size, &dev->gtt_memory);
29398 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29399
29400 drm_mm_put_block(obj_priv->gtt_space);
29401 obj_priv->gtt_space = NULL;
29402 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29403 goto search_free;
29404 }
29405 atomic_inc(&dev->gtt_count);
29406 - atomic_add(obj->size, &dev->gtt_memory);
29407 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
29408
29409 /* Assert that the object is not currently in any GPU domain. As it
29410 * wasn't in the GTT, there shouldn't be any way it could have been in
29411 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29412 "%d/%d gtt bytes\n",
29413 atomic_read(&dev->object_count),
29414 atomic_read(&dev->pin_count),
29415 - atomic_read(&dev->object_memory),
29416 - atomic_read(&dev->pin_memory),
29417 - atomic_read(&dev->gtt_memory),
29418 + atomic_read_unchecked(&dev->object_memory),
29419 + atomic_read_unchecked(&dev->pin_memory),
29420 + atomic_read_unchecked(&dev->gtt_memory),
29421 dev->gtt_total);
29422 }
29423 goto err;
29424 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29425 */
29426 if (obj_priv->pin_count == 1) {
29427 atomic_inc(&dev->pin_count);
29428 - atomic_add(obj->size, &dev->pin_memory);
29429 + atomic_add_unchecked(obj->size, &dev->pin_memory);
29430 if (!obj_priv->active &&
29431 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29432 !list_empty(&obj_priv->list))
29433 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29434 list_move_tail(&obj_priv->list,
29435 &dev_priv->mm.inactive_list);
29436 atomic_dec(&dev->pin_count);
29437 - atomic_sub(obj->size, &dev->pin_memory);
29438 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
29439 }
29440 i915_verify_inactive(dev, __FILE__, __LINE__);
29441 }
29442 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29443 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29444 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29445 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29446 int irq_received;
29447 int ret = IRQ_NONE;
29448
29449 - atomic_inc(&dev_priv->irq_received);
29450 + atomic_inc_unchecked(&dev_priv->irq_received);
29451
29452 if (IS_IGDNG(dev))
29453 return igdng_irq_handler(dev);
29454 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29455 {
29456 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29457
29458 - atomic_set(&dev_priv->irq_received, 0);
29459 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29460
29461 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29462 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29463 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29464 --- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29465 +++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29466 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29467 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29468
29469 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29470 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29471 + pax_open_kernel();
29472 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29473 + pax_close_kernel();
29474
29475 /* Read the regs to test if we can talk to the device */
29476 for (i = 0; i < 0x40; i++) {
29477 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29478 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29479 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29480 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29481 u32 clear_cmd;
29482 u32 maccess;
29483
29484 - atomic_t vbl_received; /**< Number of vblanks received. */
29485 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29486 wait_queue_head_t fence_queue;
29487 - atomic_t last_fence_retired;
29488 + atomic_unchecked_t last_fence_retired;
29489 u32 next_fence_to_post;
29490
29491 unsigned int fb_cpp;
29492 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29493 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29494 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29495 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29496 if (crtc != 0)
29497 return 0;
29498
29499 - return atomic_read(&dev_priv->vbl_received);
29500 + return atomic_read_unchecked(&dev_priv->vbl_received);
29501 }
29502
29503
29504 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29505 /* VBLANK interrupt */
29506 if (status & MGA_VLINEPEN) {
29507 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29508 - atomic_inc(&dev_priv->vbl_received);
29509 + atomic_inc_unchecked(&dev_priv->vbl_received);
29510 drm_handle_vblank(dev, 0);
29511 handled = 1;
29512 }
29513 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29514 MGA_WRITE(MGA_PRIMEND, prim_end);
29515 }
29516
29517 - atomic_inc(&dev_priv->last_fence_retired);
29518 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29519 DRM_WAKEUP(&dev_priv->fence_queue);
29520 handled = 1;
29521 }
29522 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29523 * using fences.
29524 */
29525 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29526 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29527 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29528 - *sequence) <= (1 << 23)));
29529
29530 *sequence = cur_fence;
29531 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29532 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29533 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29534 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29535
29536 /* GH: Simple idle check.
29537 */
29538 - atomic_set(&dev_priv->idle_count, 0);
29539 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29540
29541 /* We don't support anything other than bus-mastering ring mode,
29542 * but the ring can be in either AGP or PCI space for the ring
29543 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29544 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29545 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29546 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29547 int is_pci;
29548 unsigned long cce_buffers_offset;
29549
29550 - atomic_t idle_count;
29551 + atomic_unchecked_t idle_count;
29552
29553 int page_flipping;
29554 int current_page;
29555 u32 crtc_offset;
29556 u32 crtc_offset_cntl;
29557
29558 - atomic_t vbl_received;
29559 + atomic_unchecked_t vbl_received;
29560
29561 u32 color_fmt;
29562 unsigned int front_offset;
29563 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29564 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29565 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29566 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29567 if (crtc != 0)
29568 return 0;
29569
29570 - return atomic_read(&dev_priv->vbl_received);
29571 + return atomic_read_unchecked(&dev_priv->vbl_received);
29572 }
29573
29574 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29575 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29576 /* VBLANK interrupt */
29577 if (status & R128_CRTC_VBLANK_INT) {
29578 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29579 - atomic_inc(&dev_priv->vbl_received);
29580 + atomic_inc_unchecked(&dev_priv->vbl_received);
29581 drm_handle_vblank(dev, 0);
29582 return IRQ_HANDLED;
29583 }
29584 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29585 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29586 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29587 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29588
29589 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29590 {
29591 - if (atomic_read(&dev_priv->idle_count) == 0) {
29592 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29593 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29594 } else {
29595 - atomic_set(&dev_priv->idle_count, 0);
29596 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29597 }
29598 }
29599
29600 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29601 --- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29602 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29603 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29604 char name[512];
29605 int i;
29606
29607 + pax_track_stack();
29608 +
29609 ctx->card = card;
29610 ctx->bios = bios;
29611
29612 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29613 --- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29614 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29615 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29616 regex_t mask_rex;
29617 regmatch_t match[4];
29618 char buf[1024];
29619 - size_t end;
29620 + long end;
29621 int len;
29622 int done = 0;
29623 int r;
29624 unsigned o;
29625 struct offset *offset;
29626 char last_reg_s[10];
29627 - int last_reg;
29628 + unsigned long last_reg;
29629
29630 if (regcomp
29631 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29632 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29633 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29634 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29635 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29636 bool linkb;
29637 struct radeon_i2c_bus_rec ddc_bus;
29638
29639 + pax_track_stack();
29640 +
29641 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29642
29643 if (data_offset == 0)
29644 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29645 }
29646 }
29647
29648 -struct bios_connector {
29649 +static struct bios_connector {
29650 bool valid;
29651 uint16_t line_mux;
29652 uint16_t devices;
29653 int connector_type;
29654 struct radeon_i2c_bus_rec ddc_bus;
29655 -};
29656 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29657
29658 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29659 drm_device
29660 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29661 uint8_t dac;
29662 union atom_supported_devices *supported_devices;
29663 int i, j;
29664 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29665
29666 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29667
29668 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29669 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29670 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29671 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29672
29673 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29674 error = freq - current_freq;
29675 - error = error < 0 ? 0xffffffff : error;
29676 + error = (int32_t)error < 0 ? 0xffffffff : error;
29677 } else
29678 error = abs(current_freq - freq);
29679 vco_diff = abs(vco - best_vco);
29680 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29681 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29682 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29683 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29684
29685 /* SW interrupt */
29686 wait_queue_head_t swi_queue;
29687 - atomic_t swi_emitted;
29688 + atomic_unchecked_t swi_emitted;
29689 int vblank_crtc;
29690 uint32_t irq_enable_reg;
29691 uint32_t r500_disp_irq_reg;
29692 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29693 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29694 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29695 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29696 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29697 return 0;
29698 }
29699 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29700 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29701 if (!rdev->cp.ready) {
29702 /* FIXME: cp is not running assume everythings is done right
29703 * away
29704 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29705 return r;
29706 }
29707 WREG32(rdev->fence_drv.scratch_reg, 0);
29708 - atomic_set(&rdev->fence_drv.seq, 0);
29709 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29710 INIT_LIST_HEAD(&rdev->fence_drv.created);
29711 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29712 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29713 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29714 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29715 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29716 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29717 */
29718 struct radeon_fence_driver {
29719 uint32_t scratch_reg;
29720 - atomic_t seq;
29721 + atomic_unchecked_t seq;
29722 uint32_t last_seq;
29723 unsigned long count_timeout;
29724 wait_queue_head_t queue;
29725 @@ -640,7 +640,7 @@ struct radeon_asic {
29726 uint32_t offset, uint32_t obj_size);
29727 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29728 void (*bandwidth_update)(struct radeon_device *rdev);
29729 -};
29730 +} __no_const;
29731
29732 /*
29733 * Asic structures
29734 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29735 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29736 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29737 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29738 request = compat_alloc_user_space(sizeof(*request));
29739 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29740 || __put_user(req32.param, &request->param)
29741 - || __put_user((void __user *)(unsigned long)req32.value,
29742 + || __put_user((unsigned long)req32.value,
29743 &request->value))
29744 return -EFAULT;
29745
29746 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29747 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29748 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29749 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29750 unsigned int ret;
29751 RING_LOCALS;
29752
29753 - atomic_inc(&dev_priv->swi_emitted);
29754 - ret = atomic_read(&dev_priv->swi_emitted);
29755 + atomic_inc_unchecked(&dev_priv->swi_emitted);
29756 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29757
29758 BEGIN_RING(4);
29759 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29760 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29761 drm_radeon_private_t *dev_priv =
29762 (drm_radeon_private_t *) dev->dev_private;
29763
29764 - atomic_set(&dev_priv->swi_emitted, 0);
29765 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29766 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29767
29768 dev->max_vblank_count = 0x001fffff;
29769 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
29770 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29771 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29772 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29773 {
29774 drm_radeon_private_t *dev_priv = dev->dev_private;
29775 drm_radeon_getparam_t *param = data;
29776 - int value;
29777 + int value = 0;
29778
29779 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29780
29781 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
29782 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29783 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29784 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29785 DRM_INFO("radeon: ttm finalized\n");
29786 }
29787
29788 -static struct vm_operations_struct radeon_ttm_vm_ops;
29789 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
29790 -
29791 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29792 -{
29793 - struct ttm_buffer_object *bo;
29794 - int r;
29795 -
29796 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
29797 - if (bo == NULL) {
29798 - return VM_FAULT_NOPAGE;
29799 - }
29800 - r = ttm_vm_ops->fault(vma, vmf);
29801 - return r;
29802 -}
29803 -
29804 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29805 {
29806 struct drm_file *file_priv;
29807 struct radeon_device *rdev;
29808 - int r;
29809
29810 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29811 return drm_mmap(filp, vma);
29812 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29813
29814 file_priv = (struct drm_file *)filp->private_data;
29815 rdev = file_priv->minor->dev->dev_private;
29816 - if (rdev == NULL) {
29817 + if (!rdev)
29818 return -EINVAL;
29819 - }
29820 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29821 - if (unlikely(r != 0)) {
29822 - return r;
29823 - }
29824 - if (unlikely(ttm_vm_ops == NULL)) {
29825 - ttm_vm_ops = vma->vm_ops;
29826 - radeon_ttm_vm_ops = *ttm_vm_ops;
29827 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29828 - }
29829 - vma->vm_ops = &radeon_ttm_vm_ops;
29830 - return 0;
29831 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29832 }
29833
29834
29835 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
29836 --- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29837 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29838 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29839 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29840 rdev->pm.sideport_bandwidth.full)
29841 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29842 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29843 + read_delay_latency.full = rfixed_const(800 * 1000);
29844 read_delay_latency.full = rfixed_div(read_delay_latency,
29845 rdev->pm.igp_sideport_mclk);
29846 + a.full = rfixed_const(370);
29847 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29848 } else {
29849 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29850 rdev->pm.k8_bandwidth.full)
29851 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
29852 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29853 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29854 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29855 NULL
29856 };
29857
29858 -static struct sysfs_ops ttm_bo_global_ops = {
29859 +static const struct sysfs_ops ttm_bo_global_ops = {
29860 .show = &ttm_bo_global_show
29861 };
29862
29863 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
29864 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29865 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29866 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29867 {
29868 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29869 vma->vm_private_data;
29870 - struct ttm_bo_device *bdev = bo->bdev;
29871 + struct ttm_bo_device *bdev;
29872 unsigned long bus_base;
29873 unsigned long bus_offset;
29874 unsigned long bus_size;
29875 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29876 unsigned long address = (unsigned long)vmf->virtual_address;
29877 int retval = VM_FAULT_NOPAGE;
29878
29879 + if (!bo)
29880 + return VM_FAULT_NOPAGE;
29881 + bdev = bo->bdev;
29882 +
29883 /*
29884 * Work around locking order reversal in fault / nopfn
29885 * between mmap_sem and bo_reserve: Perform a trylock operation
29886 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
29887 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29888 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29889 @@ -36,7 +36,7 @@
29890 struct ttm_global_item {
29891 struct mutex mutex;
29892 void *object;
29893 - int refcount;
29894 + atomic_t refcount;
29895 };
29896
29897 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29898 @@ -49,7 +49,7 @@ void ttm_global_init(void)
29899 struct ttm_global_item *item = &glob[i];
29900 mutex_init(&item->mutex);
29901 item->object = NULL;
29902 - item->refcount = 0;
29903 + atomic_set(&item->refcount, 0);
29904 }
29905 }
29906
29907 @@ -59,7 +59,7 @@ void ttm_global_release(void)
29908 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29909 struct ttm_global_item *item = &glob[i];
29910 BUG_ON(item->object != NULL);
29911 - BUG_ON(item->refcount != 0);
29912 + BUG_ON(atomic_read(&item->refcount) != 0);
29913 }
29914 }
29915
29916 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29917 void *object;
29918
29919 mutex_lock(&item->mutex);
29920 - if (item->refcount == 0) {
29921 + if (atomic_read(&item->refcount) == 0) {
29922 item->object = kzalloc(ref->size, GFP_KERNEL);
29923 if (unlikely(item->object == NULL)) {
29924 ret = -ENOMEM;
29925 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29926 goto out_err;
29927
29928 }
29929 - ++item->refcount;
29930 + atomic_inc(&item->refcount);
29931 ref->object = item->object;
29932 object = item->object;
29933 mutex_unlock(&item->mutex);
29934 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29935 struct ttm_global_item *item = &glob[ref->global_type];
29936
29937 mutex_lock(&item->mutex);
29938 - BUG_ON(item->refcount == 0);
29939 + BUG_ON(atomic_read(&item->refcount) == 0);
29940 BUG_ON(ref->object != item->object);
29941 - if (--item->refcount == 0) {
29942 + if (atomic_dec_and_test(&item->refcount)) {
29943 ref->release(ref);
29944 item->object = NULL;
29945 }
29946 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
29947 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29948 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29949 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29950 NULL
29951 };
29952
29953 -static struct sysfs_ops ttm_mem_zone_ops = {
29954 +static const struct sysfs_ops ttm_mem_zone_ops = {
29955 .show = &ttm_mem_zone_show,
29956 .store = &ttm_mem_zone_store
29957 };
29958 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
29959 --- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29960 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29961 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29962 typedef uint32_t maskarray_t[5];
29963
29964 typedef struct drm_via_irq {
29965 - atomic_t irq_received;
29966 + atomic_unchecked_t irq_received;
29967 uint32_t pending_mask;
29968 uint32_t enable_mask;
29969 wait_queue_head_t irq_queue;
29970 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29971 struct timeval last_vblank;
29972 int last_vblank_valid;
29973 unsigned usec_per_vblank;
29974 - atomic_t vbl_received;
29975 + atomic_unchecked_t vbl_received;
29976 drm_via_state_t hc_state;
29977 char pci_buf[VIA_PCI_BUF_SIZE];
29978 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29979 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
29980 --- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29981 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29982 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29983 if (crtc != 0)
29984 return 0;
29985
29986 - return atomic_read(&dev_priv->vbl_received);
29987 + return atomic_read_unchecked(&dev_priv->vbl_received);
29988 }
29989
29990 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29991 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29992
29993 status = VIA_READ(VIA_REG_INTERRUPT);
29994 if (status & VIA_IRQ_VBLANK_PENDING) {
29995 - atomic_inc(&dev_priv->vbl_received);
29996 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29997 + atomic_inc_unchecked(&dev_priv->vbl_received);
29998 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29999 do_gettimeofday(&cur_vblank);
30000 if (dev_priv->last_vblank_valid) {
30001 dev_priv->usec_per_vblank =
30002 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30003 dev_priv->last_vblank = cur_vblank;
30004 dev_priv->last_vblank_valid = 1;
30005 }
30006 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30007 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30008 DRM_DEBUG("US per vblank is: %u\n",
30009 dev_priv->usec_per_vblank);
30010 }
30011 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30012
30013 for (i = 0; i < dev_priv->num_irqs; ++i) {
30014 if (status & cur_irq->pending_mask) {
30015 - atomic_inc(&cur_irq->irq_received);
30016 + atomic_inc_unchecked(&cur_irq->irq_received);
30017 DRM_WAKEUP(&cur_irq->irq_queue);
30018 handled = 1;
30019 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30020 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30021 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30022 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30023 masks[irq][4]));
30024 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30025 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30026 } else {
30027 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30028 (((cur_irq_sequence =
30029 - atomic_read(&cur_irq->irq_received)) -
30030 + atomic_read_unchecked(&cur_irq->irq_received)) -
30031 *sequence) <= (1 << 23)));
30032 }
30033 *sequence = cur_irq_sequence;
30034 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30035 }
30036
30037 for (i = 0; i < dev_priv->num_irqs; ++i) {
30038 - atomic_set(&cur_irq->irq_received, 0);
30039 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30040 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30041 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30042 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30043 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30044 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30045 case VIA_IRQ_RELATIVE:
30046 irqwait->request.sequence +=
30047 - atomic_read(&cur_irq->irq_received);
30048 + atomic_read_unchecked(&cur_irq->irq_received);
30049 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30050 case VIA_IRQ_ABSOLUTE:
30051 break;
30052 diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30053 --- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30054 +++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30055 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30056
30057 int hid_add_device(struct hid_device *hdev)
30058 {
30059 - static atomic_t id = ATOMIC_INIT(0);
30060 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30061 int ret;
30062
30063 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30064 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30065 /* XXX hack, any other cleaner solution after the driver core
30066 * is converted to allow more than 20 bytes as the device name? */
30067 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30068 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30069 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30070
30071 ret = device_add(&hdev->dev);
30072 if (!ret)
30073 diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30074 --- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30075 +++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30076 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30077 return put_user(HID_VERSION, (int __user *)arg);
30078
30079 case HIDIOCAPPLICATION:
30080 - if (arg < 0 || arg >= hid->maxapplication)
30081 + if (arg >= hid->maxapplication)
30082 return -EINVAL;
30083
30084 for (i = 0; i < hid->maxcollection; i++)
30085 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30086 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30087 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30088 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30089 * the lid is closed. This leads to interrupts as soon as a little move
30090 * is done.
30091 */
30092 - atomic_inc(&lis3_dev.count);
30093 + atomic_inc_unchecked(&lis3_dev.count);
30094
30095 wake_up_interruptible(&lis3_dev.misc_wait);
30096 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30097 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30098 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30099 return -EBUSY; /* already open */
30100
30101 - atomic_set(&lis3_dev.count, 0);
30102 + atomic_set_unchecked(&lis3_dev.count, 0);
30103
30104 /*
30105 * The sensor can generate interrupts for free-fall and direction
30106 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30107 add_wait_queue(&lis3_dev.misc_wait, &wait);
30108 while (true) {
30109 set_current_state(TASK_INTERRUPTIBLE);
30110 - data = atomic_xchg(&lis3_dev.count, 0);
30111 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30112 if (data)
30113 break;
30114
30115 @@ -244,7 +244,7 @@ out:
30116 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30117 {
30118 poll_wait(file, &lis3_dev.misc_wait, wait);
30119 - if (atomic_read(&lis3_dev.count))
30120 + if (atomic_read_unchecked(&lis3_dev.count))
30121 return POLLIN | POLLRDNORM;
30122 return 0;
30123 }
30124 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30125 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30126 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30127 @@ -201,7 +201,7 @@ struct lis3lv02d {
30128
30129 struct input_polled_dev *idev; /* input device */
30130 struct platform_device *pdev; /* platform device */
30131 - atomic_t count; /* interrupt count after last read */
30132 + atomic_unchecked_t count; /* interrupt count after last read */
30133 int xcalib; /* calibrated null value for x */
30134 int ycalib; /* calibrated null value for y */
30135 int zcalib; /* calibrated null value for z */
30136 diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30137 --- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30138 +++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30139 @@ -112,7 +112,7 @@ struct sht15_data {
30140 int supply_uV;
30141 int supply_uV_valid;
30142 struct work_struct update_supply_work;
30143 - atomic_t interrupt_handled;
30144 + atomic_unchecked_t interrupt_handled;
30145 };
30146
30147 /**
30148 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30149 return ret;
30150
30151 gpio_direction_input(data->pdata->gpio_data);
30152 - atomic_set(&data->interrupt_handled, 0);
30153 + atomic_set_unchecked(&data->interrupt_handled, 0);
30154
30155 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30156 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30157 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30158 /* Only relevant if the interrupt hasn't occured. */
30159 - if (!atomic_read(&data->interrupt_handled))
30160 + if (!atomic_read_unchecked(&data->interrupt_handled))
30161 schedule_work(&data->read_work);
30162 }
30163 ret = wait_event_timeout(data->wait_queue,
30164 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30165 struct sht15_data *data = d;
30166 /* First disable the interrupt */
30167 disable_irq_nosync(irq);
30168 - atomic_inc(&data->interrupt_handled);
30169 + atomic_inc_unchecked(&data->interrupt_handled);
30170 /* Then schedule a reading work struct */
30171 if (data->flag != SHT15_READING_NOTHING)
30172 schedule_work(&data->read_work);
30173 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30174 here as could have gone low in meantime so verify
30175 it hasn't!
30176 */
30177 - atomic_set(&data->interrupt_handled, 0);
30178 + atomic_set_unchecked(&data->interrupt_handled, 0);
30179 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30180 /* If still not occured or another handler has been scheduled */
30181 if (gpio_get_value(data->pdata->gpio_data)
30182 - || atomic_read(&data->interrupt_handled))
30183 + || atomic_read_unchecked(&data->interrupt_handled))
30184 return;
30185 }
30186 /* Read the data back from the device */
30187 diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30188 --- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30189 +++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30190 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30191 struct i2c_board_info *info);
30192 static int w83791d_remove(struct i2c_client *client);
30193
30194 -static int w83791d_read(struct i2c_client *client, u8 register);
30195 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30196 +static int w83791d_read(struct i2c_client *client, u8 reg);
30197 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30198 static struct w83791d_data *w83791d_update_device(struct device *dev);
30199
30200 #ifdef DEBUG
30201 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30202 --- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30203 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30204 @@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30205 }
30206
30207 /* Fill in the new structures */
30208 - s4882_algo[0] = *(amd756_smbus.algo);
30209 - s4882_algo[0].smbus_xfer = amd756_access_virt0;
30210 + memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30211 + *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30212 s4882_adapter[0] = amd756_smbus;
30213 s4882_adapter[0].algo = s4882_algo;
30214 - s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30215 + *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30216 for (i = 1; i < 5; i++) {
30217 - s4882_algo[i] = *(amd756_smbus.algo);
30218 + memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30219 s4882_adapter[i] = amd756_smbus;
30220 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30221 "SMBus 8111 adapter (CPU%d)", i-1);
30222 s4882_adapter[i].algo = s4882_algo+i;
30223 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30224 }
30225 - s4882_algo[1].smbus_xfer = amd756_access_virt1;
30226 - s4882_algo[2].smbus_xfer = amd756_access_virt2;
30227 - s4882_algo[3].smbus_xfer = amd756_access_virt3;
30228 - s4882_algo[4].smbus_xfer = amd756_access_virt4;
30229 + *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30230 + *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30231 + *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30232 + *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30233
30234 /* Register virtual adapters */
30235 for (i = 0; i < 5; i++) {
30236 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30237 --- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30238 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30239 @@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30240 }
30241
30242 /* Fill in the new structures */
30243 - s4985_algo[0] = *(nforce2_smbus->algo);
30244 - s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30245 + memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30246 + *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30247 s4985_adapter[0] = *nforce2_smbus;
30248 s4985_adapter[0].algo = s4985_algo;
30249 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30250 for (i = 1; i < 5; i++) {
30251 - s4985_algo[i] = *(nforce2_smbus->algo);
30252 + memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30253 s4985_adapter[i] = *nforce2_smbus;
30254 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30255 "SMBus nForce2 adapter (CPU%d)", i - 1);
30256 s4985_adapter[i].algo = s4985_algo + i;
30257 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30258 }
30259 - s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30260 - s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30261 - s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30262 - s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30263 + *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30264 + *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30265 + *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30266 + *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30267
30268 /* Register virtual adapters */
30269 for (i = 0; i < 5; i++) {
30270 diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30271 --- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30272 +++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30273 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30274 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30275 if ((unsigned long)buf & alignment
30276 || blk_rq_bytes(rq) & q->dma_pad_mask
30277 - || object_is_on_stack(buf))
30278 + || object_starts_on_stack(buf))
30279 drive->dma = 0;
30280 }
30281 }
30282 diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30283 --- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30284 +++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30285 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30286 u8 pc_buf[256], header_len, desc_cnt;
30287 int i, rc = 1, blocks, length;
30288
30289 + pax_track_stack();
30290 +
30291 ide_debug_log(IDE_DBG_FUNC, "enter");
30292
30293 drive->bios_cyl = 0;
30294 diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30295 --- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30296 +++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30297 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30298 int ret, i, n_ports = dev2 ? 4 : 2;
30299 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30300
30301 + pax_track_stack();
30302 +
30303 for (i = 0; i < n_ports / 2; i++) {
30304 ret = ide_setup_pci_controller(pdev[i], d, !i);
30305 if (ret < 0)
30306 diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30307 --- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30308 +++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30309 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30310 based upon DIF section and sequence
30311 */
30312
30313 -static void inline
30314 +static inline void
30315 frame_put_packet (struct frame *f, struct packet *p)
30316 {
30317 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30318 diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30319 --- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30320 +++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30321 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30322 }
30323
30324 static struct hpsb_host_driver dummy_driver = {
30325 + .name = "dummy",
30326 .transmit_packet = dummy_transmit_packet,
30327 .devctl = dummy_devctl,
30328 .isoctl = dummy_isoctl
30329 diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30330 --- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30331 +++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30332 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30333 for (func = 0; func < 8; func++) {
30334 u32 class = read_pci_config(num,slot,func,
30335 PCI_CLASS_REVISION);
30336 - if ((class == 0xffffffff))
30337 + if (class == 0xffffffff)
30338 continue; /* No device at this func */
30339
30340 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30341 diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30342 --- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30343 +++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30344 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30345 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30346
30347 /* Module Parameters */
30348 -static int phys_dma = 1;
30349 +static int phys_dma;
30350 module_param(phys_dma, int, 0444);
30351 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30352 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30353
30354 static void dma_trm_tasklet(unsigned long data);
30355 static void dma_trm_reset(struct dma_trm_ctx *d);
30356 diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30357 --- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30358 +++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30359 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30360 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30361 MODULE_LICENSE("GPL");
30362
30363 -static int sbp2_module_init(void)
30364 +static int __init sbp2_module_init(void)
30365 {
30366 int ret;
30367
30368 diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30369 --- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30370 +++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30371 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
30372
30373 struct cm_counter_group {
30374 struct kobject obj;
30375 - atomic_long_t counter[CM_ATTR_COUNT];
30376 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30377 };
30378
30379 struct cm_counter_attribute {
30380 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30381 struct ib_mad_send_buf *msg = NULL;
30382 int ret;
30383
30384 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30385 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30386 counter[CM_REQ_COUNTER]);
30387
30388 /* Quick state check to discard duplicate REQs. */
30389 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30390 if (!cm_id_priv)
30391 return;
30392
30393 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30394 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30395 counter[CM_REP_COUNTER]);
30396 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30397 if (ret)
30398 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30399 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30400 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30401 spin_unlock_irq(&cm_id_priv->lock);
30402 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30403 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30404 counter[CM_RTU_COUNTER]);
30405 goto out;
30406 }
30407 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30408 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30409 dreq_msg->local_comm_id);
30410 if (!cm_id_priv) {
30411 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30412 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30413 counter[CM_DREQ_COUNTER]);
30414 cm_issue_drep(work->port, work->mad_recv_wc);
30415 return -EINVAL;
30416 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30417 case IB_CM_MRA_REP_RCVD:
30418 break;
30419 case IB_CM_TIMEWAIT:
30420 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30421 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30422 counter[CM_DREQ_COUNTER]);
30423 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30424 goto unlock;
30425 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30426 cm_free_msg(msg);
30427 goto deref;
30428 case IB_CM_DREQ_RCVD:
30429 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30430 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30431 counter[CM_DREQ_COUNTER]);
30432 goto unlock;
30433 default:
30434 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30435 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30436 cm_id_priv->msg, timeout)) {
30437 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30438 - atomic_long_inc(&work->port->
30439 + atomic_long_inc_unchecked(&work->port->
30440 counter_group[CM_RECV_DUPLICATES].
30441 counter[CM_MRA_COUNTER]);
30442 goto out;
30443 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30444 break;
30445 case IB_CM_MRA_REQ_RCVD:
30446 case IB_CM_MRA_REP_RCVD:
30447 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30448 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30449 counter[CM_MRA_COUNTER]);
30450 /* fall through */
30451 default:
30452 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30453 case IB_CM_LAP_IDLE:
30454 break;
30455 case IB_CM_MRA_LAP_SENT:
30456 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30457 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30458 counter[CM_LAP_COUNTER]);
30459 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30460 goto unlock;
30461 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30462 cm_free_msg(msg);
30463 goto deref;
30464 case IB_CM_LAP_RCVD:
30465 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30466 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30467 counter[CM_LAP_COUNTER]);
30468 goto unlock;
30469 default:
30470 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30471 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30472 if (cur_cm_id_priv) {
30473 spin_unlock_irq(&cm.lock);
30474 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30475 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30476 counter[CM_SIDR_REQ_COUNTER]);
30477 goto out; /* Duplicate message. */
30478 }
30479 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30480 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30481 msg->retries = 1;
30482
30483 - atomic_long_add(1 + msg->retries,
30484 + atomic_long_add_unchecked(1 + msg->retries,
30485 &port->counter_group[CM_XMIT].counter[attr_index]);
30486 if (msg->retries)
30487 - atomic_long_add(msg->retries,
30488 + atomic_long_add_unchecked(msg->retries,
30489 &port->counter_group[CM_XMIT_RETRIES].
30490 counter[attr_index]);
30491
30492 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30493 }
30494
30495 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30496 - atomic_long_inc(&port->counter_group[CM_RECV].
30497 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30498 counter[attr_id - CM_ATTR_ID_OFFSET]);
30499
30500 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30501 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30502 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30503
30504 return sprintf(buf, "%ld\n",
30505 - atomic_long_read(&group->counter[cm_attr->index]));
30506 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30507 }
30508
30509 -static struct sysfs_ops cm_counter_ops = {
30510 +static const struct sysfs_ops cm_counter_ops = {
30511 .show = cm_show_counter
30512 };
30513
30514 diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30515 --- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30516 +++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30517 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30518
30519 struct task_struct *thread;
30520
30521 - atomic_t req_ser;
30522 - atomic_t flush_ser;
30523 + atomic_unchecked_t req_ser;
30524 + atomic_unchecked_t flush_ser;
30525
30526 wait_queue_head_t force_wait;
30527 };
30528 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30529 struct ib_fmr_pool *pool = pool_ptr;
30530
30531 do {
30532 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30533 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30534 ib_fmr_batch_release(pool);
30535
30536 - atomic_inc(&pool->flush_ser);
30537 + atomic_inc_unchecked(&pool->flush_ser);
30538 wake_up_interruptible(&pool->force_wait);
30539
30540 if (pool->flush_function)
30541 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30542 }
30543
30544 set_current_state(TASK_INTERRUPTIBLE);
30545 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30546 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30547 !kthread_should_stop())
30548 schedule();
30549 __set_current_state(TASK_RUNNING);
30550 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30551 pool->dirty_watermark = params->dirty_watermark;
30552 pool->dirty_len = 0;
30553 spin_lock_init(&pool->pool_lock);
30554 - atomic_set(&pool->req_ser, 0);
30555 - atomic_set(&pool->flush_ser, 0);
30556 + atomic_set_unchecked(&pool->req_ser, 0);
30557 + atomic_set_unchecked(&pool->flush_ser, 0);
30558 init_waitqueue_head(&pool->force_wait);
30559
30560 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30561 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30562 }
30563 spin_unlock_irq(&pool->pool_lock);
30564
30565 - serial = atomic_inc_return(&pool->req_ser);
30566 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30567 wake_up_process(pool->thread);
30568
30569 if (wait_event_interruptible(pool->force_wait,
30570 - atomic_read(&pool->flush_ser) - serial >= 0))
30571 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30572 return -EINTR;
30573
30574 return 0;
30575 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30576 } else {
30577 list_add_tail(&fmr->list, &pool->dirty_list);
30578 if (++pool->dirty_len >= pool->dirty_watermark) {
30579 - atomic_inc(&pool->req_ser);
30580 + atomic_inc_unchecked(&pool->req_ser);
30581 wake_up_process(pool->thread);
30582 }
30583 }
30584 diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30585 --- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30586 +++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30587 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30588 return port_attr->show(p, port_attr, buf);
30589 }
30590
30591 -static struct sysfs_ops port_sysfs_ops = {
30592 +static const struct sysfs_ops port_sysfs_ops = {
30593 .show = port_attr_show
30594 };
30595
30596 diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30597 --- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30598 +++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30599 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30600 dst->grh.sgid_index = src->grh.sgid_index;
30601 dst->grh.hop_limit = src->grh.hop_limit;
30602 dst->grh.traffic_class = src->grh.traffic_class;
30603 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30604 dst->dlid = src->dlid;
30605 dst->sl = src->sl;
30606 dst->src_path_bits = src->src_path_bits;
30607 dst->static_rate = src->static_rate;
30608 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30609 dst->port_num = src->port_num;
30610 + dst->reserved = 0;
30611 }
30612 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30613
30614 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30615 struct ib_qp_attr *src)
30616 {
30617 + dst->qp_state = src->qp_state;
30618 dst->cur_qp_state = src->cur_qp_state;
30619 dst->path_mtu = src->path_mtu;
30620 dst->path_mig_state = src->path_mig_state;
30621 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30622 dst->rnr_retry = src->rnr_retry;
30623 dst->alt_port_num = src->alt_port_num;
30624 dst->alt_timeout = src->alt_timeout;
30625 + memset(dst->reserved, 0, sizeof(dst->reserved));
30626 }
30627 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30628
30629 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30630 --- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30631 +++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30632 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30633 struct infinipath_counters counters;
30634 struct ipath_devdata *dd;
30635
30636 + pax_track_stack();
30637 +
30638 dd = file->f_path.dentry->d_inode->i_private;
30639 dd->ipath_f_read_counters(dd, &counters);
30640
30641 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30642 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30643 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30644 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30645 LIST_HEAD(nes_adapter_list);
30646 static LIST_HEAD(nes_dev_list);
30647
30648 -atomic_t qps_destroyed;
30649 +atomic_unchecked_t qps_destroyed;
30650
30651 static unsigned int ee_flsh_adapter;
30652 static unsigned int sysfs_nonidx_addr;
30653 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30654 struct nes_adapter *nesadapter = nesdev->nesadapter;
30655 u32 qp_id;
30656
30657 - atomic_inc(&qps_destroyed);
30658 + atomic_inc_unchecked(&qps_destroyed);
30659
30660 /* Free the control structures */
30661
30662 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30663 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30664 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30665 @@ -69,11 +69,11 @@ u32 cm_packets_received;
30666 u32 cm_listens_created;
30667 u32 cm_listens_destroyed;
30668 u32 cm_backlog_drops;
30669 -atomic_t cm_loopbacks;
30670 -atomic_t cm_nodes_created;
30671 -atomic_t cm_nodes_destroyed;
30672 -atomic_t cm_accel_dropped_pkts;
30673 -atomic_t cm_resets_recvd;
30674 +atomic_unchecked_t cm_loopbacks;
30675 +atomic_unchecked_t cm_nodes_created;
30676 +atomic_unchecked_t cm_nodes_destroyed;
30677 +atomic_unchecked_t cm_accel_dropped_pkts;
30678 +atomic_unchecked_t cm_resets_recvd;
30679
30680 static inline int mini_cm_accelerated(struct nes_cm_core *,
30681 struct nes_cm_node *);
30682 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30683
30684 static struct nes_cm_core *g_cm_core;
30685
30686 -atomic_t cm_connects;
30687 -atomic_t cm_accepts;
30688 -atomic_t cm_disconnects;
30689 -atomic_t cm_closes;
30690 -atomic_t cm_connecteds;
30691 -atomic_t cm_connect_reqs;
30692 -atomic_t cm_rejects;
30693 +atomic_unchecked_t cm_connects;
30694 +atomic_unchecked_t cm_accepts;
30695 +atomic_unchecked_t cm_disconnects;
30696 +atomic_unchecked_t cm_closes;
30697 +atomic_unchecked_t cm_connecteds;
30698 +atomic_unchecked_t cm_connect_reqs;
30699 +atomic_unchecked_t cm_rejects;
30700
30701
30702 /**
30703 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30704 cm_node->rem_mac);
30705
30706 add_hte_node(cm_core, cm_node);
30707 - atomic_inc(&cm_nodes_created);
30708 + atomic_inc_unchecked(&cm_nodes_created);
30709
30710 return cm_node;
30711 }
30712 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30713 }
30714
30715 atomic_dec(&cm_core->node_cnt);
30716 - atomic_inc(&cm_nodes_destroyed);
30717 + atomic_inc_unchecked(&cm_nodes_destroyed);
30718 nesqp = cm_node->nesqp;
30719 if (nesqp) {
30720 nesqp->cm_node = NULL;
30721 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30722
30723 static void drop_packet(struct sk_buff *skb)
30724 {
30725 - atomic_inc(&cm_accel_dropped_pkts);
30726 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30727 dev_kfree_skb_any(skb);
30728 }
30729
30730 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30731
30732 int reset = 0; /* whether to send reset in case of err.. */
30733 int passive_state;
30734 - atomic_inc(&cm_resets_recvd);
30735 + atomic_inc_unchecked(&cm_resets_recvd);
30736 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30737 " refcnt=%d\n", cm_node, cm_node->state,
30738 atomic_read(&cm_node->ref_count));
30739 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30740 rem_ref_cm_node(cm_node->cm_core, cm_node);
30741 return NULL;
30742 }
30743 - atomic_inc(&cm_loopbacks);
30744 + atomic_inc_unchecked(&cm_loopbacks);
30745 loopbackremotenode->loopbackpartner = cm_node;
30746 loopbackremotenode->tcp_cntxt.rcv_wscale =
30747 NES_CM_DEFAULT_RCV_WND_SCALE;
30748 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30749 add_ref_cm_node(cm_node);
30750 } else if (cm_node->state == NES_CM_STATE_TSA) {
30751 rem_ref_cm_node(cm_core, cm_node);
30752 - atomic_inc(&cm_accel_dropped_pkts);
30753 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30754 dev_kfree_skb_any(skb);
30755 break;
30756 }
30757 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30758
30759 if ((cm_id) && (cm_id->event_handler)) {
30760 if (issue_disconn) {
30761 - atomic_inc(&cm_disconnects);
30762 + atomic_inc_unchecked(&cm_disconnects);
30763 cm_event.event = IW_CM_EVENT_DISCONNECT;
30764 cm_event.status = disconn_status;
30765 cm_event.local_addr = cm_id->local_addr;
30766 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30767 }
30768
30769 if (issue_close) {
30770 - atomic_inc(&cm_closes);
30771 + atomic_inc_unchecked(&cm_closes);
30772 nes_disconnect(nesqp, 1);
30773
30774 cm_id->provider_data = nesqp;
30775 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30776
30777 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30778 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30779 - atomic_inc(&cm_accepts);
30780 + atomic_inc_unchecked(&cm_accepts);
30781
30782 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30783 atomic_read(&nesvnic->netdev->refcnt));
30784 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30785
30786 struct nes_cm_core *cm_core;
30787
30788 - atomic_inc(&cm_rejects);
30789 + atomic_inc_unchecked(&cm_rejects);
30790 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30791 loopback = cm_node->loopbackpartner;
30792 cm_core = cm_node->cm_core;
30793 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30794 ntohl(cm_id->local_addr.sin_addr.s_addr),
30795 ntohs(cm_id->local_addr.sin_port));
30796
30797 - atomic_inc(&cm_connects);
30798 + atomic_inc_unchecked(&cm_connects);
30799 nesqp->active_conn = 1;
30800
30801 /* cache the cm_id in the qp */
30802 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30803 if (nesqp->destroyed) {
30804 return;
30805 }
30806 - atomic_inc(&cm_connecteds);
30807 + atomic_inc_unchecked(&cm_connecteds);
30808 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30809 " local port 0x%04X. jiffies = %lu.\n",
30810 nesqp->hwqp.qp_id,
30811 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30812
30813 ret = cm_id->event_handler(cm_id, &cm_event);
30814 cm_id->add_ref(cm_id);
30815 - atomic_inc(&cm_closes);
30816 + atomic_inc_unchecked(&cm_closes);
30817 cm_event.event = IW_CM_EVENT_CLOSE;
30818 cm_event.status = IW_CM_EVENT_STATUS_OK;
30819 cm_event.provider_data = cm_id->provider_data;
30820 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30821 return;
30822 cm_id = cm_node->cm_id;
30823
30824 - atomic_inc(&cm_connect_reqs);
30825 + atomic_inc_unchecked(&cm_connect_reqs);
30826 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30827 cm_node, cm_id, jiffies);
30828
30829 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30830 return;
30831 cm_id = cm_node->cm_id;
30832
30833 - atomic_inc(&cm_connect_reqs);
30834 + atomic_inc_unchecked(&cm_connect_reqs);
30835 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30836 cm_node, cm_id, jiffies);
30837
30838 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
30839 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30840 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30841 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30842 extern unsigned int wqm_quanta;
30843 extern struct list_head nes_adapter_list;
30844
30845 -extern atomic_t cm_connects;
30846 -extern atomic_t cm_accepts;
30847 -extern atomic_t cm_disconnects;
30848 -extern atomic_t cm_closes;
30849 -extern atomic_t cm_connecteds;
30850 -extern atomic_t cm_connect_reqs;
30851 -extern atomic_t cm_rejects;
30852 -extern atomic_t mod_qp_timouts;
30853 -extern atomic_t qps_created;
30854 -extern atomic_t qps_destroyed;
30855 -extern atomic_t sw_qps_destroyed;
30856 +extern atomic_unchecked_t cm_connects;
30857 +extern atomic_unchecked_t cm_accepts;
30858 +extern atomic_unchecked_t cm_disconnects;
30859 +extern atomic_unchecked_t cm_closes;
30860 +extern atomic_unchecked_t cm_connecteds;
30861 +extern atomic_unchecked_t cm_connect_reqs;
30862 +extern atomic_unchecked_t cm_rejects;
30863 +extern atomic_unchecked_t mod_qp_timouts;
30864 +extern atomic_unchecked_t qps_created;
30865 +extern atomic_unchecked_t qps_destroyed;
30866 +extern atomic_unchecked_t sw_qps_destroyed;
30867 extern u32 mh_detected;
30868 extern u32 mh_pauses_sent;
30869 extern u32 cm_packets_sent;
30870 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30871 extern u32 cm_listens_created;
30872 extern u32 cm_listens_destroyed;
30873 extern u32 cm_backlog_drops;
30874 -extern atomic_t cm_loopbacks;
30875 -extern atomic_t cm_nodes_created;
30876 -extern atomic_t cm_nodes_destroyed;
30877 -extern atomic_t cm_accel_dropped_pkts;
30878 -extern atomic_t cm_resets_recvd;
30879 +extern atomic_unchecked_t cm_loopbacks;
30880 +extern atomic_unchecked_t cm_nodes_created;
30881 +extern atomic_unchecked_t cm_nodes_destroyed;
30882 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30883 +extern atomic_unchecked_t cm_resets_recvd;
30884
30885 extern u32 int_mod_timer_init;
30886 extern u32 int_mod_cq_depth_256;
30887 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
30888 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30889 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30890 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30891 target_stat_values[++index] = mh_detected;
30892 target_stat_values[++index] = mh_pauses_sent;
30893 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30894 - target_stat_values[++index] = atomic_read(&cm_connects);
30895 - target_stat_values[++index] = atomic_read(&cm_accepts);
30896 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30897 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30898 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30899 - target_stat_values[++index] = atomic_read(&cm_rejects);
30900 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30901 - target_stat_values[++index] = atomic_read(&qps_created);
30902 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30903 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30904 - target_stat_values[++index] = atomic_read(&cm_closes);
30905 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30906 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30907 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30908 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30909 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30910 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30911 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30912 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30913 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30914 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30915 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30916 target_stat_values[++index] = cm_packets_sent;
30917 target_stat_values[++index] = cm_packets_bounced;
30918 target_stat_values[++index] = cm_packets_created;
30919 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30920 target_stat_values[++index] = cm_listens_created;
30921 target_stat_values[++index] = cm_listens_destroyed;
30922 target_stat_values[++index] = cm_backlog_drops;
30923 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30924 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30925 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30926 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30927 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30928 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30929 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30930 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30931 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30932 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30933 target_stat_values[++index] = int_mod_timer_init;
30934 target_stat_values[++index] = int_mod_cq_depth_1;
30935 target_stat_values[++index] = int_mod_cq_depth_4;
30936 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
30937 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30938 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30939 @@ -45,9 +45,9 @@
30940
30941 #include <rdma/ib_umem.h>
30942
30943 -atomic_t mod_qp_timouts;
30944 -atomic_t qps_created;
30945 -atomic_t sw_qps_destroyed;
30946 +atomic_unchecked_t mod_qp_timouts;
30947 +atomic_unchecked_t qps_created;
30948 +atomic_unchecked_t sw_qps_destroyed;
30949
30950 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30951
30952 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30953 if (init_attr->create_flags)
30954 return ERR_PTR(-EINVAL);
30955
30956 - atomic_inc(&qps_created);
30957 + atomic_inc_unchecked(&qps_created);
30958 switch (init_attr->qp_type) {
30959 case IB_QPT_RC:
30960 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30961 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30962 struct iw_cm_event cm_event;
30963 int ret;
30964
30965 - atomic_inc(&sw_qps_destroyed);
30966 + atomic_inc_unchecked(&sw_qps_destroyed);
30967 nesqp->destroyed = 1;
30968
30969 /* Blow away the connection if it exists. */
30970 diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
30971 --- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30972 +++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30973 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30974 */
30975 static void gameport_init_port(struct gameport *gameport)
30976 {
30977 - static atomic_t gameport_no = ATOMIC_INIT(0);
30978 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30979
30980 __module_get(THIS_MODULE);
30981
30982 mutex_init(&gameport->drv_mutex);
30983 device_initialize(&gameport->dev);
30984 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30985 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30986 gameport->dev.bus = &gameport_bus;
30987 gameport->dev.release = gameport_release_port;
30988 if (gameport->parent)
30989 diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
30990 --- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30991 +++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30992 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30993 */
30994 int input_register_device(struct input_dev *dev)
30995 {
30996 - static atomic_t input_no = ATOMIC_INIT(0);
30997 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30998 struct input_handler *handler;
30999 const char *path;
31000 int error;
31001 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31002 dev->setkeycode = input_default_setkeycode;
31003
31004 dev_set_name(&dev->dev, "input%ld",
31005 - (unsigned long) atomic_inc_return(&input_no) - 1);
31006 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31007
31008 error = device_add(&dev->dev);
31009 if (error)
31010 diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31011 --- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31012 +++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31013 @@ -30,6 +30,7 @@
31014 #include <linux/kernel.h>
31015 #include <linux/module.h>
31016 #include <linux/slab.h>
31017 +#include <linux/sched.h>
31018 #include <linux/init.h>
31019 #include <linux/input.h>
31020 #include <linux/gameport.h>
31021 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31022 unsigned char buf[SW_LENGTH];
31023 int i;
31024
31025 + pax_track_stack();
31026 +
31027 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31028
31029 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31030 diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31031 --- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31032 +++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31033 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31034
31035 static int xpad_led_probe(struct usb_xpad *xpad)
31036 {
31037 - static atomic_t led_seq = ATOMIC_INIT(0);
31038 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31039 long led_no;
31040 struct xpad_led *led;
31041 struct led_classdev *led_cdev;
31042 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31043 if (!led)
31044 return -ENOMEM;
31045
31046 - led_no = (long)atomic_inc_return(&led_seq) - 1;
31047 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31048
31049 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31050 led->xpad = xpad;
31051 diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31052 --- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31053 +++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31054 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
31055 */
31056 static void serio_init_port(struct serio *serio)
31057 {
31058 - static atomic_t serio_no = ATOMIC_INIT(0);
31059 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31060
31061 __module_get(THIS_MODULE);
31062
31063 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31064 mutex_init(&serio->drv_mutex);
31065 device_initialize(&serio->dev);
31066 dev_set_name(&serio->dev, "serio%ld",
31067 - (long)atomic_inc_return(&serio_no) - 1);
31068 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
31069 serio->dev.bus = &serio_bus;
31070 serio->dev.release = serio_release_port;
31071 if (serio->parent) {
31072 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31073 --- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31074 +++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31075 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31076 cs->commands_pending = 0;
31077 cs->cur_at_seq = 0;
31078 cs->gotfwver = -1;
31079 - cs->open_count = 0;
31080 + local_set(&cs->open_count, 0);
31081 cs->dev = NULL;
31082 cs->tty = NULL;
31083 cs->tty_dev = NULL;
31084 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31085 --- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31086 +++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31087 @@ -34,6 +34,7 @@
31088 #include <linux/tty_driver.h>
31089 #include <linux/list.h>
31090 #include <asm/atomic.h>
31091 +#include <asm/local.h>
31092
31093 #define GIG_VERSION {0,5,0,0}
31094 #define GIG_COMPAT {0,4,0,0}
31095 @@ -446,7 +447,7 @@ struct cardstate {
31096 spinlock_t cmdlock;
31097 unsigned curlen, cmdbytes;
31098
31099 - unsigned open_count;
31100 + local_t open_count;
31101 struct tty_struct *tty;
31102 struct tasklet_struct if_wake_tasklet;
31103 unsigned control_state;
31104 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31105 --- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31106 +++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31107 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31108 return -ERESTARTSYS; // FIXME -EINTR?
31109 tty->driver_data = cs;
31110
31111 - ++cs->open_count;
31112 -
31113 - if (cs->open_count == 1) {
31114 + if (local_inc_return(&cs->open_count) == 1) {
31115 spin_lock_irqsave(&cs->lock, flags);
31116 cs->tty = tty;
31117 spin_unlock_irqrestore(&cs->lock, flags);
31118 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31119
31120 if (!cs->connected)
31121 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31122 - else if (!cs->open_count)
31123 + else if (!local_read(&cs->open_count))
31124 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31125 else {
31126 - if (!--cs->open_count) {
31127 + if (!local_dec_return(&cs->open_count)) {
31128 spin_lock_irqsave(&cs->lock, flags);
31129 cs->tty = NULL;
31130 spin_unlock_irqrestore(&cs->lock, flags);
31131 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31132 if (!cs->connected) {
31133 gig_dbg(DEBUG_IF, "not connected");
31134 retval = -ENODEV;
31135 - } else if (!cs->open_count)
31136 + } else if (!local_read(&cs->open_count))
31137 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31138 else {
31139 retval = 0;
31140 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31141 if (!cs->connected) {
31142 gig_dbg(DEBUG_IF, "not connected");
31143 retval = -ENODEV;
31144 - } else if (!cs->open_count)
31145 + } else if (!local_read(&cs->open_count))
31146 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31147 else if (cs->mstate != MS_LOCKED) {
31148 dev_warn(cs->dev, "can't write to unlocked device\n");
31149 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31150 if (!cs->connected) {
31151 gig_dbg(DEBUG_IF, "not connected");
31152 retval = -ENODEV;
31153 - } else if (!cs->open_count)
31154 + } else if (!local_read(&cs->open_count))
31155 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31156 else if (cs->mstate != MS_LOCKED) {
31157 dev_warn(cs->dev, "can't write to unlocked device\n");
31158 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31159
31160 if (!cs->connected)
31161 gig_dbg(DEBUG_IF, "not connected");
31162 - else if (!cs->open_count)
31163 + else if (!local_read(&cs->open_count))
31164 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31165 else if (cs->mstate != MS_LOCKED)
31166 dev_warn(cs->dev, "can't write to unlocked device\n");
31167 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31168
31169 if (!cs->connected)
31170 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31171 - else if (!cs->open_count)
31172 + else if (!local_read(&cs->open_count))
31173 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31174 else {
31175 //FIXME
31176 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31177
31178 if (!cs->connected)
31179 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31180 - else if (!cs->open_count)
31181 + else if (!local_read(&cs->open_count))
31182 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31183 else {
31184 //FIXME
31185 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31186 goto out;
31187 }
31188
31189 - if (!cs->open_count) {
31190 + if (!local_read(&cs->open_count)) {
31191 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31192 goto out;
31193 }
31194 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31195 --- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31196 +++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31197 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31198 }
31199 if (left) {
31200 if (t4file->user) {
31201 - if (copy_from_user(buf, dp, left))
31202 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31203 return -EFAULT;
31204 } else {
31205 memcpy(buf, dp, left);
31206 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31207 }
31208 if (left) {
31209 if (config->user) {
31210 - if (copy_from_user(buf, dp, left))
31211 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31212 return -EFAULT;
31213 } else {
31214 memcpy(buf, dp, left);
31215 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31216 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31217 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31218 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31219 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31220 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31221
31222 + pax_track_stack();
31223
31224 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31225 {
31226 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31227 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31228 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31229 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31230 IDI_SYNC_REQ req;
31231 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31232
31233 + pax_track_stack();
31234 +
31235 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31236
31237 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31238 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31239 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31240 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31241 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31242 IDI_SYNC_REQ req;
31243 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31244
31245 + pax_track_stack();
31246 +
31247 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31248
31249 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31250 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31251 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31252 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31253 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31254 IDI_SYNC_REQ req;
31255 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31256
31257 + pax_track_stack();
31258 +
31259 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31260
31261 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31262 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31263 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31264 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31265 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31266 } diva_didd_add_adapter_t;
31267 typedef struct _diva_didd_remove_adapter {
31268 IDI_CALL p_request;
31269 -} diva_didd_remove_adapter_t;
31270 +} __no_const diva_didd_remove_adapter_t;
31271 typedef struct _diva_didd_read_adapter_array {
31272 void * buffer;
31273 dword length;
31274 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31275 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31276 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31277 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31278 IDI_SYNC_REQ req;
31279 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31280
31281 + pax_track_stack();
31282 +
31283 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31284
31285 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31286 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31287 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31288 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31289 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31290 dword d;
31291 word w;
31292
31293 + pax_track_stack();
31294 +
31295 a = plci->adapter;
31296 Id = ((word)plci->Id<<8)|a->Id;
31297 PUT_WORD(&SS_Ind[4],0x0000);
31298 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31299 word j, n, w;
31300 dword d;
31301
31302 + pax_track_stack();
31303 +
31304
31305 for(i=0;i<8;i++) bp_parms[i].length = 0;
31306 for(i=0;i<2;i++) global_config[i].length = 0;
31307 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31308 const byte llc3[] = {4,3,2,2,6,6,0};
31309 const byte header[] = {0,2,3,3,0,0,0};
31310
31311 + pax_track_stack();
31312 +
31313 for(i=0;i<8;i++) bp_parms[i].length = 0;
31314 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31315 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31316 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31317 word appl_number_group_type[MAX_APPL];
31318 PLCI *auxplci;
31319
31320 + pax_track_stack();
31321 +
31322 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31323
31324 if(!a->group_optimization_enabled)
31325 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31326 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31327 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31328 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31329 IDI_SYNC_REQ req;
31330 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31331
31332 + pax_track_stack();
31333 +
31334 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31335
31336 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31337 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31338 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31339 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31340 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31341 typedef struct _diva_os_idi_adapter_interface {
31342 diva_init_card_proc_t cleanup_adapter_proc;
31343 diva_cmd_card_proc_t cmd_proc;
31344 -} diva_os_idi_adapter_interface_t;
31345 +} __no_const diva_os_idi_adapter_interface_t;
31346
31347 typedef struct _diva_os_xdi_adapter {
31348 struct list_head link;
31349 diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31350 --- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31351 +++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31352 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31353 } iocpar;
31354 void __user *argp = (void __user *)arg;
31355
31356 + pax_track_stack();
31357 +
31358 #define name iocpar.name
31359 #define bname iocpar.bname
31360 #define iocts iocpar.iocts
31361 diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31362 --- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31363 +++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31364 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31365 if (count > len)
31366 count = len;
31367 if (user) {
31368 - if (copy_from_user(msg, buf, count))
31369 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31370 return -EFAULT;
31371 } else
31372 memcpy(msg, buf, count);
31373 diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31374 --- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31375 +++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31376 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31377 if (dev) {
31378 struct mISDN_devinfo di;
31379
31380 + memset(&di, 0, sizeof(di));
31381 di.id = dev->id;
31382 di.Dprotocols = dev->Dprotocols;
31383 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31384 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31385 if (dev) {
31386 struct mISDN_devinfo di;
31387
31388 + memset(&di, 0, sizeof(di));
31389 di.id = dev->id;
31390 di.Dprotocols = dev->Dprotocols;
31391 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31392 diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31393 --- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31394 +++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31395 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31396 }
31397 else if(callid>=0x0000 && callid<=0x7FFF)
31398 {
31399 + int len;
31400 +
31401 pr_debug("%s: Got Incoming Call\n",
31402 sc_adapter[card]->devicename);
31403 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31404 - strcpy(setup.eazmsn,
31405 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31406 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31407 + sizeof(setup.phone));
31408 + if (len >= sizeof(setup.phone))
31409 + continue;
31410 + len = strlcpy(setup.eazmsn,
31411 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31412 + sizeof(setup.eazmsn));
31413 + if (len >= sizeof(setup.eazmsn))
31414 + continue;
31415 setup.si1 = 7;
31416 setup.si2 = 0;
31417 setup.plan = 0;
31418 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31419 * Handle a GetMyNumber Rsp
31420 */
31421 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31422 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31423 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31424 + rcvmsg.msg_data.byte_array,
31425 + sizeof(rcvmsg.msg_data.byte_array));
31426 continue;
31427 }
31428
31429 diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31430 --- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31431 +++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31432 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
31433 * it's worked so far. The end address needs +1 because __get_vm_area
31434 * allocates an extra guard page, so we need space for that.
31435 */
31436 +
31437 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31438 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31439 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31440 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31441 +#else
31442 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31443 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31444 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31445 +#endif
31446 +
31447 if (!switcher_vma) {
31448 err = -ENOMEM;
31449 printk("lguest: could not map switcher pages high\n");
31450 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
31451 * Now the Switcher is mapped at the right address, we can't fail!
31452 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31453 */
31454 - memcpy(switcher_vma->addr, start_switcher_text,
31455 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31456 end_switcher_text - start_switcher_text);
31457
31458 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31459 diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31460 --- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31461 +++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31462 @@ -59,7 +59,7 @@ static struct {
31463 /* Offset from where switcher.S was compiled to where we've copied it */
31464 static unsigned long switcher_offset(void)
31465 {
31466 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31467 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31468 }
31469
31470 /* This cpu's struct lguest_pages. */
31471 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31472 * These copies are pretty cheap, so we do them unconditionally: */
31473 /* Save the current Host top-level page directory.
31474 */
31475 +
31476 +#ifdef CONFIG_PAX_PER_CPU_PGD
31477 + pages->state.host_cr3 = read_cr3();
31478 +#else
31479 pages->state.host_cr3 = __pa(current->mm->pgd);
31480 +#endif
31481 +
31482 /*
31483 * Set up the Guest's page tables to see this CPU's pages (and no
31484 * other CPU's pages).
31485 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31486 * compiled-in switcher code and the high-mapped copy we just made.
31487 */
31488 for (i = 0; i < IDT_ENTRIES; i++)
31489 - default_idt_entries[i] += switcher_offset();
31490 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31491
31492 /*
31493 * Set up the Switcher's per-cpu areas.
31494 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31495 * it will be undisturbed when we switch. To change %cs and jump we
31496 * need this structure to feed to Intel's "lcall" instruction.
31497 */
31498 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31499 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31500 lguest_entry.segment = LGUEST_CS;
31501
31502 /*
31503 diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31504 --- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31505 +++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31506 @@ -87,6 +87,7 @@
31507 #include <asm/page.h>
31508 #include <asm/segment.h>
31509 #include <asm/lguest.h>
31510 +#include <asm/processor-flags.h>
31511
31512 // We mark the start of the code to copy
31513 // It's placed in .text tho it's never run here
31514 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31515 // Changes type when we load it: damn Intel!
31516 // For after we switch over our page tables
31517 // That entry will be read-only: we'd crash.
31518 +
31519 +#ifdef CONFIG_PAX_KERNEXEC
31520 + mov %cr0, %edx
31521 + xor $X86_CR0_WP, %edx
31522 + mov %edx, %cr0
31523 +#endif
31524 +
31525 movl $(GDT_ENTRY_TSS*8), %edx
31526 ltr %dx
31527
31528 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31529 // Let's clear it again for our return.
31530 // The GDT descriptor of the Host
31531 // Points to the table after two "size" bytes
31532 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31533 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31534 // Clear "used" from type field (byte 5, bit 2)
31535 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31536 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31537 +
31538 +#ifdef CONFIG_PAX_KERNEXEC
31539 + mov %cr0, %eax
31540 + xor $X86_CR0_WP, %eax
31541 + mov %eax, %cr0
31542 +#endif
31543
31544 // Once our page table's switched, the Guest is live!
31545 // The Host fades as we run this final step.
31546 @@ -295,13 +309,12 @@ deliver_to_host:
31547 // I consulted gcc, and it gave
31548 // These instructions, which I gladly credit:
31549 leal (%edx,%ebx,8), %eax
31550 - movzwl (%eax),%edx
31551 - movl 4(%eax), %eax
31552 - xorw %ax, %ax
31553 - orl %eax, %edx
31554 + movl 4(%eax), %edx
31555 + movw (%eax), %dx
31556 // Now the address of the handler's in %edx
31557 // We call it now: its "iret" drops us home.
31558 - jmp *%edx
31559 + ljmp $__KERNEL_CS, $1f
31560 +1: jmp *%edx
31561
31562 // Every interrupt can come to us here
31563 // But we must truly tell each apart.
31564 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31565 --- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31566 +++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31567 @@ -15,7 +15,7 @@
31568
31569 #define MAX_PMU_LEVEL 0xFF
31570
31571 -static struct backlight_ops pmu_backlight_data;
31572 +static const struct backlight_ops pmu_backlight_data;
31573 static DEFINE_SPINLOCK(pmu_backlight_lock);
31574 static int sleeping, uses_pmu_bl;
31575 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31576 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31577 return bd->props.brightness;
31578 }
31579
31580 -static struct backlight_ops pmu_backlight_data = {
31581 +static const struct backlight_ops pmu_backlight_data = {
31582 .get_brightness = pmu_backlight_get_brightness,
31583 .update_status = pmu_backlight_update_status,
31584
31585 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31586 --- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31587 +++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31588 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31589 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31590 }
31591
31592 -static struct platform_suspend_ops pmu_pm_ops = {
31593 +static const struct platform_suspend_ops pmu_pm_ops = {
31594 .enter = powerbook_sleep,
31595 .valid = pmu_sleep_valid,
31596 };
31597 diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31598 --- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31599 +++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31600 @@ -165,9 +165,9 @@ struct mapped_device {
31601 /*
31602 * Event handling.
31603 */
31604 - atomic_t event_nr;
31605 + atomic_unchecked_t event_nr;
31606 wait_queue_head_t eventq;
31607 - atomic_t uevent_seq;
31608 + atomic_unchecked_t uevent_seq;
31609 struct list_head uevent_list;
31610 spinlock_t uevent_lock; /* Protect access to uevent_list */
31611
31612 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31613 rwlock_init(&md->map_lock);
31614 atomic_set(&md->holders, 1);
31615 atomic_set(&md->open_count, 0);
31616 - atomic_set(&md->event_nr, 0);
31617 - atomic_set(&md->uevent_seq, 0);
31618 + atomic_set_unchecked(&md->event_nr, 0);
31619 + atomic_set_unchecked(&md->uevent_seq, 0);
31620 INIT_LIST_HEAD(&md->uevent_list);
31621 spin_lock_init(&md->uevent_lock);
31622
31623 @@ -1927,7 +1927,7 @@ static void event_callback(void *context
31624
31625 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31626
31627 - atomic_inc(&md->event_nr);
31628 + atomic_inc_unchecked(&md->event_nr);
31629 wake_up(&md->eventq);
31630 }
31631
31632 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31633
31634 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31635 {
31636 - return atomic_add_return(1, &md->uevent_seq);
31637 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31638 }
31639
31640 uint32_t dm_get_event_nr(struct mapped_device *md)
31641 {
31642 - return atomic_read(&md->event_nr);
31643 + return atomic_read_unchecked(&md->event_nr);
31644 }
31645
31646 int dm_wait_event(struct mapped_device *md, int event_nr)
31647 {
31648 return wait_event_interruptible(md->eventq,
31649 - (event_nr != atomic_read(&md->event_nr)));
31650 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31651 }
31652
31653 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31654 diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31655 --- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31656 +++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31657 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31658 cmd == DM_LIST_VERSIONS_CMD)
31659 return 0;
31660
31661 - if ((cmd == DM_DEV_CREATE_CMD)) {
31662 + if (cmd == DM_DEV_CREATE_CMD) {
31663 if (!*param->name) {
31664 DMWARN("name not supplied when creating device");
31665 return -EINVAL;
31666 diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31667 --- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31668 +++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31669 @@ -41,7 +41,7 @@ enum dm_raid1_error {
31670
31671 struct mirror {
31672 struct mirror_set *ms;
31673 - atomic_t error_count;
31674 + atomic_unchecked_t error_count;
31675 unsigned long error_type;
31676 struct dm_dev *dev;
31677 sector_t offset;
31678 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31679 * simple way to tell if a device has encountered
31680 * errors.
31681 */
31682 - atomic_inc(&m->error_count);
31683 + atomic_inc_unchecked(&m->error_count);
31684
31685 if (test_and_set_bit(error_type, &m->error_type))
31686 return;
31687 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31688 }
31689
31690 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31691 - if (!atomic_read(&new->error_count)) {
31692 + if (!atomic_read_unchecked(&new->error_count)) {
31693 set_default_mirror(new);
31694 break;
31695 }
31696 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31697 struct mirror *m = get_default_mirror(ms);
31698
31699 do {
31700 - if (likely(!atomic_read(&m->error_count)))
31701 + if (likely(!atomic_read_unchecked(&m->error_count)))
31702 return m;
31703
31704 if (m-- == ms->mirror)
31705 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31706 {
31707 struct mirror *default_mirror = get_default_mirror(m->ms);
31708
31709 - return !atomic_read(&default_mirror->error_count);
31710 + return !atomic_read_unchecked(&default_mirror->error_count);
31711 }
31712
31713 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31714 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31715 */
31716 if (likely(region_in_sync(ms, region, 1)))
31717 m = choose_mirror(ms, bio->bi_sector);
31718 - else if (m && atomic_read(&m->error_count))
31719 + else if (m && atomic_read_unchecked(&m->error_count))
31720 m = NULL;
31721
31722 if (likely(m))
31723 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31724 }
31725
31726 ms->mirror[mirror].ms = ms;
31727 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31728 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31729 ms->mirror[mirror].error_type = 0;
31730 ms->mirror[mirror].offset = offset;
31731
31732 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31733 */
31734 static char device_status_char(struct mirror *m)
31735 {
31736 - if (!atomic_read(&(m->error_count)))
31737 + if (!atomic_read_unchecked(&(m->error_count)))
31738 return 'A';
31739
31740 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31741 diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31742 --- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31743 +++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31744 @@ -20,7 +20,7 @@ struct stripe {
31745 struct dm_dev *dev;
31746 sector_t physical_start;
31747
31748 - atomic_t error_count;
31749 + atomic_unchecked_t error_count;
31750 };
31751
31752 struct stripe_c {
31753 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31754 kfree(sc);
31755 return r;
31756 }
31757 - atomic_set(&(sc->stripe[i].error_count), 0);
31758 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31759 }
31760
31761 ti->private = sc;
31762 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31763 DMEMIT("%d ", sc->stripes);
31764 for (i = 0; i < sc->stripes; i++) {
31765 DMEMIT("%s ", sc->stripe[i].dev->name);
31766 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31767 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31768 'D' : 'A';
31769 }
31770 buffer[i] = '\0';
31771 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31772 */
31773 for (i = 0; i < sc->stripes; i++)
31774 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31775 - atomic_inc(&(sc->stripe[i].error_count));
31776 - if (atomic_read(&(sc->stripe[i].error_count)) <
31777 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31778 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31779 DM_IO_ERROR_THRESHOLD)
31780 queue_work(kstriped, &sc->kstriped_ws);
31781 }
31782 diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31783 --- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31784 +++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31785 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31786 NULL,
31787 };
31788
31789 -static struct sysfs_ops dm_sysfs_ops = {
31790 +static const struct sysfs_ops dm_sysfs_ops = {
31791 .show = dm_attr_show,
31792 };
31793
31794 diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
31795 --- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31796 +++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31797 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31798 if (!dev_size)
31799 return 0;
31800
31801 - if ((start >= dev_size) || (start + len > dev_size)) {
31802 + if ((start >= dev_size) || (len > dev_size - start)) {
31803 DMWARN("%s: %s too small for target: "
31804 "start=%llu, len=%llu, dev_size=%llu",
31805 dm_device_name(ti->table->md), bdevname(bdev, b),
31806 diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
31807 --- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31808 +++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31809 @@ -153,10 +153,10 @@ static int start_readonly;
31810 * start build, activate spare
31811 */
31812 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31813 -static atomic_t md_event_count;
31814 +static atomic_unchecked_t md_event_count;
31815 void md_new_event(mddev_t *mddev)
31816 {
31817 - atomic_inc(&md_event_count);
31818 + atomic_inc_unchecked(&md_event_count);
31819 wake_up(&md_event_waiters);
31820 }
31821 EXPORT_SYMBOL_GPL(md_new_event);
31822 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31823 */
31824 static void md_new_event_inintr(mddev_t *mddev)
31825 {
31826 - atomic_inc(&md_event_count);
31827 + atomic_inc_unchecked(&md_event_count);
31828 wake_up(&md_event_waiters);
31829 }
31830
31831 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31832
31833 rdev->preferred_minor = 0xffff;
31834 rdev->data_offset = le64_to_cpu(sb->data_offset);
31835 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31836 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31837
31838 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31839 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31840 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31841 else
31842 sb->resync_offset = cpu_to_le64(0);
31843
31844 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31845 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31846
31847 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31848 sb->size = cpu_to_le64(mddev->dev_sectors);
31849 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31850 static ssize_t
31851 errors_show(mdk_rdev_t *rdev, char *page)
31852 {
31853 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31854 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31855 }
31856
31857 static ssize_t
31858 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31859 char *e;
31860 unsigned long n = simple_strtoul(buf, &e, 10);
31861 if (*buf && (*e == 0 || *e == '\n')) {
31862 - atomic_set(&rdev->corrected_errors, n);
31863 + atomic_set_unchecked(&rdev->corrected_errors, n);
31864 return len;
31865 }
31866 return -EINVAL;
31867 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31868 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31869 kfree(rdev);
31870 }
31871 -static struct sysfs_ops rdev_sysfs_ops = {
31872 +static const struct sysfs_ops rdev_sysfs_ops = {
31873 .show = rdev_attr_show,
31874 .store = rdev_attr_store,
31875 };
31876 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31877 rdev->data_offset = 0;
31878 rdev->sb_events = 0;
31879 atomic_set(&rdev->nr_pending, 0);
31880 - atomic_set(&rdev->read_errors, 0);
31881 - atomic_set(&rdev->corrected_errors, 0);
31882 + atomic_set_unchecked(&rdev->read_errors, 0);
31883 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31884
31885 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31886 if (!size) {
31887 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31888 kfree(mddev);
31889 }
31890
31891 -static struct sysfs_ops md_sysfs_ops = {
31892 +static const struct sysfs_ops md_sysfs_ops = {
31893 .show = md_attr_show,
31894 .store = md_attr_store,
31895 };
31896 @@ -4474,7 +4474,8 @@ out:
31897 err = 0;
31898 blk_integrity_unregister(disk);
31899 md_new_event(mddev);
31900 - sysfs_notify_dirent(mddev->sysfs_state);
31901 + if (mddev->sysfs_state)
31902 + sysfs_notify_dirent(mddev->sysfs_state);
31903 return err;
31904 }
31905
31906 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31907
31908 spin_unlock(&pers_lock);
31909 seq_printf(seq, "\n");
31910 - mi->event = atomic_read(&md_event_count);
31911 + mi->event = atomic_read_unchecked(&md_event_count);
31912 return 0;
31913 }
31914 if (v == (void*)2) {
31915 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31916 chunk_kb ? "KB" : "B");
31917 if (bitmap->file) {
31918 seq_printf(seq, ", file: ");
31919 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31920 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31921 }
31922
31923 seq_printf(seq, "\n");
31924 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31925 else {
31926 struct seq_file *p = file->private_data;
31927 p->private = mi;
31928 - mi->event = atomic_read(&md_event_count);
31929 + mi->event = atomic_read_unchecked(&md_event_count);
31930 }
31931 return error;
31932 }
31933 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31934 /* always allow read */
31935 mask = POLLIN | POLLRDNORM;
31936
31937 - if (mi->event != atomic_read(&md_event_count))
31938 + if (mi->event != atomic_read_unchecked(&md_event_count))
31939 mask |= POLLERR | POLLPRI;
31940 return mask;
31941 }
31942 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31943 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31944 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31945 (int)part_stat_read(&disk->part0, sectors[1]) -
31946 - atomic_read(&disk->sync_io);
31947 + atomic_read_unchecked(&disk->sync_io);
31948 /* sync IO will cause sync_io to increase before the disk_stats
31949 * as sync_io is counted when a request starts, and
31950 * disk_stats is counted when it completes.
31951 diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
31952 --- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31953 +++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31954 @@ -94,10 +94,10 @@ struct mdk_rdev_s
31955 * only maintained for arrays that
31956 * support hot removal
31957 */
31958 - atomic_t read_errors; /* number of consecutive read errors that
31959 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31960 * we have tried to ignore.
31961 */
31962 - atomic_t corrected_errors; /* number of corrected read errors,
31963 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31964 * for reporting to userspace and storing
31965 * in superblock.
31966 */
31967 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31968
31969 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31970 {
31971 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31972 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31973 }
31974
31975 struct mdk_personality
31976 diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
31977 --- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31978 +++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31979 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31980 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31981 set_bit(R10BIO_Uptodate, &r10_bio->state);
31982 else {
31983 - atomic_add(r10_bio->sectors,
31984 + atomic_add_unchecked(r10_bio->sectors,
31985 &conf->mirrors[d].rdev->corrected_errors);
31986 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31987 md_error(r10_bio->mddev,
31988 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31989 test_bit(In_sync, &rdev->flags)) {
31990 atomic_inc(&rdev->nr_pending);
31991 rcu_read_unlock();
31992 - atomic_add(s, &rdev->corrected_errors);
31993 + atomic_add_unchecked(s, &rdev->corrected_errors);
31994 if (sync_page_io(rdev->bdev,
31995 r10_bio->devs[sl].addr +
31996 sect + rdev->data_offset,
31997 diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
31998 --- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31999 +++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32000 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32001 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32002 continue;
32003 rdev = conf->mirrors[d].rdev;
32004 - atomic_add(s, &rdev->corrected_errors);
32005 + atomic_add_unchecked(s, &rdev->corrected_errors);
32006 if (sync_page_io(rdev->bdev,
32007 sect + rdev->data_offset,
32008 s<<9,
32009 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32010 /* Well, this device is dead */
32011 md_error(mddev, rdev);
32012 else {
32013 - atomic_add(s, &rdev->corrected_errors);
32014 + atomic_add_unchecked(s, &rdev->corrected_errors);
32015 printk(KERN_INFO
32016 "raid1:%s: read error corrected "
32017 "(%d sectors at %llu on %s)\n",
32018 diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32019 --- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32020 +++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32021 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32022 bi->bi_next = NULL;
32023 if ((rw & WRITE) &&
32024 test_bit(R5_ReWrite, &sh->dev[i].flags))
32025 - atomic_add(STRIPE_SECTORS,
32026 + atomic_add_unchecked(STRIPE_SECTORS,
32027 &rdev->corrected_errors);
32028 generic_make_request(bi);
32029 } else {
32030 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32031 clear_bit(R5_ReadError, &sh->dev[i].flags);
32032 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32033 }
32034 - if (atomic_read(&conf->disks[i].rdev->read_errors))
32035 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
32036 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32037 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32038 } else {
32039 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32040 int retry = 0;
32041 rdev = conf->disks[i].rdev;
32042
32043 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32044 - atomic_inc(&rdev->read_errors);
32045 + atomic_inc_unchecked(&rdev->read_errors);
32046 if (conf->mddev->degraded >= conf->max_degraded)
32047 printk_rl(KERN_WARNING
32048 "raid5:%s: read error not correctable "
32049 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32050 (unsigned long long)(sh->sector
32051 + rdev->data_offset),
32052 bdn);
32053 - else if (atomic_read(&rdev->read_errors)
32054 + else if (atomic_read_unchecked(&rdev->read_errors)
32055 > conf->max_nr_stripes)
32056 printk(KERN_WARNING
32057 "raid5:%s: Too many read errors, failing device %s.\n",
32058 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32059 sector_t r_sector;
32060 struct stripe_head sh2;
32061
32062 + pax_track_stack();
32063
32064 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32065 stripe = new_sector;
32066 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_fops.c linux-2.6.32.45/drivers/media/common/saa7146_fops.c
32067 --- linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
32068 +++ linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
32069 @@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
32070 ERR(("out of memory. aborting.\n"));
32071 return -ENOMEM;
32072 }
32073 - ext_vv->ops = saa7146_video_ioctl_ops;
32074 + memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32075 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32076
32077 DEB_EE(("dev:%p\n",dev));
32078 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32079 --- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32080 +++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32081 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
32082
32083 int x[32], y[32], w[32], h[32];
32084
32085 + pax_track_stack();
32086 +
32087 /* clear out memory */
32088 memset(&line_list[0], 0x00, sizeof(u32)*32);
32089 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32090 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32091 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32092 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32093 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32094 u8 buf[HOST_LINK_BUF_SIZE];
32095 int i;
32096
32097 + pax_track_stack();
32098 +
32099 dprintk("%s\n", __func__);
32100
32101 /* check if we have space for a link buf in the rx_buffer */
32102 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32103 unsigned long timeout;
32104 int written;
32105
32106 + pax_track_stack();
32107 +
32108 dprintk("%s\n", __func__);
32109
32110 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32111 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32112 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32113 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32114 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
32115 union {
32116 dmx_ts_cb ts;
32117 dmx_section_cb sec;
32118 - } cb;
32119 + } __no_const cb;
32120
32121 struct dvb_demux *demux;
32122 void *priv;
32123 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32124 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32125 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32126 @@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32127 dvbdev->fops = dvbdevfops;
32128 init_waitqueue_head (&dvbdev->wait_queue);
32129
32130 - memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32131 - dvbdevfops->owner = adap->module;
32132 + memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32133 + *(void **)&dvbdevfops->owner = adap->module;
32134
32135 list_add_tail (&dvbdev->list_head, &adap->device_list);
32136
32137 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32138 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32139 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32140 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32141 struct dib0700_adapter_state {
32142 int (*set_param_save) (struct dvb_frontend *,
32143 struct dvb_frontend_parameters *);
32144 -};
32145 +} __no_const;
32146
32147 static int dib7070_set_param_override(struct dvb_frontend *fe,
32148 struct dvb_frontend_parameters *fep)
32149 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32150 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32151 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32152 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32153
32154 u8 buf[260];
32155
32156 + pax_track_stack();
32157 +
32158 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32159 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32160
32161 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32162 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32163 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32164 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32165
32166 struct dib0700_adapter_state {
32167 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32168 -};
32169 +} __no_const;
32170
32171 /* Hauppauge Nova-T 500 (aka Bristol)
32172 * has a LNA on GPIO0 which is enabled by setting 1 */
32173 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32174 --- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32175 +++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32176 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32177 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32178 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32179 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32180 -};
32181 +} __no_const;
32182
32183 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32184 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32185 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32186 --- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32187 +++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32188 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32189 u8 tudata[585];
32190 int i;
32191
32192 + pax_track_stack();
32193 +
32194 dprintk("Firmware is %zd bytes\n",fw->size);
32195
32196 /* Get eprom data */
32197 diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c
32198 --- linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32199 +++ linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32200 @@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32201 ERR(("cannot init capture device. skipping.\n"));
32202 return -ENODEV;
32203 }
32204 - vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32205 - vv_data->ops.vidioc_g_input = vidioc_g_input;
32206 - vv_data->ops.vidioc_s_input = vidioc_s_input;
32207 - vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32208 - vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32209 - vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32210 - vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32211 - vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32212 - vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32213 - vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32214 - vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32215 - vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32216 + *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32217 + *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32218 + *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32219 + *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32220 + *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32221 + *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32222 + *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32223 + *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32224 + *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32225 + *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32226 + *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32227 + *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32228
32229 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32230 ERR(("cannot register capture device. skipping.\n"));
32231 diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c
32232 --- linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32233 +++ linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32234 @@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32235 ERR(("cannot init vv subsystem.\n"));
32236 return err;
32237 }
32238 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32239 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32240 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32241 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32242 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32243 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32244
32245 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32246 /* fixme: proper cleanup here */
32247 diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32248 --- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32249 +++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32250 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32251 while (i < count && dev->rdsin != dev->rdsout)
32252 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32253
32254 - if (copy_to_user(data, readbuf, i))
32255 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32256 return -EFAULT;
32257 return i;
32258 }
32259 diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32260 --- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32261 +++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32262 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32263
32264 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32265
32266 -static atomic_t cx18_instance = ATOMIC_INIT(0);
32267 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32268
32269 /* Parameter declarations */
32270 static int cardtype[CX18_MAX_CARDS];
32271 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32272 struct i2c_client c;
32273 u8 eedata[256];
32274
32275 + pax_track_stack();
32276 +
32277 memset(&c, 0, sizeof(c));
32278 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32279 c.adapter = &cx->i2c_adap[0];
32280 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32281 struct cx18 *cx;
32282
32283 /* FIXME - module parameter arrays constrain max instances */
32284 - i = atomic_inc_return(&cx18_instance) - 1;
32285 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32286 if (i >= CX18_MAX_CARDS) {
32287 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32288 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32289 diff -urNp linux-2.6.32.45/drivers/media/video/hexium_gemini.c linux-2.6.32.45/drivers/media/video/hexium_gemini.c
32290 --- linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32291 +++ linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32292 @@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32293 hexium->cur_input = 0;
32294
32295 saa7146_vv_init(dev, &vv_data);
32296 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32297 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32298 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32299 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32300 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32301 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32302 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32303 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32304 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32305 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32306 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32307 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32308 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32309 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32310 return -1;
32311 diff -urNp linux-2.6.32.45/drivers/media/video/hexium_orion.c linux-2.6.32.45/drivers/media/video/hexium_orion.c
32312 --- linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32313 +++ linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32314 @@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32315 DEB_EE((".\n"));
32316
32317 saa7146_vv_init(dev, &vv_data);
32318 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32319 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32320 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32321 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32322 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32323 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32324 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32325 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32326 return -1;
32327 diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32328 --- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32329 +++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32330 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32331 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32332
32333 /* ivtv instance counter */
32334 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
32335 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32336
32337 /* Parameter declarations */
32338 static int cardtype[IVTV_MAX_CARDS];
32339 diff -urNp linux-2.6.32.45/drivers/media/video/mxb.c linux-2.6.32.45/drivers/media/video/mxb.c
32340 --- linux-2.6.32.45/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32341 +++ linux-2.6.32.45/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32342 @@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32343 already did this in "mxb_vl42_probe" */
32344
32345 saa7146_vv_init(dev, &vv_data);
32346 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32347 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32348 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32349 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32350 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32351 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32352 - vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32353 - vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32354 - vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32355 - vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32356 - vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32357 - vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32358 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32359 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32360 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32361 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32362 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32363 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32364 + *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32365 + *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32366 + *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32367 + *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32368 + *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32369 + *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32370 #ifdef CONFIG_VIDEO_ADV_DEBUG
32371 - vv_data.ops.vidioc_g_register = vidioc_g_register;
32372 - vv_data.ops.vidioc_s_register = vidioc_s_register;
32373 + *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32374 + *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32375 #endif
32376 - vv_data.ops.vidioc_default = vidioc_default;
32377 + *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32378 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32379 ERR(("cannot register capture v4l2 device. skipping.\n"));
32380 return -1;
32381 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32382 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32383 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32384 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32385 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32386
32387 do_gettimeofday(&vb->ts);
32388 - vb->field_count = atomic_add_return(2, &fh->field_count);
32389 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32390 if (csr & csr_error) {
32391 vb->state = VIDEOBUF_ERROR;
32392 if (!atomic_read(&fh->cam->in_reset)) {
32393 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32394 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32395 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32396 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32397 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32398 struct videobuf_queue vbq;
32399 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32400 - atomic_t field_count; /* field counter for videobuf_buffer */
32401 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32402 /* accessing cam here doesn't need serialisation: it's constant */
32403 struct omap24xxcam_device *cam;
32404 };
32405 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32406 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32407 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32408 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32409 u8 *eeprom;
32410 struct tveeprom tvdata;
32411
32412 + pax_track_stack();
32413 +
32414 memset(&tvdata,0,sizeof(tvdata));
32415
32416 eeprom = pvr2_eeprom_fetch(hdw);
32417 diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32418 --- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32419 +++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32420 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32421 unsigned char localPAT[256];
32422 unsigned char localPMT[256];
32423
32424 + pax_track_stack();
32425 +
32426 /* Set video format - must be done first as it resets other settings */
32427 set_reg8(client, 0x41, h->video_format);
32428
32429 diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32430 --- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32431 +++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32432 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32433 wait_queue_head_t *q = 0;
32434 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32435
32436 + pax_track_stack();
32437 +
32438 /* While any outstand message on the bus exists... */
32439 do {
32440
32441 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32442 u8 tmp[512];
32443 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32444
32445 + pax_track_stack();
32446 +
32447 while (loop) {
32448
32449 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32450 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32451 --- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32452 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32453 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32454 static int __init ibmcam_init(void)
32455 {
32456 struct usbvideo_cb cbTbl;
32457 - memset(&cbTbl, 0, sizeof(cbTbl));
32458 - cbTbl.probe = ibmcam_probe;
32459 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
32460 - cbTbl.videoStart = ibmcam_video_start;
32461 - cbTbl.videoStop = ibmcam_video_stop;
32462 - cbTbl.processData = ibmcam_ProcessIsocData;
32463 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32464 - cbTbl.adjustPicture = ibmcam_adjust_picture;
32465 - cbTbl.getFPS = ibmcam_calculate_fps;
32466 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32467 + *(void **)&cbTbl.probe = ibmcam_probe;
32468 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32469 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
32470 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32471 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32472 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32473 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32474 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32475 return usbvideo_register(
32476 &cams,
32477 MAX_IBMCAM,
32478 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32479 --- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32480 +++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32481 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32482 int error;
32483
32484 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32485 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32486 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32487
32488 cam->input = input_dev = input_allocate_device();
32489 if (!input_dev) {
32490 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32491 struct usbvideo_cb cbTbl;
32492 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32493 DRIVER_DESC "\n");
32494 - memset(&cbTbl, 0, sizeof(cbTbl));
32495 - cbTbl.probe = konicawc_probe;
32496 - cbTbl.setupOnOpen = konicawc_setup_on_open;
32497 - cbTbl.processData = konicawc_process_isoc;
32498 - cbTbl.getFPS = konicawc_calculate_fps;
32499 - cbTbl.setVideoMode = konicawc_set_video_mode;
32500 - cbTbl.startDataPump = konicawc_start_data;
32501 - cbTbl.stopDataPump = konicawc_stop_data;
32502 - cbTbl.adjustPicture = konicawc_adjust_picture;
32503 - cbTbl.userFree = konicawc_free_uvd;
32504 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
32505 + *(void **)&cbTbl.probe = konicawc_probe;
32506 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32507 + *(void **)&cbTbl.processData = konicawc_process_isoc;
32508 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32509 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32510 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
32511 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32512 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32513 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
32514 return usbvideo_register(
32515 &cams,
32516 MAX_CAMERAS,
32517 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32518 --- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32519 +++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32520 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32521 int error;
32522
32523 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32524 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32525 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32526
32527 cam->input = input_dev = input_allocate_device();
32528 if (!input_dev) {
32529 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32530 --- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32531 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32532 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32533 {
32534 struct usbvideo_cb cbTbl;
32535 memset(&cbTbl, 0, sizeof(cbTbl));
32536 - cbTbl.probe = ultracam_probe;
32537 - cbTbl.setupOnOpen = ultracam_setup_on_open;
32538 - cbTbl.videoStart = ultracam_video_start;
32539 - cbTbl.videoStop = ultracam_video_stop;
32540 - cbTbl.processData = ultracam_ProcessIsocData;
32541 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32542 - cbTbl.adjustPicture = ultracam_adjust_picture;
32543 - cbTbl.getFPS = ultracam_calculate_fps;
32544 + *(void **)&cbTbl.probe = ultracam_probe;
32545 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32546 + *(void **)&cbTbl.videoStart = ultracam_video_start;
32547 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
32548 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32549 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32550 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32551 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32552 return usbvideo_register(
32553 &cams,
32554 MAX_CAMERAS,
32555 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32556 --- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32557 +++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32558 @@ -697,15 +697,15 @@ int usbvideo_register(
32559 __func__, cams, base_size, num_cams);
32560
32561 /* Copy callbacks, apply defaults for those that are not set */
32562 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32563 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32564 if (cams->cb.getFrame == NULL)
32565 - cams->cb.getFrame = usbvideo_GetFrame;
32566 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32567 if (cams->cb.disconnect == NULL)
32568 - cams->cb.disconnect = usbvideo_Disconnect;
32569 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32570 if (cams->cb.startDataPump == NULL)
32571 - cams->cb.startDataPump = usbvideo_StartDataPump;
32572 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32573 if (cams->cb.stopDataPump == NULL)
32574 - cams->cb.stopDataPump = usbvideo_StopDataPump;
32575 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32576
32577 cams->num_cameras = num_cams;
32578 cams->cam = (struct uvd *) &cams[1];
32579 diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32580 --- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32581 +++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32582 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32583 unsigned char rv, gv, bv;
32584 static unsigned char *Y, *U, *V;
32585
32586 + pax_track_stack();
32587 +
32588 frame = usbvision->curFrame;
32589 imageSize = frame->frmwidth * frame->frmheight;
32590 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32591 diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32592 --- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32593 +++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32594 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32595 EXPORT_SYMBOL_GPL(v4l2_device_register);
32596
32597 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32598 - atomic_t *instance)
32599 + atomic_unchecked_t *instance)
32600 {
32601 - int num = atomic_inc_return(instance) - 1;
32602 + int num = atomic_inc_return_unchecked(instance) - 1;
32603 int len = strlen(basename);
32604
32605 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32606 diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32607 --- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32608 +++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32609 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32610 {
32611 struct videobuf_queue q;
32612
32613 + pax_track_stack();
32614 +
32615 /* Required to make generic handler to call __videobuf_alloc */
32616 q.int_ops = &sg_ops;
32617
32618 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32619 --- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32620 +++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32621 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32622 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32623 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32624
32625 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32626 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32627 + NULL, NULL);
32628 +#else
32629 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32630 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32631 +#endif
32632 +
32633 /*
32634 * Rounding UP to nearest 4-kB boundary here...
32635 */
32636 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32637 --- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32638 +++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32639 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32640 return 0;
32641 }
32642
32643 +static inline void
32644 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32645 +{
32646 + if (phy_info->port_details) {
32647 + phy_info->port_details->rphy = rphy;
32648 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32649 + ioc->name, rphy));
32650 + }
32651 +
32652 + if (rphy) {
32653 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32654 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32655 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32656 + ioc->name, rphy, rphy->dev.release));
32657 + }
32658 +}
32659 +
32660 /* no mutex */
32661 static void
32662 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32663 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32664 return NULL;
32665 }
32666
32667 -static inline void
32668 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32669 -{
32670 - if (phy_info->port_details) {
32671 - phy_info->port_details->rphy = rphy;
32672 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32673 - ioc->name, rphy));
32674 - }
32675 -
32676 - if (rphy) {
32677 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32678 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32679 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32680 - ioc->name, rphy, rphy->dev.release));
32681 - }
32682 -}
32683 -
32684 static inline struct sas_port *
32685 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32686 {
32687 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32688 --- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32689 +++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32690 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32691
32692 h = shost_priv(SChost);
32693
32694 - if (h) {
32695 - if (h->info_kbuf == NULL)
32696 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32697 - return h->info_kbuf;
32698 - h->info_kbuf[0] = '\0';
32699 + if (!h)
32700 + return NULL;
32701
32702 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32703 - h->info_kbuf[size-1] = '\0';
32704 - }
32705 + if (h->info_kbuf == NULL)
32706 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32707 + return h->info_kbuf;
32708 + h->info_kbuf[0] = '\0';
32709 +
32710 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32711 + h->info_kbuf[size-1] = '\0';
32712
32713 return h->info_kbuf;
32714 }
32715 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32716 --- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32717 +++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32718 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32719 struct i2o_message *msg;
32720 unsigned int iop;
32721
32722 + pax_track_stack();
32723 +
32724 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32725 return -EFAULT;
32726
32727 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32728 --- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32729 +++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32730 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32731 "Array Controller Device"
32732 };
32733
32734 -static char *chtostr(u8 * chars, int n)
32735 -{
32736 - char tmp[256];
32737 - tmp[0] = 0;
32738 - return strncat(tmp, (char *)chars, n);
32739 -}
32740 -
32741 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32742 char *group)
32743 {
32744 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32745
32746 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32747 seq_printf(seq, "%-#8x", ddm_table.module_id);
32748 - seq_printf(seq, "%-29s",
32749 - chtostr(ddm_table.module_name_version, 28));
32750 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32751 seq_printf(seq, "%9d ", ddm_table.data_size);
32752 seq_printf(seq, "%8d", ddm_table.code_size);
32753
32754 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32755
32756 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32757 seq_printf(seq, "%-#8x", dst->module_id);
32758 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32759 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32760 + seq_printf(seq, "%-.28s", dst->module_name_version);
32761 + seq_printf(seq, "%-.8s", dst->date);
32762 seq_printf(seq, "%8d ", dst->module_size);
32763 seq_printf(seq, "%8d ", dst->mpb_size);
32764 seq_printf(seq, "0x%04x", dst->module_flags);
32765 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32766 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32767 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32768 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32769 - seq_printf(seq, "Vendor info : %s\n",
32770 - chtostr((u8 *) (work32 + 2), 16));
32771 - seq_printf(seq, "Product info : %s\n",
32772 - chtostr((u8 *) (work32 + 6), 16));
32773 - seq_printf(seq, "Description : %s\n",
32774 - chtostr((u8 *) (work32 + 10), 16));
32775 - seq_printf(seq, "Product rev. : %s\n",
32776 - chtostr((u8 *) (work32 + 14), 8));
32777 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32778 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32779 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32780 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32781
32782 seq_printf(seq, "Serial number : ");
32783 print_serial_number(seq, (u8 *) (work32 + 16),
32784 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32785 }
32786
32787 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32788 - seq_printf(seq, "Module name : %s\n",
32789 - chtostr(result.module_name, 24));
32790 - seq_printf(seq, "Module revision : %s\n",
32791 - chtostr(result.module_rev, 8));
32792 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32793 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32794
32795 seq_printf(seq, "Serial number : ");
32796 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32797 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32798 return 0;
32799 }
32800
32801 - seq_printf(seq, "Device name : %s\n",
32802 - chtostr(result.device_name, 64));
32803 - seq_printf(seq, "Service name : %s\n",
32804 - chtostr(result.service_name, 64));
32805 - seq_printf(seq, "Physical name : %s\n",
32806 - chtostr(result.physical_location, 64));
32807 - seq_printf(seq, "Instance number : %s\n",
32808 - chtostr(result.instance_number, 4));
32809 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32810 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32811 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32812 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32813
32814 return 0;
32815 }
32816 diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32817 --- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32818 +++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32819 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32820
32821 spin_lock_irqsave(&c->context_list_lock, flags);
32822
32823 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32824 - atomic_inc(&c->context_list_counter);
32825 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32826 + atomic_inc_unchecked(&c->context_list_counter);
32827
32828 - entry->context = atomic_read(&c->context_list_counter);
32829 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32830
32831 list_add(&entry->list, &c->context_list);
32832
32833 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32834
32835 #if BITS_PER_LONG == 64
32836 spin_lock_init(&c->context_list_lock);
32837 - atomic_set(&c->context_list_counter, 0);
32838 + atomic_set_unchecked(&c->context_list_counter, 0);
32839 INIT_LIST_HEAD(&c->context_list);
32840 #endif
32841
32842 diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32843 --- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32844 +++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32845 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32846 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32847 int ret;
32848
32849 + pax_track_stack();
32850 +
32851 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32852 return -EINVAL;
32853
32854 diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32855 --- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32856 +++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32857 @@ -118,7 +118,7 @@
32858 } while (0)
32859 #define MAX_CONFIG_LEN 40
32860
32861 -static struct kgdb_io kgdbts_io_ops;
32862 +static const struct kgdb_io kgdbts_io_ops;
32863 static char get_buf[BUFMAX];
32864 static int get_buf_cnt;
32865 static char put_buf[BUFMAX];
32866 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32867 module_put(THIS_MODULE);
32868 }
32869
32870 -static struct kgdb_io kgdbts_io_ops = {
32871 +static const struct kgdb_io kgdbts_io_ops = {
32872 .name = "kgdbts",
32873 .read_char = kgdbts_get_char,
32874 .write_char = kgdbts_put_char,
32875 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32876 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32877 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32878 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32879
32880 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32881 {
32882 - atomic_long_inc(&mcs_op_statistics[op].count);
32883 - atomic_long_add(clks, &mcs_op_statistics[op].total);
32884 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32885 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32886 if (mcs_op_statistics[op].max < clks)
32887 mcs_op_statistics[op].max = clks;
32888 }
32889 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32890 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32891 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32892 @@ -32,9 +32,9 @@
32893
32894 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32895
32896 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32897 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32898 {
32899 - unsigned long val = atomic_long_read(v);
32900 + unsigned long val = atomic_long_read_unchecked(v);
32901
32902 if (val)
32903 seq_printf(s, "%16lu %s\n", val, id);
32904 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32905 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32906
32907 for (op = 0; op < mcsop_last; op++) {
32908 - count = atomic_long_read(&mcs_op_statistics[op].count);
32909 - total = atomic_long_read(&mcs_op_statistics[op].total);
32910 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32911 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32912 max = mcs_op_statistics[op].max;
32913 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32914 count ? total / count : 0, max);
32915 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32916 --- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32917 +++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32918 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32919 * GRU statistics.
32920 */
32921 struct gru_stats_s {
32922 - atomic_long_t vdata_alloc;
32923 - atomic_long_t vdata_free;
32924 - atomic_long_t gts_alloc;
32925 - atomic_long_t gts_free;
32926 - atomic_long_t vdata_double_alloc;
32927 - atomic_long_t gts_double_allocate;
32928 - atomic_long_t assign_context;
32929 - atomic_long_t assign_context_failed;
32930 - atomic_long_t free_context;
32931 - atomic_long_t load_user_context;
32932 - atomic_long_t load_kernel_context;
32933 - atomic_long_t lock_kernel_context;
32934 - atomic_long_t unlock_kernel_context;
32935 - atomic_long_t steal_user_context;
32936 - atomic_long_t steal_kernel_context;
32937 - atomic_long_t steal_context_failed;
32938 - atomic_long_t nopfn;
32939 - atomic_long_t break_cow;
32940 - atomic_long_t asid_new;
32941 - atomic_long_t asid_next;
32942 - atomic_long_t asid_wrap;
32943 - atomic_long_t asid_reuse;
32944 - atomic_long_t intr;
32945 - atomic_long_t intr_mm_lock_failed;
32946 - atomic_long_t call_os;
32947 - atomic_long_t call_os_offnode_reference;
32948 - atomic_long_t call_os_check_for_bug;
32949 - atomic_long_t call_os_wait_queue;
32950 - atomic_long_t user_flush_tlb;
32951 - atomic_long_t user_unload_context;
32952 - atomic_long_t user_exception;
32953 - atomic_long_t set_context_option;
32954 - atomic_long_t migrate_check;
32955 - atomic_long_t migrated_retarget;
32956 - atomic_long_t migrated_unload;
32957 - atomic_long_t migrated_unload_delay;
32958 - atomic_long_t migrated_nopfn_retarget;
32959 - atomic_long_t migrated_nopfn_unload;
32960 - atomic_long_t tlb_dropin;
32961 - atomic_long_t tlb_dropin_fail_no_asid;
32962 - atomic_long_t tlb_dropin_fail_upm;
32963 - atomic_long_t tlb_dropin_fail_invalid;
32964 - atomic_long_t tlb_dropin_fail_range_active;
32965 - atomic_long_t tlb_dropin_fail_idle;
32966 - atomic_long_t tlb_dropin_fail_fmm;
32967 - atomic_long_t tlb_dropin_fail_no_exception;
32968 - atomic_long_t tlb_dropin_fail_no_exception_war;
32969 - atomic_long_t tfh_stale_on_fault;
32970 - atomic_long_t mmu_invalidate_range;
32971 - atomic_long_t mmu_invalidate_page;
32972 - atomic_long_t mmu_clear_flush_young;
32973 - atomic_long_t flush_tlb;
32974 - atomic_long_t flush_tlb_gru;
32975 - atomic_long_t flush_tlb_gru_tgh;
32976 - atomic_long_t flush_tlb_gru_zero_asid;
32977 -
32978 - atomic_long_t copy_gpa;
32979 -
32980 - atomic_long_t mesq_receive;
32981 - atomic_long_t mesq_receive_none;
32982 - atomic_long_t mesq_send;
32983 - atomic_long_t mesq_send_failed;
32984 - atomic_long_t mesq_noop;
32985 - atomic_long_t mesq_send_unexpected_error;
32986 - atomic_long_t mesq_send_lb_overflow;
32987 - atomic_long_t mesq_send_qlimit_reached;
32988 - atomic_long_t mesq_send_amo_nacked;
32989 - atomic_long_t mesq_send_put_nacked;
32990 - atomic_long_t mesq_qf_not_full;
32991 - atomic_long_t mesq_qf_locked;
32992 - atomic_long_t mesq_qf_noop_not_full;
32993 - atomic_long_t mesq_qf_switch_head_failed;
32994 - atomic_long_t mesq_qf_unexpected_error;
32995 - atomic_long_t mesq_noop_unexpected_error;
32996 - atomic_long_t mesq_noop_lb_overflow;
32997 - atomic_long_t mesq_noop_qlimit_reached;
32998 - atomic_long_t mesq_noop_amo_nacked;
32999 - atomic_long_t mesq_noop_put_nacked;
33000 + atomic_long_unchecked_t vdata_alloc;
33001 + atomic_long_unchecked_t vdata_free;
33002 + atomic_long_unchecked_t gts_alloc;
33003 + atomic_long_unchecked_t gts_free;
33004 + atomic_long_unchecked_t vdata_double_alloc;
33005 + atomic_long_unchecked_t gts_double_allocate;
33006 + atomic_long_unchecked_t assign_context;
33007 + atomic_long_unchecked_t assign_context_failed;
33008 + atomic_long_unchecked_t free_context;
33009 + atomic_long_unchecked_t load_user_context;
33010 + atomic_long_unchecked_t load_kernel_context;
33011 + atomic_long_unchecked_t lock_kernel_context;
33012 + atomic_long_unchecked_t unlock_kernel_context;
33013 + atomic_long_unchecked_t steal_user_context;
33014 + atomic_long_unchecked_t steal_kernel_context;
33015 + atomic_long_unchecked_t steal_context_failed;
33016 + atomic_long_unchecked_t nopfn;
33017 + atomic_long_unchecked_t break_cow;
33018 + atomic_long_unchecked_t asid_new;
33019 + atomic_long_unchecked_t asid_next;
33020 + atomic_long_unchecked_t asid_wrap;
33021 + atomic_long_unchecked_t asid_reuse;
33022 + atomic_long_unchecked_t intr;
33023 + atomic_long_unchecked_t intr_mm_lock_failed;
33024 + atomic_long_unchecked_t call_os;
33025 + atomic_long_unchecked_t call_os_offnode_reference;
33026 + atomic_long_unchecked_t call_os_check_for_bug;
33027 + atomic_long_unchecked_t call_os_wait_queue;
33028 + atomic_long_unchecked_t user_flush_tlb;
33029 + atomic_long_unchecked_t user_unload_context;
33030 + atomic_long_unchecked_t user_exception;
33031 + atomic_long_unchecked_t set_context_option;
33032 + atomic_long_unchecked_t migrate_check;
33033 + atomic_long_unchecked_t migrated_retarget;
33034 + atomic_long_unchecked_t migrated_unload;
33035 + atomic_long_unchecked_t migrated_unload_delay;
33036 + atomic_long_unchecked_t migrated_nopfn_retarget;
33037 + atomic_long_unchecked_t migrated_nopfn_unload;
33038 + atomic_long_unchecked_t tlb_dropin;
33039 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33040 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33041 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33042 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33043 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33044 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33045 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33046 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33047 + atomic_long_unchecked_t tfh_stale_on_fault;
33048 + atomic_long_unchecked_t mmu_invalidate_range;
33049 + atomic_long_unchecked_t mmu_invalidate_page;
33050 + atomic_long_unchecked_t mmu_clear_flush_young;
33051 + atomic_long_unchecked_t flush_tlb;
33052 + atomic_long_unchecked_t flush_tlb_gru;
33053 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33054 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33055 +
33056 + atomic_long_unchecked_t copy_gpa;
33057 +
33058 + atomic_long_unchecked_t mesq_receive;
33059 + atomic_long_unchecked_t mesq_receive_none;
33060 + atomic_long_unchecked_t mesq_send;
33061 + atomic_long_unchecked_t mesq_send_failed;
33062 + atomic_long_unchecked_t mesq_noop;
33063 + atomic_long_unchecked_t mesq_send_unexpected_error;
33064 + atomic_long_unchecked_t mesq_send_lb_overflow;
33065 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33066 + atomic_long_unchecked_t mesq_send_amo_nacked;
33067 + atomic_long_unchecked_t mesq_send_put_nacked;
33068 + atomic_long_unchecked_t mesq_qf_not_full;
33069 + atomic_long_unchecked_t mesq_qf_locked;
33070 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33071 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33072 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33073 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33074 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33075 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33076 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33077 + atomic_long_unchecked_t mesq_noop_put_nacked;
33078
33079 };
33080
33081 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33082 cchop_deallocate, tghop_invalidate, mcsop_last};
33083
33084 struct mcs_op_statistic {
33085 - atomic_long_t count;
33086 - atomic_long_t total;
33087 + atomic_long_unchecked_t count;
33088 + atomic_long_unchecked_t total;
33089 unsigned long max;
33090 };
33091
33092 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33093
33094 #define STAT(id) do { \
33095 if (gru_options & OPT_STATS) \
33096 - atomic_long_inc(&gru_stats.id); \
33097 + atomic_long_inc_unchecked(&gru_stats.id); \
33098 } while (0)
33099
33100 #ifdef CONFIG_SGI_GRU_DEBUG
33101 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33102 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33103 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33104 @@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33105 /* found in xpc_main.c */
33106 extern struct device *xpc_part;
33107 extern struct device *xpc_chan;
33108 -extern struct xpc_arch_operations xpc_arch_ops;
33109 +extern const struct xpc_arch_operations xpc_arch_ops;
33110 extern int xpc_disengage_timelimit;
33111 extern int xpc_disengage_timedout;
33112 extern int xpc_activate_IRQ_rcvd;
33113 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33114 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33115 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33116 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33117 .notifier_call = xpc_system_die,
33118 };
33119
33120 -struct xpc_arch_operations xpc_arch_ops;
33121 +const struct xpc_arch_operations xpc_arch_ops;
33122
33123 /*
33124 * Timer function to enforce the timelimit on the partition disengage.
33125 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33126 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33127 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33128 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33129 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33130 }
33131
33132 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33133 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33134 .setup_partitions = xpc_setup_partitions_sn2,
33135 .teardown_partitions = xpc_teardown_partitions_sn2,
33136 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33137 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33138 int ret;
33139 size_t buf_size;
33140
33141 - xpc_arch_ops = xpc_arch_ops_sn2;
33142 + pax_open_kernel();
33143 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33144 + pax_close_kernel();
33145
33146 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33147 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33148 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33149 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33150 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33151 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33152 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33153 }
33154
33155 -static struct xpc_arch_operations xpc_arch_ops_uv = {
33156 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
33157 .setup_partitions = xpc_setup_partitions_uv,
33158 .teardown_partitions = xpc_teardown_partitions_uv,
33159 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33160 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33161 int
33162 xpc_init_uv(void)
33163 {
33164 - xpc_arch_ops = xpc_arch_ops_uv;
33165 + pax_open_kernel();
33166 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33167 + pax_close_kernel();
33168
33169 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33170 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33171 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33172 --- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33173 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33174 @@ -289,7 +289,7 @@ struct xpc_interface {
33175 xpc_notify_func, void *);
33176 void (*received) (short, int, void *);
33177 enum xp_retval (*partid_to_nasids) (short, void *);
33178 -};
33179 +} __no_const;
33180
33181 extern struct xpc_interface xpc_interface;
33182
33183 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33184 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33185 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33186 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33187 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33188 unsigned long timeo = jiffies + HZ;
33189
33190 + pax_track_stack();
33191 +
33192 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33193 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33194 goto sleep;
33195 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33196 unsigned long initial_adr;
33197 int initial_len = len;
33198
33199 + pax_track_stack();
33200 +
33201 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33202 adr += chip->start;
33203 initial_adr = adr;
33204 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33205 int retries = 3;
33206 int ret;
33207
33208 + pax_track_stack();
33209 +
33210 adr += chip->start;
33211
33212 retry:
33213 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33214 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33215 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33216 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33217 unsigned long cmd_addr;
33218 struct cfi_private *cfi = map->fldrv_priv;
33219
33220 + pax_track_stack();
33221 +
33222 adr += chip->start;
33223
33224 /* Ensure cmd read/writes are aligned. */
33225 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33226 DECLARE_WAITQUEUE(wait, current);
33227 int wbufsize, z;
33228
33229 + pax_track_stack();
33230 +
33231 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33232 if (adr & (map_bankwidth(map)-1))
33233 return -EINVAL;
33234 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33235 DECLARE_WAITQUEUE(wait, current);
33236 int ret = 0;
33237
33238 + pax_track_stack();
33239 +
33240 adr += chip->start;
33241
33242 /* Let's determine this according to the interleave only once */
33243 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33244 unsigned long timeo = jiffies + HZ;
33245 DECLARE_WAITQUEUE(wait, current);
33246
33247 + pax_track_stack();
33248 +
33249 adr += chip->start;
33250
33251 /* Let's determine this according to the interleave only once */
33252 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33253 unsigned long timeo = jiffies + HZ;
33254 DECLARE_WAITQUEUE(wait, current);
33255
33256 + pax_track_stack();
33257 +
33258 adr += chip->start;
33259
33260 /* Let's determine this according to the interleave only once */
33261 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33262 --- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33263 +++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33264 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33265
33266 /* The ECC will not be calculated correctly if less than 512 is written */
33267 /* DBB-
33268 - if (len != 0x200 && eccbuf)
33269 + if (len != 0x200)
33270 printk(KERN_WARNING
33271 "ECC needs a full sector write (adr: %lx size %lx)\n",
33272 (long) to, (long) len);
33273 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33274 --- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33275 +++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33276 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33277 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33278
33279 /* Don't allow read past end of device */
33280 - if (from >= this->totlen)
33281 + if (from >= this->totlen || !len)
33282 return -EINVAL;
33283
33284 /* Don't allow a single read to cross a 512-byte block boundary */
33285 diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33286 --- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33287 +++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33288 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33289 loff_t offset;
33290 uint16_t srcunitswap = cpu_to_le16(srcunit);
33291
33292 + pax_track_stack();
33293 +
33294 eun = &part->EUNInfo[srcunit];
33295 xfer = &part->XferInfo[xferunit];
33296 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33297 diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33298 --- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33299 +++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33300 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33301 struct inftl_oob oob;
33302 size_t retlen;
33303
33304 + pax_track_stack();
33305 +
33306 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33307 "pending=%d)\n", inftl, thisVUC, pendingblock);
33308
33309 diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33310 --- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33311 +++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33312 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33313 struct INFTLPartition *ip;
33314 size_t retlen;
33315
33316 + pax_track_stack();
33317 +
33318 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33319
33320 /*
33321 diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33322 --- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33323 +++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33324 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33325 {
33326 map_word pfow_val[4];
33327
33328 + pax_track_stack();
33329 +
33330 /* Check identification string */
33331 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33332 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33333 diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33334 --- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33335 +++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33336 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33337 u_long size;
33338 struct mtd_info_user info;
33339
33340 + pax_track_stack();
33341 +
33342 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33343
33344 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33345 diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33346 --- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33347 +++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33348 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33349 int inplace = 1;
33350 size_t retlen;
33351
33352 + pax_track_stack();
33353 +
33354 memset(BlockMap, 0xff, sizeof(BlockMap));
33355 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33356
33357 diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33358 --- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33359 +++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33360 @@ -23,6 +23,7 @@
33361 #include <asm/errno.h>
33362 #include <linux/delay.h>
33363 #include <linux/slab.h>
33364 +#include <linux/sched.h>
33365 #include <linux/mtd/mtd.h>
33366 #include <linux/mtd/nand.h>
33367 #include <linux/mtd/nftl.h>
33368 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33369 struct mtd_info *mtd = nftl->mbd.mtd;
33370 unsigned int i;
33371
33372 + pax_track_stack();
33373 +
33374 /* Assume logical EraseSize == physical erasesize for starting the scan.
33375 We'll sort it out later if we find a MediaHeader which says otherwise */
33376 /* Actually, we won't. The new DiskOnChip driver has already scanned
33377 diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33378 --- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33379 +++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33380 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33381 static int __init bytes_str_to_int(const char *str)
33382 {
33383 char *endp;
33384 - unsigned long result;
33385 + unsigned long result, scale = 1;
33386
33387 result = simple_strtoul(str, &endp, 0);
33388 if (str == endp || result >= INT_MAX) {
33389 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33390
33391 switch (*endp) {
33392 case 'G':
33393 - result *= 1024;
33394 + scale *= 1024;
33395 case 'M':
33396 - result *= 1024;
33397 + scale *= 1024;
33398 case 'K':
33399 - result *= 1024;
33400 + scale *= 1024;
33401 if (endp[1] == 'i' && endp[2] == 'B')
33402 endp += 2;
33403 case '\0':
33404 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33405 return -EINVAL;
33406 }
33407
33408 - return result;
33409 + if ((intoverflow_t)result*scale >= INT_MAX) {
33410 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33411 + str);
33412 + return -EINVAL;
33413 + }
33414 +
33415 + return result*scale;
33416 }
33417
33418 /**
33419 diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33420 --- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33421 +++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33422 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33423 int rc = 0;
33424 u32 magic, csum;
33425
33426 + pax_track_stack();
33427 +
33428 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33429 goto test_nvram_done;
33430
33431 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33432 --- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33433 +++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33434 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33435 */
33436 struct l2t_skb_cb {
33437 arp_failure_handler_func arp_failure_handler;
33438 -};
33439 +} __no_const;
33440
33441 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33442
33443 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33444 --- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33445 +++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33446 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33447 int i, addr, ret;
33448 struct t3_vpd vpd;
33449
33450 + pax_track_stack();
33451 +
33452 /*
33453 * Card information is normally at VPD_BASE but some early cards had
33454 * it at 0.
33455 diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33456 --- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33457 +++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33458 @@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33459 /* check for link */
33460 switch (hw->phy.media_type) {
33461 case e1000_media_type_copper:
33462 - func->setup_physical_interface = e1000_setup_copper_link_82571;
33463 - func->check_for_link = e1000e_check_for_copper_link;
33464 - func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33465 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33466 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33467 + *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33468 break;
33469 case e1000_media_type_fiber:
33470 - func->setup_physical_interface =
33471 + *(void **)&func->setup_physical_interface =
33472 e1000_setup_fiber_serdes_link_82571;
33473 - func->check_for_link = e1000e_check_for_fiber_link;
33474 - func->get_link_up_info =
33475 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33476 + *(void **)&func->get_link_up_info =
33477 e1000e_get_speed_and_duplex_fiber_serdes;
33478 break;
33479 case e1000_media_type_internal_serdes:
33480 - func->setup_physical_interface =
33481 + *(void **)&func->setup_physical_interface =
33482 e1000_setup_fiber_serdes_link_82571;
33483 - func->check_for_link = e1000_check_for_serdes_link_82571;
33484 - func->get_link_up_info =
33485 + *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33486 + *(void **)&func->get_link_up_info =
33487 e1000e_get_speed_and_duplex_fiber_serdes;
33488 break;
33489 default:
33490 @@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33491 switch (hw->mac.type) {
33492 case e1000_82574:
33493 case e1000_82583:
33494 - func->check_mng_mode = e1000_check_mng_mode_82574;
33495 - func->led_on = e1000_led_on_82574;
33496 + *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33497 + *(void **)&func->led_on = e1000_led_on_82574;
33498 break;
33499 default:
33500 - func->check_mng_mode = e1000e_check_mng_mode_generic;
33501 - func->led_on = e1000e_led_on_generic;
33502 + *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33503 + *(void **)&func->led_on = e1000e_led_on_generic;
33504 break;
33505 }
33506
33507 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33508 temp = er32(ICRXDMTC);
33509 }
33510
33511 -static struct e1000_mac_operations e82571_mac_ops = {
33512 +static const struct e1000_mac_operations e82571_mac_ops = {
33513 /* .check_mng_mode: mac type dependent */
33514 /* .check_for_link: media type dependent */
33515 .id_led_init = e1000e_id_led_init,
33516 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33517 .setup_led = e1000e_setup_led_generic,
33518 };
33519
33520 -static struct e1000_phy_operations e82_phy_ops_igp = {
33521 +static const struct e1000_phy_operations e82_phy_ops_igp = {
33522 .acquire_phy = e1000_get_hw_semaphore_82571,
33523 .check_reset_block = e1000e_check_reset_block_generic,
33524 .commit_phy = NULL,
33525 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33526 .cfg_on_link_up = NULL,
33527 };
33528
33529 -static struct e1000_phy_operations e82_phy_ops_m88 = {
33530 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
33531 .acquire_phy = e1000_get_hw_semaphore_82571,
33532 .check_reset_block = e1000e_check_reset_block_generic,
33533 .commit_phy = e1000e_phy_sw_reset,
33534 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33535 .cfg_on_link_up = NULL,
33536 };
33537
33538 -static struct e1000_phy_operations e82_phy_ops_bm = {
33539 +static const struct e1000_phy_operations e82_phy_ops_bm = {
33540 .acquire_phy = e1000_get_hw_semaphore_82571,
33541 .check_reset_block = e1000e_check_reset_block_generic,
33542 .commit_phy = e1000e_phy_sw_reset,
33543 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33544 .cfg_on_link_up = NULL,
33545 };
33546
33547 -static struct e1000_nvm_operations e82571_nvm_ops = {
33548 +static const struct e1000_nvm_operations e82571_nvm_ops = {
33549 .acquire_nvm = e1000_acquire_nvm_82571,
33550 .read_nvm = e1000e_read_nvm_eerd,
33551 .release_nvm = e1000_release_nvm_82571,
33552 diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33553 --- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33554 +++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33555 @@ -375,9 +375,9 @@ struct e1000_info {
33556 u32 pba;
33557 u32 max_hw_frame_size;
33558 s32 (*get_variants)(struct e1000_adapter *);
33559 - struct e1000_mac_operations *mac_ops;
33560 - struct e1000_phy_operations *phy_ops;
33561 - struct e1000_nvm_operations *nvm_ops;
33562 + const struct e1000_mac_operations *mac_ops;
33563 + const struct e1000_phy_operations *phy_ops;
33564 + const struct e1000_nvm_operations *nvm_ops;
33565 };
33566
33567 /* hardware capability, feature, and workaround flags */
33568 diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33569 --- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33570 +++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
33571 @@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
33572 /* check for link */
33573 switch (hw->phy.media_type) {
33574 case e1000_media_type_copper:
33575 - func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33576 - func->check_for_link = e1000e_check_for_copper_link;
33577 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33578 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33579 break;
33580 case e1000_media_type_fiber:
33581 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33582 - func->check_for_link = e1000e_check_for_fiber_link;
33583 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33584 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33585 break;
33586 case e1000_media_type_internal_serdes:
33587 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33588 - func->check_for_link = e1000e_check_for_serdes_link;
33589 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33590 + *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
33591 break;
33592 default:
33593 return -E1000_ERR_CONFIG;
33594 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33595 temp = er32(ICRXDMTC);
33596 }
33597
33598 -static struct e1000_mac_operations es2_mac_ops = {
33599 +static const struct e1000_mac_operations es2_mac_ops = {
33600 .id_led_init = e1000e_id_led_init,
33601 .check_mng_mode = e1000e_check_mng_mode_generic,
33602 /* check_for_link dependent on media type */
33603 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33604 .setup_led = e1000e_setup_led_generic,
33605 };
33606
33607 -static struct e1000_phy_operations es2_phy_ops = {
33608 +static const struct e1000_phy_operations es2_phy_ops = {
33609 .acquire_phy = e1000_acquire_phy_80003es2lan,
33610 .check_reset_block = e1000e_check_reset_block_generic,
33611 .commit_phy = e1000e_phy_sw_reset,
33612 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33613 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33614 };
33615
33616 -static struct e1000_nvm_operations es2_nvm_ops = {
33617 +static const struct e1000_nvm_operations es2_nvm_ops = {
33618 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33619 .read_nvm = e1000e_read_nvm_eerd,
33620 .release_nvm = e1000_release_nvm_80003es2lan,
33621 diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33622 --- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33623 +++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
33624 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
33625
33626 /* Function pointers for the PHY. */
33627 struct e1000_phy_operations {
33628 - s32 (*acquire_phy)(struct e1000_hw *);
33629 - s32 (*check_polarity)(struct e1000_hw *);
33630 - s32 (*check_reset_block)(struct e1000_hw *);
33631 - s32 (*commit_phy)(struct e1000_hw *);
33632 - s32 (*force_speed_duplex)(struct e1000_hw *);
33633 - s32 (*get_cfg_done)(struct e1000_hw *hw);
33634 - s32 (*get_cable_length)(struct e1000_hw *);
33635 - s32 (*get_phy_info)(struct e1000_hw *);
33636 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
33637 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33638 - void (*release_phy)(struct e1000_hw *);
33639 - s32 (*reset_phy)(struct e1000_hw *);
33640 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
33641 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33642 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
33643 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33644 - s32 (*cfg_on_link_up)(struct e1000_hw *);
33645 + s32 (* acquire_phy)(struct e1000_hw *);
33646 + s32 (* check_polarity)(struct e1000_hw *);
33647 + s32 (* check_reset_block)(struct e1000_hw *);
33648 + s32 (* commit_phy)(struct e1000_hw *);
33649 + s32 (* force_speed_duplex)(struct e1000_hw *);
33650 + s32 (* get_cfg_done)(struct e1000_hw *hw);
33651 + s32 (* get_cable_length)(struct e1000_hw *);
33652 + s32 (* get_phy_info)(struct e1000_hw *);
33653 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
33654 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33655 + void (* release_phy)(struct e1000_hw *);
33656 + s32 (* reset_phy)(struct e1000_hw *);
33657 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
33658 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
33659 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
33660 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33661 + s32 (* cfg_on_link_up)(struct e1000_hw *);
33662 };
33663
33664 /* Function pointers for the NVM. */
33665 struct e1000_nvm_operations {
33666 - s32 (*acquire_nvm)(struct e1000_hw *);
33667 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33668 - void (*release_nvm)(struct e1000_hw *);
33669 - s32 (*update_nvm)(struct e1000_hw *);
33670 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
33671 - s32 (*validate_nvm)(struct e1000_hw *);
33672 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33673 + s32 (* const acquire_nvm)(struct e1000_hw *);
33674 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33675 + void (* const release_nvm)(struct e1000_hw *);
33676 + s32 (* const update_nvm)(struct e1000_hw *);
33677 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
33678 + s32 (* const validate_nvm)(struct e1000_hw *);
33679 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33680 };
33681
33682 struct e1000_mac_info {
33683 diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33684 --- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33685 +++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
33686 @@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
33687 phy->addr = 1;
33688 phy->reset_delay_us = 100;
33689
33690 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33691 - phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33692 - phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33693 - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33694 - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33695 - phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33696 - phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33697 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33698 + *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33699 + *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33700 + *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33701 + *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33702 + *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33703 + *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33704 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33705
33706 /*
33707 @@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
33708 phy->type = e1000e_get_phy_type_from_id(phy->id);
33709
33710 if (phy->type == e1000_phy_82577) {
33711 - phy->ops.check_polarity = e1000_check_polarity_82577;
33712 - phy->ops.force_speed_duplex =
33713 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
33714 + *(void **)&phy->ops.force_speed_duplex =
33715 e1000_phy_force_speed_duplex_82577;
33716 - phy->ops.get_cable_length = e1000_get_cable_length_82577;
33717 - phy->ops.get_phy_info = e1000_get_phy_info_82577;
33718 - phy->ops.commit_phy = e1000e_phy_sw_reset;
33719 + *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
33720 + *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
33721 + *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
33722 }
33723
33724 out:
33725 @@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
33726 */
33727 ret_val = e1000e_determine_phy_address(hw);
33728 if (ret_val) {
33729 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33730 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33731 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33732 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33733 ret_val = e1000e_determine_phy_address(hw);
33734 if (ret_val)
33735 return ret_val;
33736 @@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
33737 case IGP03E1000_E_PHY_ID:
33738 phy->type = e1000_phy_igp_3;
33739 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33740 - phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33741 - phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33742 + *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33743 + *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33744 break;
33745 case IFE_E_PHY_ID:
33746 case IFE_PLUS_E_PHY_ID:
33747 @@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
33748 case BME1000_E_PHY_ID:
33749 phy->type = e1000_phy_bm;
33750 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33751 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33752 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33753 - hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33754 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33755 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33756 + *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33757 break;
33758 default:
33759 return -E1000_ERR_PHY;
33760 break;
33761 }
33762
33763 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33764 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33765
33766 return 0;
33767 }
33768 @@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
33769 case e1000_ich9lan:
33770 case e1000_ich10lan:
33771 /* ID LED init */
33772 - mac->ops.id_led_init = e1000e_id_led_init;
33773 + *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
33774 /* setup LED */
33775 - mac->ops.setup_led = e1000e_setup_led_generic;
33776 + *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
33777 /* cleanup LED */
33778 - mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33779 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33780 /* turn on/off LED */
33781 - mac->ops.led_on = e1000_led_on_ich8lan;
33782 - mac->ops.led_off = e1000_led_off_ich8lan;
33783 + *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
33784 + *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
33785 break;
33786 case e1000_pchlan:
33787 /* ID LED init */
33788 - mac->ops.id_led_init = e1000_id_led_init_pchlan;
33789 + *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
33790 /* setup LED */
33791 - mac->ops.setup_led = e1000_setup_led_pchlan;
33792 + *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
33793 /* cleanup LED */
33794 - mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33795 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33796 /* turn on/off LED */
33797 - mac->ops.led_on = e1000_led_on_pchlan;
33798 - mac->ops.led_off = e1000_led_off_pchlan;
33799 + *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
33800 + *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
33801 break;
33802 default:
33803 break;
33804 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33805 }
33806 }
33807
33808 -static struct e1000_mac_operations ich8_mac_ops = {
33809 +static const struct e1000_mac_operations ich8_mac_ops = {
33810 .id_led_init = e1000e_id_led_init,
33811 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33812 .check_for_link = e1000_check_for_copper_link_ich8lan,
33813 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33814 /* id_led_init dependent on mac type */
33815 };
33816
33817 -static struct e1000_phy_operations ich8_phy_ops = {
33818 +static const struct e1000_phy_operations ich8_phy_ops = {
33819 .acquire_phy = e1000_acquire_swflag_ich8lan,
33820 .check_reset_block = e1000_check_reset_block_ich8lan,
33821 .commit_phy = NULL,
33822 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33823 .write_phy_reg = e1000e_write_phy_reg_igp,
33824 };
33825
33826 -static struct e1000_nvm_operations ich8_nvm_ops = {
33827 +static const struct e1000_nvm_operations ich8_nvm_ops = {
33828 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33829 .read_nvm = e1000_read_nvm_ich8lan,
33830 .release_nvm = e1000_release_nvm_ich8lan,
33831 diff -urNp linux-2.6.32.45/drivers/net/e1000e/netdev.c linux-2.6.32.45/drivers/net/e1000e/netdev.c
33832 --- linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
33833 +++ linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
33834 @@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
33835
33836 err = -EIO;
33837
33838 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33839 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33840 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33841 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33842 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33843 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33844
33845 err = ei->get_variants(adapter);
33846 if (err)
33847 diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33848 --- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33849 +++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33850 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33851 unsigned char buf[512];
33852 int count1;
33853
33854 + pax_track_stack();
33855 +
33856 if (!count)
33857 return;
33858
33859 diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33860 --- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33861 +++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33862 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33863 NULL,
33864 };
33865
33866 -static struct sysfs_ops veth_pool_ops = {
33867 +static const struct sysfs_ops veth_pool_ops = {
33868 .show = veth_pool_show,
33869 .store = veth_pool_store,
33870 };
33871 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33872 --- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33873 +++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
33874 @@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
33875 ? true : false;
33876
33877 /* physical interface link setup */
33878 - mac->ops.setup_physical_interface =
33879 + *(void **)&mac->ops.setup_physical_interface =
33880 (hw->phy.media_type == e1000_media_type_copper)
33881 ? igb_setup_copper_link_82575
33882 : igb_setup_serdes_link_82575;
33883 @@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
33884
33885 /* PHY function pointers */
33886 if (igb_sgmii_active_82575(hw)) {
33887 - phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33888 - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33889 - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33890 + *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33891 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33892 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33893 } else {
33894 - phy->ops.reset = igb_phy_hw_reset;
33895 - phy->ops.read_reg = igb_read_phy_reg_igp;
33896 - phy->ops.write_reg = igb_write_phy_reg_igp;
33897 + *(void **)&phy->ops.reset = igb_phy_hw_reset;
33898 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
33899 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
33900 }
33901
33902 /* set lan id */
33903 @@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
33904 switch (phy->id) {
33905 case M88E1111_I_PHY_ID:
33906 phy->type = e1000_phy_m88;
33907 - phy->ops.get_phy_info = igb_get_phy_info_m88;
33908 - phy->ops.get_cable_length = igb_get_cable_length_m88;
33909 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33910 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
33911 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
33912 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33913 break;
33914 case IGP03E1000_E_PHY_ID:
33915 phy->type = e1000_phy_igp_3;
33916 - phy->ops.get_phy_info = igb_get_phy_info_igp;
33917 - phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33918 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33919 - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33920 - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33921 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
33922 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33923 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33924 + *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33925 + *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33926 break;
33927 default:
33928 return -E1000_ERR_PHY;
33929 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33930 wr32(E1000_VT_CTL, vt_ctl);
33931 }
33932
33933 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
33934 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33935 .reset_hw = igb_reset_hw_82575,
33936 .init_hw = igb_init_hw_82575,
33937 .check_for_link = igb_check_for_link_82575,
33938 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33939 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33940 };
33941
33942 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
33943 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33944 .acquire = igb_acquire_phy_82575,
33945 .get_cfg_done = igb_get_cfg_done_82575,
33946 .release = igb_release_phy_82575,
33947 };
33948
33949 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33950 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33951 .acquire = igb_acquire_nvm_82575,
33952 .read = igb_read_nvm_eerd,
33953 .release = igb_release_nvm_82575,
33954 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33955 --- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33956 +++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
33957 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
33958 };
33959
33960 struct e1000_nvm_operations {
33961 - s32 (*acquire)(struct e1000_hw *);
33962 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
33963 - void (*release)(struct e1000_hw *);
33964 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33965 + s32 (* const acquire)(struct e1000_hw *);
33966 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
33967 + void (* const release)(struct e1000_hw *);
33968 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
33969 };
33970
33971 struct e1000_info {
33972 s32 (*get_invariants)(struct e1000_hw *);
33973 - struct e1000_mac_operations *mac_ops;
33974 - struct e1000_phy_operations *phy_ops;
33975 - struct e1000_nvm_operations *nvm_ops;
33976 + const struct e1000_mac_operations *mac_ops;
33977 + const struct e1000_phy_operations *phy_ops;
33978 + const struct e1000_nvm_operations *nvm_ops;
33979 };
33980
33981 extern const struct e1000_info e1000_82575_info;
33982 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_mbx.c linux-2.6.32.45/drivers/net/igb/e1000_mbx.c
33983 --- linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
33984 +++ linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
33985 @@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
33986
33987 mbx->size = E1000_VFMAILBOX_SIZE;
33988
33989 - mbx->ops.read = igb_read_mbx_pf;
33990 - mbx->ops.write = igb_write_mbx_pf;
33991 - mbx->ops.read_posted = igb_read_posted_mbx;
33992 - mbx->ops.write_posted = igb_write_posted_mbx;
33993 - mbx->ops.check_for_msg = igb_check_for_msg_pf;
33994 - mbx->ops.check_for_ack = igb_check_for_ack_pf;
33995 - mbx->ops.check_for_rst = igb_check_for_rst_pf;
33996 + *(void **)&mbx->ops.read = igb_read_mbx_pf;
33997 + *(void **)&mbx->ops.write = igb_write_mbx_pf;
33998 + *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
33999 + *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
34000 + *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
34001 + *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
34002 + *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
34003
34004 mbx->stats.msgs_tx = 0;
34005 mbx->stats.msgs_rx = 0;
34006 diff -urNp linux-2.6.32.45/drivers/net/igb/igb_main.c linux-2.6.32.45/drivers/net/igb/igb_main.c
34007 --- linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
34008 +++ linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
34009 @@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
34010 /* setup the private structure */
34011 hw->back = adapter;
34012 /* Copy the default MAC, PHY and NVM function pointers */
34013 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34014 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34015 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34016 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34017 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34018 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34019 /* Initialize skew-specific constants */
34020 err = ei->get_invariants(hw);
34021 if (err)
34022 diff -urNp linux-2.6.32.45/drivers/net/igbvf/mbx.c linux-2.6.32.45/drivers/net/igbvf/mbx.c
34023 --- linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
34024 +++ linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
34025 @@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
34026
34027 mbx->size = E1000_VFMAILBOX_SIZE;
34028
34029 - mbx->ops.read = e1000_read_mbx_vf;
34030 - mbx->ops.write = e1000_write_mbx_vf;
34031 - mbx->ops.read_posted = e1000_read_posted_mbx;
34032 - mbx->ops.write_posted = e1000_write_posted_mbx;
34033 - mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34034 - mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34035 - mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34036 + *(void **)&mbx->ops.read = e1000_read_mbx_vf;
34037 + *(void **)&mbx->ops.write = e1000_write_mbx_vf;
34038 + *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
34039 + *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
34040 + *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34041 + *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34042 + *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34043
34044 mbx->stats.msgs_tx = 0;
34045 mbx->stats.msgs_rx = 0;
34046 diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.c linux-2.6.32.45/drivers/net/igbvf/vf.c
34047 --- linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
34048 +++ linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
34049 @@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
34050
34051 /* Function pointers */
34052 /* reset */
34053 - mac->ops.reset_hw = e1000_reset_hw_vf;
34054 + *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
34055 /* hw initialization */
34056 - mac->ops.init_hw = e1000_init_hw_vf;
34057 + *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
34058 /* check for link */
34059 - mac->ops.check_for_link = e1000_check_for_link_vf;
34060 + *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
34061 /* link info */
34062 - mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34063 + *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34064 /* multicast address update */
34065 - mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34066 + *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34067 /* set mac address */
34068 - mac->ops.rar_set = e1000_rar_set_vf;
34069 + *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
34070 /* read mac address */
34071 - mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34072 + *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34073 /* set vlan filter table array */
34074 - mac->ops.set_vfta = e1000_set_vfta_vf;
34075 + *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34076
34077 return E1000_SUCCESS;
34078 }
34079 @@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34080 **/
34081 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34082 {
34083 - hw->mac.ops.init_params = e1000_init_mac_params_vf;
34084 - hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34085 + *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34086 + *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34087 }
34088
34089 /**
34090 diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
34091 --- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34092 +++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34093 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34094 NULL
34095 };
34096
34097 -static struct sysfs_ops veth_cnx_sysfs_ops = {
34098 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
34099 .show = veth_cnx_attribute_show
34100 };
34101
34102 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34103 NULL
34104 };
34105
34106 -static struct sysfs_ops veth_port_sysfs_ops = {
34107 +static const struct sysfs_ops veth_port_sysfs_ops = {
34108 .show = veth_port_attribute_show
34109 };
34110
34111 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
34112 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34113 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34114 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34115 u32 rctl;
34116 int i;
34117
34118 + pax_track_stack();
34119 +
34120 /* Check for Promiscuous and All Multicast modes */
34121
34122 rctl = IXGB_READ_REG(hw, RCTL);
34123 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
34124 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34125 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34126 @@ -260,6 +260,9 @@ void __devinit
34127 ixgb_check_options(struct ixgb_adapter *adapter)
34128 {
34129 int bd = adapter->bd_number;
34130 +
34131 + pax_track_stack();
34132 +
34133 if (bd >= IXGB_MAX_NIC) {
34134 printk(KERN_NOTICE
34135 "Warning: no configuration for board #%i\n", bd);
34136 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c
34137 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34138 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34139 @@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34140
34141 /* Overwrite the link function pointers if copper PHY */
34142 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34143 - mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34144 - mac->ops.get_link_capabilities =
34145 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34146 + *(void **)&mac->ops.get_link_capabilities =
34147 &ixgbe_get_copper_link_capabilities_82598;
34148 }
34149
34150 switch (hw->phy.type) {
34151 case ixgbe_phy_tn:
34152 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34153 - phy->ops.get_firmware_version =
34154 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34155 + *(void **)&phy->ops.get_firmware_version =
34156 &ixgbe_get_phy_firmware_version_tnx;
34157 break;
34158 case ixgbe_phy_nl:
34159 - phy->ops.reset = &ixgbe_reset_phy_nl;
34160 + *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34161
34162 /* Call SFP+ identify routine to get the SFP+ module type */
34163 ret_val = phy->ops.identify_sfp(hw);
34164 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c
34165 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34166 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34167 @@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34168 struct ixgbe_mac_info *mac = &hw->mac;
34169 if (hw->phy.multispeed_fiber) {
34170 /* Set up dual speed SFP+ support */
34171 - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34172 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34173 } else {
34174 - mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34175 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34176 }
34177 }
34178
34179 @@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34180 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34181 ixgbe_init_mac_link_ops_82599(hw);
34182
34183 - hw->phy.ops.reset = NULL;
34184 + *(void **)&hw->phy.ops.reset = NULL;
34185
34186 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34187 &data_offset);
34188 @@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34189
34190 /* If copper media, overwrite with copper function pointers */
34191 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34192 - mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34193 - mac->ops.get_link_capabilities =
34194 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34195 + *(void **)&mac->ops.get_link_capabilities =
34196 &ixgbe_get_copper_link_capabilities_82599;
34197 }
34198
34199 /* Set necessary function pointers based on phy type */
34200 switch (hw->phy.type) {
34201 case ixgbe_phy_tn:
34202 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34203 - phy->ops.get_firmware_version =
34204 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34205 + *(void **)&phy->ops.get_firmware_version =
34206 &ixgbe_get_phy_firmware_version_tnx;
34207 break;
34208 default:
34209 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c
34210 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34211 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34212 @@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34213 adapter->bd_number = cards_found;
34214
34215 /* Setup hw api */
34216 - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34217 + memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34218 hw->mac.type = ii->mac;
34219
34220 /* EEPROM */
34221 - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34222 + memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34223 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34224 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34225 if (!(eec & (1 << 8)))
34226 - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34227 + *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34228
34229 /* PHY */
34230 - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34231 + memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34232 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34233 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34234 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
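[Note on the ixgbe hunks above] These changes replace plain stores into the mac/phy/eeprom ops tables with writes through a *(void **) cast, and cast the memcpy destinations to (void *). In the grsecurity/PaX tree such function-pointer-only structures are made read-only ("constified"), so a direct assignment to a member no longer compiles; the cast writes through the member's address instead. A minimal standalone sketch of both idioms, using a hypothetical ops type rather than the real ixgbe definitions:

#include <string.h>

struct ops {
	int (*setup_link)(void);	/* const-qualified once the struct is constified */
};

static int setup_link_fiber(void) { return 0; }

static void install_ops(struct ops *dst, const struct ops *defaults)
{
	/* bulk copy: the (void *) cast sidesteps the const-qualified destination */
	memcpy((void *)dst, defaults, sizeof(*dst));

	/* single member: write through its address instead of assigning directly */
	*(void **)&dst->setup_link = &setup_link_fiber;
}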
34235 diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
34236 --- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34237 +++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34238 @@ -38,6 +38,7 @@
34239 #include <linux/errno.h>
34240 #include <linux/pci.h>
34241 #include <linux/dma-mapping.h>
34242 +#include <linux/sched.h>
34243
34244 #include <linux/mlx4/device.h>
34245 #include <linux/mlx4/doorbell.h>
34246 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34247 u64 icm_size;
34248 int err;
34249
34250 + pax_track_stack();
34251 +
34252 err = mlx4_QUERY_FW(dev);
34253 if (err) {
34254 if (err == -EACCES)
34255 diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
34256 --- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34257 +++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34258 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34259 int i, num_irqs, err;
34260 u8 first_ldg;
34261
34262 + pax_track_stack();
34263 +
34264 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34265 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34266 ldg_num_map[i] = first_ldg + i;
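[Note on the mlx4/niu hunks above] Both insert a pax_track_stack() call at the top of functions that keep large arrays on the stack. pax_track_stack() is a grsecurity/PaX-only helper tied to its stack tracking and clearing machinery; the pattern in the patch is simply to call it before such bulky locals are used. Illustrative shape of the change, with a made-up function rather than the drivers' real code:

static unsigned char build_cpu_map(void)
{
	unsigned char mtable[256] = {0};	/* large on-stack buffer */
	int i;

	pax_track_stack();	/* grsecurity-only; not present in a vanilla kernel */

	for (i = 0; i < 256; i++)
		mtable[i] = (unsigned char)(i & 0xff);
	return mtable[255];
}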
34267 diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34268 --- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34269 +++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34270 @@ -79,7 +79,7 @@ static int cards_found;
34271 /*
34272 * VLB I/O addresses
34273 */
34274 -static unsigned int pcnet32_portlist[] __initdata =
34275 +static unsigned int pcnet32_portlist[] __devinitdata =
34276 { 0x300, 0x320, 0x340, 0x360, 0 };
34277
34278 static int pcnet32_debug = 0;
34279 @@ -267,7 +267,7 @@ struct pcnet32_private {
34280 struct sk_buff **rx_skbuff;
34281 dma_addr_t *tx_dma_addr;
34282 dma_addr_t *rx_dma_addr;
34283 - struct pcnet32_access a;
34284 + struct pcnet32_access *a;
34285 spinlock_t lock; /* Guard lock */
34286 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34287 unsigned int rx_ring_size; /* current rx ring size */
34288 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34289 u16 val;
34290
34291 netif_wake_queue(dev);
34292 - val = lp->a.read_csr(ioaddr, CSR3);
34293 + val = lp->a->read_csr(ioaddr, CSR3);
34294 val &= 0x00ff;
34295 - lp->a.write_csr(ioaddr, CSR3, val);
34296 + lp->a->write_csr(ioaddr, CSR3, val);
34297 napi_enable(&lp->napi);
34298 }
34299
34300 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34301 r = mii_link_ok(&lp->mii_if);
34302 } else if (lp->chip_version >= PCNET32_79C970A) {
34303 ulong ioaddr = dev->base_addr; /* card base I/O address */
34304 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34305 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34306 } else { /* can not detect link on really old chips */
34307 r = 1;
34308 }
34309 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34310 pcnet32_netif_stop(dev);
34311
34312 spin_lock_irqsave(&lp->lock, flags);
34313 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34314 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34315
34316 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34317
34318 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34319 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34320 {
34321 struct pcnet32_private *lp = netdev_priv(dev);
34322 - struct pcnet32_access *a = &lp->a; /* access to registers */
34323 + struct pcnet32_access *a = lp->a; /* access to registers */
34324 ulong ioaddr = dev->base_addr; /* card base I/O address */
34325 struct sk_buff *skb; /* sk buff */
34326 int x, i; /* counters */
34327 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34328 pcnet32_netif_stop(dev);
34329
34330 spin_lock_irqsave(&lp->lock, flags);
34331 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34332 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34333
34334 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34335
34336 /* Reset the PCNET32 */
34337 - lp->a.reset(ioaddr);
34338 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34339 + lp->a->reset(ioaddr);
34340 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34341
34342 /* switch pcnet32 to 32bit mode */
34343 - lp->a.write_bcr(ioaddr, 20, 2);
34344 + lp->a->write_bcr(ioaddr, 20, 2);
34345
34346 /* purge & init rings but don't actually restart */
34347 pcnet32_restart(dev, 0x0000);
34348
34349 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34350 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34351
34352 /* Initialize Transmit buffers. */
34353 size = data_len + 15;
34354 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34355
34356 /* set int loopback in CSR15 */
34357 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34358 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34359 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34360
34361 teststatus = cpu_to_le16(0x8000);
34362 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34363 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34364
34365 /* Check status of descriptors */
34366 for (x = 0; x < numbuffs; x++) {
34367 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34368 }
34369 }
34370
34371 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34372 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34373 wmb();
34374 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34375 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34376 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34377 pcnet32_restart(dev, CSR0_NORMAL);
34378 } else {
34379 pcnet32_purge_rx_ring(dev);
34380 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34381 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34382 }
34383 spin_unlock_irqrestore(&lp->lock, flags);
34384
34385 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34386 static void pcnet32_led_blink_callback(struct net_device *dev)
34387 {
34388 struct pcnet32_private *lp = netdev_priv(dev);
34389 - struct pcnet32_access *a = &lp->a;
34390 + struct pcnet32_access *a = lp->a;
34391 ulong ioaddr = dev->base_addr;
34392 unsigned long flags;
34393 int i;
34394 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34395 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34396 {
34397 struct pcnet32_private *lp = netdev_priv(dev);
34398 - struct pcnet32_access *a = &lp->a;
34399 + struct pcnet32_access *a = lp->a;
34400 ulong ioaddr = dev->base_addr;
34401 unsigned long flags;
34402 int i, regs[4];
34403 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34404 {
34405 int csr5;
34406 struct pcnet32_private *lp = netdev_priv(dev);
34407 - struct pcnet32_access *a = &lp->a;
34408 + struct pcnet32_access *a = lp->a;
34409 ulong ioaddr = dev->base_addr;
34410 int ticks;
34411
34412 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34413 spin_lock_irqsave(&lp->lock, flags);
34414 if (pcnet32_tx(dev)) {
34415 /* reset the chip to clear the error condition, then restart */
34416 - lp->a.reset(ioaddr);
34417 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34418 + lp->a->reset(ioaddr);
34419 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34420 pcnet32_restart(dev, CSR0_START);
34421 netif_wake_queue(dev);
34422 }
34423 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34424 __napi_complete(napi);
34425
34426 /* clear interrupt masks */
34427 - val = lp->a.read_csr(ioaddr, CSR3);
34428 + val = lp->a->read_csr(ioaddr, CSR3);
34429 val &= 0x00ff;
34430 - lp->a.write_csr(ioaddr, CSR3, val);
34431 + lp->a->write_csr(ioaddr, CSR3, val);
34432
34433 /* Set interrupt enable. */
34434 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34435 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34436
34437 spin_unlock_irqrestore(&lp->lock, flags);
34438 }
34439 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34440 int i, csr0;
34441 u16 *buff = ptr;
34442 struct pcnet32_private *lp = netdev_priv(dev);
34443 - struct pcnet32_access *a = &lp->a;
34444 + struct pcnet32_access *a = lp->a;
34445 ulong ioaddr = dev->base_addr;
34446 unsigned long flags;
34447
34448 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34449 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34450 if (lp->phymask & (1 << j)) {
34451 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34452 - lp->a.write_bcr(ioaddr, 33,
34453 + lp->a->write_bcr(ioaddr, 33,
34454 (j << 5) | i);
34455 - *buff++ = lp->a.read_bcr(ioaddr, 34);
34456 + *buff++ = lp->a->read_bcr(ioaddr, 34);
34457 }
34458 }
34459 }
34460 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34461 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34462 lp->options |= PCNET32_PORT_FD;
34463
34464 - lp->a = *a;
34465 + lp->a = a;
34466
34467 /* prior to register_netdev, dev->name is not yet correct */
34468 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34469 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34470 if (lp->mii) {
34471 /* lp->phycount and lp->phymask are set to 0 by memset above */
34472
34473 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34474 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34475 /* scan for PHYs */
34476 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34477 unsigned short id1, id2;
34478 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34479 "Found PHY %04x:%04x at address %d.\n",
34480 id1, id2, i);
34481 }
34482 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34483 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34484 if (lp->phycount > 1) {
34485 lp->options |= PCNET32_PORT_MII;
34486 }
34487 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34488 }
34489
34490 /* Reset the PCNET32 */
34491 - lp->a.reset(ioaddr);
34492 + lp->a->reset(ioaddr);
34493
34494 /* switch pcnet32 to 32bit mode */
34495 - lp->a.write_bcr(ioaddr, 20, 2);
34496 + lp->a->write_bcr(ioaddr, 20, 2);
34497
34498 if (netif_msg_ifup(lp))
34499 printk(KERN_DEBUG
34500 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34501 (u32) (lp->init_dma_addr));
34502
34503 /* set/reset autoselect bit */
34504 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
34505 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
34506 if (lp->options & PCNET32_PORT_ASEL)
34507 val |= 2;
34508 - lp->a.write_bcr(ioaddr, 2, val);
34509 + lp->a->write_bcr(ioaddr, 2, val);
34510
34511 /* handle full duplex setting */
34512 if (lp->mii_if.full_duplex) {
34513 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
34514 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
34515 if (lp->options & PCNET32_PORT_FD) {
34516 val |= 1;
34517 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34518 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34519 if (lp->chip_version == 0x2627)
34520 val |= 3;
34521 }
34522 - lp->a.write_bcr(ioaddr, 9, val);
34523 + lp->a->write_bcr(ioaddr, 9, val);
34524 }
34525
34526 /* set/reset GPSI bit in test register */
34527 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34528 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34529 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34530 val |= 0x10;
34531 - lp->a.write_csr(ioaddr, 124, val);
34532 + lp->a->write_csr(ioaddr, 124, val);
34533
34534 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34535 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34536 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34537 * duplex, and/or enable auto negotiation, and clear DANAS
34538 */
34539 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34540 - lp->a.write_bcr(ioaddr, 32,
34541 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
34542 + lp->a->write_bcr(ioaddr, 32,
34543 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
34544 /* disable Auto Negotiation, set 10Mpbs, HD */
34545 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34546 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34547 if (lp->options & PCNET32_PORT_FD)
34548 val |= 0x10;
34549 if (lp->options & PCNET32_PORT_100)
34550 val |= 0x08;
34551 - lp->a.write_bcr(ioaddr, 32, val);
34552 + lp->a->write_bcr(ioaddr, 32, val);
34553 } else {
34554 if (lp->options & PCNET32_PORT_ASEL) {
34555 - lp->a.write_bcr(ioaddr, 32,
34556 - lp->a.read_bcr(ioaddr,
34557 + lp->a->write_bcr(ioaddr, 32,
34558 + lp->a->read_bcr(ioaddr,
34559 32) | 0x0080);
34560 /* enable auto negotiate, setup, disable fd */
34561 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34562 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34563 val |= 0x20;
34564 - lp->a.write_bcr(ioaddr, 32, val);
34565 + lp->a->write_bcr(ioaddr, 32, val);
34566 }
34567 }
34568 } else {
34569 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34570 * There is really no good other way to handle multiple PHYs
34571 * other than turning off all automatics
34572 */
34573 - val = lp->a.read_bcr(ioaddr, 2);
34574 - lp->a.write_bcr(ioaddr, 2, val & ~2);
34575 - val = lp->a.read_bcr(ioaddr, 32);
34576 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34577 + val = lp->a->read_bcr(ioaddr, 2);
34578 + lp->a->write_bcr(ioaddr, 2, val & ~2);
34579 + val = lp->a->read_bcr(ioaddr, 32);
34580 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34581
34582 if (!(lp->options & PCNET32_PORT_ASEL)) {
34583 /* setup ecmd */
34584 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34585 ecmd.speed =
34586 lp->
34587 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34588 - bcr9 = lp->a.read_bcr(ioaddr, 9);
34589 + bcr9 = lp->a->read_bcr(ioaddr, 9);
34590
34591 if (lp->options & PCNET32_PORT_FD) {
34592 ecmd.duplex = DUPLEX_FULL;
34593 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34594 ecmd.duplex = DUPLEX_HALF;
34595 bcr9 |= ~(1 << 0);
34596 }
34597 - lp->a.write_bcr(ioaddr, 9, bcr9);
34598 + lp->a->write_bcr(ioaddr, 9, bcr9);
34599 }
34600
34601 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34602 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34603
34604 #ifdef DO_DXSUFLO
34605 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34606 - val = lp->a.read_csr(ioaddr, CSR3);
34607 + val = lp->a->read_csr(ioaddr, CSR3);
34608 val |= 0x40;
34609 - lp->a.write_csr(ioaddr, CSR3, val);
34610 + lp->a->write_csr(ioaddr, CSR3, val);
34611 }
34612 #endif
34613
34614 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34615 napi_enable(&lp->napi);
34616
34617 /* Re-initialize the PCNET32, and start it when done. */
34618 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34619 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34620 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34621 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34622
34623 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34624 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34625 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34626 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34627
34628 netif_start_queue(dev);
34629
34630 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34631
34632 i = 0;
34633 while (i++ < 100)
34634 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34635 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34636 break;
34637 /*
34638 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34639 * reports that doing so triggers a bug in the '974.
34640 */
34641 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34642 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34643
34644 if (netif_msg_ifup(lp))
34645 printk(KERN_DEBUG
34646 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34647 dev->name, i,
34648 (u32) (lp->init_dma_addr),
34649 - lp->a.read_csr(ioaddr, CSR0));
34650 + lp->a->read_csr(ioaddr, CSR0));
34651
34652 spin_unlock_irqrestore(&lp->lock, flags);
34653
34654 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34655 * Switch back to 16bit mode to avoid problems with dumb
34656 * DOS packet driver after a warm reboot
34657 */
34658 - lp->a.write_bcr(ioaddr, 20, 4);
34659 + lp->a->write_bcr(ioaddr, 20, 4);
34660
34661 err_free_irq:
34662 spin_unlock_irqrestore(&lp->lock, flags);
34663 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34664
34665 /* wait for stop */
34666 for (i = 0; i < 100; i++)
34667 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34668 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34669 break;
34670
34671 if (i >= 100 && netif_msg_drv(lp))
34672 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34673 return;
34674
34675 /* ReInit Ring */
34676 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34677 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34678 i = 0;
34679 while (i++ < 1000)
34680 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34681 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34682 break;
34683
34684 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34685 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34686 }
34687
34688 static void pcnet32_tx_timeout(struct net_device *dev)
34689 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34690 if (pcnet32_debug & NETIF_MSG_DRV)
34691 printk(KERN_ERR
34692 "%s: transmit timed out, status %4.4x, resetting.\n",
34693 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34694 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34695 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34696 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34697 dev->stats.tx_errors++;
34698 if (netif_msg_tx_err(lp)) {
34699 int i;
34700 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34701 if (netif_msg_tx_queued(lp)) {
34702 printk(KERN_DEBUG
34703 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34704 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34705 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34706 }
34707
34708 /* Default status -- will not enable Successful-TxDone
34709 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34710 dev->stats.tx_bytes += skb->len;
34711
34712 /* Trigger an immediate send poll. */
34713 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34714 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34715
34716 dev->trans_start = jiffies;
34717
34718 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34719
34720 spin_lock(&lp->lock);
34721
34722 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34723 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34724 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34725 if (csr0 == 0xffff) {
34726 break; /* PCMCIA remove happened */
34727 }
34728 /* Acknowledge all of the current interrupt sources ASAP. */
34729 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34730 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34731
34732 if (netif_msg_intr(lp))
34733 printk(KERN_DEBUG
34734 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34735 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34736 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34737
34738 /* Log misc errors. */
34739 if (csr0 & 0x4000)
34740 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34741 if (napi_schedule_prep(&lp->napi)) {
34742 u16 val;
34743 /* set interrupt masks */
34744 - val = lp->a.read_csr(ioaddr, CSR3);
34745 + val = lp->a->read_csr(ioaddr, CSR3);
34746 val |= 0x5f00;
34747 - lp->a.write_csr(ioaddr, CSR3, val);
34748 + lp->a->write_csr(ioaddr, CSR3, val);
34749
34750 __napi_schedule(&lp->napi);
34751 break;
34752 }
34753 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34754 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34755 }
34756
34757 if (netif_msg_intr(lp))
34758 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34759 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34760 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34761
34762 spin_unlock(&lp->lock);
34763
34764 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34765
34766 spin_lock_irqsave(&lp->lock, flags);
34767
34768 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34769 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34770
34771 if (netif_msg_ifdown(lp))
34772 printk(KERN_DEBUG
34773 "%s: Shutting down ethercard, status was %2.2x.\n",
34774 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34775 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34776
34777 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34778 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34779 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34780
34781 /*
34782 * Switch back to 16bit mode to avoid problems with dumb
34783 * DOS packet driver after a warm reboot
34784 */
34785 - lp->a.write_bcr(ioaddr, 20, 4);
34786 + lp->a->write_bcr(ioaddr, 20, 4);
34787
34788 spin_unlock_irqrestore(&lp->lock, flags);
34789
34790 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34791 unsigned long flags;
34792
34793 spin_lock_irqsave(&lp->lock, flags);
34794 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34795 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34796 spin_unlock_irqrestore(&lp->lock, flags);
34797
34798 return &dev->stats;
34799 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34800 if (dev->flags & IFF_ALLMULTI) {
34801 ib->filter[0] = cpu_to_le32(~0U);
34802 ib->filter[1] = cpu_to_le32(~0U);
34803 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34804 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34805 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34806 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34807 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34808 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34809 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34810 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34811 return;
34812 }
34813 /* clear the multicast filter */
34814 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34815 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34816 }
34817 for (i = 0; i < 4; i++)
34818 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34819 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34820 le16_to_cpu(mcast_table[i]));
34821 return;
34822 }
34823 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34824
34825 spin_lock_irqsave(&lp->lock, flags);
34826 suspended = pcnet32_suspend(dev, &flags, 0);
34827 - csr15 = lp->a.read_csr(ioaddr, CSR15);
34828 + csr15 = lp->a->read_csr(ioaddr, CSR15);
34829 if (dev->flags & IFF_PROMISC) {
34830 /* Log any net taps. */
34831 if (netif_msg_hw(lp))
34832 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34833 lp->init_block->mode =
34834 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34835 7);
34836 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34837 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34838 } else {
34839 lp->init_block->mode =
34840 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34841 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34842 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34843 pcnet32_load_multicast(dev);
34844 }
34845
34846 if (suspended) {
34847 int csr5;
34848 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34849 - csr5 = lp->a.read_csr(ioaddr, CSR5);
34850 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34851 + csr5 = lp->a->read_csr(ioaddr, CSR5);
34852 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34853 } else {
34854 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34855 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34856 pcnet32_restart(dev, CSR0_NORMAL);
34857 netif_wake_queue(dev);
34858 }
34859 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34860 if (!lp->mii)
34861 return 0;
34862
34863 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34864 - val_out = lp->a.read_bcr(ioaddr, 34);
34865 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34866 + val_out = lp->a->read_bcr(ioaddr, 34);
34867
34868 return val_out;
34869 }
34870 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34871 if (!lp->mii)
34872 return;
34873
34874 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34875 - lp->a.write_bcr(ioaddr, 34, val);
34876 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34877 + lp->a->write_bcr(ioaddr, 34, val);
34878 }
34879
34880 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34881 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34882 curr_link = mii_link_ok(&lp->mii_if);
34883 } else {
34884 ulong ioaddr = dev->base_addr; /* card base I/O address */
34885 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34886 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34887 }
34888 if (!curr_link) {
34889 if (prev_link || verbose) {
34890 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34891 (ecmd.duplex ==
34892 DUPLEX_FULL) ? "full" : "half");
34893 }
34894 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34895 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34896 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34897 if (lp->mii_if.full_duplex)
34898 bcr9 |= (1 << 0);
34899 else
34900 bcr9 &= ~(1 << 0);
34901 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34902 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34903 }
34904 } else {
34905 if (netif_msg_link(lp))
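[Note on the pcnet32 hunks above] The driver stops embedding a private copy of the register-access table in its per-device data: struct pcnet32_access a becomes a pointer, probe stores the pointer (lp->a = a) instead of copying the structure, and every lp->a.read_csr(...)/write_csr(...) call becomes lp->a->.... The shared table can then live in one read-only location. A reduced sketch of the refactor with hypothetical names:

struct access_ops {
	unsigned short (*read_csr)(unsigned long ioaddr, int idx);
	void (*write_csr)(unsigned long ioaddr, int idx, unsigned short val);
};

struct card_priv {
	const struct access_ops *a;	/* was: struct access_ops a; (a per-device copy) */
};

static void stop_chip(struct card_priv *lp, unsigned long ioaddr)
{
	/* indirect through the shared table instead of the embedded copy */
	lp->a->write_csr(ioaddr, 0 /* CSR0 */, 0x0004 /* STOP bit */);
}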
34906 diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34907 --- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34908 +++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34909 @@ -95,6 +95,7 @@
34910 #define CHIPREV_ID_5750_A0 0x4000
34911 #define CHIPREV_ID_5750_A1 0x4001
34912 #define CHIPREV_ID_5750_A3 0x4003
34913 +#define CHIPREV_ID_5750_C1 0x4201
34914 #define CHIPREV_ID_5750_C2 0x4202
34915 #define CHIPREV_ID_5752_A0_HW 0x5000
34916 #define CHIPREV_ID_5752_A0 0x6000
34917 diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34918 --- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34919 +++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34920 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34921
34922 static int __init abyss_init (void)
34923 {
34924 - abyss_netdev_ops = tms380tr_netdev_ops;
34925 + pax_open_kernel();
34926 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34927
34928 - abyss_netdev_ops.ndo_open = abyss_open;
34929 - abyss_netdev_ops.ndo_stop = abyss_close;
34930 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34931 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34932 + pax_close_kernel();
34933
34934 return pci_register_driver(&abyss_driver);
34935 }
34936 diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34937 --- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34938 +++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34939 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34940
34941 static int __init madgemc_init (void)
34942 {
34943 - madgemc_netdev_ops = tms380tr_netdev_ops;
34944 - madgemc_netdev_ops.ndo_open = madgemc_open;
34945 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34946 + pax_open_kernel();
34947 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34948 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34949 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34950 + pax_close_kernel();
34951
34952 return mca_register_driver (&madgemc_driver);
34953 }
34954 diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34955 --- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34956 +++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34957 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34958 struct platform_device *pdev;
34959 int i, num = 0, err = 0;
34960
34961 - proteon_netdev_ops = tms380tr_netdev_ops;
34962 - proteon_netdev_ops.ndo_open = proteon_open;
34963 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34964 + pax_open_kernel();
34965 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34966 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34967 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34968 + pax_close_kernel();
34969
34970 err = platform_driver_register(&proteon_driver);
34971 if (err)
34972 diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34973 --- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34974 +++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34975 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34976 struct platform_device *pdev;
34977 int i, num = 0, err = 0;
34978
34979 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34980 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34981 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34982 + pax_open_kernel();
34983 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34984 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34985 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34986 + pax_close_kernel();
34987
34988 err = platform_driver_register(&sk_isa_driver);
34989 if (err)
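[Note on the tokenring hunks above] The four init routines (abyss, madgemc, proteon, skisa) follow one pattern: copy the shared tms380tr_netdev_ops template into the driver-local ops structure, then patch ndo_open/ndo_stop. Because that local structure becomes read-only under constification, the writes are bracketed with pax_open_kernel()/pax_close_kernel() (grsecurity-only helpers that temporarily permit writing to such data) and performed through casts. Condensed sketch with stand-in types:

#include <string.h>

struct net_ops {
	int (*ndo_open)(void);
	int (*ndo_stop)(void);
};

static int tmpl_open(void) { return 0; }
static int tmpl_stop(void) { return 0; }
static const struct net_ops template_ops = { tmpl_open, tmpl_stop };

static struct net_ops driver_ops;	/* read-only under constification */

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

static int driver_init(void)
{
	pax_open_kernel();	/* grsecurity-only: allow the writes below */
	memcpy((void *)&driver_ops, &template_ops, sizeof(template_ops));
	*(void **)&driver_ops.ndo_open = my_open;
	*(void **)&driver_ops.ndo_stop = my_close;
	pax_close_kernel();	/* restore read-only protection */
	return 0;
}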
34990 diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34991 --- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34992 +++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34993 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34994 struct de_srom_info_leaf *il;
34995 void *bufp;
34996
34997 + pax_track_stack();
34998 +
34999 /* download entire eeprom */
35000 for (i = 0; i < DE_EEPROM_WORDS; i++)
35001 ((__le16 *)ee_data)[i] =
35002 diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
35003 --- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
35004 +++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
35005 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
35006 for (i=0; i<ETH_ALEN; i++) {
35007 tmp.addr[i] = dev->dev_addr[i];
35008 }
35009 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35010 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35011 break;
35012
35013 case DE4X5_SET_HWADDR: /* Set the hardware address */
35014 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
35015 spin_lock_irqsave(&lp->lock, flags);
35016 memcpy(&statbuf, &lp->pktStats, ioc->len);
35017 spin_unlock_irqrestore(&lp->lock, flags);
35018 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
35019 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35020 return -EFAULT;
35021 break;
35022 }
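[Note on the de4x5 hunks above] Both ioctl paths gain a length check before copying kernel buffers back to userspace: if the caller-supplied ioc->len exceeds the size of the on-stack buffer (tmp.addr or statbuf), the request now fails instead of leaking adjacent stack memory through copy_to_user(). General shape of such a fix, with hypothetical request/buffer names:

#include <linux/errno.h>
#include <linux/uaccess.h>	/* copy_to_user */

struct ioc_req {
	void __user *data;
	int len;		/* caller controlled */
};

static int return_stats(const struct ioc_req *ioc, const void *statbuf, size_t statlen)
{
	/* reject oversized requests so the copy cannot run past the buffer */
	if (ioc->len < 0 || (size_t)ioc->len > statlen)
		return -EFAULT;
	if (copy_to_user(ioc->data, statbuf, ioc->len))
		return -EFAULT;
	return 0;
}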
35023 diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
35024 --- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
35025 +++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
35026 @@ -71,7 +71,7 @@
35027 #include <asm/byteorder.h>
35028 #include <linux/serial_core.h>
35029 #include <linux/serial.h>
35030 -
35031 +#include <asm/local.h>
35032
35033 #define DRIVER_VERSION "1.2"
35034 #define MOD_AUTHOR "Option Wireless"
35035 @@ -258,7 +258,7 @@ struct hso_serial {
35036
35037 /* from usb_serial_port */
35038 struct tty_struct *tty;
35039 - int open_count;
35040 + local_t open_count;
35041 spinlock_t serial_lock;
35042
35043 int (*write_data) (struct hso_serial *serial);
35044 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35045 struct urb *urb;
35046
35047 urb = serial->rx_urb[0];
35048 - if (serial->open_count > 0) {
35049 + if (local_read(&serial->open_count) > 0) {
35050 count = put_rxbuf_data(urb, serial);
35051 if (count == -1)
35052 return;
35053 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35054 DUMP1(urb->transfer_buffer, urb->actual_length);
35055
35056 /* Anyone listening? */
35057 - if (serial->open_count == 0)
35058 + if (local_read(&serial->open_count) == 0)
35059 return;
35060
35061 if (status == 0) {
35062 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35063 spin_unlock_irq(&serial->serial_lock);
35064
35065 /* check for port already opened, if not set the termios */
35066 - serial->open_count++;
35067 - if (serial->open_count == 1) {
35068 + if (local_inc_return(&serial->open_count) == 1) {
35069 tty->low_latency = 1;
35070 serial->rx_state = RX_IDLE;
35071 /* Force default termio settings */
35072 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35073 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35074 if (result) {
35075 hso_stop_serial_device(serial->parent);
35076 - serial->open_count--;
35077 + local_dec(&serial->open_count);
35078 kref_put(&serial->parent->ref, hso_serial_ref_free);
35079 }
35080 } else {
35081 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35082
35083 /* reset the rts and dtr */
35084 /* do the actual close */
35085 - serial->open_count--;
35086 + local_dec(&serial->open_count);
35087
35088 - if (serial->open_count <= 0) {
35089 - serial->open_count = 0;
35090 + if (local_read(&serial->open_count) <= 0) {
35091 + local_set(&serial->open_count, 0);
35092 spin_lock_irq(&serial->serial_lock);
35093 if (serial->tty == tty) {
35094 serial->tty->driver_data = NULL;
35095 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35096
35097 /* the actual setup */
35098 spin_lock_irqsave(&serial->serial_lock, flags);
35099 - if (serial->open_count)
35100 + if (local_read(&serial->open_count))
35101 _hso_serial_set_termios(tty, old);
35102 else
35103 tty->termios = old;
35104 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35105 /* Start all serial ports */
35106 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35107 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35108 - if (dev2ser(serial_table[i])->open_count) {
35109 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35110 result =
35111 hso_start_serial_device(serial_table[i], GFP_NOIO);
35112 hso_kick_transmit(dev2ser(serial_table[i]));
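[Note on the hso hunks above] The per-port open_count becomes a local_t, and every access goes through local_read/local_inc_return/local_dec/local_set from <asm/local.h> (included at the top of the hunk), so the open and close paths manipulate the counter atomically instead of with plain integer arithmetic. Minimal sketch of the counting pattern with a stand-in port structure:

#include <asm/local.h>

struct port {
	local_t open_count;
};

static int port_open(struct port *p)
{
	/* only the first opener performs the one-time setup */
	return local_inc_return(&p->open_count) == 1;
}

static void port_close(struct port *p)
{
	local_dec(&p->open_count);
	if (local_read(&p->open_count) <= 0)
		local_set(&p->open_count, 0);	/* clamp, mirroring the hso change */
}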
35113 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
35114 --- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35115 +++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35116 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35117 void (*link_down)(struct __vxge_hw_device *devh);
35118 void (*crit_err)(struct __vxge_hw_device *devh,
35119 enum vxge_hw_event type, u64 ext_data);
35120 -};
35121 +} __no_const;
35122
35123 /*
35124 * struct __vxge_hw_blockpool_entry - Block private data structure
35125 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
35126 --- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35127 +++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35128 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35129 struct sk_buff *completed[NR_SKB_COMPLETED];
35130 int more;
35131
35132 + pax_track_stack();
35133 +
35134 do {
35135 more = 0;
35136 skb_ptr = completed;
35137 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35138 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35139 int index;
35140
35141 + pax_track_stack();
35142 +
35143 /*
35144 * Filling
35145 * - itable with bucket numbers
35146 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
35147 --- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35148 +++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35149 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35150 struct vxge_hw_mempool_dma *dma_object,
35151 u32 index,
35152 u32 is_last);
35153 -};
35154 +} __no_const;
35155
35156 void
35157 __vxge_hw_mempool_destroy(
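[Note on the vxge hunks above] Two structures that contain only function pointers are tagged __no_const. The constification plugin in the grsecurity tree makes such structures read-only by default; these particular ones are filled in at runtime, so the annotation exempts them. Shape of the annotation (the attribute exists only in that tree; the guard below is an assumption that keeps the sketch buildable elsewhere):

/* __no_const is grsecurity-specific; define it away when it is absent */
#ifndef __no_const
#define __no_const
#endif

struct uld_callbacks {
	void (*link_up)(void *devh);
	void (*link_down)(void *devh);
} __no_const;	/* stays writable even though it is all function pointers */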
35158 diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
35159 --- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35160 +++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35161 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35162 unsigned char hex[1024],
35163 * phex = hex;
35164
35165 + pax_track_stack();
35166 +
35167 if (len >= (sizeof(hex) / 2))
35168 len = (sizeof(hex) / 2) - 1;
35169
35170 diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
35171 --- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35172 +++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35173 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35174
35175 static int x25_open(struct net_device *dev)
35176 {
35177 - struct lapb_register_struct cb;
35178 + static struct lapb_register_struct cb = {
35179 + .connect_confirmation = x25_connected,
35180 + .connect_indication = x25_connected,
35181 + .disconnect_confirmation = x25_disconnected,
35182 + .disconnect_indication = x25_disconnected,
35183 + .data_indication = x25_data_indication,
35184 + .data_transmit = x25_data_transmit
35185 + };
35186 int result;
35187
35188 - cb.connect_confirmation = x25_connected;
35189 - cb.connect_indication = x25_connected;
35190 - cb.disconnect_confirmation = x25_disconnected;
35191 - cb.disconnect_indication = x25_disconnected;
35192 - cb.data_indication = x25_data_indication;
35193 - cb.data_transmit = x25_data_transmit;
35194 -
35195 result = lapb_register(dev, &cb);
35196 if (result != LAPB_OK)
35197 return result;
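[Note on the hdlc_x25 hunk above] The lapb_register_struct is no longer assembled field by field on the stack at every open; it becomes a function-local static with a designated initializer, so the callback table is built once at compile time and the same object is handed to lapb_register() on each call. The same idiom in isolation, with hypothetical callback names:

struct callbacks {
	void (*on_connect)(void *ctx);
	void (*on_disconnect)(void *ctx);
};

static void my_connect(void *ctx)    { (void)ctx; }
static void my_disconnect(void *ctx) { (void)ctx; }

static int channel_open(void *ctx)
{
	/* one static, compile-time-initialised table instead of refilling
	 * a stack copy on every open */
	static struct callbacks cb = {
		.on_connect    = my_connect,
		.on_disconnect = my_disconnect,
	};

	cb.on_connect(ctx);	/* stand-in for passing &cb to the registration call */
	return 0;
}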
35198 diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
35199 --- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35200 +++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35201 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35202 int do_autopm = 1;
35203 DECLARE_COMPLETION_ONSTACK(notif_completion);
35204
35205 + pax_track_stack();
35206 +
35207 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35208 i2400m, ack, ack_size);
35209 BUG_ON(_ack == i2400m->bm_ack_buf);
35210 diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
35211 --- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35212 +++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35213 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35214 BSSListElement * loop_net;
35215 BSSListElement * tmp_net;
35216
35217 + pax_track_stack();
35218 +
35219 /* Blow away current list of scan results */
35220 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35221 list_move_tail (&loop_net->list, &ai->network_free_list);
35222 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35223 WepKeyRid wkr;
35224 int rc;
35225
35226 + pax_track_stack();
35227 +
35228 memset( &mySsid, 0, sizeof( mySsid ) );
35229 kfree (ai->flash);
35230 ai->flash = NULL;
35231 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35232 __le32 *vals = stats.vals;
35233 int len;
35234
35235 + pax_track_stack();
35236 +
35237 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35238 return -ENOMEM;
35239 data = (struct proc_data *)file->private_data;
35240 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35241 /* If doLoseSync is not 1, we won't do a Lose Sync */
35242 int doLoseSync = -1;
35243
35244 + pax_track_stack();
35245 +
35246 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35247 return -ENOMEM;
35248 data = (struct proc_data *)file->private_data;
35249 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35250 int i;
35251 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35252
35253 + pax_track_stack();
35254 +
35255 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35256 if (!qual)
35257 return -ENOMEM;
35258 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35259 CapabilityRid cap_rid;
35260 __le32 *vals = stats_rid.vals;
35261
35262 + pax_track_stack();
35263 +
35264 /* Get stats out of the card */
35265 clear_bit(JOB_WSTATS, &local->jobs);
35266 if (local->power.event) {
35267 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35268 --- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35269 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35270 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35271 unsigned int v;
35272 u64 tsf;
35273
35274 + pax_track_stack();
35275 +
35276 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35277 len += snprintf(buf+len, sizeof(buf)-len,
35278 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35279 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35280 unsigned int len = 0;
35281 unsigned int i;
35282
35283 + pax_track_stack();
35284 +
35285 len += snprintf(buf+len, sizeof(buf)-len,
35286 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35287
35288 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35289 --- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35290 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35291 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35292 char buf[512];
35293 unsigned int len = 0;
35294
35295 + pax_track_stack();
35296 +
35297 len += snprintf(buf + len, sizeof(buf) - len,
35298 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35299 len += snprintf(buf + len, sizeof(buf) - len,
35300 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35301 int i;
35302 u8 addr[ETH_ALEN];
35303
35304 + pax_track_stack();
35305 +
35306 len += snprintf(buf + len, sizeof(buf) - len,
35307 "primary: %s (%s chan=%d ht=%d)\n",
35308 wiphy_name(sc->pri_wiphy->hw->wiphy),
35309 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35310 --- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35311 +++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35312 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
35313 struct b43_debugfs_fops {
35314 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35315 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35316 - struct file_operations fops;
35317 + const struct file_operations fops;
35318 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35319 size_t file_struct_offset;
35320 };
35321 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35322 --- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35323 +++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35324 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
35325 struct b43legacy_debugfs_fops {
35326 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35327 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35328 - struct file_operations fops;
35329 + const struct file_operations fops;
35330 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35331 size_t file_struct_offset;
35332 /* Take wl->irq_lock before calling read/write? */
35333 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35334 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35335 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35336 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35337 int err;
35338 DECLARE_SSID_BUF(ssid);
35339
35340 + pax_track_stack();
35341 +
35342 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35343
35344 if (ssid_len)
35345 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35346 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35347 int err;
35348
35349 + pax_track_stack();
35350 +
35351 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35352 idx, keylen, len);
35353
35354 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35355 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35356 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35357 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35358 unsigned long flags;
35359 DECLARE_SSID_BUF(ssid);
35360
35361 + pax_track_stack();
35362 +
35363 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35364 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35365 print_ssid(ssid, info_element->data, info_element->len),
35366 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35367 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35368 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35369 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35370 },
35371 };
35372
35373 -static struct iwl_ops iwl1000_ops = {
35374 +static const struct iwl_ops iwl1000_ops = {
35375 .ucode = &iwl5000_ucode,
35376 .lib = &iwl1000_lib,
35377 .hcmd = &iwl5000_hcmd,
35378 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35379 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35380 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35381 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35382 */
35383 if (iwl3945_mod_params.disable_hw_scan) {
35384 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35385 - iwl3945_hw_ops.hw_scan = NULL;
35386 + pax_open_kernel();
35387 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35388 + pax_close_kernel();
35389 }
35390
35391
35392 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35393 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35394 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35395 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35396 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35397 };
35398
35399 -static struct iwl_ops iwl3945_ops = {
35400 +static const struct iwl_ops iwl3945_ops = {
35401 .ucode = &iwl3945_ucode,
35402 .lib = &iwl3945_lib,
35403 .hcmd = &iwl3945_hcmd,
35404 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35405 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35406 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35407 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35408 },
35409 };
35410
35411 -static struct iwl_ops iwl4965_ops = {
35412 +static const struct iwl_ops iwl4965_ops = {
35413 .ucode = &iwl4965_ucode,
35414 .lib = &iwl4965_lib,
35415 .hcmd = &iwl4965_hcmd,
35416 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35417 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35418 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35419 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35420 },
35421 };
35422
35423 -struct iwl_ops iwl5000_ops = {
35424 +const struct iwl_ops iwl5000_ops = {
35425 .ucode = &iwl5000_ucode,
35426 .lib = &iwl5000_lib,
35427 .hcmd = &iwl5000_hcmd,
35428 .utils = &iwl5000_hcmd_utils,
35429 };
35430
35431 -static struct iwl_ops iwl5150_ops = {
35432 +static const struct iwl_ops iwl5150_ops = {
35433 .ucode = &iwl5000_ucode,
35434 .lib = &iwl5150_lib,
35435 .hcmd = &iwl5000_hcmd,
35436 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35437 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35438 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35439 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35440 .calc_rssi = iwl5000_calc_rssi,
35441 };
35442
35443 -static struct iwl_ops iwl6000_ops = {
35444 +static const struct iwl_ops iwl6000_ops = {
35445 .ucode = &iwl5000_ucode,
35446 .lib = &iwl6000_lib,
35447 .hcmd = &iwl5000_hcmd,
35448 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35449 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35450 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35451 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35452 if (iwl_debug_level & IWL_DL_INFO)
35453 dev_printk(KERN_DEBUG, &(pdev->dev),
35454 "Disabling hw_scan\n");
35455 - iwl_hw_ops.hw_scan = NULL;
35456 + pax_open_kernel();
35457 + *(void **)&iwl_hw_ops.hw_scan = NULL;
35458 + pax_close_kernel();
35459 }
35460
35461 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35462 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35463 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35464 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35465 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35466 u8 active_index = 0;
35467 s32 tpt = 0;
35468
35469 + pax_track_stack();
35470 +
35471 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35472
35473 if (!ieee80211_is_data(hdr->frame_control) ||
35474 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35475 u8 valid_tx_ant = 0;
35476 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35477
35478 + pax_track_stack();
35479 +
35480 /* Override starting rate (index 0) if needed for debug purposes */
35481 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35482
35483 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35484 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35485 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35486 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35487 int pos = 0;
35488 const size_t bufsz = sizeof(buf);
35489
35490 + pax_track_stack();
35491 +
35492 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35493 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35494 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35495 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35496 const size_t bufsz = sizeof(buf);
35497 ssize_t ret;
35498
35499 + pax_track_stack();
35500 +
35501 for (i = 0; i < AC_NUM; i++) {
35502 pos += scnprintf(buf + pos, bufsz - pos,
35503 "\tcw_min\tcw_max\taifsn\ttxop\n");
35504 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35505 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35506 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35507 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35508 #endif
35509
35510 #else
35511 -#define IWL_DEBUG(__priv, level, fmt, args...)
35512 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35513 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35514 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35515 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35516 void *p, u32 len)
35517 {}
35518 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35519 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35520 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35521 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
35522
35523 /* shared structures from iwl-5000.c */
35524 extern struct iwl_mod_params iwl50_mod_params;
35525 -extern struct iwl_ops iwl5000_ops;
35526 +extern const struct iwl_ops iwl5000_ops;
35527 extern struct iwl_ucode_ops iwl5000_ucode;
35528 extern struct iwl_lib_ops iwl5000_lib;
35529 extern struct iwl_hcmd_ops iwl5000_hcmd;
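[Note on the iwlwifi hunks above] The per-chip iwl_ops tables (iwl1000_ops, iwl3945_ops, iwl4965_ops, iwl5000_ops, iwl5150_ops, iwl6000_ops) are marked const and the matching extern in iwl-dev.h is updated, since the tables are fully initialised at build time and never written afterwards. Reduced version of the pattern with a stand-in ops type:

/* header side: the extern must carry the same qualifier as the definition */
struct chip_ops {
	int (*start)(void);
};
extern const struct chip_ops chip5000_ops;

/* one translation unit provides the read-only definition */
static int chip5000_start(void) { return 0; }

const struct chip_ops chip5000_ops = {
	.start = chip5000_start,
};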
35530 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35531 --- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35532 +++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35533 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35534 int buf_len = 512;
35535 size_t len = 0;
35536
35537 + pax_track_stack();
35538 +
35539 if (*ppos != 0)
35540 return 0;
35541 if (count < sizeof(buf))
35542 diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35543 --- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35544 +++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35545 @@ -708,7 +708,7 @@ out_unlock:
35546 struct lbs_debugfs_files {
35547 const char *name;
35548 int perm;
35549 - struct file_operations fops;
35550 + const struct file_operations fops;
35551 };
35552
35553 static const struct lbs_debugfs_files debugfs_files[] = {
35554 diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35555 --- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35556 +++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35557 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35558
35559 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35560
35561 - if (rts_threshold < 0 || rts_threshold > 2347)
35562 + if (rts_threshold > 2347)
35563 rts_threshold = 2347;
35564
35565 tmp = cpu_to_le32(rts_threshold);
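[Note on the rndis_wlan hunk above] The rts_threshold < 0 half of the clamp is dropped: the value appears to be an unsigned 32-bit quantity at this point, so that comparison can never be true (and only provokes compiler warnings); clamping to the 2347 maximum is the only meaningful part. Equivalent check in isolation, assuming the unsigned type:

#include <stdint.h>

static uint32_t clamp_rts_threshold(uint32_t rts_threshold)
{
	/* "< 0" on an unsigned value is always false, so only the upper
	 * bound needs checking */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	return rts_threshold;
}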
35566 diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35567 --- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35568 +++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35569 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35570 if (cookie == NO_COOKIE)
35571 offset = pc;
35572 if (cookie == INVALID_COOKIE) {
35573 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35574 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35575 offset = pc;
35576 }
35577 if (cookie != last_cookie) {
35578 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35579 /* add userspace sample */
35580
35581 if (!mm) {
35582 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35583 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35584 return 0;
35585 }
35586
35587 cookie = lookup_dcookie(mm, s->eip, &offset);
35588
35589 if (cookie == INVALID_COOKIE) {
35590 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35591 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35592 return 0;
35593 }
35594
35595 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35596 /* ignore backtraces if failed to add a sample */
35597 if (state == sb_bt_start) {
35598 state = sb_bt_ignore;
35599 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35600 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35601 }
35602 }
35603 release_mm(mm);
35604 diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35605 --- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35606 +++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35607 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35608 }
35609
35610 if (buffer_pos == buffer_size) {
35611 - atomic_inc(&oprofile_stats.event_lost_overflow);
35612 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35613 return;
35614 }
35615
35616 diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35617 --- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35618 +++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35619 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35620 if (oprofile_ops.switch_events())
35621 return;
35622
35623 - atomic_inc(&oprofile_stats.multiplex_counter);
35624 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35625 start_switch_worker();
35626 }
35627
35628 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35629 --- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35630 +++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35631 @@ -187,7 +187,7 @@ static const struct file_operations atom
35632
35633
35634 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35635 - char const *name, atomic_t *val)
35636 + char const *name, atomic_unchecked_t *val)
35637 {
35638 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35639 &atomic_ro_fops, 0444);
35640 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35641 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35642 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35643 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35644 cpu_buf->sample_invalid_eip = 0;
35645 }
35646
35647 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35648 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35649 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35650 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35651 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35652 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35653 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35654 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35655 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35656 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35657 }
35658
35659
35660 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35661 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35662 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35663 @@ -13,11 +13,11 @@
35664 #include <asm/atomic.h>
35665
35666 struct oprofile_stat_struct {
35667 - atomic_t sample_lost_no_mm;
35668 - atomic_t sample_lost_no_mapping;
35669 - atomic_t bt_lost_no_mapping;
35670 - atomic_t event_lost_overflow;
35671 - atomic_t multiplex_counter;
35672 + atomic_unchecked_t sample_lost_no_mm;
35673 + atomic_unchecked_t sample_lost_no_mapping;
35674 + atomic_unchecked_t bt_lost_no_mapping;
35675 + atomic_unchecked_t event_lost_overflow;
35676 + atomic_unchecked_t multiplex_counter;
35677 };
35678
35679 extern struct oprofile_stat_struct oprofile_stats;
35680 diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35681 --- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35682 +++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35683 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35684 return ret;
35685 }
35686
35687 -static struct sysfs_ops pdcspath_attr_ops = {
35688 +static const struct sysfs_ops pdcspath_attr_ops = {
35689 .show = pdcspath_attr_show,
35690 .store = pdcspath_attr_store,
35691 };
35692 diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35693 --- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35694 +++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35695 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35696
35697 *ppos += len;
35698
35699 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35700 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35701 }
35702
35703 #ifdef CONFIG_PARPORT_1284
35704 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35705
35706 *ppos += len;
35707
35708 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35709 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35710 }
35711 #endif /* IEEE1284.3 support. */
35712
35713 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35714 --- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35715 +++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35716 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35717 }
35718
35719
35720 -static struct acpi_dock_ops acpiphp_dock_ops = {
35721 +static const struct acpi_dock_ops acpiphp_dock_ops = {
35722 .handler = handle_hotplug_event_func,
35723 };
35724
35725 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35726 --- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35727 +++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35728 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35729 int (*hardware_test) (struct slot* slot, u32 value);
35730 u8 (*get_power) (struct slot* slot);
35731 int (*set_power) (struct slot* slot, int value);
35732 -};
35733 +} __no_const;
35734
35735 struct cpci_hp_controller {
35736 unsigned int irq;
35737 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35738 --- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35739 +++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35740 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35741
35742 void compaq_nvram_init (void __iomem *rom_start)
35743 {
35744 +
35745 +#ifndef CONFIG_PAX_KERNEXEC
35746 if (rom_start) {
35747 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35748 }
35749 +#endif
35750 +
35751 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35752
35753 /* initialize our int15 lock */
35754 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35755 --- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35756 +++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35757 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35758 }
35759
35760 static struct kobj_type legacy_ktype = {
35761 - .sysfs_ops = &(struct sysfs_ops){
35762 + .sysfs_ops = &(const struct sysfs_ops){
35763 .store = legacy_store, .show = legacy_show
35764 },
35765 .release = &legacy_release,
35766 diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35767 --- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35768 +++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35769 @@ -2643,7 +2643,7 @@ error:
35770 return 0;
35771 }
35772
35773 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35774 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
35775 unsigned long offset, size_t size,
35776 enum dma_data_direction dir,
35777 struct dma_attrs *attrs)
35778 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35779 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35780 }
35781
35782 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35783 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35784 size_t size, enum dma_data_direction dir,
35785 struct dma_attrs *attrs)
35786 {
35787 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35788 }
35789 }
35790
35791 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35792 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
35793 dma_addr_t *dma_handle, gfp_t flags)
35794 {
35795 void *vaddr;
35796 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35797 return NULL;
35798 }
35799
35800 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35801 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35802 dma_addr_t dma_handle)
35803 {
35804 int order;
35805 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35806 free_pages((unsigned long)vaddr, order);
35807 }
35808
35809 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35810 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35811 int nelems, enum dma_data_direction dir,
35812 struct dma_attrs *attrs)
35813 {
35814 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35815 return nelems;
35816 }
35817
35818 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35819 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35820 enum dma_data_direction dir, struct dma_attrs *attrs)
35821 {
35822 int i;
35823 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35824 return nelems;
35825 }
35826
35827 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35828 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35829 {
35830 return !dma_addr;
35831 }
35832
35833 -struct dma_map_ops intel_dma_ops = {
35834 +const struct dma_map_ops intel_dma_ops = {
35835 .alloc_coherent = intel_alloc_coherent,
35836 .free_coherent = intel_free_coherent,
35837 .map_sg = intel_map_sg,
35838 diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35839 --- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35840 +++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35841 @@ -27,9 +27,9 @@
35842 #define MODULE_PARAM_PREFIX "pcie_aspm."
35843
35844 /* Note: those are not register definitions */
35845 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35846 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35847 -#define ASPM_STATE_L1 (4) /* L1 state */
35848 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35849 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35850 +#define ASPM_STATE_L1 (4U) /* L1 state */
35851 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35852 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35853
35854 diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35855 --- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35856 +++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35857 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35858 return ret;
35859 }
35860
35861 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35862 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35863 struct device_attribute *attr,
35864 char *buf)
35865 {
35866 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35867 }
35868
35869 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35870 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35871 struct device_attribute *attr,
35872 char *buf)
35873 {
35874 diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35875 --- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35876 +++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35877 @@ -480,7 +480,16 @@ static const struct file_operations proc
35878 static int __init pci_proc_init(void)
35879 {
35880 struct pci_dev *dev = NULL;
35881 +
35882 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35883 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35884 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35885 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35886 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35887 +#endif
35888 +#else
35889 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35890 +#endif
35891 proc_create("devices", 0, proc_bus_pci_dir,
35892 &proc_bus_pci_dev_operations);
35893 proc_initialized = 1;
35894 diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35895 --- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35896 +++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35897 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35898 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35899 }
35900
35901 -static struct sysfs_ops pci_slot_sysfs_ops = {
35902 +static const struct sysfs_ops pci_slot_sysfs_ops = {
35903 .show = pci_slot_attr_show,
35904 .store = pci_slot_attr_store,
35905 };
35906 diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35907 --- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35908 +++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35909 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35910 return -EFAULT;
35911 }
35912 }
35913 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35914 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35915 if (!buf)
35916 return -ENOMEM;
35917
35918 diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35919 --- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35920 +++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35921 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35922 return 0;
35923 }
35924
35925 -static struct backlight_ops acer_bl_ops = {
35926 +static const struct backlight_ops acer_bl_ops = {
35927 .get_brightness = read_brightness,
35928 .update_status = update_bl_status,
35929 };
35930 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35931 --- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35932 +++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35933 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35934 return 0;
35935 }
35936
35937 -static struct backlight_ops asus_backlight_data = {
35938 +static const struct backlight_ops asus_backlight_data = {
35939 .get_brightness = read_brightness,
35940 .update_status = set_brightness_status,
35941 };
35942 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35943 --- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35944 +++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35945 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35946 */
35947 static int read_brightness(struct backlight_device *bd);
35948 static int update_bl_status(struct backlight_device *bd);
35949 -static struct backlight_ops asusbl_ops = {
35950 +static const struct backlight_ops asusbl_ops = {
35951 .get_brightness = read_brightness,
35952 .update_status = update_bl_status,
35953 };
35954 diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35955 --- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35956 +++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35957 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35958 return set_lcd_level(b->props.brightness);
35959 }
35960
35961 -static struct backlight_ops compalbl_ops = {
35962 +static const struct backlight_ops compalbl_ops = {
35963 .get_brightness = bl_get_brightness,
35964 .update_status = bl_update_status,
35965 };
35966 diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35967 --- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35968 +++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35969 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35970 return buffer.output[1];
35971 }
35972
35973 -static struct backlight_ops dell_ops = {
35974 +static const struct backlight_ops dell_ops = {
35975 .get_brightness = dell_get_intensity,
35976 .update_status = dell_send_intensity,
35977 };
35978 diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35979 --- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35980 +++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35981 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35982 */
35983 static int read_brightness(struct backlight_device *bd);
35984 static int update_bl_status(struct backlight_device *bd);
35985 -static struct backlight_ops eeepcbl_ops = {
35986 +static const struct backlight_ops eeepcbl_ops = {
35987 .get_brightness = read_brightness,
35988 .update_status = update_bl_status,
35989 };
35990 diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35991 --- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35992 +++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35993 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35994 return ret;
35995 }
35996
35997 -static struct backlight_ops fujitsubl_ops = {
35998 +static const struct backlight_ops fujitsubl_ops = {
35999 .get_brightness = bl_get_brightness,
36000 .update_status = bl_update_status,
36001 };
36002 diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
36003 --- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
36004 +++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
36005 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
36006 return set_lcd_level(b->props.brightness);
36007 }
36008
36009 -static struct backlight_ops msibl_ops = {
36010 +static const struct backlight_ops msibl_ops = {
36011 .get_brightness = bl_get_brightness,
36012 .update_status = bl_update_status,
36013 };
36014 diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
36015 --- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
36016 +++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
36017 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
36018 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
36019 }
36020
36021 -static struct backlight_ops pcc_backlight_ops = {
36022 +static const struct backlight_ops pcc_backlight_ops = {
36023 .get_brightness = bl_get,
36024 .update_status = bl_set_status,
36025 };
36026 diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
36027 --- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
36028 +++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
36029 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
36030 }
36031
36032 static struct backlight_device *sony_backlight_device;
36033 -static struct backlight_ops sony_backlight_ops = {
36034 +static const struct backlight_ops sony_backlight_ops = {
36035 .update_status = sony_backlight_update_status,
36036 .get_brightness = sony_backlight_get_brightness,
36037 };
36038 diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
36039 --- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
36040 +++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
36041 @@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
36042 return 0;
36043 }
36044
36045 -void static hotkey_mask_warn_incomplete_mask(void)
36046 +static void hotkey_mask_warn_incomplete_mask(void)
36047 {
36048 /* log only what the user can fix... */
36049 const u32 wantedmask = hotkey_driver_mask &
36050 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
36051 BACKLIGHT_UPDATE_HOTKEY);
36052 }
36053
36054 -static struct backlight_ops ibm_backlight_data = {
36055 +static const struct backlight_ops ibm_backlight_data = {
36056 .get_brightness = brightness_get,
36057 .update_status = brightness_update_status,
36058 };
36059 diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
36060 --- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
36061 +++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
36062 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36063 return AE_OK;
36064 }
36065
36066 -static struct backlight_ops toshiba_backlight_data = {
36067 +static const struct backlight_ops toshiba_backlight_data = {
36068 .get_brightness = get_lcd,
36069 .update_status = set_lcd_status,
36070 };
36071 diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
36072 --- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36073 +++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36074 @@ -60,7 +60,7 @@ do { \
36075 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36076 } while(0)
36077
36078 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36079 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36080 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36081
36082 /*
36083 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36084
36085 cpu = get_cpu();
36086 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36087 +
36088 + pax_open_kernel();
36089 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36090 + pax_close_kernel();
36091
36092 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36093 spin_lock_irqsave(&pnp_bios_lock, flags);
36094 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36095 :"memory");
36096 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36097
36098 + pax_open_kernel();
36099 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36100 + pax_close_kernel();
36101 +
36102 put_cpu();
36103
36104 /* If we get here and this is set then the PnP BIOS faulted on us. */
36105 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36106 return status;
36107 }
36108
36109 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36110 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36111 {
36112 int i;
36113
36114 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36115 pnp_bios_callpoint.offset = header->fields.pm16offset;
36116 pnp_bios_callpoint.segment = PNP_CS16;
36117
36118 + pax_open_kernel();
36119 +
36120 for_each_possible_cpu(i) {
36121 struct desc_struct *gdt = get_cpu_gdt_table(i);
36122 if (!gdt)
36123 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36124 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36125 (unsigned long)__va(header->fields.pm16dseg));
36126 }
36127 +
36128 + pax_close_kernel();
36129 }
36130 diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
36131 --- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36132 +++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36133 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36134 return 1;
36135
36136 /* check if the resource is valid */
36137 - if (*irq < 0 || *irq > 15)
36138 + if (*irq > 15)
36139 return 0;
36140
36141 /* check if the resource is reserved */
36142 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36143 return 1;
36144
36145 /* check if the resource is valid */
36146 - if (*dma < 0 || *dma == 4 || *dma > 7)
36147 + if (*dma == 4 || *dma > 7)
36148 return 0;
36149
36150 /* check if the resource is reserved */
36151 diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
36152 --- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36153 +++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36154 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
36155 struct bq27x00_access_methods {
36156 int (*read)(u8 reg, int *rt_value, int b_single,
36157 struct bq27x00_device_info *di);
36158 -};
36159 +} __no_const;
36160
36161 struct bq27x00_device_info {
36162 struct device *dev;
36163 diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
36164 --- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36165 +++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36166 @@ -14,6 +14,7 @@
36167 #include <linux/module.h>
36168 #include <linux/rtc.h>
36169 #include <linux/sched.h>
36170 +#include <linux/grsecurity.h>
36171 #include "rtc-core.h"
36172
36173 static dev_t rtc_devt;
36174 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36175 if (copy_from_user(&tm, uarg, sizeof(tm)))
36176 return -EFAULT;
36177
36178 + gr_log_timechange();
36179 +
36180 return rtc_set_time(rtc, &tm);
36181
36182 case RTC_PIE_ON:
36183 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
36184 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36185 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36186 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36187 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36188 {
36189 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36190 - (long)atomic_long_read(&perf_stats.qdio_int));
36191 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36192 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36193 - (long)atomic_long_read(&perf_stats.pci_int));
36194 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36195 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36196 - (long)atomic_long_read(&perf_stats.thin_int));
36197 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36198 seq_printf(m, "\n");
36199 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36200 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
36201 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36202 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36203 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
36204 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36205 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36206 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
36207 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36208 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36209 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36210 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36211 - (long)atomic_long_read(&perf_stats.thinint_inbound),
36212 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36213 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36214 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36215 seq_printf(m, "\n");
36216 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36217 - (long)atomic_long_read(&perf_stats.siga_in));
36218 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36219 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36220 - (long)atomic_long_read(&perf_stats.siga_out));
36221 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36222 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36223 - (long)atomic_long_read(&perf_stats.siga_sync));
36224 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36225 seq_printf(m, "\n");
36226 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36227 - (long)atomic_long_read(&perf_stats.inbound_handler));
36228 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36229 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36230 - (long)atomic_long_read(&perf_stats.outbound_handler));
36231 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36232 seq_printf(m, "\n");
36233 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36234 - (long)atomic_long_read(&perf_stats.fast_requeue));
36235 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36236 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36237 - (long)atomic_long_read(&perf_stats.outbound_target_full));
36238 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36239 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36240 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36241 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36242 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36243 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
36244 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36245 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36246 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36247 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36248 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36249 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36250 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36251 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36252 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36253 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36254 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36255 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36256 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36257 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36258 seq_printf(m, "\n");
36259 return 0;
36260 }
36261 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36262 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36263 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36264 @@ -13,46 +13,46 @@
36265
36266 struct qdio_perf_stats {
36267 /* interrupt handler calls */
36268 - atomic_long_t qdio_int;
36269 - atomic_long_t pci_int;
36270 - atomic_long_t thin_int;
36271 + atomic_long_unchecked_t qdio_int;
36272 + atomic_long_unchecked_t pci_int;
36273 + atomic_long_unchecked_t thin_int;
36274
36275 /* tasklet runs */
36276 - atomic_long_t tasklet_inbound;
36277 - atomic_long_t tasklet_outbound;
36278 - atomic_long_t tasklet_thinint;
36279 - atomic_long_t tasklet_thinint_loop;
36280 - atomic_long_t thinint_inbound;
36281 - atomic_long_t thinint_inbound_loop;
36282 - atomic_long_t thinint_inbound_loop2;
36283 + atomic_long_unchecked_t tasklet_inbound;
36284 + atomic_long_unchecked_t tasklet_outbound;
36285 + atomic_long_unchecked_t tasklet_thinint;
36286 + atomic_long_unchecked_t tasklet_thinint_loop;
36287 + atomic_long_unchecked_t thinint_inbound;
36288 + atomic_long_unchecked_t thinint_inbound_loop;
36289 + atomic_long_unchecked_t thinint_inbound_loop2;
36290
36291 /* signal adapter calls */
36292 - atomic_long_t siga_out;
36293 - atomic_long_t siga_in;
36294 - atomic_long_t siga_sync;
36295 + atomic_long_unchecked_t siga_out;
36296 + atomic_long_unchecked_t siga_in;
36297 + atomic_long_unchecked_t siga_sync;
36298
36299 /* misc */
36300 - atomic_long_t inbound_handler;
36301 - atomic_long_t outbound_handler;
36302 - atomic_long_t fast_requeue;
36303 - atomic_long_t outbound_target_full;
36304 + atomic_long_unchecked_t inbound_handler;
36305 + atomic_long_unchecked_t outbound_handler;
36306 + atomic_long_unchecked_t fast_requeue;
36307 + atomic_long_unchecked_t outbound_target_full;
36308
36309 /* for debugging */
36310 - atomic_long_t debug_tl_out_timer;
36311 - atomic_long_t debug_stop_polling;
36312 - atomic_long_t debug_eqbs_all;
36313 - atomic_long_t debug_eqbs_incomplete;
36314 - atomic_long_t debug_sqbs_all;
36315 - atomic_long_t debug_sqbs_incomplete;
36316 + atomic_long_unchecked_t debug_tl_out_timer;
36317 + atomic_long_unchecked_t debug_stop_polling;
36318 + atomic_long_unchecked_t debug_eqbs_all;
36319 + atomic_long_unchecked_t debug_eqbs_incomplete;
36320 + atomic_long_unchecked_t debug_sqbs_all;
36321 + atomic_long_unchecked_t debug_sqbs_incomplete;
36322 };
36323
36324 extern struct qdio_perf_stats perf_stats;
36325 extern int qdio_performance_stats;
36326
36327 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
36328 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36329 {
36330 if (qdio_performance_stats)
36331 - atomic_long_inc(count);
36332 + atomic_long_inc_unchecked(count);
36333 }
36334
36335 int qdio_setup_perf_stats(void);
36336 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36337 --- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36338 +++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36339 @@ -471,7 +471,7 @@ struct adapter_ops
36340 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36341 /* Administrative operations */
36342 int (*adapter_comm)(struct aac_dev * dev, int comm);
36343 -};
36344 +} __no_const;
36345
36346 /*
36347 * Define which interrupt handler needs to be installed
36348 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36349 --- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36350 +++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36351 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36352 u32 actual_fibsize64, actual_fibsize = 0;
36353 int i;
36354
36355 + pax_track_stack();
36356
36357 if (dev->in_reset) {
36358 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36359 diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36360 --- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36361 +++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36362 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36363 flash_error_table[i].reason);
36364 }
36365
36366 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36367 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36368 asd_show_update_bios, asd_store_update_bios);
36369
36370 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36371 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36372 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36373 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36374 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
36375 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36376 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36377 u32 *nvecs, u32 *maxvec);
36378 -};
36379 +} __no_const;
36380 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36381
36382 struct bfa_iocfc_s {
36383 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36384 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36385 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36386 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36387 bfa_ioc_disable_cbfn_t disable_cbfn;
36388 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36389 bfa_ioc_reset_cbfn_t reset_cbfn;
36390 -};
36391 +} __no_const;
36392
36393 /**
36394 * Heartbeat failure notification queue element.
36395 diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36396 --- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36397 +++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36398 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36399 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36400 *PrototypeHostAdapter)
36401 {
36402 + pax_track_stack();
36403 +
36404 /*
36405 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36406 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36407 diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36408 --- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36409 +++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36410 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36411 dma_addr_t addr;
36412 ulong flags = 0;
36413
36414 + pax_track_stack();
36415 +
36416 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36417 // get user msg size in u32s
36418 if(get_user(size, &user_msg[0])){
36419 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36420 s32 rcode;
36421 dma_addr_t addr;
36422
36423 + pax_track_stack();
36424 +
36425 memset(msg, 0 , sizeof(msg));
36426 len = scsi_bufflen(cmd);
36427 direction = 0x00000000;
36428 diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36429 --- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36430 +++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36431 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36432 struct hostdata *ha;
36433 char name[16];
36434
36435 + pax_track_stack();
36436 +
36437 sprintf(name, "%s%d", driver_name, j);
36438
36439 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36440 diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36441 --- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36442 +++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36443 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36444 size_t rlen;
36445 size_t dlen;
36446
36447 + pax_track_stack();
36448 +
36449 fiph = (struct fip_header *)skb->data;
36450 sub = fiph->fip_subcode;
36451 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36452 diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36453 --- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36454 +++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36455 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36456 /* Start local port initiatialization */
36457
36458 lp->link_up = 0;
36459 - lp->tt = fnic_transport_template;
36460 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36461
36462 lp->max_retry_count = fnic->config.flogi_retries;
36463 lp->max_rport_retry_count = fnic->config.plogi_retries;
36464 diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36465 --- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36466 +++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36467 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36468 ulong flags;
36469 gdth_ha_str *ha;
36470
36471 + pax_track_stack();
36472 +
36473 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36474 return -EFAULT;
36475 ha = gdth_find_ha(ldrv.ionode);
36476 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36477 gdth_ha_str *ha;
36478 int rval;
36479
36480 + pax_track_stack();
36481 +
36482 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36483 res.number >= MAX_HDRIVES)
36484 return -EFAULT;
36485 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36486 gdth_ha_str *ha;
36487 int rval;
36488
36489 + pax_track_stack();
36490 +
36491 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36492 return -EFAULT;
36493 ha = gdth_find_ha(gen.ionode);
36494 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36495 int i;
36496 gdth_cmd_str gdtcmd;
36497 char cmnd[MAX_COMMAND_SIZE];
36498 +
36499 + pax_track_stack();
36500 +
36501 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36502
36503 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36504 diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36505 --- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36506 +++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36507 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36508 ulong64 paddr;
36509
36510 char cmnd[MAX_COMMAND_SIZE];
36511 +
36512 + pax_track_stack();
36513 +
36514 memset(cmnd, 0xff, 12);
36515 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36516
36517 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36518 gdth_hget_str *phg;
36519 char cmnd[MAX_COMMAND_SIZE];
36520
36521 + pax_track_stack();
36522 +
36523 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36524 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36525 if (!gdtcmd || !estr)
36526 diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36527 --- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36528 +++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36529 @@ -40,7 +40,7 @@
36530 #include "scsi_logging.h"
36531
36532
36533 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36534 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36535
36536
36537 static void scsi_host_cls_release(struct device *dev)
36538 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36539 * subtract one because we increment first then return, but we need to
36540 * know what the next host number was before increment
36541 */
36542 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36543 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36544 shost->dma_channel = 0xff;
36545
36546 /* These three are default values which can be overridden */
36547 diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36548 --- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36549 +++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36550 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36551 return true;
36552 }
36553
36554 -static struct ata_port_operations ipr_sata_ops = {
36555 +static const struct ata_port_operations ipr_sata_ops = {
36556 .phy_reset = ipr_ata_phy_reset,
36557 .hardreset = ipr_sata_reset,
36558 .post_internal_cmd = ipr_ata_post_internal,
36559 diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36560 --- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36561 +++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36562 @@ -1027,7 +1027,7 @@ typedef struct {
36563 int (*intr)(struct ips_ha *);
36564 void (*enableint)(struct ips_ha *);
36565 uint32_t (*statupd)(struct ips_ha *);
36566 -} ips_hw_func_t;
36567 +} __no_const ips_hw_func_t;
36568
36569 typedef struct ips_ha {
36570 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36571 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c
36572 --- linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
36573 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
36574 @@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
36575 struct fc_disc *disc;
36576
36577 if (!lport->tt.disc_start)
36578 - lport->tt.disc_start = fc_disc_start;
36579 + *(void **)&lport->tt.disc_start = fc_disc_start;
36580
36581 if (!lport->tt.disc_stop)
36582 - lport->tt.disc_stop = fc_disc_stop;
36583 + *(void **)&lport->tt.disc_stop = fc_disc_stop;
36584
36585 if (!lport->tt.disc_stop_final)
36586 - lport->tt.disc_stop_final = fc_disc_stop_final;
36587 + *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
36588
36589 if (!lport->tt.disc_recv_req)
36590 - lport->tt.disc_recv_req = fc_disc_recv_req;
36591 + *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
36592
36593 disc = &lport->disc;
36594 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
36595 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c
36596 --- linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
36597 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
36598 @@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
36599 int fc_elsct_init(struct fc_lport *lport)
36600 {
36601 if (!lport->tt.elsct_send)
36602 - lport->tt.elsct_send = fc_elsct_send;
36603 + *(void **)&lport->tt.elsct_send = fc_elsct_send;
36604
36605 return 0;
36606 }
36607 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36608 --- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36609 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
36610 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
36611 * all together if not used XXX
36612 */
36613 struct {
36614 - atomic_t no_free_exch;
36615 - atomic_t no_free_exch_xid;
36616 - atomic_t xid_not_found;
36617 - atomic_t xid_busy;
36618 - atomic_t seq_not_found;
36619 - atomic_t non_bls_resp;
36620 + atomic_unchecked_t no_free_exch;
36621 + atomic_unchecked_t no_free_exch_xid;
36622 + atomic_unchecked_t xid_not_found;
36623 + atomic_unchecked_t xid_busy;
36624 + atomic_unchecked_t seq_not_found;
36625 + atomic_unchecked_t non_bls_resp;
36626 } stats;
36627 };
36628 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36629 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36630 /* allocate memory for exchange */
36631 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36632 if (!ep) {
36633 - atomic_inc(&mp->stats.no_free_exch);
36634 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36635 goto out;
36636 }
36637 memset(ep, 0, sizeof(*ep));
36638 @@ -557,7 +557,7 @@ out:
36639 return ep;
36640 err:
36641 spin_unlock_bh(&pool->lock);
36642 - atomic_inc(&mp->stats.no_free_exch_xid);
36643 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36644 mempool_free(ep, mp->ep_pool);
36645 return NULL;
36646 }
36647 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36648 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36649 ep = fc_exch_find(mp, xid);
36650 if (!ep) {
36651 - atomic_inc(&mp->stats.xid_not_found);
36652 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36653 reject = FC_RJT_OX_ID;
36654 goto out;
36655 }
36656 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36657 ep = fc_exch_find(mp, xid);
36658 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36659 if (ep) {
36660 - atomic_inc(&mp->stats.xid_busy);
36661 + atomic_inc_unchecked(&mp->stats.xid_busy);
36662 reject = FC_RJT_RX_ID;
36663 goto rel;
36664 }
36665 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36666 }
36667 xid = ep->xid; /* get our XID */
36668 } else if (!ep) {
36669 - atomic_inc(&mp->stats.xid_not_found);
36670 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36671 reject = FC_RJT_RX_ID; /* XID not found */
36672 goto out;
36673 }
36674 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36675 } else {
36676 sp = &ep->seq;
36677 if (sp->id != fh->fh_seq_id) {
36678 - atomic_inc(&mp->stats.seq_not_found);
36679 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36680 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36681 goto rel;
36682 }
36683 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36684
36685 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36686 if (!ep) {
36687 - atomic_inc(&mp->stats.xid_not_found);
36688 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36689 goto out;
36690 }
36691 if (ep->esb_stat & ESB_ST_COMPLETE) {
36692 - atomic_inc(&mp->stats.xid_not_found);
36693 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36694 goto out;
36695 }
36696 if (ep->rxid == FC_XID_UNKNOWN)
36697 ep->rxid = ntohs(fh->fh_rx_id);
36698 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36699 - atomic_inc(&mp->stats.xid_not_found);
36700 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36701 goto rel;
36702 }
36703 if (ep->did != ntoh24(fh->fh_s_id) &&
36704 ep->did != FC_FID_FLOGI) {
36705 - atomic_inc(&mp->stats.xid_not_found);
36706 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36707 goto rel;
36708 }
36709 sof = fr_sof(fp);
36710 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36711 } else {
36712 sp = &ep->seq;
36713 if (sp->id != fh->fh_seq_id) {
36714 - atomic_inc(&mp->stats.seq_not_found);
36715 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36716 goto rel;
36717 }
36718 }
36719 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36720 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36721
36722 if (!sp)
36723 - atomic_inc(&mp->stats.xid_not_found);
36724 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36725 else
36726 - atomic_inc(&mp->stats.non_bls_resp);
36727 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36728
36729 fc_frame_free(fp);
36730 }
36731 @@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
36732 int fc_exch_init(struct fc_lport *lp)
36733 {
36734 if (!lp->tt.seq_start_next)
36735 - lp->tt.seq_start_next = fc_seq_start_next;
36736 + *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
36737
36738 if (!lp->tt.exch_seq_send)
36739 - lp->tt.exch_seq_send = fc_exch_seq_send;
36740 + *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
36741
36742 if (!lp->tt.seq_send)
36743 - lp->tt.seq_send = fc_seq_send;
36744 + *(void **)&lp->tt.seq_send = fc_seq_send;
36745
36746 if (!lp->tt.seq_els_rsp_send)
36747 - lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36748 + *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36749
36750 if (!lp->tt.exch_done)
36751 - lp->tt.exch_done = fc_exch_done;
36752 + *(void **)&lp->tt.exch_done = fc_exch_done;
36753
36754 if (!lp->tt.exch_mgr_reset)
36755 - lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36756 + *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36757
36758 if (!lp->tt.seq_exch_abort)
36759 - lp->tt.seq_exch_abort = fc_seq_exch_abort;
36760 + *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
36761
36762 /*
36763 * Initialize fc_cpu_mask and fc_cpu_order. The
36764 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c
36765 --- linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
36766 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
36767 @@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
36768 struct fc_fcp_internal *si;
36769
36770 if (!lp->tt.fcp_cmd_send)
36771 - lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36772 + *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36773
36774 if (!lp->tt.fcp_cleanup)
36775 - lp->tt.fcp_cleanup = fc_fcp_cleanup;
36776 + *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
36777
36778 if (!lp->tt.fcp_abort_io)
36779 - lp->tt.fcp_abort_io = fc_fcp_abort_io;
36780 + *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
36781
36782 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
36783 if (!si)
36784 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c
36785 --- linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
36786 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
36787 @@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
36788 mutex_lock(&lport->lp_mutex);
36789 lport->state = LPORT_ST_DISABLED;
36790 lport->link_up = 0;
36791 - lport->tt.frame_send = fc_frame_drop;
36792 + *(void **)&lport->tt.frame_send = fc_frame_drop;
36793 mutex_unlock(&lport->lp_mutex);
36794
36795 lport->tt.fcp_abort_io(lport);
36796 @@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
36797 int fc_lport_init(struct fc_lport *lport)
36798 {
36799 if (!lport->tt.lport_recv)
36800 - lport->tt.lport_recv = fc_lport_recv_req;
36801 + *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
36802
36803 if (!lport->tt.lport_reset)
36804 - lport->tt.lport_reset = fc_lport_reset;
36805 + *(void **)&lport->tt.lport_reset = fc_lport_reset;
36806
36807 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
36808 fc_host_node_name(lport->host) = lport->wwnn;
36809 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c
36810 --- linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
36811 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
36812 @@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
36813 int fc_rport_init(struct fc_lport *lport)
36814 {
36815 if (!lport->tt.rport_lookup)
36816 - lport->tt.rport_lookup = fc_rport_lookup;
36817 + *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
36818
36819 if (!lport->tt.rport_create)
36820 - lport->tt.rport_create = fc_rport_create;
36821 + *(void **)&lport->tt.rport_create = fc_rport_create;
36822
36823 if (!lport->tt.rport_login)
36824 - lport->tt.rport_login = fc_rport_login;
36825 + *(void **)&lport->tt.rport_login = fc_rport_login;
36826
36827 if (!lport->tt.rport_logoff)
36828 - lport->tt.rport_logoff = fc_rport_logoff;
36829 + *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
36830
36831 if (!lport->tt.rport_recv_req)
36832 - lport->tt.rport_recv_req = fc_rport_recv_req;
36833 + *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
36834
36835 if (!lport->tt.rport_flush_queue)
36836 - lport->tt.rport_flush_queue = fc_rport_flush_queue;
36837 + *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
36838
36839 if (!lport->tt.rport_destroy)
36840 - lport->tt.rport_destroy = fc_rport_destroy;
36841 + *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
36842
36843 return 0;
36844 }
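
The libfc hunks above (fc_exch.c, fc_fcp.c, fc_lport.c, fc_rport.c) all apply one idiom: once the constify plugin treats the members of the lport->tt function-pointer template as read-only, a plain assignment such as lport->tt.rport_destroy = fc_rport_destroy no longer compiles, so the one-time initialisation is rewritten to store through a void ** alias of the member. A minimal userspace sketch of the idiom follows; the struct and function names are illustrative stand-ins, not libfc symbols, and it assumes, as the kernel does, that the underlying storage is actually writable.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a constified ops template: the member is const, so an
 * ordinary assignment after initialisation is rejected at compile time. */
struct ops_template {
        void (* const frame_send)(const char *msg);
};

static void default_frame_send(const char *msg)
{
        printf("sending: %s\n", msg);
}

int main(void)
{
        struct ops_template *tt = calloc(1, sizeof(*tt));

        if (!tt)
                return 1;

        /* tt->frame_send = default_frame_send;  <- rejected: member is const.
         * Storing through a void ** alias sidesteps the type check, mirroring
         * the *(void **)& stores in the patch; like the kernel (built with
         * -fno-strict-aliasing) this relies on function and data pointers
         * sharing a representation. */
        *(void **)&tt->frame_send = (void *)default_frame_send;

        tt->frame_send("hello");
        free(tt);
        return 0;
}
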
36845 diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36846 --- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36847 +++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36848 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36849 }
36850 }
36851
36852 -static struct ata_port_operations sas_sata_ops = {
36853 +static const struct ata_port_operations sas_sata_ops = {
36854 .phy_reset = sas_ata_phy_reset,
36855 .post_internal_cmd = sas_ata_post_internal,
36856 .qc_defer = ata_std_qc_defer,
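
Marking sas_sata_ops const is representative of the bulk of this patch's changes to static ops tables (the file_operations, vm_operations_struct and block_device_operations conversions further down follow the same rule): a table of function pointers that is fully initialised at compile time is moved into .rodata, so a stray or attacker-controlled write can no longer redirect the driver's entry points. A small sketch of the effect, with invented names:

#include <stdio.h>

struct port_ops {
        void (*phy_reset)(void);
};

static void demo_phy_reset(void)
{
        puts("phy reset");
}

/* const places the table in the read-only data section; the compiler now
 * rejects writes to it, and a forced write at runtime would fault instead
 * of silently hijacking the function pointer. */
static const struct port_ops demo_sata_ops = {
        .phy_reset = demo_phy_reset,
};

int main(void)
{
        demo_sata_ops.phy_reset();
        /* demo_sata_ops.phy_reset = NULL;  <- rejected: read-only member */
        return 0;
}
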
36857 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36858 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36859 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36860 @@ -124,7 +124,7 @@ struct lpfc_debug {
36861 int len;
36862 };
36863
36864 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36865 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36866 static unsigned long lpfc_debugfs_start_time = 0L;
36867
36868 /**
36869 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36870 lpfc_debugfs_enable = 0;
36871
36872 len = 0;
36873 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36874 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36875 (lpfc_debugfs_max_disc_trc - 1);
36876 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36877 dtp = vport->disc_trc + i;
36878 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36879 lpfc_debugfs_enable = 0;
36880
36881 len = 0;
36882 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36883 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36884 (lpfc_debugfs_max_slow_ring_trc - 1);
36885 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36886 dtp = phba->slow_ring_trc + i;
36887 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36888 uint32_t *ptr;
36889 char buffer[1024];
36890
36891 + pax_track_stack();
36892 +
36893 off = 0;
36894 spin_lock_irq(&phba->hbalock);
36895
36896 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36897 !vport || !vport->disc_trc)
36898 return;
36899
36900 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36901 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36902 (lpfc_debugfs_max_disc_trc - 1);
36903 dtp = vport->disc_trc + index;
36904 dtp->fmt = fmt;
36905 dtp->data1 = data1;
36906 dtp->data2 = data2;
36907 dtp->data3 = data3;
36908 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36909 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36910 dtp->jif = jiffies;
36911 #endif
36912 return;
36913 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36914 !phba || !phba->slow_ring_trc)
36915 return;
36916
36917 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36918 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36919 (lpfc_debugfs_max_slow_ring_trc - 1);
36920 dtp = phba->slow_ring_trc + index;
36921 dtp->fmt = fmt;
36922 dtp->data1 = data1;
36923 dtp->data2 = data2;
36924 dtp->data3 = data3;
36925 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36926 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36927 dtp->jif = jiffies;
36928 #endif
36929 return;
36930 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36931 "slow_ring buffer\n");
36932 goto debug_failed;
36933 }
36934 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36935 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36936 memset(phba->slow_ring_trc, 0,
36937 (sizeof(struct lpfc_debugfs_trc) *
36938 lpfc_debugfs_max_slow_ring_trc));
36939 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36940 "buffer\n");
36941 goto debug_failed;
36942 }
36943 - atomic_set(&vport->disc_trc_cnt, 0);
36944 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36945
36946 snprintf(name, sizeof(name), "discovery_trace");
36947 vport->debug_disc_trc =
36948 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36949 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36950 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36951 @@ -400,7 +400,7 @@ struct lpfc_vport {
36952 struct dentry *debug_nodelist;
36953 struct dentry *vport_debugfs_root;
36954 struct lpfc_debugfs_trc *disc_trc;
36955 - atomic_t disc_trc_cnt;
36956 + atomic_unchecked_t disc_trc_cnt;
36957 #endif
36958 uint8_t stat_data_enabled;
36959 uint8_t stat_data_blocked;
36960 @@ -725,8 +725,8 @@ struct lpfc_hba {
36961 struct timer_list fabric_block_timer;
36962 unsigned long bit_flags;
36963 #define FABRIC_COMANDS_BLOCKED 0
36964 - atomic_t num_rsrc_err;
36965 - atomic_t num_cmd_success;
36966 + atomic_unchecked_t num_rsrc_err;
36967 + atomic_unchecked_t num_cmd_success;
36968 unsigned long last_rsrc_error_time;
36969 unsigned long last_ramp_down_time;
36970 unsigned long last_ramp_up_time;
36971 @@ -740,7 +740,7 @@ struct lpfc_hba {
36972 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36973 struct dentry *debug_slow_ring_trc;
36974 struct lpfc_debugfs_trc *slow_ring_trc;
36975 - atomic_t slow_ring_trc_cnt;
36976 + atomic_unchecked_t slow_ring_trc_cnt;
36977 #endif
36978
36979 /* Used for deferred freeing of ELS data buffers */
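
The lpfc changes above show the other pattern that dominates this section: under PaX's REFCOUNT feature, atomic_t operations are instrumented to catch overflow, because a wrapped reference count is a classic use-after-free primitive. Counters that are merely statistics or sequence numbers, like the trace counters here, are allowed to wrap, so they are converted to atomic_unchecked_t and accessed through the _unchecked helpers to opt out of that check. A userspace sketch of the distinction, with GCC's __atomic builtins standing in for the kernel's atomic API:

#include <limits.h>
#include <stdio.h>

/* Checked increment: refuses to wrap, the property REFCOUNT enforces for
 * reference counts. */
static int checked_inc(int *v)
{
        int old = __atomic_load_n(v, __ATOMIC_RELAXED);

        do {
                if (old == INT_MAX)
                        return -1;      /* would overflow: refuse */
        } while (!__atomic_compare_exchange_n(v, &old, old + 1, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return 0;
}

/* Unchecked increment: free to wrap, which is fine for statistics and
 * sequence numbers such as the lpfc trace counters converted above. */
static unsigned int unchecked_inc(unsigned int *v)
{
        return __atomic_add_fetch(v, 1, __ATOMIC_RELAXED);
}

int main(void)
{
        int refs = INT_MAX;
        unsigned int seq = UINT_MAX;

        if (checked_inc(&refs))
                puts("checked counter refused to overflow");
        printf("unchecked counter wrapped to %u\n", unchecked_inc(&seq));
        return 0;
}
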
36980 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36981 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36982 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36983 @@ -8021,8 +8021,10 @@ lpfc_init(void)
36984 printk(LPFC_COPYRIGHT "\n");
36985
36986 if (lpfc_enable_npiv) {
36987 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36988 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36989 + pax_open_kernel();
36990 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36991 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36992 + pax_close_kernel();
36993 }
36994 lpfc_transport_template =
36995 fc_attach_transport(&lpfc_transport_functions);
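
The lpfc_init hunk differs from the libfc ones in that, with the constify changes elsewhere in the patch, the lpfc_transport_functions template is kept write-protected, so the void ** cast alone is not enough: the two vport hooks are assigned between pax_open_kernel() and pax_close_kernel(), which lift the kernel's write protection for the duration of the stores and restore it immediately afterwards. As a rough userspace analogy only, with mprotect standing in for the write-protect toggle (this is not how the PaX primitives are implemented), the pattern looks like this:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct transport_functions {
        int (*vport_create)(int id);
};

static int demo_vport_create(int id)
{
        return id;
}

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        /* Put the ops table in its own page so it can be write-protected. */
        struct transport_functions *tf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tf == MAP_FAILED)
                return 1;

        mprotect(tf, page, PROT_READ);                  /* normally read-only   */

        mprotect(tf, page, PROT_READ | PROT_WRITE);     /* "pax_open_kernel()"  */
        tf->vport_create = demo_vport_create;
        mprotect(tf, page, PROT_READ);                  /* "pax_close_kernel()" */

        printf("vport_create(7) = %d\n", tf->vport_create(7));
        munmap(tf, page);
        return 0;
}
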
36996 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36997 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36998 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36999 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
37000 uint32_t evt_posted;
37001
37002 spin_lock_irqsave(&phba->hbalock, flags);
37003 - atomic_inc(&phba->num_rsrc_err);
37004 + atomic_inc_unchecked(&phba->num_rsrc_err);
37005 phba->last_rsrc_error_time = jiffies;
37006
37007 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37008 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
37009 unsigned long flags;
37010 struct lpfc_hba *phba = vport->phba;
37011 uint32_t evt_posted;
37012 - atomic_inc(&phba->num_cmd_success);
37013 + atomic_inc_unchecked(&phba->num_cmd_success);
37014
37015 if (vport->cfg_lun_queue_depth <= queue_depth)
37016 return;
37017 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37018 int i;
37019 struct lpfc_rport_data *rdata;
37020
37021 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37022 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37023 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37024 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37025
37026 vports = lpfc_create_vport_work_array(phba);
37027 if (vports != NULL)
37028 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37029 }
37030 }
37031 lpfc_destroy_vport_work_array(phba, vports);
37032 - atomic_set(&phba->num_rsrc_err, 0);
37033 - atomic_set(&phba->num_cmd_success, 0);
37034 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37035 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37036 }
37037
37038 /**
37039 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37040 }
37041 }
37042 lpfc_destroy_vport_work_array(phba, vports);
37043 - atomic_set(&phba->num_rsrc_err, 0);
37044 - atomic_set(&phba->num_cmd_success, 0);
37045 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37046 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37047 }
37048
37049 /**
37050 diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
37051 --- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
37052 +++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
37053 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37054 int rval;
37055 int i;
37056
37057 + pax_track_stack();
37058 +
37059 // Allocate memory for the base list of scb for management module.
37060 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37061
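
megaraid_cmm_register() is one of several functions in this section (the lpfc debugfs dumpers above, and osd_initiator, scsi_debug, sym53c8xx and ixj below) that gain a pax_track_stack() call because they declare unusually large on-stack buffers; the call is how functions with big frames register their stack usage with the stack-tracking machinery added elsewhere in this patch. The shape of the change is simply a call at the top of the function, before the large locals are used; a sketch with the PaX helper stubbed out (its real definition is not shown here):

#include <stdio.h>
#include <string.h>

static void pax_track_stack(void)
{
        /* stub: the real helper is supplied by the PaX patch */
}

static int handle_mode_select(const unsigned char *cmd, size_t cmd_len)
{
        unsigned char arr[1024];        /* large on-stack buffer */

        pax_track_stack();              /* record that this frame goes deep */

        memset(arr, 0, sizeof(arr));
        if (cmd_len > sizeof(arr))
                cmd_len = sizeof(arr);
        memcpy(arr, cmd, cmd_len);
        return arr[0];
}

int main(void)
{
        const unsigned char cmd[] = { 0x15, 0x10, 0x00 };

        printf("first byte echoed back: %d\n",
               handle_mode_select(cmd, sizeof(cmd)));
        return 0;
}
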
37062 diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
37063 --- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
37064 +++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
37065 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37066 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37067 int ret;
37068
37069 + pax_track_stack();
37070 +
37071 or = osd_start_request(od, GFP_KERNEL);
37072 if (!or)
37073 return -ENOMEM;
37074 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
37075 --- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
37076 +++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
37077 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37078 res->scsi_dev = scsi_dev;
37079 scsi_dev->hostdata = res;
37080 res->change_detected = 0;
37081 - atomic_set(&res->read_failures, 0);
37082 - atomic_set(&res->write_failures, 0);
37083 + atomic_set_unchecked(&res->read_failures, 0);
37084 + atomic_set_unchecked(&res->write_failures, 0);
37085 rc = 0;
37086 }
37087 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37088 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37089
37090 /* If this was a SCSI read/write command keep count of errors */
37091 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37092 - atomic_inc(&res->read_failures);
37093 + atomic_inc_unchecked(&res->read_failures);
37094 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37095 - atomic_inc(&res->write_failures);
37096 + atomic_inc_unchecked(&res->write_failures);
37097
37098 if (!RES_IS_GSCSI(res->cfg_entry) &&
37099 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37100 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37101
37102 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37103 /* add resources only after host is added into system */
37104 - if (!atomic_read(&pinstance->expose_resources))
37105 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37106 return;
37107
37108 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37109 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37110 init_waitqueue_head(&pinstance->reset_wait_q);
37111
37112 atomic_set(&pinstance->outstanding_cmds, 0);
37113 - atomic_set(&pinstance->expose_resources, 0);
37114 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37115
37116 INIT_LIST_HEAD(&pinstance->free_res_q);
37117 INIT_LIST_HEAD(&pinstance->used_res_q);
37118 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37119 /* Schedule worker thread to handle CCN and take care of adding and
37120 * removing devices to OS
37121 */
37122 - atomic_set(&pinstance->expose_resources, 1);
37123 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37124 schedule_work(&pinstance->worker_q);
37125 return rc;
37126
37127 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
37128 --- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37129 +++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37130 @@ -690,7 +690,7 @@ struct pmcraid_instance {
37131 atomic_t outstanding_cmds;
37132
37133 /* should add/delete resources to mid-layer now ?*/
37134 - atomic_t expose_resources;
37135 + atomic_unchecked_t expose_resources;
37136
37137 /* Tasklet to handle deferred processing */
37138 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37139 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37140 struct list_head queue; /* link to "to be exposed" resources */
37141 struct pmcraid_config_table_entry cfg_entry;
37142 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37143 - atomic_t read_failures; /* count of failed READ commands */
37144 - atomic_t write_failures; /* count of failed WRITE commands */
37145 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37146 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37147
37148 /* To indicate add/delete/modify during CCN */
37149 u8 change_detected;
37150 diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
37151 --- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37152 +++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37153 @@ -2089,7 +2089,7 @@ struct isp_operations {
37154
37155 int (*get_flash_version) (struct scsi_qla_host *, void *);
37156 int (*start_scsi) (srb_t *);
37157 -};
37158 +} __no_const;
37159
37160 /* MSI-X Support *************************************************************/
37161
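
qla2xxx's isp_operations cannot simply be made const like sas_sata_ops above: the driver selects and in places rewrites its handlers (start_scsi among them) at runtime rather than relying on a single compile-time table, so the struct has to stay writable. The __no_const annotation is the escape hatch that tells the constify plugin to leave this particular type alone. A sketch of why a runtime-assigned ops table needs that exemption, with illustrative names rather than qla2xxx symbols:

#include <stdio.h>

struct isp_ops_like {
        int (*start_scsi)(int tag);
};

static int start_scsi_gen1(int tag) { return tag; }
static int start_scsi_gen2(int tag) { return tag * 2; }

/* Filled in per adapter at probe time, so the object must stay writable;
 * a blanket "constify every ops struct" pass would break exactly this,
 * hence the __no_const marker on the real struct. */
static void probe_host(struct isp_ops_like *ops, int generation)
{
        ops->start_scsi = (generation >= 2) ? start_scsi_gen2 : start_scsi_gen1;
}

int main(void)
{
        struct isp_ops_like ha_new, ha_old;

        probe_host(&ha_new, 2);
        probe_host(&ha_old, 1);
        printf("gen2: %d, gen1: %d\n", ha_new.start_scsi(5), ha_old.start_scsi(5));
        return 0;
}
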
37162 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
37163 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37164 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37165 @@ -240,7 +240,7 @@ struct ddb_entry {
37166 atomic_t retry_relogin_timer; /* Min Time between relogins
37167 * (4000 only) */
37168 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37169 - atomic_t relogin_retry_count; /* Num of times relogin has been
37170 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37171 * retried */
37172
37173 uint16_t port;
37174 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
37175 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37176 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37177 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37178 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37179 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37180 atomic_set(&ddb_entry->relogin_timer, 0);
37181 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37182 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37183 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37184 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37185 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37186 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37187 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37188 atomic_set(&ddb_entry->port_down_timer,
37189 ha->port_down_retry_count);
37190 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37191 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37192 atomic_set(&ddb_entry->relogin_timer, 0);
37193 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37194 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37195 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
37196 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37197 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37198 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37199 ddb_entry->fw_ddb_device_state ==
37200 DDB_DS_SESSION_FAILED) {
37201 /* Reset retry relogin timer */
37202 - atomic_inc(&ddb_entry->relogin_retry_count);
37203 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37204 DEBUG2(printk("scsi%ld: index[%d] relogin"
37205 " timed out-retrying"
37206 " relogin (%d)\n",
37207 ha->host_no,
37208 ddb_entry->fw_ddb_index,
37209 - atomic_read(&ddb_entry->
37210 + atomic_read_unchecked(&ddb_entry->
37211 relogin_retry_count))
37212 );
37213 start_dpc++;
37214 diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
37215 --- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37216 +++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37217 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37218 unsigned long timeout;
37219 int rtn = 0;
37220
37221 - atomic_inc(&cmd->device->iorequest_cnt);
37222 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37223
37224 /* check if the device is still usable */
37225 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37226 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
37227 --- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37228 +++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37229 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37230 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37231 unsigned char *cmd = (unsigned char *)scp->cmnd;
37232
37233 + pax_track_stack();
37234 +
37235 if ((errsts = check_readiness(scp, 1, devip)))
37236 return errsts;
37237 memset(arr, 0, sizeof(arr));
37238 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37239 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37240 unsigned char *cmd = (unsigned char *)scp->cmnd;
37241
37242 + pax_track_stack();
37243 +
37244 if ((errsts = check_readiness(scp, 1, devip)))
37245 return errsts;
37246 memset(arr, 0, sizeof(arr));
37247 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
37248 --- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37249 +++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37250 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37251
37252 scsi_init_cmd_errh(cmd);
37253 cmd->result = DID_NO_CONNECT << 16;
37254 - atomic_inc(&cmd->device->iorequest_cnt);
37255 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37256
37257 /*
37258 * SCSI request completion path will do scsi_device_unbusy(),
37259 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37260 */
37261 cmd->serial_number = 0;
37262
37263 - atomic_inc(&cmd->device->iodone_cnt);
37264 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37265 if (cmd->result)
37266 - atomic_inc(&cmd->device->ioerr_cnt);
37267 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37268
37269 disposition = scsi_decide_disposition(cmd);
37270 if (disposition != SUCCESS &&
37271 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
37272 --- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37273 +++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37274 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37275 char *buf) \
37276 { \
37277 struct scsi_device *sdev = to_scsi_device(dev); \
37278 - unsigned long long count = atomic_read(&sdev->field); \
37279 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37280 return snprintf(buf, 20, "0x%llx\n", count); \
37281 } \
37282 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37283 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
37284 --- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37285 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37286 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37287 * Netlink Infrastructure
37288 */
37289
37290 -static atomic_t fc_event_seq;
37291 +static atomic_unchecked_t fc_event_seq;
37292
37293 /**
37294 * fc_get_event_number - Obtain the next sequential FC event number
37295 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37296 u32
37297 fc_get_event_number(void)
37298 {
37299 - return atomic_add_return(1, &fc_event_seq);
37300 + return atomic_add_return_unchecked(1, &fc_event_seq);
37301 }
37302 EXPORT_SYMBOL(fc_get_event_number);
37303
37304 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37305 {
37306 int error;
37307
37308 - atomic_set(&fc_event_seq, 0);
37309 + atomic_set_unchecked(&fc_event_seq, 0);
37310
37311 error = transport_class_register(&fc_host_class);
37312 if (error)
37313 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
37314 --- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37315 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37316 @@ -81,7 +81,7 @@ struct iscsi_internal {
37317 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37318 };
37319
37320 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37321 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37322 static struct workqueue_struct *iscsi_eh_timer_workq;
37323
37324 /*
37325 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37326 int err;
37327
37328 ihost = shost->shost_data;
37329 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37330 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37331
37332 if (id == ISCSI_MAX_TARGET) {
37333 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37334 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37335 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37336 ISCSI_TRANSPORT_VERSION);
37337
37338 - atomic_set(&iscsi_session_nr, 0);
37339 + atomic_set_unchecked(&iscsi_session_nr, 0);
37340
37341 err = class_register(&iscsi_transport_class);
37342 if (err)
37343 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
37344 --- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37345 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37346 @@ -33,7 +33,7 @@
37347 #include "scsi_transport_srp_internal.h"
37348
37349 struct srp_host_attrs {
37350 - atomic_t next_port_id;
37351 + atomic_unchecked_t next_port_id;
37352 };
37353 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37354
37355 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37356 struct Scsi_Host *shost = dev_to_shost(dev);
37357 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37358
37359 - atomic_set(&srp_host->next_port_id, 0);
37360 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37361 return 0;
37362 }
37363
37364 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37365 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37366 rport->roles = ids->roles;
37367
37368 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37369 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37370 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37371
37372 transport_setup_device(&rport->dev);
37373 diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
37374 --- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37375 +++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37376 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37377 const struct file_operations * fops;
37378 };
37379
37380 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37381 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37382 {"allow_dio", &adio_fops},
37383 {"debug", &debug_fops},
37384 {"def_reserved_size", &dressz_fops},
37385 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
37386 {
37387 int k, mask;
37388 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37389 - struct sg_proc_leaf * leaf;
37390 + const struct sg_proc_leaf * leaf;
37391
37392 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37393 if (!sg_proc_sgp)
37394 diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
37395 --- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37396 +++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37397 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37398 int do_iounmap = 0;
37399 int do_disable_device = 1;
37400
37401 + pax_track_stack();
37402 +
37403 memset(&sym_dev, 0, sizeof(sym_dev));
37404 memset(&nvram, 0, sizeof(nvram));
37405 sym_dev.pdev = pdev;
37406 diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37407 --- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37408 +++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37409 @@ -18,7 +18,7 @@
37410
37411 #define MAX_CONFIG_LEN 40
37412
37413 -static struct kgdb_io kgdboc_io_ops;
37414 +static const struct kgdb_io kgdboc_io_ops;
37415
37416 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37417 static int configured = -1;
37418 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37419 module_put(THIS_MODULE);
37420 }
37421
37422 -static struct kgdb_io kgdboc_io_ops = {
37423 +static const struct kgdb_io kgdboc_io_ops = {
37424 .name = "kgdboc",
37425 .read_char = kgdboc_get_char,
37426 .write_char = kgdboc_put_char,
37427 diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37428 --- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37429 +++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37430 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37431 EXPORT_SYMBOL_GPL(spi_sync);
37432
37433 /* portable code must never pass more than 32 bytes */
37434 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37435 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37436
37437 static u8 *buf;
37438
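
The spi.c hunk turns the 32 in SPI_BUFSIZ into 32U, presumably so the buffer size stays an unsigned quantity when it meets the unsigned transfer lengths it is compared against (the kernel's max() macro also complains when its two arguments differ in type). The hazard that mixed-signedness size checks invite is the classic one, sketched here in isolation; this is not the spi.c code path:

#include <stdio.h>

int main(void)
{
        unsigned int len = 16;          /* e.g. a transfer length */
        int bufsiz = -1;                /* imagine a miscomputed signed size */

        /* In the mixed comparison the signed value is converted to unsigned,
         * so -1 becomes UINT_MAX and the bounds check that was supposed to
         * reject the transfer silently passes. */
        if (len > bufsiz)
                puts("bounds check rejected the transfer");
        else
                puts("bounds check silently passed");   /* this prints */
        return 0;
}
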
37439 diff -urNp linux-2.6.32.45/drivers/ssb/driver_gige.c linux-2.6.32.45/drivers/ssb/driver_gige.c
37440 --- linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37441 +++ linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37442 @@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37443 dev->pci_controller.io_resource = &dev->io_resource;
37444 dev->pci_controller.mem_resource = &dev->mem_resource;
37445 dev->pci_controller.io_map_base = 0x800;
37446 - dev->pci_ops.read = ssb_gige_pci_read_config;
37447 - dev->pci_ops.write = ssb_gige_pci_write_config;
37448 + *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37449 + *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37450
37451 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37452 dev->io_resource.start = 0x800;
37453 diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37454 --- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37455 +++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37456 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37457 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37458 }
37459
37460 -static struct vm_operations_struct binder_vm_ops = {
37461 +static const struct vm_operations_struct binder_vm_ops = {
37462 .open = binder_vma_open,
37463 .close = binder_vma_close,
37464 };
37465 diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37466 --- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37467 +++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37468 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37469 return VM_FAULT_NOPAGE;
37470 }
37471
37472 -static struct vm_operations_struct b3dfg_vm_ops = {
37473 +static const struct vm_operations_struct b3dfg_vm_ops = {
37474 .fault = b3dfg_vma_fault,
37475 };
37476
37477 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37478 return r;
37479 }
37480
37481 -static struct file_operations b3dfg_fops = {
37482 +static const struct file_operations b3dfg_fops = {
37483 .owner = THIS_MODULE,
37484 .open = b3dfg_open,
37485 .release = b3dfg_release,
37486 diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37487 --- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37488 +++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37489 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37490 mutex_unlock(&dev->mutex);
37491 }
37492
37493 -static struct vm_operations_struct comedi_vm_ops = {
37494 +static const struct vm_operations_struct comedi_vm_ops = {
37495 .close = comedi_unmap,
37496 };
37497
37498 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37499 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37500 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37501 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37502 static dev_t adsp_devno;
37503 static struct class *adsp_class;
37504
37505 -static struct file_operations adsp_fops = {
37506 +static const struct file_operations adsp_fops = {
37507 .owner = THIS_MODULE,
37508 .open = adsp_open,
37509 .unlocked_ioctl = adsp_ioctl,
37510 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37511 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37512 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37513 @@ -1022,7 +1022,7 @@ done:
37514 return rc;
37515 }
37516
37517 -static struct file_operations audio_aac_fops = {
37518 +static const struct file_operations audio_aac_fops = {
37519 .owner = THIS_MODULE,
37520 .open = audio_open,
37521 .release = audio_release,
37522 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37523 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37524 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37525 @@ -833,7 +833,7 @@ done:
37526 return rc;
37527 }
37528
37529 -static struct file_operations audio_amrnb_fops = {
37530 +static const struct file_operations audio_amrnb_fops = {
37531 .owner = THIS_MODULE,
37532 .open = audamrnb_open,
37533 .release = audamrnb_release,
37534 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37535 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37536 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37537 @@ -805,7 +805,7 @@ dma_fail:
37538 return rc;
37539 }
37540
37541 -static struct file_operations audio_evrc_fops = {
37542 +static const struct file_operations audio_evrc_fops = {
37543 .owner = THIS_MODULE,
37544 .open = audevrc_open,
37545 .release = audevrc_release,
37546 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37547 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37548 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37549 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37550 return 0;
37551 }
37552
37553 -static struct file_operations audio_fops = {
37554 +static const struct file_operations audio_fops = {
37555 .owner = THIS_MODULE,
37556 .open = audio_in_open,
37557 .release = audio_in_release,
37558 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
37559 .unlocked_ioctl = audio_in_ioctl,
37560 };
37561
37562 -static struct file_operations audpre_fops = {
37563 +static const struct file_operations audpre_fops = {
37564 .owner = THIS_MODULE,
37565 .open = audpre_open,
37566 .unlocked_ioctl = audpre_ioctl,
37567 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37568 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37569 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37570 @@ -941,7 +941,7 @@ done:
37571 return rc;
37572 }
37573
37574 -static struct file_operations audio_mp3_fops = {
37575 +static const struct file_operations audio_mp3_fops = {
37576 .owner = THIS_MODULE,
37577 .open = audio_open,
37578 .release = audio_release,
37579 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37580 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37581 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37582 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37583 return 0;
37584 }
37585
37586 -static struct file_operations audio_fops = {
37587 +static const struct file_operations audio_fops = {
37588 .owner = THIS_MODULE,
37589 .open = audio_open,
37590 .release = audio_release,
37591 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
37592 .unlocked_ioctl = audio_ioctl,
37593 };
37594
37595 -static struct file_operations audpp_fops = {
37596 +static const struct file_operations audpp_fops = {
37597 .owner = THIS_MODULE,
37598 .open = audpp_open,
37599 .unlocked_ioctl = audpp_ioctl,
37600 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37601 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37602 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37603 @@ -816,7 +816,7 @@ err:
37604 return rc;
37605 }
37606
37607 -static struct file_operations audio_qcelp_fops = {
37608 +static const struct file_operations audio_qcelp_fops = {
37609 .owner = THIS_MODULE,
37610 .open = audqcelp_open,
37611 .release = audqcelp_release,
37612 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37613 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37614 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37615 @@ -242,7 +242,7 @@ err:
37616 return rc;
37617 }
37618
37619 -static struct file_operations snd_fops = {
37620 +static const struct file_operations snd_fops = {
37621 .owner = THIS_MODULE,
37622 .open = snd_open,
37623 .release = snd_release,
37624 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37625 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37626 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37627 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37628 return 0;
37629 }
37630
37631 -static struct file_operations qmi_fops = {
37632 +static const struct file_operations qmi_fops = {
37633 .owner = THIS_MODULE,
37634 .read = qmi_read,
37635 .write = qmi_write,
37636 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37637 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37638 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37639 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37640 return rc;
37641 }
37642
37643 -static struct file_operations rpcrouter_server_fops = {
37644 +static const struct file_operations rpcrouter_server_fops = {
37645 .owner = THIS_MODULE,
37646 .open = rpcrouter_open,
37647 .release = rpcrouter_release,
37648 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37649 .unlocked_ioctl = rpcrouter_ioctl,
37650 };
37651
37652 -static struct file_operations rpcrouter_router_fops = {
37653 +static const struct file_operations rpcrouter_router_fops = {
37654 .owner = THIS_MODULE,
37655 .open = rpcrouter_open,
37656 .release = rpcrouter_release,
37657 diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37658 --- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37659 +++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37660 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37661 return 0;
37662 }
37663
37664 -static struct block_device_operations dst_blk_ops = {
37665 +static const struct block_device_operations dst_blk_ops = {
37666 .open = dst_bdev_open,
37667 .release = dst_bdev_release,
37668 .owner = THIS_MODULE,
37669 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37670 n->size = ctl->size;
37671
37672 atomic_set(&n->refcnt, 1);
37673 - atomic_long_set(&n->gen, 0);
37674 + atomic_long_set_unchecked(&n->gen, 0);
37675 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37676
37677 err = dst_node_sysfs_init(n);
37678 diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37679 --- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37680 +++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37681 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37682 t->error = 0;
37683 t->retries = 0;
37684 atomic_set(&t->refcnt, 1);
37685 - t->gen = atomic_long_inc_return(&n->gen);
37686 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
37687
37688 t->enc = bio_data_dir(bio);
37689 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37690 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37691 --- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37692 +++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37693 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37694 struct net_device_stats *stats = &etdev->net_stats;
37695
37696 if (pMpTcb->Flags & fMP_DEST_BROAD)
37697 - atomic_inc(&etdev->Stats.brdcstxmt);
37698 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37699 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37700 - atomic_inc(&etdev->Stats.multixmt);
37701 + atomic_inc_unchecked(&etdev->Stats.multixmt);
37702 else
37703 - atomic_inc(&etdev->Stats.unixmt);
37704 + atomic_inc_unchecked(&etdev->Stats.unixmt);
37705
37706 if (pMpTcb->Packet) {
37707 stats->tx_bytes += pMpTcb->Packet->len;
37708 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37709 --- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37710 +++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37711 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37712 * operations
37713 */
37714 u32 unircv; /* # multicast packets received */
37715 - atomic_t unixmt; /* # multicast packets for Tx */
37716 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37717 u32 multircv; /* # multicast packets received */
37718 - atomic_t multixmt; /* # multicast packets for Tx */
37719 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37720 u32 brdcstrcv; /* # broadcast packets received */
37721 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
37722 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37723 u32 norcvbuf; /* # Rx packets discarded */
37724 u32 noxmtbuf; /* # Tx packets discarded */
37725
37726 diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37727 --- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37728 +++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37729 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37730 return 0;
37731 }
37732
37733 -static struct vm_operations_struct go7007_vm_ops = {
37734 +static const struct vm_operations_struct go7007_vm_ops = {
37735 .open = go7007_vm_open,
37736 .close = go7007_vm_close,
37737 .fault = go7007_vm_fault,
37738 diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37739 --- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37740 +++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37741 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37742 /* The one and only one */
37743 static struct blkvsc_driver_context g_blkvsc_drv;
37744
37745 -static struct block_device_operations block_ops = {
37746 +static const struct block_device_operations block_ops = {
37747 .owner = THIS_MODULE,
37748 .open = blkvsc_open,
37749 .release = blkvsc_release,
37750 diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37751 --- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37752 +++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37753 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37754
37755 DPRINT_ENTER(VMBUS);
37756
37757 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37758 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
37759 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37760 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37761
37762 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37763 ASSERT(msgInfo != NULL);
37764 diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37765 --- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37766 +++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37767 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37768 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37769 u32 outputAddressHi = outputAddress >> 32;
37770 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37771 - volatile void *hypercallPage = gHvContext.HypercallPage;
37772 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37773
37774 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37775 Control, Input, Output);
37776 diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37777 --- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37778 +++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37779 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37780 to_device_context(root_device_obj);
37781 struct device_context *child_device_ctx =
37782 to_device_context(child_device_obj);
37783 - static atomic_t device_num = ATOMIC_INIT(0);
37784 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37785
37786 DPRINT_ENTER(VMBUS_DRV);
37787
37788 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37789
37790 /* Set the device name. Otherwise, device_register() will fail. */
37791 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37792 - atomic_inc_return(&device_num));
37793 + atomic_inc_return_unchecked(&device_num));
37794
37795 /* The new device belongs to this bus */
37796 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37797 diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37798 --- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37799 +++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37800 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37801 struct VMBUS_CONNECTION {
37802 enum VMBUS_CONNECT_STATE ConnectState;
37803
37804 - atomic_t NextGpadlHandle;
37805 + atomic_unchecked_t NextGpadlHandle;
37806
37807 /*
37808 * Represents channel interrupts. Each bit position represents a
37809 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37810 --- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37811 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37812 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37813 * since the RX tasklet also increments it.
37814 */
37815 #ifdef CONFIG_64BIT
37816 - atomic64_add(rx_status.dropped_packets,
37817 - (atomic64_t *)&priv->stats.rx_dropped);
37818 + atomic64_add_unchecked(rx_status.dropped_packets,
37819 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37820 #else
37821 - atomic_add(rx_status.dropped_packets,
37822 - (atomic_t *)&priv->stats.rx_dropped);
37823 + atomic_add_unchecked(rx_status.dropped_packets,
37824 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37825 #endif
37826 }
37827
37828 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37829 --- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37830 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37831 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37832 /* Increment RX stats for virtual ports */
37833 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37834 #ifdef CONFIG_64BIT
37835 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37836 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37837 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37838 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37839 #else
37840 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37841 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37842 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37843 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37844 #endif
37845 }
37846 netif_receive_skb(skb);
37847 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37848 dev->name);
37849 */
37850 #ifdef CONFIG_64BIT
37851 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36852 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36853 #else
36854 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37855 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37856 #endif
37857 dev_kfree_skb_irq(skb);
37858 }
37859 diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37860 --- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37861 +++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37862 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37863 return 0;
37864 }
37865
37866 -static struct file_operations lcd_fops = {
37867 +static const struct file_operations lcd_fops = {
37868 .write = lcd_write,
37869 .open = lcd_open,
37870 .release = lcd_release,
37871 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37872 return 0;
37873 }
37874
37875 -static struct file_operations keypad_fops = {
37876 +static const struct file_operations keypad_fops = {
37877 .read = keypad_read, /* read */
37878 .open = keypad_open, /* open */
37879 .release = keypad_release, /* close */
37880 diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37881 --- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37882 +++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37883 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37884 ATA_BMDMA_SHT(DRV_NAME),
37885 };
37886
37887 -static struct ata_port_operations phison_ops = {
37888 +static const struct ata_port_operations phison_ops = {
37889 .inherits = &ata_bmdma_port_ops,
37890 .prereset = phison_pre_reset,
37891 };
37892 diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37893 --- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37894 +++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37895 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37896 return 0;
37897 }
37898
37899 -static struct file_operations poch_fops = {
37900 +static const struct file_operations poch_fops = {
37901 .owner = THIS_MODULE,
37902 .open = poch_open,
37903 .release = poch_release,
37904 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37905 --- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37906 +++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37907 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37908 mutex_init(&psb->mcache_lock);
37909 psb->mcache_root = RB_ROOT;
37910 psb->mcache_timeout = msecs_to_jiffies(5000);
37911 - atomic_long_set(&psb->mcache_gen, 0);
37912 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37913
37914 psb->trans_max_pages = 100;
37915
37916 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37917 INIT_LIST_HEAD(&psb->crypto_ready_list);
37918 INIT_LIST_HEAD(&psb->crypto_active_list);
37919
37920 - atomic_set(&psb->trans_gen, 1);
37921 + atomic_set_unchecked(&psb->trans_gen, 1);
37922 atomic_long_set(&psb->total_inodes, 0);
37923
37924 mutex_init(&psb->state_lock);
37925 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37926 --- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37927 +++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37928 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37929 m->data = data;
37930 m->start = start;
37931 m->size = size;
37932 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37933 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37934
37935 mutex_lock(&psb->mcache_lock);
37936 err = pohmelfs_mcache_insert(psb, m);
37937 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37938 --- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37939 +++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37940 @@ -570,14 +570,14 @@ struct pohmelfs_config;
37941 struct pohmelfs_sb {
37942 struct rb_root mcache_root;
37943 struct mutex mcache_lock;
37944 - atomic_long_t mcache_gen;
37945 + atomic_long_unchecked_t mcache_gen;
37946 unsigned long mcache_timeout;
37947
37948 unsigned int idx;
37949
37950 unsigned int trans_retries;
37951
37952 - atomic_t trans_gen;
37953 + atomic_unchecked_t trans_gen;
37954
37955 unsigned int crypto_attached_size;
37956 unsigned int crypto_align_size;
37957 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37958 --- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37959 +++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37960 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37961 int err;
37962 struct netfs_cmd *cmd = t->iovec.iov_base;
37963
37964 - t->gen = atomic_inc_return(&psb->trans_gen);
37965 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37966
37967 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37968 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37969 diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37970 --- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37971 +++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37972 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37973 static dev_t sep_devno;
37974
37975 /* the files operations structure of the driver */
37976 -static struct file_operations sep_file_operations = {
37977 +static const struct file_operations sep_file_operations = {
37978 .owner = THIS_MODULE,
37979 .ioctl = sep_ioctl,
37980 .poll = sep_poll,
37981 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37982 --- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37983 +++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37984 @@ -92,7 +92,7 @@ struct vhci_hcd {
37985 unsigned resuming:1;
37986 unsigned long re_timeout;
37987
37988 - atomic_t seqnum;
37989 + atomic_unchecked_t seqnum;
37990
37991 /*
37992 * NOTE:
37993 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37994 --- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37995 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37996 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37997 return;
37998 }
37999
38000 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38001 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38002 if (priv->seqnum == 0xffff)
38003 usbip_uinfo("seqnum max\n");
38004
38005 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
38006 return -ENOMEM;
38007 }
38008
38009 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38010 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38011 if (unlink->seqnum == 0xffff)
38012 usbip_uinfo("seqnum max\n");
38013
38014 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
38015 vdev->rhport = rhport;
38016 }
38017
38018 - atomic_set(&vhci->seqnum, 0);
38019 + atomic_set_unchecked(&vhci->seqnum, 0);
38020 spin_lock_init(&vhci->lock);
38021
38022
38023 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
38024 --- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
38025 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
38026 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
38027 usbip_uerr("cannot find a urb of seqnum %u\n",
38028 pdu->base.seqnum);
38029 usbip_uinfo("max seqnum %d\n",
38030 - atomic_read(&the_controller->seqnum));
38031 + atomic_read_unchecked(&the_controller->seqnum));
38032 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38033 return;
38034 }
38035 diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
38036 --- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
38037 +++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
38038 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38039 static int __init vme_user_probe(struct device *, int, int);
38040 static int __exit vme_user_remove(struct device *, int, int);
38041
38042 -static struct file_operations vme_user_fops = {
38043 +static const struct file_operations vme_user_fops = {
38044 .open = vme_user_open,
38045 .release = vme_user_release,
38046 .read = vme_user_read,
38047 diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
38048 --- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
38049 +++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
38050 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38051 bool mContinue;
38052 char *pIn, *pOut;
38053
38054 + pax_track_stack();
38055 +
38056 if (!SCI_Prepare(j))
38057 return 0;
38058
38059 diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
38060 --- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
38061 +++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
38062 @@ -23,6 +23,7 @@
38063 #include <linux/string.h>
38064 #include <linux/kobject.h>
38065 #include <linux/uio_driver.h>
38066 +#include <asm/local.h>
38067
38068 #define UIO_MAX_DEVICES 255
38069
38070 @@ -30,10 +31,10 @@ struct uio_device {
38071 struct module *owner;
38072 struct device *dev;
38073 int minor;
38074 - atomic_t event;
38075 + atomic_unchecked_t event;
38076 struct fasync_struct *async_queue;
38077 wait_queue_head_t wait;
38078 - int vma_count;
38079 + local_t vma_count;
38080 struct uio_info *info;
38081 struct kobject *map_dir;
38082 struct kobject *portio_dir;
38083 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38084 return entry->show(mem, buf);
38085 }
38086
38087 -static struct sysfs_ops map_sysfs_ops = {
38088 +static const struct sysfs_ops map_sysfs_ops = {
38089 .show = map_type_show,
38090 };
38091
38092 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38093 return entry->show(port, buf);
38094 }
38095
38096 -static struct sysfs_ops portio_sysfs_ops = {
38097 +static const struct sysfs_ops portio_sysfs_ops = {
38098 .show = portio_type_show,
38099 };
38100
38101 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38102 struct uio_device *idev = dev_get_drvdata(dev);
38103 if (idev)
38104 return sprintf(buf, "%u\n",
38105 - (unsigned int)atomic_read(&idev->event));
38106 + (unsigned int)atomic_read_unchecked(&idev->event));
38107 else
38108 return -ENODEV;
38109 }
38110 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38111 {
38112 struct uio_device *idev = info->uio_dev;
38113
38114 - atomic_inc(&idev->event);
38115 + atomic_inc_unchecked(&idev->event);
38116 wake_up_interruptible(&idev->wait);
38117 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38118 }
38119 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38120 }
38121
38122 listener->dev = idev;
38123 - listener->event_count = atomic_read(&idev->event);
38124 + listener->event_count = atomic_read_unchecked(&idev->event);
38125 filep->private_data = listener;
38126
38127 if (idev->info->open) {
38128 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38129 return -EIO;
38130
38131 poll_wait(filep, &idev->wait, wait);
38132 - if (listener->event_count != atomic_read(&idev->event))
38133 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38134 return POLLIN | POLLRDNORM;
38135 return 0;
38136 }
38137 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38138 do {
38139 set_current_state(TASK_INTERRUPTIBLE);
38140
38141 - event_count = atomic_read(&idev->event);
38142 + event_count = atomic_read_unchecked(&idev->event);
38143 if (event_count != listener->event_count) {
38144 if (copy_to_user(buf, &event_count, count))
38145 retval = -EFAULT;
38146 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38147 static void uio_vma_open(struct vm_area_struct *vma)
38148 {
38149 struct uio_device *idev = vma->vm_private_data;
38150 - idev->vma_count++;
38151 + local_inc(&idev->vma_count);
38152 }
38153
38154 static void uio_vma_close(struct vm_area_struct *vma)
38155 {
38156 struct uio_device *idev = vma->vm_private_data;
38157 - idev->vma_count--;
38158 + local_dec(&idev->vma_count);
38159 }
38160
38161 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38162 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
38163 idev->owner = owner;
38164 idev->info = info;
38165 init_waitqueue_head(&idev->wait);
38166 - atomic_set(&idev->event, 0);
38167 + atomic_set_unchecked(&idev->event, 0);
38168
38169 ret = uio_get_minor(idev);
38170 if (ret)
38171 diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
38172 --- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38173 +++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38174 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38175 if (printk_ratelimit())
38176 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38177 __func__, vpi, vci);
38178 - atomic_inc(&vcc->stats->rx_err);
38179 + atomic_inc_unchecked(&vcc->stats->rx_err);
38180 return;
38181 }
38182
38183 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38184 if (length > ATM_MAX_AAL5_PDU) {
38185 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38186 __func__, length, vcc);
38187 - atomic_inc(&vcc->stats->rx_err);
38188 + atomic_inc_unchecked(&vcc->stats->rx_err);
38189 goto out;
38190 }
38191
38192 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38193 if (sarb->len < pdu_length) {
38194 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38195 __func__, pdu_length, sarb->len, vcc);
38196 - atomic_inc(&vcc->stats->rx_err);
38197 + atomic_inc_unchecked(&vcc->stats->rx_err);
38198 goto out;
38199 }
38200
38201 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38202 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38203 __func__, vcc);
38204 - atomic_inc(&vcc->stats->rx_err);
38205 + atomic_inc_unchecked(&vcc->stats->rx_err);
38206 goto out;
38207 }
38208
38209 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38210 if (printk_ratelimit())
38211 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38212 __func__, length);
38213 - atomic_inc(&vcc->stats->rx_drop);
38214 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38215 goto out;
38216 }
38217
38218 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38219
38220 vcc->push(vcc, skb);
38221
38222 - atomic_inc(&vcc->stats->rx);
38223 + atomic_inc_unchecked(&vcc->stats->rx);
38224 out:
38225 skb_trim(sarb, 0);
38226 }
38227 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38228 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38229
38230 usbatm_pop(vcc, skb);
38231 - atomic_inc(&vcc->stats->tx);
38232 + atomic_inc_unchecked(&vcc->stats->tx);
38233
38234 skb = skb_dequeue(&instance->sndqueue);
38235 }
38236 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38237 if (!left--)
38238 return sprintf(page,
38239 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38240 - atomic_read(&atm_dev->stats.aal5.tx),
38241 - atomic_read(&atm_dev->stats.aal5.tx_err),
38242 - atomic_read(&atm_dev->stats.aal5.rx),
38243 - atomic_read(&atm_dev->stats.aal5.rx_err),
38244 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38245 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38246 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38247 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38248 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38249 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38250
38251 if (!left--) {
38252 if (instance->disconnected)
38253 diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
38254 --- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38255 +++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38256 @@ -314,7 +314,7 @@ static ssize_t wdm_write
38257 if (r < 0)
38258 goto outnp;
38259
38260 - if (!file->f_flags && O_NONBLOCK)
38261 + if (!(file->f_flags & O_NONBLOCK))
38262 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38263 &desc->flags));
38264 else
38265 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
38266 --- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38267 +++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38268 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38269
38270 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38271
38272 -struct usb_mon_operations *mon_ops;
38273 +const struct usb_mon_operations *mon_ops;
38274
38275 /*
38276 * The registration is unlocked.
38277 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38278 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38279 */
38280
38281 -int usb_mon_register (struct usb_mon_operations *ops)
38282 +int usb_mon_register (const struct usb_mon_operations *ops)
38283 {
38284
38285 if (mon_ops)
38286 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
38287 --- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38288 +++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38289 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38290 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38291
38292 struct usb_mon_operations {
38293 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38294 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38295 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38296 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38297 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38298 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38299 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38300 };
38301
38302 -extern struct usb_mon_operations *mon_ops;
38303 +extern const struct usb_mon_operations *mon_ops;
38304
38305 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38306 {
38307 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38308 (*mon_ops->urb_complete)(bus, urb, status);
38309 }
38310
38311 -int usb_mon_register(struct usb_mon_operations *ops);
38312 +int usb_mon_register(const struct usb_mon_operations *ops);
38313 void usb_mon_deregister(void);
38314
38315 #else
38316 diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
38317 --- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38318 +++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38319 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38320 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38321 if (buf) {
38322 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38323 - if (len > 0) {
38324 - smallbuf = kmalloc(++len, GFP_NOIO);
38325 + if (len++ > 0) {
38326 + smallbuf = kmalloc(len, GFP_NOIO);
38327 if (!smallbuf)
38328 return buf;
38329 memcpy(smallbuf, buf, len);
38330 diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
38331 --- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38332 +++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38333 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38334 return pdata->msgdata[1];
38335 }
38336
38337 -static struct backlight_ops appledisplay_bl_data = {
38338 +static const struct backlight_ops appledisplay_bl_data = {
38339 .get_brightness = appledisplay_bl_get_brightness,
38340 .update_status = appledisplay_bl_update_status,
38341 };
38342 diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
38343 --- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38344 +++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38345 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38346 /*
38347 * Ops
38348 */
38349 -static struct usb_mon_operations mon_ops_0 = {
38350 +static const struct usb_mon_operations mon_ops_0 = {
38351 .urb_submit = mon_submit,
38352 .urb_submit_error = mon_submit_error,
38353 .urb_complete = mon_complete,
38354 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
38355 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38356 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38357 @@ -192,7 +192,7 @@ struct wahc {
38358 struct list_head xfer_delayed_list;
38359 spinlock_t xfer_list_lock;
38360 struct work_struct xfer_work;
38361 - atomic_t xfer_id_count;
38362 + atomic_unchecked_t xfer_id_count;
38363 };
38364
38365
38366 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38367 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38368 spin_lock_init(&wa->xfer_list_lock);
38369 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38370 - atomic_set(&wa->xfer_id_count, 1);
38371 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38372 }
38373
38374 /**
38375 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
38376 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38377 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38378 @@ -293,7 +293,7 @@ out:
38379 */
38380 static void wa_xfer_id_init(struct wa_xfer *xfer)
38381 {
38382 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38383 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38384 }
38385
38386 /*
38387 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38388 --- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38389 +++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38390 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38391 size_t len = skb->len;
38392 size_t used;
38393 ssize_t result;
38394 - struct wlp_nonce enonce, rnonce;
38395 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38396 enum wlp_assc_error assc_err;
38397 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38398 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38399 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38400 --- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38401 +++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38402 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38403 return ret;
38404 }
38405
38406 -static
38407 -struct sysfs_ops wss_sysfs_ops = {
38408 +static const struct sysfs_ops wss_sysfs_ops = {
38409 .show = wlp_wss_attr_show,
38410 .store = wlp_wss_attr_store,
38411 };
38412 diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38413 --- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38414 +++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38415 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38416 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38417 }
38418
38419 -static struct backlight_ops atmel_lcdc_bl_ops = {
38420 +static const struct backlight_ops atmel_lcdc_bl_ops = {
38421 .update_status = atmel_bl_update_status,
38422 .get_brightness = atmel_bl_get_brightness,
38423 };
38424 diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38425 --- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38426 +++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38427 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38428 return bd->props.brightness;
38429 }
38430
38431 -static struct backlight_ops aty128_bl_data = {
38432 +static const struct backlight_ops aty128_bl_data = {
38433 .get_brightness = aty128_bl_get_brightness,
38434 .update_status = aty128_bl_update_status,
38435 };
38436 diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38437 --- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38438 +++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38439 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38440 return bd->props.brightness;
38441 }
38442
38443 -static struct backlight_ops aty_bl_data = {
38444 +static const struct backlight_ops aty_bl_data = {
38445 .get_brightness = aty_bl_get_brightness,
38446 .update_status = aty_bl_update_status,
38447 };
38448 diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38449 --- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38450 +++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38451 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38452 return bd->props.brightness;
38453 }
38454
38455 -static struct backlight_ops radeon_bl_data = {
38456 +static const struct backlight_ops radeon_bl_data = {
38457 .get_brightness = radeon_bl_get_brightness,
38458 .update_status = radeon_bl_update_status,
38459 };
38460 diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38461 --- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38462 +++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38463 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38464 return error ? data->current_brightness : reg_val;
38465 }
38466
38467 -static struct backlight_ops adp5520_bl_ops = {
38468 +static const struct backlight_ops adp5520_bl_ops = {
38469 .update_status = adp5520_bl_update_status,
38470 .get_brightness = adp5520_bl_get_brightness,
38471 };
38472 diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38473 --- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38474 +++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38475 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38476 return 1;
38477 }
38478
38479 -static struct backlight_ops adx_backlight_ops = {
38480 +static const struct backlight_ops adx_backlight_ops = {
38481 .options = 0,
38482 .update_status = adx_backlight_update_status,
38483 .get_brightness = adx_backlight_get_brightness,
38484 diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38485 --- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38486 +++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38487 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38488 return pwm_channel_enable(&pwmbl->pwmc);
38489 }
38490
38491 -static struct backlight_ops atmel_pwm_bl_ops = {
38492 +static const struct backlight_ops atmel_pwm_bl_ops = {
38493 .get_brightness = atmel_pwm_bl_get_intensity,
38494 .update_status = atmel_pwm_bl_set_intensity,
38495 };
38496 diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38497 --- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38498 +++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38499 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38500 * ERR_PTR() or a pointer to the newly allocated device.
38501 */
38502 struct backlight_device *backlight_device_register(const char *name,
38503 - struct device *parent, void *devdata, struct backlight_ops *ops)
38504 + struct device *parent, void *devdata, const struct backlight_ops *ops)
38505 {
38506 struct backlight_device *new_bd;
38507 int rc;
38508 diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38509 --- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38510 +++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38511 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38512 }
38513 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38514
38515 -static struct backlight_ops corgi_bl_ops = {
38516 +static const struct backlight_ops corgi_bl_ops = {
38517 .get_brightness = corgi_bl_get_intensity,
38518 .update_status = corgi_bl_update_status,
38519 };
38520 diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38521 --- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38522 +++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38523 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38524 return intensity;
38525 }
38526
38527 -static struct backlight_ops cr_backlight_ops = {
38528 +static const struct backlight_ops cr_backlight_ops = {
38529 .get_brightness = cr_backlight_get_intensity,
38530 .update_status = cr_backlight_set_intensity,
38531 };
38532 diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38533 --- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38534 +++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38535 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38536 return data->current_brightness;
38537 }
38538
38539 -static struct backlight_ops da903x_backlight_ops = {
38540 +static const struct backlight_ops da903x_backlight_ops = {
38541 .update_status = da903x_backlight_update_status,
38542 .get_brightness = da903x_backlight_get_brightness,
38543 };
38544 diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38545 --- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38546 +++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38547 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38548 }
38549 EXPORT_SYMBOL(corgibl_limit_intensity);
38550
38551 -static struct backlight_ops genericbl_ops = {
38552 +static const struct backlight_ops genericbl_ops = {
38553 .options = BL_CORE_SUSPENDRESUME,
38554 .get_brightness = genericbl_get_intensity,
38555 .update_status = genericbl_send_intensity,
38556 diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38557 --- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38558 +++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38559 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38560 return current_intensity;
38561 }
38562
38563 -static struct backlight_ops hp680bl_ops = {
38564 +static const struct backlight_ops hp680bl_ops = {
38565 .get_brightness = hp680bl_get_intensity,
38566 .update_status = hp680bl_set_intensity,
38567 };
38568 diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38569 --- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38570 +++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38571 @@ -93,7 +93,7 @@ out:
38572 return ret;
38573 }
38574
38575 -static struct backlight_ops jornada_bl_ops = {
38576 +static const struct backlight_ops jornada_bl_ops = {
38577 .get_brightness = jornada_bl_get_brightness,
38578 .update_status = jornada_bl_update_status,
38579 .options = BL_CORE_SUSPENDRESUME,
38580 diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38581 --- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38582 +++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38583 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38584 return kb3886bl_intensity;
38585 }
38586
38587 -static struct backlight_ops kb3886bl_ops = {
38588 +static const struct backlight_ops kb3886bl_ops = {
38589 .get_brightness = kb3886bl_get_intensity,
38590 .update_status = kb3886bl_send_intensity,
38591 };
38592 diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38593 --- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38594 +++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38595 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38596 return current_intensity;
38597 }
38598
38599 -static struct backlight_ops locomobl_data = {
38600 +static const struct backlight_ops locomobl_data = {
38601 .get_brightness = locomolcd_get_intensity,
38602 .update_status = locomolcd_set_intensity,
38603 };
38604 diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38605 --- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38606 +++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38607 @@ -33,7 +33,7 @@ struct dmi_match_data {
38608 unsigned long iostart;
38609 unsigned long iolen;
38610 /* Backlight operations structure. */
38611 - struct backlight_ops backlight_ops;
38612 + const struct backlight_ops backlight_ops;
38613 };
38614
38615 /* Module parameters. */
38616 diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38617 --- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38618 +++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38619 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38620 return bl->current_intensity;
38621 }
38622
38623 -static struct backlight_ops omapbl_ops = {
38624 +static const struct backlight_ops omapbl_ops = {
38625 .get_brightness = omapbl_get_intensity,
38626 .update_status = omapbl_update_status,
38627 };
38628 diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38629 --- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38630 +++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38631 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38632 return intensity - HW_LEVEL_MIN;
38633 }
38634
38635 -static struct backlight_ops progearbl_ops = {
38636 +static const struct backlight_ops progearbl_ops = {
38637 .get_brightness = progearbl_get_intensity,
38638 .update_status = progearbl_set_intensity,
38639 };
38640 diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38641 --- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38642 +++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38643 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38644 return bl->props.brightness;
38645 }
38646
38647 -static struct backlight_ops pwm_backlight_ops = {
38648 +static const struct backlight_ops pwm_backlight_ops = {
38649 .update_status = pwm_backlight_update_status,
38650 .get_brightness = pwm_backlight_get_brightness,
38651 };
38652 diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38653 --- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38654 +++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38655 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38656 return props->brightness;
38657 }
38658
38659 -static struct backlight_ops bl_ops = {
38660 +static const struct backlight_ops bl_ops = {
38661 .get_brightness = tosa_bl_get_brightness,
38662 .update_status = tosa_bl_update_status,
38663 };
38664 diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38665 --- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38666 +++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38667 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38668 return data->current_brightness;
38669 }
38670
38671 -static struct backlight_ops wm831x_backlight_ops = {
38672 +static const struct backlight_ops wm831x_backlight_ops = {
38673 .options = BL_CORE_SUSPENDRESUME,
38674 .update_status = wm831x_backlight_update_status,
38675 .get_brightness = wm831x_backlight_get_brightness,
38676 diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38677 --- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38678 +++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38679 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38680 return 0;
38681 }
38682
38683 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38684 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38685 .get_brightness = bl_get_brightness,
38686 };
38687
38688 diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38689 --- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38690 +++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38691 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38692 return 0;
38693 }
38694
38695 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38696 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38697 .get_brightness = bl_get_brightness,
38698 };
38699
38700 diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38701 --- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38702 +++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38703 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38704 rc = -ENODEV;
38705 goto out;
38706 }
38707 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38708 - !info->fbops->fb_setcmap)) {
38709 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38710 rc = -EINVAL;
38711 goto out1;
38712 }
38713 diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38714 --- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38715 +++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38716 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38717 image->dx += image->width + 8;
38718 }
38719 } else if (rotate == FB_ROTATE_UD) {
38720 - for (x = 0; x < num && image->dx >= 0; x++) {
38721 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38722 info->fbops->fb_imageblit(info, image);
38723 image->dx -= image->width + 8;
38724 }
38725 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38726 image->dy += image->height + 8;
38727 }
38728 } else if (rotate == FB_ROTATE_CCW) {
38729 - for (x = 0; x < num && image->dy >= 0; x++) {
38730 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38731 info->fbops->fb_imageblit(info, image);
38732 image->dy -= image->height + 8;
38733 }
38734 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38735 int flags = info->flags;
38736 int ret = 0;
38737
38738 + pax_track_stack();
38739 +
38740 if (var->activate & FB_ACTIVATE_INV_MODE) {
38741 struct fb_videomode mode1, mode2;
38742
38743 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38744 void __user *argp = (void __user *)arg;
38745 long ret = 0;
38746
38747 + pax_track_stack();
38748 +
38749 switch (cmd) {
38750 case FBIOGET_VSCREENINFO:
38751 if (!lock_fb_info(info))
38752 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38753 return -EFAULT;
38754 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38755 return -EINVAL;
38756 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38757 + if (con2fb.framebuffer >= FB_MAX)
38758 return -EINVAL;
38759 if (!registered_fb[con2fb.framebuffer])
38760 request_module("fb%d", con2fb.framebuffer);
38761 diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38762 --- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38763 +++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38764 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38765 }
38766 }
38767 printk("ringbuffer lockup!!!\n");
38768 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38769 i810_report_error(mmio);
38770 par->dev_flags |= LOCKUP;
38771 info->pixmap.scan_align = 1;
38772 diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38773 --- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38774 +++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38775 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38776 return bd->props.brightness;
38777 }
38778
38779 -static struct backlight_ops nvidia_bl_ops = {
38780 +static const struct backlight_ops nvidia_bl_ops = {
38781 .get_brightness = nvidia_bl_get_brightness,
38782 .update_status = nvidia_bl_update_status,
38783 };
38784 diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38785 --- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38786 +++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38787 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38788 return bd->props.brightness;
38789 }
38790
38791 -static struct backlight_ops riva_bl_ops = {
38792 +static const struct backlight_ops riva_bl_ops = {
38793 .get_brightness = riva_bl_get_brightness,
38794 .update_status = riva_bl_update_status,
38795 };
38796 diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38797 --- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38798 +++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38799 @@ -18,6 +18,7 @@
38800 #include <linux/fb.h>
38801 #include <linux/io.h>
38802 #include <linux/mutex.h>
38803 +#include <linux/moduleloader.h>
38804 #include <video/edid.h>
38805 #include <video/uvesafb.h>
38806 #ifdef CONFIG_X86
38807 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38808 NULL,
38809 };
38810
38811 - return call_usermodehelper(v86d_path, argv, envp, 1);
38812 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38813 }
38814
38815 /*
38816 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38817 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38818 par->pmi_setpal = par->ypan = 0;
38819 } else {
38820 +
38821 +#ifdef CONFIG_PAX_KERNEXEC
38822 +#ifdef CONFIG_MODULES
38823 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38824 +#endif
38825 + if (!par->pmi_code) {
38826 + par->pmi_setpal = par->ypan = 0;
38827 + return 0;
38828 + }
38829 +#endif
38830 +
38831 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38832 + task->t.regs.edi);
38833 +
38834 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38835 + pax_open_kernel();
38836 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38837 + pax_close_kernel();
38838 +
38839 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38840 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38841 +#else
38842 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38843 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38844 +#endif
38845 +
38846 printk(KERN_INFO "uvesafb: protected mode interface info at "
38847 "%04x:%04x\n",
38848 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38849 @@ -1799,6 +1822,11 @@ out:
38850 if (par->vbe_modes)
38851 kfree(par->vbe_modes);
38852
38853 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38854 + if (par->pmi_code)
38855 + module_free_exec(NULL, par->pmi_code);
38856 +#endif
38857 +
38858 framebuffer_release(info);
38859 return err;
38860 }
38861 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38862 kfree(par->vbe_state_orig);
38863 if (par->vbe_state_saved)
38864 kfree(par->vbe_state_saved);
38865 +
38866 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38867 + if (par->pmi_code)
38868 + module_free_exec(NULL, par->pmi_code);
38869 +#endif
38870 +
38871 }
38872
38873 framebuffer_release(info);
38874 diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38875 --- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38876 +++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38877 @@ -9,6 +9,7 @@
38878 */
38879
38880 #include <linux/module.h>
38881 +#include <linux/moduleloader.h>
38882 #include <linux/kernel.h>
38883 #include <linux/errno.h>
38884 #include <linux/string.h>
38885 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38886 static int vram_total __initdata; /* Set total amount of memory */
38887 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38888 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38889 -static void (*pmi_start)(void) __read_mostly;
38890 -static void (*pmi_pal) (void) __read_mostly;
38891 +static void (*pmi_start)(void) __read_only;
38892 +static void (*pmi_pal) (void) __read_only;
38893 static int depth __read_mostly;
38894 static int vga_compat __read_mostly;
38895 /* --------------------------------------------------------------------- */
38896 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38897 unsigned int size_vmode;
38898 unsigned int size_remap;
38899 unsigned int size_total;
38900 + void *pmi_code = NULL;
38901
38902 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38903 return -ENODEV;
38904 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38905 size_remap = size_total;
38906 vesafb_fix.smem_len = size_remap;
38907
38908 -#ifndef __i386__
38909 - screen_info.vesapm_seg = 0;
38910 -#endif
38911 -
38912 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38913 printk(KERN_WARNING
38914 "vesafb: cannot reserve video memory at 0x%lx\n",
38915 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38916 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38917 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38918
38919 +#ifdef __i386__
38920 +
38921 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38922 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
38923 + if (!pmi_code)
38924 +#elif !defined(CONFIG_PAX_KERNEXEC)
38925 + if (0)
38926 +#endif
38927 +
38928 +#endif
38929 + screen_info.vesapm_seg = 0;
38930 +
38931 if (screen_info.vesapm_seg) {
38932 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38933 - screen_info.vesapm_seg,screen_info.vesapm_off);
38934 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38935 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38936 }
38937
38938 if (screen_info.vesapm_seg < 0xc000)
38939 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38940
38941 if (ypan || pmi_setpal) {
38942 unsigned short *pmi_base;
38943 +
38944 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38945 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38946 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38947 +
38948 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38949 + pax_open_kernel();
38950 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38951 +#else
38952 + pmi_code = pmi_base;
38953 +#endif
38954 +
38955 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38956 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38957 +
38958 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38959 + pmi_start = ktva_ktla(pmi_start);
38960 + pmi_pal = ktva_ktla(pmi_pal);
38961 + pax_close_kernel();
38962 +#endif
38963 +
38964 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38965 if (pmi_base[3]) {
38966 printk(KERN_INFO "vesafb: pmi: ports = ");
38967 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38968 info->node, info->fix.id);
38969 return 0;
38970 err:
38971 +
38972 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38973 + module_free_exec(NULL, pmi_code);
38974 +#endif
38975 +
38976 if (info->screen_base)
38977 iounmap(info->screen_base);
38978 framebuffer_release(info);
38979 diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38980 --- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38981 +++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38982 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38983 return 0;
38984 }
38985
38986 -static struct sysfs_ops hyp_sysfs_ops = {
38987 +static const struct sysfs_ops hyp_sysfs_ops = {
38988 .show = hyp_sysfs_show,
38989 .store = hyp_sysfs_store,
38990 };
38991 diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38992 --- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38993 +++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38994 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38995 static void
38996 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38997 {
38998 - char *s = nd_get_link(nd);
38999 + const char *s = nd_get_link(nd);
39000
39001 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39002 IS_ERR(s) ? "<error>" : s);
39003 diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
39004 --- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
39005 +++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
39006 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
39007 size += sizeof(struct io_event) * nr_events;
39008 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39009
39010 - if (nr_pages < 0)
39011 + if (nr_pages <= 0)
39012 return -EINVAL;
39013
39014 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39015 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
39016 struct aio_timeout to;
39017 int retry = 0;
39018
39019 + pax_track_stack();
39020 +
39021 /* needed to zero any padding within an entry (there shouldn't be
39022 * any, but C is fun!
39023 */
39024 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
39025 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
39026 {
39027 ssize_t ret;
39028 + struct iovec iovstack;
39029
39030 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
39031 kiocb->ki_nbytes, 1,
39032 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
39033 + &iovstack, &kiocb->ki_iovec);
39034 if (ret < 0)
39035 goto out;
39036
39037 + if (kiocb->ki_iovec == &iovstack) {
39038 + kiocb->ki_inline_vec = iovstack;
39039 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39040 + }
39041 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39042 kiocb->ki_cur_seg = 0;
39043 /* ki_nbytes/left now reflect bytes instead of segs */
39044 diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
39045 --- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
39046 +++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
39047 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
39048 unsigned long limit;
39049
39050 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
39051 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39052 if (limit != RLIM_INFINITY && offset > limit)
39053 goto out_sig;
39054 if (offset > inode->i_sb->s_maxbytes)
39055 diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
39056 --- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
39057 +++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
39058 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
39059 set_bit(n,sbi->symlink_bitmap);
39060 sl = &sbi->symlink[n];
39061 sl->len = strlen(symname);
39062 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
39063 + slsize = sl->len+1;
39064 + sl->data = kmalloc(slsize, GFP_KERNEL);
39065 if (!sl->data) {
39066 clear_bit(n,sbi->symlink_bitmap);
39067 unlock_kernel();
39068 diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
39069 --- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
39070 +++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
39071 @@ -15,7 +15,7 @@
39072 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39073 {
39074 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39075 - nd_set_link(nd, (char *)ino->u.symlink);
39076 + nd_set_link(nd, ino->u.symlink);
39077 return NULL;
39078 }
39079
39080 diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
39081 --- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39082 +++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39083 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39084 {
39085 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39086 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39087 - char *link = nd_get_link(nd);
39088 + const char *link = nd_get_link(nd);
39089 if (!IS_ERR(link))
39090 kfree(link);
39091 }
39092 diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
39093 --- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39094 +++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39095 @@ -16,6 +16,7 @@
39096 #include <linux/string.h>
39097 #include <linux/fs.h>
39098 #include <linux/file.h>
39099 +#include <linux/security.h>
39100 #include <linux/stat.h>
39101 #include <linux/fcntl.h>
39102 #include <linux/ptrace.h>
39103 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39104 #endif
39105 # define START_STACK(u) (u.start_stack)
39106
39107 + memset(&dump, 0, sizeof(dump));
39108 +
39109 fs = get_fs();
39110 set_fs(KERNEL_DS);
39111 has_dumped = 1;
39112 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39113
39114 /* If the size of the dump file exceeds the rlimit, then see what would happen
39115 if we wrote the stack, but not the data area. */
39116 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39117 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39118 dump.u_dsize = 0;
39119
39120 /* Make sure we have enough room to write the stack and data areas. */
39121 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39122 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39123 dump.u_ssize = 0;
39124
39125 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39126 dump_size = dump.u_ssize << PAGE_SHIFT;
39127 DUMP_WRITE(dump_start,dump_size);
39128 }
39129 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
39130 - set_fs(KERNEL_DS);
39131 - DUMP_WRITE(current,sizeof(*current));
39132 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39133 end_coredump:
39134 set_fs(fs);
39135 return has_dumped;
39136 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39137 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39138 if (rlim >= RLIM_INFINITY)
39139 rlim = ~0;
39140 +
39141 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39142 if (ex.a_data + ex.a_bss > rlim)
39143 return -ENOMEM;
39144
39145 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39146 install_exec_creds(bprm);
39147 current->flags &= ~PF_FORKNOEXEC;
39148
39149 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39150 + current->mm->pax_flags = 0UL;
39151 +#endif
39152 +
39153 +#ifdef CONFIG_PAX_PAGEEXEC
39154 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39155 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39156 +
39157 +#ifdef CONFIG_PAX_EMUTRAMP
39158 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39159 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39160 +#endif
39161 +
39162 +#ifdef CONFIG_PAX_MPROTECT
39163 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39164 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39165 +#endif
39166 +
39167 + }
39168 +#endif
39169 +
39170 if (N_MAGIC(ex) == OMAGIC) {
39171 unsigned long text_addr, map_size;
39172 loff_t pos;
39173 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39174
39175 down_write(&current->mm->mmap_sem);
39176 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39177 - PROT_READ | PROT_WRITE | PROT_EXEC,
39178 + PROT_READ | PROT_WRITE,
39179 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39180 fd_offset + ex.a_text);
39181 up_write(&current->mm->mmap_sem);
39182 diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
39183 --- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39184 +++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39185 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39186 #define elf_core_dump NULL
39187 #endif
39188
39189 +#ifdef CONFIG_PAX_MPROTECT
39190 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39191 +#endif
39192 +
39193 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39194 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39195 #else
39196 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39197 .load_binary = load_elf_binary,
39198 .load_shlib = load_elf_library,
39199 .core_dump = elf_core_dump,
39200 +
39201 +#ifdef CONFIG_PAX_MPROTECT
39202 + .handle_mprotect= elf_handle_mprotect,
39203 +#endif
39204 +
39205 .min_coredump = ELF_EXEC_PAGESIZE,
39206 .hasvdso = 1
39207 };
39208 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39209
39210 static int set_brk(unsigned long start, unsigned long end)
39211 {
39212 + unsigned long e = end;
39213 +
39214 start = ELF_PAGEALIGN(start);
39215 end = ELF_PAGEALIGN(end);
39216 if (end > start) {
39217 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39218 if (BAD_ADDR(addr))
39219 return addr;
39220 }
39221 - current->mm->start_brk = current->mm->brk = end;
39222 + current->mm->start_brk = current->mm->brk = e;
39223 return 0;
39224 }
39225
39226 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39227 elf_addr_t __user *u_rand_bytes;
39228 const char *k_platform = ELF_PLATFORM;
39229 const char *k_base_platform = ELF_BASE_PLATFORM;
39230 - unsigned char k_rand_bytes[16];
39231 + u32 k_rand_bytes[4];
39232 int items;
39233 elf_addr_t *elf_info;
39234 int ei_index = 0;
39235 const struct cred *cred = current_cred();
39236 struct vm_area_struct *vma;
39237 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39238 +
39239 + pax_track_stack();
39240
39241 /*
39242 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39243 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39244 * Generate 16 random bytes for userspace PRNG seeding.
39245 */
39246 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39247 - u_rand_bytes = (elf_addr_t __user *)
39248 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39249 + srandom32(k_rand_bytes[0] ^ random32());
39250 + srandom32(k_rand_bytes[1] ^ random32());
39251 + srandom32(k_rand_bytes[2] ^ random32());
39252 + srandom32(k_rand_bytes[3] ^ random32());
39253 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39254 + u_rand_bytes = (elf_addr_t __user *) p;
39255 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39256 return -EFAULT;
39257
39258 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39259 return -EFAULT;
39260 current->mm->env_end = p;
39261
39262 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39263 +
39264 /* Put the elf_info on the stack in the right place. */
39265 sp = (elf_addr_t __user *)envp + 1;
39266 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39267 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39268 return -EFAULT;
39269 return 0;
39270 }
39271 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39272 {
39273 struct elf_phdr *elf_phdata;
39274 struct elf_phdr *eppnt;
39275 - unsigned long load_addr = 0;
39276 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39277 int load_addr_set = 0;
39278 unsigned long last_bss = 0, elf_bss = 0;
39279 - unsigned long error = ~0UL;
39280 + unsigned long error = -EINVAL;
39281 unsigned long total_size;
39282 int retval, i, size;
39283
39284 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39285 goto out_close;
39286 }
39287
39288 +#ifdef CONFIG_PAX_SEGMEXEC
39289 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39290 + pax_task_size = SEGMEXEC_TASK_SIZE;
39291 +#endif
39292 +
39293 eppnt = elf_phdata;
39294 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39295 if (eppnt->p_type == PT_LOAD) {
39296 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39297 k = load_addr + eppnt->p_vaddr;
39298 if (BAD_ADDR(k) ||
39299 eppnt->p_filesz > eppnt->p_memsz ||
39300 - eppnt->p_memsz > TASK_SIZE ||
39301 - TASK_SIZE - eppnt->p_memsz < k) {
39302 + eppnt->p_memsz > pax_task_size ||
39303 + pax_task_size - eppnt->p_memsz < k) {
39304 error = -ENOMEM;
39305 goto out_close;
39306 }
39307 @@ -532,6 +557,194 @@ out:
39308 return error;
39309 }
39310
39311 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39312 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39313 +{
39314 + unsigned long pax_flags = 0UL;
39315 +
39316 +#ifdef CONFIG_PAX_PAGEEXEC
39317 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39318 + pax_flags |= MF_PAX_PAGEEXEC;
39319 +#endif
39320 +
39321 +#ifdef CONFIG_PAX_SEGMEXEC
39322 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39323 + pax_flags |= MF_PAX_SEGMEXEC;
39324 +#endif
39325 +
39326 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39327 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39328 + if (nx_enabled)
39329 + pax_flags &= ~MF_PAX_SEGMEXEC;
39330 + else
39331 + pax_flags &= ~MF_PAX_PAGEEXEC;
39332 + }
39333 +#endif
39334 +
39335 +#ifdef CONFIG_PAX_EMUTRAMP
39336 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39337 + pax_flags |= MF_PAX_EMUTRAMP;
39338 +#endif
39339 +
39340 +#ifdef CONFIG_PAX_MPROTECT
39341 + if (elf_phdata->p_flags & PF_MPROTECT)
39342 + pax_flags |= MF_PAX_MPROTECT;
39343 +#endif
39344 +
39345 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39346 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39347 + pax_flags |= MF_PAX_RANDMMAP;
39348 +#endif
39349 +
39350 + return pax_flags;
39351 +}
39352 +#endif
39353 +
39354 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39355 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39356 +{
39357 + unsigned long pax_flags = 0UL;
39358 +
39359 +#ifdef CONFIG_PAX_PAGEEXEC
39360 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39361 + pax_flags |= MF_PAX_PAGEEXEC;
39362 +#endif
39363 +
39364 +#ifdef CONFIG_PAX_SEGMEXEC
39365 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39366 + pax_flags |= MF_PAX_SEGMEXEC;
39367 +#endif
39368 +
39369 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39370 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39371 + if (nx_enabled)
39372 + pax_flags &= ~MF_PAX_SEGMEXEC;
39373 + else
39374 + pax_flags &= ~MF_PAX_PAGEEXEC;
39375 + }
39376 +#endif
39377 +
39378 +#ifdef CONFIG_PAX_EMUTRAMP
39379 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39380 + pax_flags |= MF_PAX_EMUTRAMP;
39381 +#endif
39382 +
39383 +#ifdef CONFIG_PAX_MPROTECT
39384 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39385 + pax_flags |= MF_PAX_MPROTECT;
39386 +#endif
39387 +
39388 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39389 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39390 + pax_flags |= MF_PAX_RANDMMAP;
39391 +#endif
39392 +
39393 + return pax_flags;
39394 +}
39395 +#endif
39396 +
39397 +#ifdef CONFIG_PAX_EI_PAX
39398 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39399 +{
39400 + unsigned long pax_flags = 0UL;
39401 +
39402 +#ifdef CONFIG_PAX_PAGEEXEC
39403 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39404 + pax_flags |= MF_PAX_PAGEEXEC;
39405 +#endif
39406 +
39407 +#ifdef CONFIG_PAX_SEGMEXEC
39408 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39409 + pax_flags |= MF_PAX_SEGMEXEC;
39410 +#endif
39411 +
39412 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39413 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39414 + if (nx_enabled)
39415 + pax_flags &= ~MF_PAX_SEGMEXEC;
39416 + else
39417 + pax_flags &= ~MF_PAX_PAGEEXEC;
39418 + }
39419 +#endif
39420 +
39421 +#ifdef CONFIG_PAX_EMUTRAMP
39422 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39423 + pax_flags |= MF_PAX_EMUTRAMP;
39424 +#endif
39425 +
39426 +#ifdef CONFIG_PAX_MPROTECT
39427 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39428 + pax_flags |= MF_PAX_MPROTECT;
39429 +#endif
39430 +
39431 +#ifdef CONFIG_PAX_ASLR
39432 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39433 + pax_flags |= MF_PAX_RANDMMAP;
39434 +#endif
39435 +
39436 + return pax_flags;
39437 +}
39438 +#endif
39439 +
39440 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39441 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39442 +{
39443 + unsigned long pax_flags = 0UL;
39444 +
39445 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39446 + unsigned long i;
39447 + int found_flags = 0;
39448 +#endif
39449 +
39450 +#ifdef CONFIG_PAX_EI_PAX
39451 + pax_flags = pax_parse_ei_pax(elf_ex);
39452 +#endif
39453 +
39454 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39455 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39456 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39457 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39458 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39459 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39460 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39461 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39462 + return -EINVAL;
39463 +
39464 +#ifdef CONFIG_PAX_SOFTMODE
39465 + if (pax_softmode)
39466 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39467 + else
39468 +#endif
39469 +
39470 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39471 + found_flags = 1;
39472 + break;
39473 + }
39474 +#endif
39475 +
39476 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39477 + if (found_flags == 0) {
39478 + struct elf_phdr phdr;
39479 + memset(&phdr, 0, sizeof(phdr));
39480 + phdr.p_flags = PF_NOEMUTRAMP;
39481 +#ifdef CONFIG_PAX_SOFTMODE
39482 + if (pax_softmode)
39483 + pax_flags = pax_parse_softmode(&phdr);
39484 + else
39485 +#endif
39486 + pax_flags = pax_parse_hardmode(&phdr);
39487 + }
39488 +#endif
39489 +
39490 +
39491 + if (0 > pax_check_flags(&pax_flags))
39492 + return -EINVAL;
39493 +
39494 + current->mm->pax_flags = pax_flags;
39495 + return 0;
39496 +}
39497 +#endif
39498 +
39499 /*
39500 * These are the functions used to load ELF style executables and shared
39501 * libraries. There is no binary dependent code anywhere else.
39502 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39503 {
39504 unsigned int random_variable = 0;
39505
39506 +#ifdef CONFIG_PAX_RANDUSTACK
39507 + if (randomize_va_space)
39508 + return stack_top - current->mm->delta_stack;
39509 +#endif
39510 +
39511 if ((current->flags & PF_RANDOMIZE) &&
39512 !(current->personality & ADDR_NO_RANDOMIZE)) {
39513 random_variable = get_random_int() & STACK_RND_MASK;
39514 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39515 unsigned long load_addr = 0, load_bias = 0;
39516 int load_addr_set = 0;
39517 char * elf_interpreter = NULL;
39518 - unsigned long error;
39519 + unsigned long error = 0;
39520 struct elf_phdr *elf_ppnt, *elf_phdata;
39521 unsigned long elf_bss, elf_brk;
39522 int retval, i;
39523 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39524 unsigned long start_code, end_code, start_data, end_data;
39525 unsigned long reloc_func_desc = 0;
39526 int executable_stack = EXSTACK_DEFAULT;
39527 - unsigned long def_flags = 0;
39528 struct {
39529 struct elfhdr elf_ex;
39530 struct elfhdr interp_elf_ex;
39531 } *loc;
39532 + unsigned long pax_task_size = TASK_SIZE;
39533
39534 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39535 if (!loc) {
39536 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39537
39538 /* OK, This is the point of no return */
39539 current->flags &= ~PF_FORKNOEXEC;
39540 - current->mm->def_flags = def_flags;
39541 +
39542 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39543 + current->mm->pax_flags = 0UL;
39544 +#endif
39545 +
39546 +#ifdef CONFIG_PAX_DLRESOLVE
39547 + current->mm->call_dl_resolve = 0UL;
39548 +#endif
39549 +
39550 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39551 + current->mm->call_syscall = 0UL;
39552 +#endif
39553 +
39554 +#ifdef CONFIG_PAX_ASLR
39555 + current->mm->delta_mmap = 0UL;
39556 + current->mm->delta_stack = 0UL;
39557 +#endif
39558 +
39559 + current->mm->def_flags = 0;
39560 +
39561 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39562 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39563 + send_sig(SIGKILL, current, 0);
39564 + goto out_free_dentry;
39565 + }
39566 +#endif
39567 +
39568 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39569 + pax_set_initial_flags(bprm);
39570 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39571 + if (pax_set_initial_flags_func)
39572 + (pax_set_initial_flags_func)(bprm);
39573 +#endif
39574 +
39575 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39576 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39577 + current->mm->context.user_cs_limit = PAGE_SIZE;
39578 + current->mm->def_flags |= VM_PAGEEXEC;
39579 + }
39580 +#endif
39581 +
39582 +#ifdef CONFIG_PAX_SEGMEXEC
39583 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39584 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39585 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39586 + pax_task_size = SEGMEXEC_TASK_SIZE;
39587 + }
39588 +#endif
39589 +
39590 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39591 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39592 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39593 + put_cpu();
39594 + }
39595 +#endif
39596
39597 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39598 may depend on the personality. */
39599 SET_PERSONALITY(loc->elf_ex);
39600 +
39601 +#ifdef CONFIG_PAX_ASLR
39602 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39603 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39604 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39605 + }
39606 +#endif
39607 +
39608 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39609 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39610 + executable_stack = EXSTACK_DISABLE_X;
39611 + current->personality &= ~READ_IMPLIES_EXEC;
39612 + } else
39613 +#endif
39614 +
39615 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39616 current->personality |= READ_IMPLIES_EXEC;
39617
39618 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39619 #else
39620 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39621 #endif
39622 +
39623 +#ifdef CONFIG_PAX_RANDMMAP
39624 + /* PaX: randomize base address at the default exe base if requested */
39625 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39626 +#ifdef CONFIG_SPARC64
39627 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39628 +#else
39629 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39630 +#endif
39631 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39632 + elf_flags |= MAP_FIXED;
39633 + }
39634 +#endif
39635 +
39636 }
39637
39638 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39639 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39640 * allowed task size. Note that p_filesz must always be
39641 * <= p_memsz so it is only necessary to check p_memsz.
39642 */
39643 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39644 - elf_ppnt->p_memsz > TASK_SIZE ||
39645 - TASK_SIZE - elf_ppnt->p_memsz < k) {
39646 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39647 + elf_ppnt->p_memsz > pax_task_size ||
39648 + pax_task_size - elf_ppnt->p_memsz < k) {
39649 /* set_brk can never work. Avoid overflows. */
39650 send_sig(SIGKILL, current, 0);
39651 retval = -EINVAL;
39652 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39653 start_data += load_bias;
39654 end_data += load_bias;
39655
39656 +#ifdef CONFIG_PAX_RANDMMAP
39657 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39658 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39659 +#endif
39660 +
39661 /* Calling set_brk effectively mmaps the pages that we need
39662 * for the bss and break sections. We must do this before
39663 * mapping in the interpreter, to make sure it doesn't wind
39664 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39665 goto out_free_dentry;
39666 }
39667 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39668 - send_sig(SIGSEGV, current, 0);
39669 - retval = -EFAULT; /* Nobody gets to see this, but.. */
39670 - goto out_free_dentry;
39671 + /*
39672 + * This bss-zeroing can fail if the ELF
39673 + * file specifies odd protections. So
39674 + * we don't check the return value
39675 + */
39676 }
39677
39678 if (elf_interpreter) {
39679 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39680 unsigned long n = off;
39681 if (n > PAGE_SIZE)
39682 n = PAGE_SIZE;
39683 - if (!dump_write(file, buf, n))
39684 + if (!dump_write(file, buf, n)) {
39685 + free_page((unsigned long)buf);
39686 return 0;
39687 + }
39688 off -= n;
39689 }
39690 free_page((unsigned long)buf);
39691 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39692 * Decide what to dump of a segment, part, all or none.
39693 */
39694 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39695 - unsigned long mm_flags)
39696 + unsigned long mm_flags, long signr)
39697 {
39698 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39699
39700 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39701 if (vma->vm_file == NULL)
39702 return 0;
39703
39704 - if (FILTER(MAPPED_PRIVATE))
39705 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39706 goto whole;
39707
39708 /*
39709 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39710 #undef DUMP_WRITE
39711
39712 #define DUMP_WRITE(addr, nr) \
39713 + do { \
39714 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39715 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39716 - goto end_coredump;
39717 + goto end_coredump; \
39718 + } while (0);
39719
39720 static void fill_elf_header(struct elfhdr *elf, int segs,
39721 u16 machine, u32 flags, u8 osabi)
39722 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39723 {
39724 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39725 int i = 0;
39726 - do
39727 + do {
39728 i += 2;
39729 - while (auxv[i - 2] != AT_NULL);
39730 + } while (auxv[i - 2] != AT_NULL);
39731 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39732 }
39733
39734 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39735 phdr.p_offset = offset;
39736 phdr.p_vaddr = vma->vm_start;
39737 phdr.p_paddr = 0;
39738 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
39739 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39740 phdr.p_memsz = vma->vm_end - vma->vm_start;
39741 offset += phdr.p_filesz;
39742 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39743 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39744 unsigned long addr;
39745 unsigned long end;
39746
39747 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
39748 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39749
39750 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39751 struct page *page;
39752 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39753 page = get_dump_page(addr);
39754 if (page) {
39755 void *kaddr = kmap(page);
39756 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39757 stop = ((size += PAGE_SIZE) > limit) ||
39758 !dump_write(file, kaddr, PAGE_SIZE);
39759 kunmap(page);
39760 @@ -2042,6 +2356,97 @@ out:
39761
39762 #endif /* USE_ELF_CORE_DUMP */
39763
39764 +#ifdef CONFIG_PAX_MPROTECT
39765 +/* PaX: non-PIC ELF libraries need relocations on their executable segments,
39766 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly,
39767 + * we'll remove VM_MAYWRITE for good on RELRO segments.
39768 + *
39769 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39770 + * basis because we want to allow the common case and not the special ones.
39771 + */
39772 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39773 +{
39774 + struct elfhdr elf_h;
39775 + struct elf_phdr elf_p;
39776 + unsigned long i;
39777 + unsigned long oldflags;
39778 + bool is_textrel_rw, is_textrel_rx, is_relro;
39779 +
39780 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39781 + return;
39782 +
39783 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39784 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39785 +
39786 +#ifdef CONFIG_PAX_ELFRELOCS
39787 + /* possible TEXTREL */
39788 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39789 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39790 +#else
39791 + is_textrel_rw = false;
39792 + is_textrel_rx = false;
39793 +#endif
39794 +
39795 + /* possible RELRO */
39796 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39797 +
39798 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39799 + return;
39800 +
39801 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39802 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39803 +
39804 +#ifdef CONFIG_PAX_ETEXECRELOCS
39805 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39806 +#else
39807 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39808 +#endif
39809 +
39810 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39811 + !elf_check_arch(&elf_h) ||
39812 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39813 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39814 + return;
39815 +
39816 + for (i = 0UL; i < elf_h.e_phnum; i++) {
39817 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39818 + return;
39819 + switch (elf_p.p_type) {
39820 + case PT_DYNAMIC:
39821 + if (!is_textrel_rw && !is_textrel_rx)
39822 + continue;
39823 + i = 0UL;
39824 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39825 + elf_dyn dyn;
39826 +
39827 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39828 + return;
39829 + if (dyn.d_tag == DT_NULL)
39830 + return;
39831 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39832 + gr_log_textrel(vma);
39833 + if (is_textrel_rw)
39834 + vma->vm_flags |= VM_MAYWRITE;
39835 + else
39836 +					/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39837 + vma->vm_flags &= ~VM_MAYWRITE;
39838 + return;
39839 + }
39840 + i++;
39841 + }
39842 + return;
39843 +
39844 + case PT_GNU_RELRO:
39845 + if (!is_relro)
39846 + continue;
39847 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39848 + vma->vm_flags &= ~VM_MAYWRITE;
39849 + return;
39850 + }
39851 + }
39852 +}
39853 +#endif
39854 +
39855 static int __init init_elf_binfmt(void)
39856 {
39857 return register_binfmt(&elf_format);
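
The pax_parse_elf_flags() hunk above scans the program header table for a PT_PAX_FLAGS entry and rejects any p_flags word that both enables and disables the same feature (for example PF_PAGEEXEC together with PF_NOPAGEEXEC). Below is a minimal standalone sketch of just that rejection rule; the PF_* bit positions are assumed placeholders for illustration, the real values come from the patched include/linux/elf.h.

	/* Sketch only: the enable/disable conflict check performed per PT_PAX_FLAGS
	 * header. Bit values are assumed, not taken from the patch. */
	#include <stdbool.h>
	#include <stddef.h>

	#define PF_PAGEEXEC    (1U << 4)
	#define PF_NOPAGEEXEC  (1U << 5)
	#define PF_SEGMEXEC    (1U << 6)
	#define PF_NOSEGMEXEC  (1U << 7)
	#define PF_MPROTECT    (1U << 8)
	#define PF_NOMPROTECT  (1U << 9)
	#define PF_EMUTRAMP    (1U << 12)
	#define PF_NOEMUTRAMP  (1U << 13)
	#define PF_RANDMMAP    (1U << 14)
	#define PF_NORANDMMAP  (1U << 15)

	/* Returns true when any feature is both enabled and disabled at once. */
	static bool pax_flags_conflict(unsigned long p_flags)
	{
		static const unsigned long pairs[][2] = {
			{ PF_PAGEEXEC, PF_NOPAGEEXEC },
			{ PF_SEGMEXEC, PF_NOSEGMEXEC },
			{ PF_EMUTRAMP, PF_NOEMUTRAMP },
			{ PF_MPROTECT, PF_NOMPROTECT },
			{ PF_RANDMMAP, PF_NORANDMMAP },
		};
		size_t i;

		for (i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++)
			if ((p_flags & pairs[i][0]) && (p_flags & pairs[i][1]))
				return true;
		return false;
	}

	int main(void)
	{
		/* A header claiming both PAGEEXEC and NOPAGEEXEC must be rejected. */
		return pax_flags_conflict(PF_PAGEEXEC | PF_NOPAGEEXEC) ? 0 : 1;
	}

Keeping the pairs in one table mirrors why the kernel-side loop above can simply return -EINVAL on the first contradiction it sees.
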
39858 diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39859 --- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39860 +++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39861 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39862 realdatastart = (unsigned long) -ENOMEM;
39863 printk("Unable to allocate RAM for process data, errno %d\n",
39864 (int)-realdatastart);
39865 + down_write(&current->mm->mmap_sem);
39866 do_munmap(current->mm, textpos, text_len);
39867 + up_write(&current->mm->mmap_sem);
39868 ret = realdatastart;
39869 goto err;
39870 }
39871 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39872 }
39873 if (IS_ERR_VALUE(result)) {
39874 printk("Unable to read data+bss, errno %d\n", (int)-result);
39875 + down_write(&current->mm->mmap_sem);
39876 do_munmap(current->mm, textpos, text_len);
39877 do_munmap(current->mm, realdatastart, data_len + extra);
39878 + up_write(&current->mm->mmap_sem);
39879 ret = result;
39880 goto err;
39881 }
39882 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39883 }
39884 if (IS_ERR_VALUE(result)) {
39885 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39886 + down_write(&current->mm->mmap_sem);
39887 do_munmap(current->mm, textpos, text_len + data_len + extra +
39888 MAX_SHARED_LIBS * sizeof(unsigned long));
39889 + up_write(&current->mm->mmap_sem);
39890 ret = result;
39891 goto err;
39892 }
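
The three binfmt_flat hunks above wrap each do_munmap() call in down_write()/up_write() on mmap_sem, since do_munmap() must be called with the mmap semaphore held for writing. A sketch of the same pattern factored into a helper is below, using the 2.6.32 do_munmap() signature; the helper name is invented (later kernels ship vm_munmap() for this purpose).

	#include <linux/mm.h>
	#include <linux/sched.h>

	/* Take mmap_sem for writing around do_munmap(), as the call sites above now do. */
	static int flat_munmap_locked(struct mm_struct *mm, unsigned long addr, size_t len)
	{
		int ret;

		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, addr, len);
		up_write(&mm->mmap_sem);
		return ret;
	}
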
39893 diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39894 --- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39895 +++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39896 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39897
39898 i = 0;
39899 while (i < bio_slab_nr) {
39900 - struct bio_slab *bslab = &bio_slabs[i];
39901 + bslab = &bio_slabs[i];
39902
39903 if (!bslab->slab && entry == -1)
39904 entry = i;
39905 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39906 const int read = bio_data_dir(bio) == READ;
39907 struct bio_map_data *bmd = bio->bi_private;
39908 int i;
39909 - char *p = bmd->sgvecs[0].iov_base;
39910 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
39911
39912 __bio_for_each_segment(bvec, bio, i, 0) {
39913 char *addr = page_address(bvec->bv_page);
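
The fs/bio.c hunk above (and the similar fs/btrfs/ctree.c and fs/btrfs/free-space-cache.c hunks below) drops an inner declaration so the loop assigns to the variable already declared in the enclosing scope instead of shadowing it. A small userspace illustration of why shadowing is worth removing; all names here are invented.

	#include <stdio.h>

	int main(void)
	{
		int best = -1;
		int i;

		for (i = 0; i < 4; i++) {
			/* Shadowing pattern: writing 'int best = i;' here would create a
			 * new variable that hides the outer 'best', leaving the outer one
			 * still -1 after the loop. */
			if (i == 2)
				best = i;	/* correct: assign to the outer variable */
		}
		printf("best = %d\n", best);	/* prints 2, not -1 */
		return 0;
	}
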
39914 diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39915 --- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39916 +++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39917 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39918 else if (bdev->bd_contains == bdev)
39919 res = 0; /* is a whole device which isn't held */
39920
39921 - else if (bdev->bd_contains->bd_holder == bd_claim)
39922 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39923 res = 0; /* is a partition of a device that is being partitioned */
39924 else if (bdev->bd_contains->bd_holder != NULL)
39925 res = -EBUSY; /* is a partition of a held device */
39926 diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39927 --- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39928 +++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39929 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39930 free_extent_buffer(buf);
39931 add_root_to_dirty_list(root);
39932 } else {
39933 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39934 - parent_start = parent->start;
39935 - else
39936 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39937 + if (parent)
39938 + parent_start = parent->start;
39939 + else
39940 + parent_start = 0;
39941 + } else
39942 parent_start = 0;
39943
39944 WARN_ON(trans->transid != btrfs_header_generation(parent));
39945 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39946
39947 ret = 0;
39948 if (slot == 0) {
39949 - struct btrfs_disk_key disk_key;
39950 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39951 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39952 }
39953 diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39954 --- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39955 +++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39956 @@ -39,7 +39,7 @@
39957 #include "tree-log.h"
39958 #include "free-space-cache.h"
39959
39960 -static struct extent_io_ops btree_extent_io_ops;
39961 +static const struct extent_io_ops btree_extent_io_ops;
39962 static void end_workqueue_fn(struct btrfs_work *work);
39963 static void free_fs_root(struct btrfs_root *root);
39964
39965 @@ -2607,7 +2607,7 @@ out:
39966 return 0;
39967 }
39968
39969 -static struct extent_io_ops btree_extent_io_ops = {
39970 +static const struct extent_io_ops btree_extent_io_ops = {
39971 .write_cache_pages_lock_hook = btree_lock_page_hook,
39972 .readpage_end_io_hook = btree_readpage_end_io_hook,
39973 .submit_bio_hook = btree_submit_bio_hook,
39974 diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39975 --- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39976 +++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39977 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39978 struct bio *bio, int mirror_num,
39979 unsigned long bio_flags);
39980 struct extent_io_ops {
39981 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39982 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39983 u64 start, u64 end, int *page_started,
39984 unsigned long *nr_written);
39985 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39986 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39987 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39988 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39989 extent_submit_bio_hook_t *submit_bio_hook;
39990 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
39991 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39992 size_t size, struct bio *bio,
39993 unsigned long bio_flags);
39994 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39995 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39996 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39997 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39998 u64 start, u64 end,
39999 struct extent_state *state);
40000 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
40001 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
40002 u64 start, u64 end,
40003 struct extent_state *state);
40004 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40005 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40006 struct extent_state *state);
40007 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40008 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40009 struct extent_state *state, int uptodate);
40010 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
40011 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
40012 unsigned long old, unsigned long bits);
40013 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
40014 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
40015 unsigned long bits);
40016 - int (*merge_extent_hook)(struct inode *inode,
40017 + int (* const merge_extent_hook)(struct inode *inode,
40018 struct extent_state *new,
40019 struct extent_state *other);
40020 - int (*split_extent_hook)(struct inode *inode,
40021 + int (* const split_extent_hook)(struct inode *inode,
40022 struct extent_state *orig, u64 split);
40023 - int (*write_cache_pages_lock_hook)(struct page *page);
40024 + int (* const write_cache_pages_lock_hook)(struct page *page);
40025 };
40026
40027 struct extent_io_tree {
40028 @@ -88,7 +88,7 @@ struct extent_io_tree {
40029 u64 dirty_bytes;
40030 spinlock_t lock;
40031 spinlock_t buffer_lock;
40032 - struct extent_io_ops *ops;
40033 + const struct extent_io_ops *ops;
40034 };
40035
40036 struct extent_state {
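
The disk-io.c and extent_io.h hunks above (and the inode.c and sysfs.c hunks below) const-qualify the ops tables and every function pointer in them, so the tables land in .rodata and the callbacks cannot be retargeted at runtime. A toy illustration of the difference, with invented names:

	struct demo_ops {
		int (*open)(void *obj);
		void (*close)(void *obj);
	};

	static int demo_open(void *obj)   { (void)obj; return 0; }
	static void demo_close(void *obj) { (void)obj; }

	/* const places the table in .rodata: assigning to readonly_ops.open is a
	 * compile-time error, and a stray runtime write faults. */
	static const struct demo_ops readonly_ops = {
		.open  = demo_open,
		.close = demo_close,
	};

	/* Callers take a pointer-to-const, matching the 'const struct extent_io_ops *ops'
	 * member the extent_io_tree hunk above introduces. */
	static int run(const struct demo_ops *ops, void *obj)
	{
		int ret = ops->open(obj);
		ops->close(obj);
		return ret;
	}

	int main(void)
	{
		return run(&readonly_ops, (void *)0);
	}
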
40037 diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
40038 --- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
40039 +++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
40040 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
40041 u64 group_start = group->key.objectid;
40042 new_extents = kmalloc(sizeof(*new_extents),
40043 GFP_NOFS);
40044 + if (!new_extents) {
40045 + ret = -ENOMEM;
40046 + goto out;
40047 + }
40048 nr_extents = 1;
40049 ret = get_new_locations(reloc_inode,
40050 extent_key,
40051 diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
40052 --- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
40053 +++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
40054 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
40055
40056 while(1) {
40057 if (entry->bytes < bytes || entry->offset < min_start) {
40058 - struct rb_node *node;
40059 -
40060 node = rb_next(&entry->offset_index);
40061 if (!node)
40062 break;
40063 @@ -1226,7 +1224,7 @@ again:
40064 */
40065 while (entry->bitmap || found_bitmap ||
40066 (!entry->bitmap && entry->bytes < min_bytes)) {
40067 - struct rb_node *node = rb_next(&entry->offset_index);
40068 + node = rb_next(&entry->offset_index);
40069
40070 if (entry->bitmap && entry->bytes > bytes + empty_size) {
40071 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40072 diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
40073 --- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40074 +++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40075 @@ -63,7 +63,7 @@ static const struct inode_operations btr
40076 static const struct address_space_operations btrfs_aops;
40077 static const struct address_space_operations btrfs_symlink_aops;
40078 static const struct file_operations btrfs_dir_file_operations;
40079 -static struct extent_io_ops btrfs_extent_io_ops;
40080 +static const struct extent_io_ops btrfs_extent_io_ops;
40081
40082 static struct kmem_cache *btrfs_inode_cachep;
40083 struct kmem_cache *btrfs_trans_handle_cachep;
40084 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40085 1, 0, NULL, GFP_NOFS);
40086 while (start < end) {
40087 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40088 + BUG_ON(!async_cow);
40089 async_cow->inode = inode;
40090 async_cow->root = root;
40091 async_cow->locked_page = locked_page;
40092 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40093 inline_size = btrfs_file_extent_inline_item_len(leaf,
40094 btrfs_item_nr(leaf, path->slots[0]));
40095 tmp = kmalloc(inline_size, GFP_NOFS);
40096 + if (!tmp)
40097 + return -ENOMEM;
40098 ptr = btrfs_file_extent_inline_start(item);
40099
40100 read_extent_buffer(leaf, tmp, ptr, inline_size);
40101 @@ -5410,7 +5413,7 @@ fail:
40102 return -ENOMEM;
40103 }
40104
40105 -static int btrfs_getattr(struct vfsmount *mnt,
40106 +int btrfs_getattr(struct vfsmount *mnt,
40107 struct dentry *dentry, struct kstat *stat)
40108 {
40109 struct inode *inode = dentry->d_inode;
40110 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40111 return 0;
40112 }
40113
40114 +EXPORT_SYMBOL(btrfs_getattr);
40115 +
40116 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40117 +{
40118 + return BTRFS_I(inode)->root->anon_super.s_dev;
40119 +}
40120 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40121 +
40122 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40123 struct inode *new_dir, struct dentry *new_dentry)
40124 {
40125 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40126 .fsync = btrfs_sync_file,
40127 };
40128
40129 -static struct extent_io_ops btrfs_extent_io_ops = {
40130 +static const struct extent_io_ops btrfs_extent_io_ops = {
40131 .fill_delalloc = run_delalloc_range,
40132 .submit_bio_hook = btrfs_submit_bio_hook,
40133 .merge_bio_hook = btrfs_merge_bio_hook,
40134 diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
40135 --- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40136 +++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40137 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40138 }
40139 spin_unlock(&rc->reloc_root_tree.lock);
40140
40141 - BUG_ON((struct btrfs_root *)node->data != root);
40142 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40143
40144 if (!del) {
40145 spin_lock(&rc->reloc_root_tree.lock);
40146 diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
40147 --- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40148 +++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40149 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40150 complete(&root->kobj_unregister);
40151 }
40152
40153 -static struct sysfs_ops btrfs_super_attr_ops = {
40154 +static const struct sysfs_ops btrfs_super_attr_ops = {
40155 .show = btrfs_super_attr_show,
40156 .store = btrfs_super_attr_store,
40157 };
40158
40159 -static struct sysfs_ops btrfs_root_attr_ops = {
40160 +static const struct sysfs_ops btrfs_root_attr_ops = {
40161 .show = btrfs_root_attr_show,
40162 .store = btrfs_root_attr_store,
40163 };
40164 diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
40165 --- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40166 +++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40167 @@ -25,6 +25,7 @@
40168 #include <linux/percpu.h>
40169 #include <linux/slab.h>
40170 #include <linux/capability.h>
40171 +#include <linux/security.h>
40172 #include <linux/blkdev.h>
40173 #include <linux/file.h>
40174 #include <linux/quotaops.h>
40175 diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
40176 --- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40177 +++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40178 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40179 args);
40180
40181 /* start by checking things over */
40182 - ASSERT(cache->fstop_percent >= 0 &&
40183 - cache->fstop_percent < cache->fcull_percent &&
40184 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40185 cache->fcull_percent < cache->frun_percent &&
40186 cache->frun_percent < 100);
40187
40188 - ASSERT(cache->bstop_percent >= 0 &&
40189 - cache->bstop_percent < cache->bcull_percent &&
40190 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40191 cache->bcull_percent < cache->brun_percent &&
40192 cache->brun_percent < 100);
40193
40194 diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
40195 --- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40196 +++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40197 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40198 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40199 return -EIO;
40200
40201 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40202 + if (datalen > PAGE_SIZE - 1)
40203 return -EOPNOTSUPP;
40204
40205 /* drag the command string into the kernel so we can parse it */
40206 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40207 if (args[0] != '%' || args[1] != '\0')
40208 return -EINVAL;
40209
40210 - if (fstop < 0 || fstop >= cache->fcull_percent)
40211 + if (fstop >= cache->fcull_percent)
40212 return cachefiles_daemon_range_error(cache, args);
40213
40214 cache->fstop_percent = fstop;
40215 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40216 if (args[0] != '%' || args[1] != '\0')
40217 return -EINVAL;
40218
40219 - if (bstop < 0 || bstop >= cache->bcull_percent)
40220 + if (bstop >= cache->bcull_percent)
40221 return cachefiles_daemon_range_error(cache, args);
40222
40223 cache->bstop_percent = bstop;
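
The cachefiles hunks above delete the datalen < 0, fstop < 0 and bstop < 0 tests because those values are unsigned, so a less-than-zero comparison can never be true; only the upper-bound checks do any work. A two-line illustration (4096 stands in for PAGE_SIZE here):

	#include <stdio.h>

	int main(void)
	{
		unsigned int datalen = (unsigned int)-5;	/* wraps to a huge value */

		if (datalen < 0)				/* always false for an unsigned type */
			puts("never printed");
		if (datalen > 4096 - 1)				/* the meaningful bound check remains */
			puts("rejected: too large");
		return 0;
	}
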
40224 diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
40225 --- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40226 +++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40227 @@ -56,7 +56,7 @@ struct cachefiles_cache {
40228 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40229 struct rb_root active_nodes; /* active nodes (can't be culled) */
40230 rwlock_t active_lock; /* lock for active_nodes */
40231 - atomic_t gravecounter; /* graveyard uniquifier */
40232 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40233 unsigned frun_percent; /* when to stop culling (% files) */
40234 unsigned fcull_percent; /* when to start culling (% files) */
40235 unsigned fstop_percent; /* when to stop allocating (% files) */
40236 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40237 * proc.c
40238 */
40239 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40240 -extern atomic_t cachefiles_lookup_histogram[HZ];
40241 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40242 -extern atomic_t cachefiles_create_histogram[HZ];
40243 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40244 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40245 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40246
40247 extern int __init cachefiles_proc_init(void);
40248 extern void cachefiles_proc_cleanup(void);
40249 static inline
40250 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40251 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40252 {
40253 unsigned long jif = jiffies - start_jif;
40254 if (jif >= HZ)
40255 jif = HZ - 1;
40256 - atomic_inc(&histogram[jif]);
40257 + atomic_inc_unchecked(&histogram[jif]);
40258 }
40259
40260 #else
40261 diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
40262 --- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40263 +++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40264 @@ -250,7 +250,7 @@ try_again:
40265 /* first step is to make up a grave dentry in the graveyard */
40266 sprintf(nbuffer, "%08x%08x",
40267 (uint32_t) get_seconds(),
40268 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40269 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40270
40271 /* do the multiway lock magic */
40272 trap = lock_rename(cache->graveyard, dir);
40273 diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
40274 --- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40275 +++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40276 @@ -14,9 +14,9 @@
40277 #include <linux/seq_file.h>
40278 #include "internal.h"
40279
40280 -atomic_t cachefiles_lookup_histogram[HZ];
40281 -atomic_t cachefiles_mkdir_histogram[HZ];
40282 -atomic_t cachefiles_create_histogram[HZ];
40283 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40284 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40285 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40286
40287 /*
40288 * display the latency histogram
40289 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40290 return 0;
40291 default:
40292 index = (unsigned long) v - 3;
40293 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40294 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40295 - z = atomic_read(&cachefiles_create_histogram[index]);
40296 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40297 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40298 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40299 if (x == 0 && y == 0 && z == 0)
40300 return 0;
40301
40302 diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
40303 --- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40304 +++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40305 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40306 old_fs = get_fs();
40307 set_fs(KERNEL_DS);
40308 ret = file->f_op->write(
40309 - file, (const void __user *) data, len, &pos);
40310 + file, (__force const void __user *) data, len, &pos);
40311 set_fs(old_fs);
40312 kunmap(page);
40313 if (ret != len)
40314 diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
40315 --- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40316 +++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40317 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40318 tcon = list_entry(tmp3,
40319 struct cifsTconInfo,
40320 tcon_list);
40321 - atomic_set(&tcon->num_smbs_sent, 0);
40322 - atomic_set(&tcon->num_writes, 0);
40323 - atomic_set(&tcon->num_reads, 0);
40324 - atomic_set(&tcon->num_oplock_brks, 0);
40325 - atomic_set(&tcon->num_opens, 0);
40326 - atomic_set(&tcon->num_posixopens, 0);
40327 - atomic_set(&tcon->num_posixmkdirs, 0);
40328 - atomic_set(&tcon->num_closes, 0);
40329 - atomic_set(&tcon->num_deletes, 0);
40330 - atomic_set(&tcon->num_mkdirs, 0);
40331 - atomic_set(&tcon->num_rmdirs, 0);
40332 - atomic_set(&tcon->num_renames, 0);
40333 - atomic_set(&tcon->num_t2renames, 0);
40334 - atomic_set(&tcon->num_ffirst, 0);
40335 - atomic_set(&tcon->num_fnext, 0);
40336 - atomic_set(&tcon->num_fclose, 0);
40337 - atomic_set(&tcon->num_hardlinks, 0);
40338 - atomic_set(&tcon->num_symlinks, 0);
40339 - atomic_set(&tcon->num_locks, 0);
40340 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40341 + atomic_set_unchecked(&tcon->num_writes, 0);
40342 + atomic_set_unchecked(&tcon->num_reads, 0);
40343 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40344 + atomic_set_unchecked(&tcon->num_opens, 0);
40345 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40346 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40347 + atomic_set_unchecked(&tcon->num_closes, 0);
40348 + atomic_set_unchecked(&tcon->num_deletes, 0);
40349 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40350 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40351 + atomic_set_unchecked(&tcon->num_renames, 0);
40352 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40353 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40354 + atomic_set_unchecked(&tcon->num_fnext, 0);
40355 + atomic_set_unchecked(&tcon->num_fclose, 0);
40356 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40357 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40358 + atomic_set_unchecked(&tcon->num_locks, 0);
40359 }
40360 }
40361 }
40362 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40363 if (tcon->need_reconnect)
40364 seq_puts(m, "\tDISCONNECTED ");
40365 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40366 - atomic_read(&tcon->num_smbs_sent),
40367 - atomic_read(&tcon->num_oplock_brks));
40368 + atomic_read_unchecked(&tcon->num_smbs_sent),
40369 + atomic_read_unchecked(&tcon->num_oplock_brks));
40370 seq_printf(m, "\nReads: %d Bytes: %lld",
40371 - atomic_read(&tcon->num_reads),
40372 + atomic_read_unchecked(&tcon->num_reads),
40373 (long long)(tcon->bytes_read));
40374 seq_printf(m, "\nWrites: %d Bytes: %lld",
40375 - atomic_read(&tcon->num_writes),
40376 + atomic_read_unchecked(&tcon->num_writes),
40377 (long long)(tcon->bytes_written));
40378 seq_printf(m, "\nFlushes: %d",
40379 - atomic_read(&tcon->num_flushes));
40380 + atomic_read_unchecked(&tcon->num_flushes));
40381 seq_printf(m, "\nLocks: %d HardLinks: %d "
40382 "Symlinks: %d",
40383 - atomic_read(&tcon->num_locks),
40384 - atomic_read(&tcon->num_hardlinks),
40385 - atomic_read(&tcon->num_symlinks));
40386 + atomic_read_unchecked(&tcon->num_locks),
40387 + atomic_read_unchecked(&tcon->num_hardlinks),
40388 + atomic_read_unchecked(&tcon->num_symlinks));
40389 seq_printf(m, "\nOpens: %d Closes: %d "
40390 "Deletes: %d",
40391 - atomic_read(&tcon->num_opens),
40392 - atomic_read(&tcon->num_closes),
40393 - atomic_read(&tcon->num_deletes));
40394 + atomic_read_unchecked(&tcon->num_opens),
40395 + atomic_read_unchecked(&tcon->num_closes),
40396 + atomic_read_unchecked(&tcon->num_deletes));
40397 seq_printf(m, "\nPosix Opens: %d "
40398 "Posix Mkdirs: %d",
40399 - atomic_read(&tcon->num_posixopens),
40400 - atomic_read(&tcon->num_posixmkdirs));
40401 + atomic_read_unchecked(&tcon->num_posixopens),
40402 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40403 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40404 - atomic_read(&tcon->num_mkdirs),
40405 - atomic_read(&tcon->num_rmdirs));
40406 + atomic_read_unchecked(&tcon->num_mkdirs),
40407 + atomic_read_unchecked(&tcon->num_rmdirs));
40408 seq_printf(m, "\nRenames: %d T2 Renames %d",
40409 - atomic_read(&tcon->num_renames),
40410 - atomic_read(&tcon->num_t2renames));
40411 + atomic_read_unchecked(&tcon->num_renames),
40412 + atomic_read_unchecked(&tcon->num_t2renames));
40413 seq_printf(m, "\nFindFirst: %d FNext %d "
40414 "FClose %d",
40415 - atomic_read(&tcon->num_ffirst),
40416 - atomic_read(&tcon->num_fnext),
40417 - atomic_read(&tcon->num_fclose));
40418 + atomic_read_unchecked(&tcon->num_ffirst),
40419 + atomic_read_unchecked(&tcon->num_fnext),
40420 + atomic_read_unchecked(&tcon->num_fclose));
40421 }
40422 }
40423 }
40424 diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40425 --- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40426 +++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40427 @@ -252,28 +252,28 @@ struct cifsTconInfo {
40428 __u16 Flags; /* optional support bits */
40429 enum statusEnum tidStatus;
40430 #ifdef CONFIG_CIFS_STATS
40431 - atomic_t num_smbs_sent;
40432 - atomic_t num_writes;
40433 - atomic_t num_reads;
40434 - atomic_t num_flushes;
40435 - atomic_t num_oplock_brks;
40436 - atomic_t num_opens;
40437 - atomic_t num_closes;
40438 - atomic_t num_deletes;
40439 - atomic_t num_mkdirs;
40440 - atomic_t num_posixopens;
40441 - atomic_t num_posixmkdirs;
40442 - atomic_t num_rmdirs;
40443 - atomic_t num_renames;
40444 - atomic_t num_t2renames;
40445 - atomic_t num_ffirst;
40446 - atomic_t num_fnext;
40447 - atomic_t num_fclose;
40448 - atomic_t num_hardlinks;
40449 - atomic_t num_symlinks;
40450 - atomic_t num_locks;
40451 - atomic_t num_acl_get;
40452 - atomic_t num_acl_set;
40453 + atomic_unchecked_t num_smbs_sent;
40454 + atomic_unchecked_t num_writes;
40455 + atomic_unchecked_t num_reads;
40456 + atomic_unchecked_t num_flushes;
40457 + atomic_unchecked_t num_oplock_brks;
40458 + atomic_unchecked_t num_opens;
40459 + atomic_unchecked_t num_closes;
40460 + atomic_unchecked_t num_deletes;
40461 + atomic_unchecked_t num_mkdirs;
40462 + atomic_unchecked_t num_posixopens;
40463 + atomic_unchecked_t num_posixmkdirs;
40464 + atomic_unchecked_t num_rmdirs;
40465 + atomic_unchecked_t num_renames;
40466 + atomic_unchecked_t num_t2renames;
40467 + atomic_unchecked_t num_ffirst;
40468 + atomic_unchecked_t num_fnext;
40469 + atomic_unchecked_t num_fclose;
40470 + atomic_unchecked_t num_hardlinks;
40471 + atomic_unchecked_t num_symlinks;
40472 + atomic_unchecked_t num_locks;
40473 + atomic_unchecked_t num_acl_get;
40474 + atomic_unchecked_t num_acl_set;
40475 #ifdef CONFIG_CIFS_STATS2
40476 unsigned long long time_writes;
40477 unsigned long long time_reads;
40478 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40479 }
40480
40481 #ifdef CONFIG_CIFS_STATS
40482 -#define cifs_stats_inc atomic_inc
40483 +#define cifs_stats_inc atomic_inc_unchecked
40484
40485 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40486 unsigned int bytes)
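
The cachefiles and cifs hunks above, and the coda/cache.c hunk below, convert pure statistics counters (per-tcon SMB counts, latency histogram buckets, the coda permission epoch) from atomic_t to atomic_unchecked_t, an atomic type whose operations are exempt from PaX's integer-overflow instrumentation because wrapping is harmless for such counters. A rough sketch of the idea follows; these are illustrative definitions, not the patch's actual arch code.

	/* Demo stand-ins for atomic_unchecked_t and its ops: same layout as an
	 * atomic counter, but incremented through a plain (uninstrumented) op. */
	typedef struct {
		int counter;
	} atomic_unchecked_t_demo;

	static inline int atomic_read_unchecked_demo(const atomic_unchecked_t_demo *v)
	{
		return v->counter;
	}

	static inline void atomic_inc_unchecked_demo(atomic_unchecked_t_demo *v)
	{
		__sync_fetch_and_add(&v->counter, 1);	/* GCC builtin, stands in for the arch op */
	}

	int main(void)
	{
		atomic_unchecked_t_demo n = { 0 };

		atomic_inc_unchecked_demo(&n);
		return atomic_read_unchecked_demo(&n) == 1 ? 0 : 1;
	}

The "#define cifs_stats_inc atomic_inc_unchecked" hunk above then routes every existing cifs_stats_inc() call site to the unchecked operation without touching them individually.
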
40487 diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40488 --- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40489 +++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40490 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40491
40492 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40493 {
40494 - char *p = nd_get_link(nd);
40495 + const char *p = nd_get_link(nd);
40496 if (!IS_ERR(p))
40497 kfree(p);
40498 }
40499 diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40500 --- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40501 +++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40502 @@ -24,14 +24,14 @@
40503 #include <linux/coda_fs_i.h>
40504 #include <linux/coda_cache.h>
40505
40506 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40507 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40508
40509 /* replace or extend an acl cache hit */
40510 void coda_cache_enter(struct inode *inode, int mask)
40511 {
40512 struct coda_inode_info *cii = ITOC(inode);
40513
40514 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40515 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40516 if (cii->c_uid != current_fsuid()) {
40517 cii->c_uid = current_fsuid();
40518 cii->c_cached_perm = mask;
40519 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40520 void coda_cache_clear_inode(struct inode *inode)
40521 {
40522 struct coda_inode_info *cii = ITOC(inode);
40523 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40524 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40525 }
40526
40527 /* remove all acl caches */
40528 void coda_cache_clear_all(struct super_block *sb)
40529 {
40530 - atomic_inc(&permission_epoch);
40531 + atomic_inc_unchecked(&permission_epoch);
40532 }
40533
40534
40535 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40536
40537 hit = (mask & cii->c_cached_perm) == mask &&
40538 cii->c_uid == current_fsuid() &&
40539 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40540 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40541
40542 return hit;
40543 }
40544 diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40545 --- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40546 +++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40547 @@ -29,10 +29,12 @@
40548 #undef elfhdr
40549 #undef elf_phdr
40550 #undef elf_note
40551 +#undef elf_dyn
40552 #undef elf_addr_t
40553 #define elfhdr elf32_hdr
40554 #define elf_phdr elf32_phdr
40555 #define elf_note elf32_note
40556 +#define elf_dyn Elf32_Dyn
40557 #define elf_addr_t Elf32_Addr
40558
40559 /*
40560 diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40561 --- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40562 +++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40563 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40564
40565 struct compat_readdir_callback {
40566 struct compat_old_linux_dirent __user *dirent;
40567 + struct file * file;
40568 int result;
40569 };
40570
40571 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40572 buf->result = -EOVERFLOW;
40573 return -EOVERFLOW;
40574 }
40575 +
40576 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40577 + return 0;
40578 +
40579 buf->result++;
40580 dirent = buf->dirent;
40581 if (!access_ok(VERIFY_WRITE, dirent,
40582 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40583
40584 buf.result = 0;
40585 buf.dirent = dirent;
40586 + buf.file = file;
40587
40588 error = vfs_readdir(file, compat_fillonedir, &buf);
40589 if (buf.result)
40590 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
40591 struct compat_getdents_callback {
40592 struct compat_linux_dirent __user *current_dir;
40593 struct compat_linux_dirent __user *previous;
40594 + struct file * file;
40595 int count;
40596 int error;
40597 };
40598 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40599 buf->error = -EOVERFLOW;
40600 return -EOVERFLOW;
40601 }
40602 +
40603 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40604 + return 0;
40605 +
40606 dirent = buf->previous;
40607 if (dirent) {
40608 if (__put_user(offset, &dirent->d_off))
40609 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40610 buf.previous = NULL;
40611 buf.count = count;
40612 buf.error = 0;
40613 + buf.file = file;
40614
40615 error = vfs_readdir(file, compat_filldir, &buf);
40616 if (error >= 0)
40617 @@ -987,6 +999,7 @@ out:
40618 struct compat_getdents_callback64 {
40619 struct linux_dirent64 __user *current_dir;
40620 struct linux_dirent64 __user *previous;
40621 + struct file * file;
40622 int count;
40623 int error;
40624 };
40625 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40626 buf->error = -EINVAL; /* only used if we fail.. */
40627 if (reclen > buf->count)
40628 return -EINVAL;
40629 +
40630 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40631 + return 0;
40632 +
40633 dirent = buf->previous;
40634
40635 if (dirent) {
40636 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40637 buf.previous = NULL;
40638 buf.count = count;
40639 buf.error = 0;
40640 + buf.file = file;
40641
40642 error = vfs_readdir(file, compat_filldir64, &buf);
40643 if (error >= 0)
40644 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40645 * verify all the pointers
40646 */
40647 ret = -EINVAL;
40648 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40649 + if (nr_segs > UIO_MAXIOV)
40650 goto out;
40651 if (!file->f_op)
40652 goto out;
40653 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40654 compat_uptr_t __user *envp,
40655 struct pt_regs * regs)
40656 {
40657 +#ifdef CONFIG_GRKERNSEC
40658 + struct file *old_exec_file;
40659 + struct acl_subject_label *old_acl;
40660 + struct rlimit old_rlim[RLIM_NLIMITS];
40661 +#endif
40662 struct linux_binprm *bprm;
40663 struct file *file;
40664 struct files_struct *displaced;
40665 bool clear_in_exec;
40666 int retval;
40667 + const struct cred *cred = current_cred();
40668 +
40669 + /*
40670 + * We move the actual failure in case of RLIMIT_NPROC excess from
40671 + * set*uid() to execve() because too many poorly written programs
40672 + * don't check setuid() return code. Here we additionally recheck
40673 + * whether NPROC limit is still exceeded.
40674 + */
40675 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40676 +
40677 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40678 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40679 + retval = -EAGAIN;
40680 + goto out_ret;
40681 + }
40682 +
40683 + /* We're below the limit (still or again), so we don't want to make
40684 + * further execve() calls fail. */
40685 + current->flags &= ~PF_NPROC_EXCEEDED;
40686
40687 retval = unshare_files(&displaced);
40688 if (retval)
40689 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40690 bprm->filename = filename;
40691 bprm->interp = filename;
40692
40693 + if (gr_process_user_ban()) {
40694 + retval = -EPERM;
40695 + goto out_file;
40696 + }
40697 +
40698 + retval = -EACCES;
40699 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40700 + goto out_file;
40701 +
40702 retval = bprm_mm_init(bprm);
40703 if (retval)
40704 goto out_file;
40705 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40706 if (retval < 0)
40707 goto out;
40708
40709 + if (!gr_tpe_allow(file)) {
40710 + retval = -EACCES;
40711 + goto out;
40712 + }
40713 +
40714 + if (gr_check_crash_exec(file)) {
40715 + retval = -EACCES;
40716 + goto out;
40717 + }
40718 +
40719 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40720 +
40721 + gr_handle_exec_args_compat(bprm, argv);
40722 +
40723 +#ifdef CONFIG_GRKERNSEC
40724 + old_acl = current->acl;
40725 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40726 + old_exec_file = current->exec_file;
40727 + get_file(file);
40728 + current->exec_file = file;
40729 +#endif
40730 +
40731 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40732 + bprm->unsafe & LSM_UNSAFE_SHARE);
40733 + if (retval < 0)
40734 + goto out_fail;
40735 +
40736 retval = search_binary_handler(bprm, regs);
40737 if (retval < 0)
40738 - goto out;
40739 + goto out_fail;
40740 +#ifdef CONFIG_GRKERNSEC
40741 + if (old_exec_file)
40742 + fput(old_exec_file);
40743 +#endif
40744
40745 /* execve succeeded */
40746 current->fs->in_exec = 0;
40747 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40748 put_files_struct(displaced);
40749 return retval;
40750
40751 +out_fail:
40752 +#ifdef CONFIG_GRKERNSEC
40753 + current->acl = old_acl;
40754 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40755 + fput(current->exec_file);
40756 + current->exec_file = old_exec_file;
40757 +#endif
40758 +
40759 out:
40760 if (bprm->mm) {
40761 acct_arg_size(bprm, 0);
40762 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40763 struct fdtable *fdt;
40764 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40765
40766 + pax_track_stack();
40767 +
40768 if (n < 0)
40769 goto out_nofds;
40770
40771 diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40772 --- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40773 +++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40774 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40775 up = (struct compat_video_spu_palette __user *) arg;
40776 err = get_user(palp, &up->palette);
40777 err |= get_user(length, &up->length);
40778 + if (err)
40779 + return -EFAULT;
40780
40781 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40782 err = put_user(compat_ptr(palp), &up_native->palette);
40783 diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40784 --- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40785 +++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40786 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40787 }
40788 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40789 struct configfs_dirent *next;
40790 - const char * name;
40791 + const unsigned char * name;
40792 + char d_name[sizeof(next->s_dentry->d_iname)];
40793 int len;
40794
40795 next = list_entry(p, struct configfs_dirent,
40796 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40797 continue;
40798
40799 name = configfs_get_name(next);
40800 - len = strlen(name);
40801 + if (next->s_dentry && name == next->s_dentry->d_iname) {
40802 + len = next->s_dentry->d_name.len;
40803 + memcpy(d_name, name, len);
40804 + name = d_name;
40805 + } else
40806 + len = strlen(name);
40807 if (next->s_dentry)
40808 ino = next->s_dentry->d_inode->i_ino;
40809 else
40810 diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40811 --- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40812 +++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40813 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40814
40815 static struct kmem_cache *dentry_cache __read_mostly;
40816
40817 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40818 -
40819 /*
40820 * This is the single most critical data structure when it comes
40821 * to the dcache: the hashtable for lookups. Somebody should try
40822 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40823 mempages -= reserve;
40824
40825 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40826 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40827 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40828
40829 dcache_init();
40830 inode_init();
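
The dcache.c hunk above adds SLAB_USERCOPY to names_cache, the slab backing PATH_MAX pathname buffers. Under CONFIG_PAX_USERCOPY only whitelisted caches may be the source or destination of copy_to_user()/copy_from_user(); pathname buffers legitimately cross the user/kernel boundary, so the cache is flagged. A heavily simplified sketch of the idea follows; the names used are illustrative and the real enforcement is added to the slab allocator elsewhere in this patch.

/* Illustrative sketch only: field and helper names here are
 * hypothetical.  The point is that a user copy touching slab memory is
 * refused unless the owning cache was created with SLAB_USERCOPY, as
 * names_cache now is. */
static void check_slab_usercopy(const struct kmem_cache *cachep,
				const void *ptr, unsigned long n, bool to_user)
{
	if (!(cachep->flags & SLAB_USERCOPY))
		pax_report_usercopy(ptr, n, to_user, cachep->name);
}
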
40831 diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40832 --- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40833 +++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40834 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40835 kfree(ls);
40836 }
40837
40838 -static struct sysfs_ops dlm_attr_ops = {
40839 +static const struct sysfs_ops dlm_attr_ops = {
40840 .show = dlm_attr_show,
40841 .store = dlm_attr_store,
40842 };
40843 diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40844 --- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40845 +++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40846 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40847 old_fs = get_fs();
40848 set_fs(get_ds());
40849 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40850 - (char __user *)lower_buf,
40851 + (__force char __user *)lower_buf,
40852 lower_bufsiz);
40853 set_fs(old_fs);
40854 if (rc < 0)
40855 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40856 }
40857 old_fs = get_fs();
40858 set_fs(get_ds());
40859 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40860 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40861 set_fs(old_fs);
40862 if (rc < 0)
40863 goto out_free;
40864 diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40865 --- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40866 +++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40867 @@ -56,12 +56,24 @@
40868 #include <linux/fsnotify.h>
40869 #include <linux/fs_struct.h>
40870 #include <linux/pipe_fs_i.h>
40871 +#include <linux/random.h>
40872 +#include <linux/seq_file.h>
40873 +
40874 +#ifdef CONFIG_PAX_REFCOUNT
40875 +#include <linux/kallsyms.h>
40876 +#include <linux/kdebug.h>
40877 +#endif
40878
40879 #include <asm/uaccess.h>
40880 #include <asm/mmu_context.h>
40881 #include <asm/tlb.h>
40882 #include "internal.h"
40883
40884 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40885 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40886 +EXPORT_SYMBOL(pax_set_initial_flags_func);
40887 +#endif
40888 +
40889 int core_uses_pid;
40890 char core_pattern[CORENAME_MAX_SIZE] = "core";
40891 unsigned int core_pipe_limit;
40892 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40893 goto out;
40894
40895 file = do_filp_open(AT_FDCWD, tmp,
40896 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40897 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40898 MAY_READ | MAY_EXEC | MAY_OPEN);
40899 putname(tmp);
40900 error = PTR_ERR(file);
40901 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40902 int write)
40903 {
40904 struct page *page;
40905 - int ret;
40906
40907 -#ifdef CONFIG_STACK_GROWSUP
40908 - if (write) {
40909 - ret = expand_stack_downwards(bprm->vma, pos);
40910 - if (ret < 0)
40911 - return NULL;
40912 - }
40913 -#endif
40914 - ret = get_user_pages(current, bprm->mm, pos,
40915 - 1, write, 1, &page, NULL);
40916 - if (ret <= 0)
40917 + if (0 > expand_stack_downwards(bprm->vma, pos))
40918 + return NULL;
40919 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40920 return NULL;
40921
40922 if (write) {
40923 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40924 vma->vm_end = STACK_TOP_MAX;
40925 vma->vm_start = vma->vm_end - PAGE_SIZE;
40926 vma->vm_flags = VM_STACK_FLAGS;
40927 +
40928 +#ifdef CONFIG_PAX_SEGMEXEC
40929 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40930 +#endif
40931 +
40932 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40933
40934 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40935 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40936 mm->stack_vm = mm->total_vm = 1;
40937 up_write(&mm->mmap_sem);
40938 bprm->p = vma->vm_end - sizeof(void *);
40939 +
40940 +#ifdef CONFIG_PAX_RANDUSTACK
40941 + if (randomize_va_space)
40942 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40943 +#endif
40944 +
40945 return 0;
40946 err:
40947 up_write(&mm->mmap_sem);
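
The CONFIG_PAX_RANDUSTACK hunk above XORs a masked random value into bprm->p. The masking is compact: (r & ~15) keeps the delta 16-byte aligned, and & ~PAGE_MASK keeps only the offset-within-page bits (the low 12 bits for 4 KiB pages), so the initial stack pointer moves by at most one page without losing 16-byte granularity. A worked userspace model, illustrative only (it assumes 4 KiB pages, random() stands in for pax_get_random_long(), and the starting value of p is made up):

/* Userspace model of the PAX_RANDUSTACK masking above. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p = 0xbffff000UL - sizeof(void *);	/* stand-in for bprm->p */
	unsigned long r = (unsigned long)random();		/* stand-in for pax_get_random_long() */
	unsigned long delta = (r & ~15UL) & ~PAGE_MASK;		/* 0..4080, multiple of 16 */

	printf("delta=%#lx  p: %#lx -> %#lx\n", delta, p, p ^ delta);
	return 0;
}
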
40948 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40949 int r;
40950 mm_segment_t oldfs = get_fs();
40951 set_fs(KERNEL_DS);
40952 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
40953 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40954 set_fs(oldfs);
40955 return r;
40956 }
40957 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40958 unsigned long new_end = old_end - shift;
40959 struct mmu_gather *tlb;
40960
40961 - BUG_ON(new_start > new_end);
40962 + if (new_start >= new_end || new_start < mmap_min_addr)
40963 + return -ENOMEM;
40964
40965 /*
40966 * ensure there are no vmas between where we want to go
40967 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40968 if (vma != find_vma(mm, new_start))
40969 return -EFAULT;
40970
40971 +#ifdef CONFIG_PAX_SEGMEXEC
40972 + BUG_ON(pax_find_mirror_vma(vma));
40973 +#endif
40974 +
40975 /*
40976 * cover the whole range: [new_start, old_end)
40977 */
40978 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40979 stack_top = arch_align_stack(stack_top);
40980 stack_top = PAGE_ALIGN(stack_top);
40981
40982 - if (unlikely(stack_top < mmap_min_addr) ||
40983 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40984 - return -ENOMEM;
40985 -
40986 stack_shift = vma->vm_end - stack_top;
40987
40988 bprm->p -= stack_shift;
40989 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40990 bprm->exec -= stack_shift;
40991
40992 down_write(&mm->mmap_sem);
40993 +
40994 + /* Move stack pages down in memory. */
40995 + if (stack_shift) {
40996 + ret = shift_arg_pages(vma, stack_shift);
40997 + if (ret)
40998 + goto out_unlock;
40999 + }
41000 +
41001 vm_flags = VM_STACK_FLAGS;
41002
41003 /*
41004 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
41005 vm_flags &= ~VM_EXEC;
41006 vm_flags |= mm->def_flags;
41007
41008 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41009 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41010 + vm_flags &= ~VM_EXEC;
41011 +
41012 +#ifdef CONFIG_PAX_MPROTECT
41013 + if (mm->pax_flags & MF_PAX_MPROTECT)
41014 + vm_flags &= ~VM_MAYEXEC;
41015 +#endif
41016 +
41017 + }
41018 +#endif
41019 +
41020 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
41021 vm_flags);
41022 if (ret)
41023 goto out_unlock;
41024 BUG_ON(prev != vma);
41025
41026 - /* Move stack pages down in memory. */
41027 - if (stack_shift) {
41028 - ret = shift_arg_pages(vma, stack_shift);
41029 - if (ret)
41030 - goto out_unlock;
41031 - }
41032 -
41033 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
41034 stack_size = vma->vm_end - vma->vm_start;
41035 /*
41036 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
41037 int err;
41038
41039 file = do_filp_open(AT_FDCWD, name,
41040 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41041 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41042 MAY_EXEC | MAY_OPEN);
41043 if (IS_ERR(file))
41044 goto out;
41045 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
41046 old_fs = get_fs();
41047 set_fs(get_ds());
41048 /* The cast to a user pointer is valid due to the set_fs() */
41049 - result = vfs_read(file, (void __user *)addr, count, &pos);
41050 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
41051 set_fs(old_fs);
41052 return result;
41053 }
41054 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
41055 }
41056 rcu_read_unlock();
41057
41058 - if (p->fs->users > n_fs) {
41059 + if (atomic_read(&p->fs->users) > n_fs) {
41060 bprm->unsafe |= LSM_UNSAFE_SHARE;
41061 } else {
41062 res = -EAGAIN;
41063 @@ -1347,11 +1376,35 @@ int do_execve(char * filename,
41064 char __user *__user *envp,
41065 struct pt_regs * regs)
41066 {
41067 +#ifdef CONFIG_GRKERNSEC
41068 + struct file *old_exec_file;
41069 + struct acl_subject_label *old_acl;
41070 + struct rlimit old_rlim[RLIM_NLIMITS];
41071 +#endif
41072 struct linux_binprm *bprm;
41073 struct file *file;
41074 struct files_struct *displaced;
41075 bool clear_in_exec;
41076 int retval;
41077 + const struct cred *cred = current_cred();
41078 +
41079 + /*
41080 + * We move the actual failure in case of RLIMIT_NPROC excess from
41081 + * set*uid() to execve() because too many poorly written programs
41082 + * don't check setuid() return code. Here we additionally recheck
41083 + * whether NPROC limit is still exceeded.
41084 + */
41085 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41086 +
41087 + if ((current->flags & PF_NPROC_EXCEEDED) &&
41088 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
41089 + retval = -EAGAIN;
41090 + goto out_ret;
41091 + }
41092 +
41093 + /* We're below the limit (still or again), so we don't want to make
41094 + * further execve() calls fail. */
41095 + current->flags &= ~PF_NPROC_EXCEEDED;
41096
41097 retval = unshare_files(&displaced);
41098 if (retval)
41099 @@ -1383,6 +1436,16 @@ int do_execve(char * filename,
41100 bprm->filename = filename;
41101 bprm->interp = filename;
41102
41103 + if (gr_process_user_ban()) {
41104 + retval = -EPERM;
41105 + goto out_file;
41106 + }
41107 +
41108 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41109 + retval = -EACCES;
41110 + goto out_file;
41111 + }
41112 +
41113 retval = bprm_mm_init(bprm);
41114 if (retval)
41115 goto out_file;
41116 @@ -1412,10 +1475,41 @@ int do_execve(char * filename,
41117 if (retval < 0)
41118 goto out;
41119
41120 + if (!gr_tpe_allow(file)) {
41121 + retval = -EACCES;
41122 + goto out;
41123 + }
41124 +
41125 + if (gr_check_crash_exec(file)) {
41126 + retval = -EACCES;
41127 + goto out;
41128 + }
41129 +
41130 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41131 +
41132 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41133 +
41134 +#ifdef CONFIG_GRKERNSEC
41135 + old_acl = current->acl;
41136 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41137 + old_exec_file = current->exec_file;
41138 + get_file(file);
41139 + current->exec_file = file;
41140 +#endif
41141 +
41142 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41143 + bprm->unsafe & LSM_UNSAFE_SHARE);
41144 + if (retval < 0)
41145 + goto out_fail;
41146 +
41147 current->flags &= ~PF_KTHREAD;
41148 retval = search_binary_handler(bprm,regs);
41149 if (retval < 0)
41150 - goto out;
41151 + goto out_fail;
41152 +#ifdef CONFIG_GRKERNSEC
41153 + if (old_exec_file)
41154 + fput(old_exec_file);
41155 +#endif
41156
41157 /* execve succeeded */
41158 current->fs->in_exec = 0;
41159 @@ -1426,6 +1520,14 @@ int do_execve(char * filename,
41160 put_files_struct(displaced);
41161 return retval;
41162
41163 +out_fail:
41164 +#ifdef CONFIG_GRKERNSEC
41165 + current->acl = old_acl;
41166 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41167 + fput(current->exec_file);
41168 + current->exec_file = old_exec_file;
41169 +#endif
41170 +
41171 out:
41172 if (bprm->mm) {
41173 acct_arg_size(bprm, 0);
41174 @@ -1591,6 +1693,220 @@ out:
41175 return ispipe;
41176 }
41177
41178 +int pax_check_flags(unsigned long *flags)
41179 +{
41180 + int retval = 0;
41181 +
41182 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41183 + if (*flags & MF_PAX_SEGMEXEC)
41184 + {
41185 + *flags &= ~MF_PAX_SEGMEXEC;
41186 + retval = -EINVAL;
41187 + }
41188 +#endif
41189 +
41190 + if ((*flags & MF_PAX_PAGEEXEC)
41191 +
41192 +#ifdef CONFIG_PAX_PAGEEXEC
41193 + && (*flags & MF_PAX_SEGMEXEC)
41194 +#endif
41195 +
41196 + )
41197 + {
41198 + *flags &= ~MF_PAX_PAGEEXEC;
41199 + retval = -EINVAL;
41200 + }
41201 +
41202 + if ((*flags & MF_PAX_MPROTECT)
41203 +
41204 +#ifdef CONFIG_PAX_MPROTECT
41205 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41206 +#endif
41207 +
41208 + )
41209 + {
41210 + *flags &= ~MF_PAX_MPROTECT;
41211 + retval = -EINVAL;
41212 + }
41213 +
41214 + if ((*flags & MF_PAX_EMUTRAMP)
41215 +
41216 +#ifdef CONFIG_PAX_EMUTRAMP
41217 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41218 +#endif
41219 +
41220 + )
41221 + {
41222 + *flags &= ~MF_PAX_EMUTRAMP;
41223 + retval = -EINVAL;
41224 + }
41225 +
41226 + return retval;
41227 +}
41228 +
41229 +EXPORT_SYMBOL(pax_check_flags);
41230 +
41231 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41232 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41233 +{
41234 + struct task_struct *tsk = current;
41235 + struct mm_struct *mm = current->mm;
41236 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41237 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41238 + char *path_exec = NULL;
41239 + char *path_fault = NULL;
41240 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41241 +
41242 + if (buffer_exec && buffer_fault) {
41243 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41244 +
41245 + down_read(&mm->mmap_sem);
41246 + vma = mm->mmap;
41247 + while (vma && (!vma_exec || !vma_fault)) {
41248 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41249 + vma_exec = vma;
41250 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41251 + vma_fault = vma;
41252 + vma = vma->vm_next;
41253 + }
41254 + if (vma_exec) {
41255 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41256 + if (IS_ERR(path_exec))
41257 + path_exec = "<path too long>";
41258 + else {
41259 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41260 + if (path_exec) {
41261 + *path_exec = 0;
41262 + path_exec = buffer_exec;
41263 + } else
41264 + path_exec = "<path too long>";
41265 + }
41266 + }
41267 + if (vma_fault) {
41268 + start = vma_fault->vm_start;
41269 + end = vma_fault->vm_end;
41270 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41271 + if (vma_fault->vm_file) {
41272 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41273 + if (IS_ERR(path_fault))
41274 + path_fault = "<path too long>";
41275 + else {
41276 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41277 + if (path_fault) {
41278 + *path_fault = 0;
41279 + path_fault = buffer_fault;
41280 + } else
41281 + path_fault = "<path too long>";
41282 + }
41283 + } else
41284 + path_fault = "<anonymous mapping>";
41285 + }
41286 + up_read(&mm->mmap_sem);
41287 + }
41288 + if (tsk->signal->curr_ip)
41289 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41290 + else
41291 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41292 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41293 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41294 + task_uid(tsk), task_euid(tsk), pc, sp);
41295 + free_page((unsigned long)buffer_exec);
41296 + free_page((unsigned long)buffer_fault);
41297 + pax_report_insns(pc, sp);
41298 + do_coredump(SIGKILL, SIGKILL, regs);
41299 +}
41300 +#endif
41301 +
41302 +#ifdef CONFIG_PAX_REFCOUNT
41303 +void pax_report_refcount_overflow(struct pt_regs *regs)
41304 +{
41305 + if (current->signal->curr_ip)
41306 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41307 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41308 + else
41309 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41310 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41311 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41312 + show_regs(regs);
41313 + force_sig_specific(SIGKILL, current);
41314 +}
41315 +#endif
41316 +
41317 +#ifdef CONFIG_PAX_USERCOPY
41318 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41319 +int object_is_on_stack(const void *obj, unsigned long len)
41320 +{
41321 + const void * const stack = task_stack_page(current);
41322 + const void * const stackend = stack + THREAD_SIZE;
41323 +
41324 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41325 + const void *frame = NULL;
41326 + const void *oldframe;
41327 +#endif
41328 +
41329 + if (obj + len < obj)
41330 + return -1;
41331 +
41332 + if (obj + len <= stack || stackend <= obj)
41333 + return 0;
41334 +
41335 + if (obj < stack || stackend < obj + len)
41336 + return -1;
41337 +
41338 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41339 + oldframe = __builtin_frame_address(1);
41340 + if (oldframe)
41341 + frame = __builtin_frame_address(2);
41342 + /*
41343 + low ----------------------------------------------> high
41344 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41345 + ^----------------^
41346 + allow copies only within here
41347 + */
41348 + while (stack <= frame && frame < stackend) {
41349 + /* if obj + len extends past the last frame, this
41350 + check won't pass and the next frame will be 0,
41351 + causing us to bail out and correctly report
41352 + the copy as invalid
41353 + */
41354 + if (obj + len <= frame)
41355 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41356 + oldframe = frame;
41357 + frame = *(const void * const *)frame;
41358 + }
41359 + return -1;
41360 +#else
41361 + return 1;
41362 +#endif
41363 +}
41364 +
41365 +
41366 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41367 +{
41368 + if (current->signal->curr_ip)
41369 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41370 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41371 + else
41372 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41373 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41374 +
41375 + dump_stack();
41376 + gr_handle_kernel_exploit();
41377 + do_group_exit(SIGKILL);
41378 +}
41379 +#endif
41380 +
41381 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41382 +void pax_track_stack(void)
41383 +{
41384 + unsigned long sp = (unsigned long)&sp;
41385 + if (sp < current_thread_info()->lowest_stack &&
41386 + sp > (unsigned long)task_stack_page(current))
41387 + current_thread_info()->lowest_stack = sp;
41388 +}
41389 +EXPORT_SYMBOL(pax_track_stack);
41390 +#endif
41391 +
41392 static int zap_process(struct task_struct *start)
41393 {
41394 struct task_struct *t;
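
object_is_on_stack(), added in the hunk above, classifies a (pointer, length) pair against the current kernel stack and, when frame pointers are available on x86, against the live call frames. The actual PAX_USERCOPY enforcement hooks appear elsewhere in this patch; the sketch below is a hypothetical caller, written only to show how the return codes documented in the function's own comment would be consumed together with pax_report_usercopy().

/* Hypothetical caller sketch (not the patch's real hook): reject a
 * user copy whose kernel buffer partially overlaps the stack or spills
 * past the caller's frame.  Return codes follow the comment above:
 * 0 = not a stack object, 1 = on the stack, 2 = fully inside the
 * caller's frame, -1 = partial overlap (treated as an attack). */
static void check_stack_object(const void *ptr, unsigned long n, bool to_user)
{
	switch (object_is_on_stack(ptr, n)) {
	case 0:
	case 1:
	case 2:
		return;
	default:	/* -1 */
		pax_report_usercopy(ptr, n, to_user, "stack frame");
	}
}
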
41395 @@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41396 pipe = file->f_path.dentry->d_inode->i_pipe;
41397
41398 pipe_lock(pipe);
41399 - pipe->readers++;
41400 - pipe->writers--;
41401 + atomic_inc(&pipe->readers);
41402 + atomic_dec(&pipe->writers);
41403
41404 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41405 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41406 wake_up_interruptible_sync(&pipe->wait);
41407 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41408 pipe_wait(pipe);
41409 }
41410
41411 - pipe->readers--;
41412 - pipe->writers++;
41413 + atomic_dec(&pipe->readers);
41414 + atomic_inc(&pipe->writers);
41415 pipe_unlock(pipe);
41416
41417 }
41418 @@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41419 char **helper_argv = NULL;
41420 int helper_argc = 0;
41421 int dump_count = 0;
41422 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41423 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41424
41425 audit_core_dumps(signr);
41426
41427 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41428 + gr_handle_brute_attach(current, mm->flags);
41429 +
41430 binfmt = mm->binfmt;
41431 if (!binfmt || !binfmt->core_dump)
41432 goto fail;
41433 @@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41434 */
41435 clear_thread_flag(TIF_SIGPENDING);
41436
41437 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41438 +
41439 /*
41440 * lock_kernel() because format_corename() is controlled by sysctl, which
41441 * uses lock_kernel()
41442 @@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41443 goto fail_unlock;
41444 }
41445
41446 - dump_count = atomic_inc_return(&core_dump_count);
41447 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41448 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41449 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41450 task_tgid_vnr(current), current->comm);
41451 @@ -1972,7 +2293,7 @@ close_fail:
41452 filp_close(file, NULL);
41453 fail_dropcount:
41454 if (dump_count)
41455 - atomic_dec(&core_dump_count);
41456 + atomic_dec_unchecked(&core_dump_count);
41457 fail_unlock:
41458 if (helper_argv)
41459 argv_free(helper_argv);
41460 diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41461 --- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41462 +++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41463 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41464
41465 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41466 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41467 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41468 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41469 sbi->s_resuid != current_fsuid() &&
41470 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41471 return 0;
41472 diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41473 --- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41474 +++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41475 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41476
41477 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41478 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41479 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41480 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41481 sbi->s_resuid != current_fsuid() &&
41482 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41483 return 0;
41484 diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41485 --- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41486 +++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41487 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41488 /* Hm, nope. Are (enough) root reserved blocks available? */
41489 if (sbi->s_resuid == current_fsuid() ||
41490 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41491 - capable(CAP_SYS_RESOURCE)) {
41492 + capable_nolog(CAP_SYS_RESOURCE)) {
41493 if (free_blocks >= (nblocks + dirty_blocks))
41494 return 1;
41495 }
41496 diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41497 --- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41498 +++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41499 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41500
41501 /* stats for buddy allocator */
41502 spinlock_t s_mb_pa_lock;
41503 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41504 - atomic_t s_bal_success; /* we found long enough chunks */
41505 - atomic_t s_bal_allocated; /* in blocks */
41506 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41507 - atomic_t s_bal_goals; /* goal hits */
41508 - atomic_t s_bal_breaks; /* too long searches */
41509 - atomic_t s_bal_2orders; /* 2^order hits */
41510 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41511 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41512 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41513 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41514 + atomic_unchecked_t s_bal_goals; /* goal hits */
41515 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41516 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41517 spinlock_t s_bal_lock;
41518 unsigned long s_mb_buddies_generated;
41519 unsigned long long s_mb_generation_time;
41520 - atomic_t s_mb_lost_chunks;
41521 - atomic_t s_mb_preallocated;
41522 - atomic_t s_mb_discarded;
41523 + atomic_unchecked_t s_mb_lost_chunks;
41524 + atomic_unchecked_t s_mb_preallocated;
41525 + atomic_unchecked_t s_mb_discarded;
41526 atomic_t s_lock_busy;
41527
41528 /* locality groups */
41529 diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41530 --- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41531 +++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41532 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41533 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41534
41535 if (EXT4_SB(sb)->s_mb_stats)
41536 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41537 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41538
41539 break;
41540 }
41541 @@ -2131,7 +2131,7 @@ repeat:
41542 ac->ac_status = AC_STATUS_CONTINUE;
41543 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41544 cr = 3;
41545 - atomic_inc(&sbi->s_mb_lost_chunks);
41546 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41547 goto repeat;
41548 }
41549 }
41550 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41551 ext4_grpblk_t counters[16];
41552 } sg;
41553
41554 + pax_track_stack();
41555 +
41556 group--;
41557 if (group == 0)
41558 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41559 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41560 if (sbi->s_mb_stats) {
41561 printk(KERN_INFO
41562 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41563 - atomic_read(&sbi->s_bal_allocated),
41564 - atomic_read(&sbi->s_bal_reqs),
41565 - atomic_read(&sbi->s_bal_success));
41566 + atomic_read_unchecked(&sbi->s_bal_allocated),
41567 + atomic_read_unchecked(&sbi->s_bal_reqs),
41568 + atomic_read_unchecked(&sbi->s_bal_success));
41569 printk(KERN_INFO
41570 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41571 "%u 2^N hits, %u breaks, %u lost\n",
41572 - atomic_read(&sbi->s_bal_ex_scanned),
41573 - atomic_read(&sbi->s_bal_goals),
41574 - atomic_read(&sbi->s_bal_2orders),
41575 - atomic_read(&sbi->s_bal_breaks),
41576 - atomic_read(&sbi->s_mb_lost_chunks));
41577 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41578 + atomic_read_unchecked(&sbi->s_bal_goals),
41579 + atomic_read_unchecked(&sbi->s_bal_2orders),
41580 + atomic_read_unchecked(&sbi->s_bal_breaks),
41581 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41582 printk(KERN_INFO
41583 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41584 sbi->s_mb_buddies_generated++,
41585 sbi->s_mb_generation_time);
41586 printk(KERN_INFO
41587 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41588 - atomic_read(&sbi->s_mb_preallocated),
41589 - atomic_read(&sbi->s_mb_discarded));
41590 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41591 + atomic_read_unchecked(&sbi->s_mb_discarded));
41592 }
41593
41594 free_percpu(sbi->s_locality_groups);
41595 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41596 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41597
41598 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41599 - atomic_inc(&sbi->s_bal_reqs);
41600 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41601 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41602 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41603 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41604 - atomic_inc(&sbi->s_bal_success);
41605 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41606 + atomic_inc_unchecked(&sbi->s_bal_success);
41607 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41608 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41609 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41610 - atomic_inc(&sbi->s_bal_goals);
41611 + atomic_inc_unchecked(&sbi->s_bal_goals);
41612 if (ac->ac_found > sbi->s_mb_max_to_scan)
41613 - atomic_inc(&sbi->s_bal_breaks);
41614 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41615 }
41616
41617 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41618 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41619 trace_ext4_mb_new_inode_pa(ac, pa);
41620
41621 ext4_mb_use_inode_pa(ac, pa);
41622 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41623 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41624
41625 ei = EXT4_I(ac->ac_inode);
41626 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41627 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41628 trace_ext4_mb_new_group_pa(ac, pa);
41629
41630 ext4_mb_use_group_pa(ac, pa);
41631 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41632 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41633
41634 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41635 lg = ac->ac_lg;
41636 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41637 * from the bitmap and continue.
41638 */
41639 }
41640 - atomic_add(free, &sbi->s_mb_discarded);
41641 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
41642
41643 return err;
41644 }
41645 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41646 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41647 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41648 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41649 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41650 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41651
41652 if (ac) {
41653 ac->ac_sb = sb;
41654 diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41655 --- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41656 +++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41657 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41658 }
41659
41660
41661 -static struct sysfs_ops ext4_attr_ops = {
41662 +static const struct sysfs_ops ext4_attr_ops = {
41663 .show = ext4_attr_show,
41664 .store = ext4_attr_store,
41665 };
41666 diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41667 --- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41668 +++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41669 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41670 if (err)
41671 return err;
41672
41673 + if (gr_handle_chroot_fowner(pid, type))
41674 + return -ENOENT;
41675 + if (gr_check_protected_task_fowner(pid, type))
41676 + return -EACCES;
41677 +
41678 f_modown(filp, pid, type, force);
41679 return 0;
41680 }
41681 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41682 switch (cmd) {
41683 case F_DUPFD:
41684 case F_DUPFD_CLOEXEC:
41685 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41686 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41687 break;
41688 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41689 diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41690 --- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41691 +++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41692 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41693 */
41694 filp->f_op = &read_pipefifo_fops;
41695 pipe->r_counter++;
41696 - if (pipe->readers++ == 0)
41697 + if (atomic_inc_return(&pipe->readers) == 1)
41698 wake_up_partner(inode);
41699
41700 - if (!pipe->writers) {
41701 + if (!atomic_read(&pipe->writers)) {
41702 if ((filp->f_flags & O_NONBLOCK)) {
41703 /* suppress POLLHUP until we have
41704 * seen a writer */
41705 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41706 * errno=ENXIO when there is no process reading the FIFO.
41707 */
41708 ret = -ENXIO;
41709 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41710 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41711 goto err;
41712
41713 filp->f_op = &write_pipefifo_fops;
41714 pipe->w_counter++;
41715 - if (!pipe->writers++)
41716 + if (atomic_inc_return(&pipe->writers) == 1)
41717 wake_up_partner(inode);
41718
41719 - if (!pipe->readers) {
41720 + if (!atomic_read(&pipe->readers)) {
41721 wait_for_partner(inode, &pipe->r_counter);
41722 if (signal_pending(current))
41723 goto err_wr;
41724 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41725 */
41726 filp->f_op = &rdwr_pipefifo_fops;
41727
41728 - pipe->readers++;
41729 - pipe->writers++;
41730 + atomic_inc(&pipe->readers);
41731 + atomic_inc(&pipe->writers);
41732 pipe->r_counter++;
41733 pipe->w_counter++;
41734 - if (pipe->readers == 1 || pipe->writers == 1)
41735 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41736 wake_up_partner(inode);
41737 break;
41738
41739 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41740 return 0;
41741
41742 err_rd:
41743 - if (!--pipe->readers)
41744 + if (atomic_dec_and_test(&pipe->readers))
41745 wake_up_interruptible(&pipe->wait);
41746 ret = -ERESTARTSYS;
41747 goto err;
41748
41749 err_wr:
41750 - if (!--pipe->writers)
41751 + if (atomic_dec_and_test(&pipe->writers))
41752 wake_up_interruptible(&pipe->wait);
41753 ret = -ERESTARTSYS;
41754 goto err;
41755
41756 err:
41757 - if (!pipe->readers && !pipe->writers)
41758 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41759 free_pipe_info(inode);
41760
41761 err_nocleanup:
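
The fifo.c hunk above (like the coredump pipe hunk in fs/exec.c) turns pipe->readers and pipe->writers into atomic_t, rewriting each test so its meaning is preserved: readers++ == 0 becomes atomic_inc_return(&readers) == 1, !--writers becomes atomic_dec_and_test(&writers), and plain reads become atomic_read(). A small C11 userspace model of the two non-obvious translations, illustrative only (the kernel's atomic_inc_return() yields the new value, i.e. the C11 fetch-add result plus one):

/* Userspace model of the counter conversions, using <stdatomic.h> in
 * place of the kernel's atomic_t.  Each assertion states that the old
 * test and its atomic replacement agree: "this increment took the
 * counter from 0 to 1" and "this decrement brought it to 0". */
#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	int plain = 0;
	atomic_int a = 0;

	assert((plain++ == 0) == (atomic_fetch_add(&a, 1) + 1 == 1));
	assert((!--plain)     == (atomic_fetch_sub(&a, 1) - 1 == 0));
	return 0;
}
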
41762 diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41763 --- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41764 +++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41765 @@ -14,6 +14,7 @@
41766 #include <linux/slab.h>
41767 #include <linux/vmalloc.h>
41768 #include <linux/file.h>
41769 +#include <linux/security.h>
41770 #include <linux/fdtable.h>
41771 #include <linux/bitops.h>
41772 #include <linux/interrupt.h>
41773 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41774 * N.B. For clone tasks sharing a files structure, this test
41775 * will limit the total number of files that can be opened.
41776 */
41777 +
41778 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41779 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41780 return -EMFILE;
41781
41782 diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41783 --- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41784 +++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41785 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41786 int len = dot ? dot - name : strlen(name);
41787
41788 fs = __get_fs_type(name, len);
41789 +
41790 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
41791 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41792 +#else
41793 if (!fs && (request_module("%.*s", len, name) == 0))
41794 +#endif
41795 fs = __get_fs_type(name, len);
41796
41797 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41798 diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41799 --- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41800 +++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41801 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41802 parent ? (char *) parent->def->name : "<no-parent>",
41803 def->name, netfs_data);
41804
41805 - fscache_stat(&fscache_n_acquires);
41806 + fscache_stat_unchecked(&fscache_n_acquires);
41807
41808 /* if there's no parent cookie, then we don't create one here either */
41809 if (!parent) {
41810 - fscache_stat(&fscache_n_acquires_null);
41811 + fscache_stat_unchecked(&fscache_n_acquires_null);
41812 _leave(" [no parent]");
41813 return NULL;
41814 }
41815 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41816 /* allocate and initialise a cookie */
41817 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41818 if (!cookie) {
41819 - fscache_stat(&fscache_n_acquires_oom);
41820 + fscache_stat_unchecked(&fscache_n_acquires_oom);
41821 _leave(" [ENOMEM]");
41822 return NULL;
41823 }
41824 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41825
41826 switch (cookie->def->type) {
41827 case FSCACHE_COOKIE_TYPE_INDEX:
41828 - fscache_stat(&fscache_n_cookie_index);
41829 + fscache_stat_unchecked(&fscache_n_cookie_index);
41830 break;
41831 case FSCACHE_COOKIE_TYPE_DATAFILE:
41832 - fscache_stat(&fscache_n_cookie_data);
41833 + fscache_stat_unchecked(&fscache_n_cookie_data);
41834 break;
41835 default:
41836 - fscache_stat(&fscache_n_cookie_special);
41837 + fscache_stat_unchecked(&fscache_n_cookie_special);
41838 break;
41839 }
41840
41841 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41842 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41843 atomic_dec(&parent->n_children);
41844 __fscache_cookie_put(cookie);
41845 - fscache_stat(&fscache_n_acquires_nobufs);
41846 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41847 _leave(" = NULL");
41848 return NULL;
41849 }
41850 }
41851
41852 - fscache_stat(&fscache_n_acquires_ok);
41853 + fscache_stat_unchecked(&fscache_n_acquires_ok);
41854 _leave(" = %p", cookie);
41855 return cookie;
41856 }
41857 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41858 cache = fscache_select_cache_for_object(cookie->parent);
41859 if (!cache) {
41860 up_read(&fscache_addremove_sem);
41861 - fscache_stat(&fscache_n_acquires_no_cache);
41862 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41863 _leave(" = -ENOMEDIUM [no cache]");
41864 return -ENOMEDIUM;
41865 }
41866 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41867 object = cache->ops->alloc_object(cache, cookie);
41868 fscache_stat_d(&fscache_n_cop_alloc_object);
41869 if (IS_ERR(object)) {
41870 - fscache_stat(&fscache_n_object_no_alloc);
41871 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
41872 ret = PTR_ERR(object);
41873 goto error;
41874 }
41875
41876 - fscache_stat(&fscache_n_object_alloc);
41877 + fscache_stat_unchecked(&fscache_n_object_alloc);
41878
41879 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41880
41881 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41882 struct fscache_object *object;
41883 struct hlist_node *_p;
41884
41885 - fscache_stat(&fscache_n_updates);
41886 + fscache_stat_unchecked(&fscache_n_updates);
41887
41888 if (!cookie) {
41889 - fscache_stat(&fscache_n_updates_null);
41890 + fscache_stat_unchecked(&fscache_n_updates_null);
41891 _leave(" [no cookie]");
41892 return;
41893 }
41894 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41895 struct fscache_object *object;
41896 unsigned long event;
41897
41898 - fscache_stat(&fscache_n_relinquishes);
41899 + fscache_stat_unchecked(&fscache_n_relinquishes);
41900 if (retire)
41901 - fscache_stat(&fscache_n_relinquishes_retire);
41902 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41903
41904 if (!cookie) {
41905 - fscache_stat(&fscache_n_relinquishes_null);
41906 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
41907 _leave(" [no cookie]");
41908 return;
41909 }
41910 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41911
41912 /* wait for the cookie to finish being instantiated (or to fail) */
41913 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41914 - fscache_stat(&fscache_n_relinquishes_waitcrt);
41915 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41916 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41917 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41918 }
41919 diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41920 --- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41921 +++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41922 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41923 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41924 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41925
41926 -extern atomic_t fscache_n_op_pend;
41927 -extern atomic_t fscache_n_op_run;
41928 -extern atomic_t fscache_n_op_enqueue;
41929 -extern atomic_t fscache_n_op_deferred_release;
41930 -extern atomic_t fscache_n_op_release;
41931 -extern atomic_t fscache_n_op_gc;
41932 -extern atomic_t fscache_n_op_cancelled;
41933 -extern atomic_t fscache_n_op_rejected;
41934 -
41935 -extern atomic_t fscache_n_attr_changed;
41936 -extern atomic_t fscache_n_attr_changed_ok;
41937 -extern atomic_t fscache_n_attr_changed_nobufs;
41938 -extern atomic_t fscache_n_attr_changed_nomem;
41939 -extern atomic_t fscache_n_attr_changed_calls;
41940 -
41941 -extern atomic_t fscache_n_allocs;
41942 -extern atomic_t fscache_n_allocs_ok;
41943 -extern atomic_t fscache_n_allocs_wait;
41944 -extern atomic_t fscache_n_allocs_nobufs;
41945 -extern atomic_t fscache_n_allocs_intr;
41946 -extern atomic_t fscache_n_allocs_object_dead;
41947 -extern atomic_t fscache_n_alloc_ops;
41948 -extern atomic_t fscache_n_alloc_op_waits;
41949 -
41950 -extern atomic_t fscache_n_retrievals;
41951 -extern atomic_t fscache_n_retrievals_ok;
41952 -extern atomic_t fscache_n_retrievals_wait;
41953 -extern atomic_t fscache_n_retrievals_nodata;
41954 -extern atomic_t fscache_n_retrievals_nobufs;
41955 -extern atomic_t fscache_n_retrievals_intr;
41956 -extern atomic_t fscache_n_retrievals_nomem;
41957 -extern atomic_t fscache_n_retrievals_object_dead;
41958 -extern atomic_t fscache_n_retrieval_ops;
41959 -extern atomic_t fscache_n_retrieval_op_waits;
41960 -
41961 -extern atomic_t fscache_n_stores;
41962 -extern atomic_t fscache_n_stores_ok;
41963 -extern atomic_t fscache_n_stores_again;
41964 -extern atomic_t fscache_n_stores_nobufs;
41965 -extern atomic_t fscache_n_stores_oom;
41966 -extern atomic_t fscache_n_store_ops;
41967 -extern atomic_t fscache_n_store_calls;
41968 -extern atomic_t fscache_n_store_pages;
41969 -extern atomic_t fscache_n_store_radix_deletes;
41970 -extern atomic_t fscache_n_store_pages_over_limit;
41971 -
41972 -extern atomic_t fscache_n_store_vmscan_not_storing;
41973 -extern atomic_t fscache_n_store_vmscan_gone;
41974 -extern atomic_t fscache_n_store_vmscan_busy;
41975 -extern atomic_t fscache_n_store_vmscan_cancelled;
41976 -
41977 -extern atomic_t fscache_n_marks;
41978 -extern atomic_t fscache_n_uncaches;
41979 -
41980 -extern atomic_t fscache_n_acquires;
41981 -extern atomic_t fscache_n_acquires_null;
41982 -extern atomic_t fscache_n_acquires_no_cache;
41983 -extern atomic_t fscache_n_acquires_ok;
41984 -extern atomic_t fscache_n_acquires_nobufs;
41985 -extern atomic_t fscache_n_acquires_oom;
41986 -
41987 -extern atomic_t fscache_n_updates;
41988 -extern atomic_t fscache_n_updates_null;
41989 -extern atomic_t fscache_n_updates_run;
41990 -
41991 -extern atomic_t fscache_n_relinquishes;
41992 -extern atomic_t fscache_n_relinquishes_null;
41993 -extern atomic_t fscache_n_relinquishes_waitcrt;
41994 -extern atomic_t fscache_n_relinquishes_retire;
41995 -
41996 -extern atomic_t fscache_n_cookie_index;
41997 -extern atomic_t fscache_n_cookie_data;
41998 -extern atomic_t fscache_n_cookie_special;
41999 -
42000 -extern atomic_t fscache_n_object_alloc;
42001 -extern atomic_t fscache_n_object_no_alloc;
42002 -extern atomic_t fscache_n_object_lookups;
42003 -extern atomic_t fscache_n_object_lookups_negative;
42004 -extern atomic_t fscache_n_object_lookups_positive;
42005 -extern atomic_t fscache_n_object_lookups_timed_out;
42006 -extern atomic_t fscache_n_object_created;
42007 -extern atomic_t fscache_n_object_avail;
42008 -extern atomic_t fscache_n_object_dead;
42009 -
42010 -extern atomic_t fscache_n_checkaux_none;
42011 -extern atomic_t fscache_n_checkaux_okay;
42012 -extern atomic_t fscache_n_checkaux_update;
42013 -extern atomic_t fscache_n_checkaux_obsolete;
42014 +extern atomic_unchecked_t fscache_n_op_pend;
42015 +extern atomic_unchecked_t fscache_n_op_run;
42016 +extern atomic_unchecked_t fscache_n_op_enqueue;
42017 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42018 +extern atomic_unchecked_t fscache_n_op_release;
42019 +extern atomic_unchecked_t fscache_n_op_gc;
42020 +extern atomic_unchecked_t fscache_n_op_cancelled;
42021 +extern atomic_unchecked_t fscache_n_op_rejected;
42022 +
42023 +extern atomic_unchecked_t fscache_n_attr_changed;
42024 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42025 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42026 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42027 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42028 +
42029 +extern atomic_unchecked_t fscache_n_allocs;
42030 +extern atomic_unchecked_t fscache_n_allocs_ok;
42031 +extern atomic_unchecked_t fscache_n_allocs_wait;
42032 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42033 +extern atomic_unchecked_t fscache_n_allocs_intr;
42034 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42035 +extern atomic_unchecked_t fscache_n_alloc_ops;
42036 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42037 +
42038 +extern atomic_unchecked_t fscache_n_retrievals;
42039 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42040 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42041 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42042 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42043 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42044 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42045 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42046 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42047 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42048 +
42049 +extern atomic_unchecked_t fscache_n_stores;
42050 +extern atomic_unchecked_t fscache_n_stores_ok;
42051 +extern atomic_unchecked_t fscache_n_stores_again;
42052 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42053 +extern atomic_unchecked_t fscache_n_stores_oom;
42054 +extern atomic_unchecked_t fscache_n_store_ops;
42055 +extern atomic_unchecked_t fscache_n_store_calls;
42056 +extern atomic_unchecked_t fscache_n_store_pages;
42057 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42058 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42059 +
42060 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42061 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42062 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42063 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42064 +
42065 +extern atomic_unchecked_t fscache_n_marks;
42066 +extern atomic_unchecked_t fscache_n_uncaches;
42067 +
42068 +extern atomic_unchecked_t fscache_n_acquires;
42069 +extern atomic_unchecked_t fscache_n_acquires_null;
42070 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42071 +extern atomic_unchecked_t fscache_n_acquires_ok;
42072 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42073 +extern atomic_unchecked_t fscache_n_acquires_oom;
42074 +
42075 +extern atomic_unchecked_t fscache_n_updates;
42076 +extern atomic_unchecked_t fscache_n_updates_null;
42077 +extern atomic_unchecked_t fscache_n_updates_run;
42078 +
42079 +extern atomic_unchecked_t fscache_n_relinquishes;
42080 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42081 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42082 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42083 +
42084 +extern atomic_unchecked_t fscache_n_cookie_index;
42085 +extern atomic_unchecked_t fscache_n_cookie_data;
42086 +extern atomic_unchecked_t fscache_n_cookie_special;
42087 +
42088 +extern atomic_unchecked_t fscache_n_object_alloc;
42089 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42090 +extern atomic_unchecked_t fscache_n_object_lookups;
42091 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42092 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42093 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42094 +extern atomic_unchecked_t fscache_n_object_created;
42095 +extern atomic_unchecked_t fscache_n_object_avail;
42096 +extern atomic_unchecked_t fscache_n_object_dead;
42097 +
42098 +extern atomic_unchecked_t fscache_n_checkaux_none;
42099 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42100 +extern atomic_unchecked_t fscache_n_checkaux_update;
42101 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42102
42103 extern atomic_t fscache_n_cop_alloc_object;
42104 extern atomic_t fscache_n_cop_lookup_object;
42105 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42106 atomic_inc(stat);
42107 }
42108
42109 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42110 +{
42111 + atomic_inc_unchecked(stat);
42112 +}
42113 +
42114 static inline void fscache_stat_d(atomic_t *stat)
42115 {
42116 atomic_dec(stat);
42117 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
42118
42119 #define __fscache_stat(stat) (NULL)
42120 #define fscache_stat(stat) do {} while (0)
42121 +#define fscache_stat_unchecked(stat) do {} while (0)
42122 #define fscache_stat_d(stat) do {} while (0)
42123 #endif
42124
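
The fscache hunks here (and the ext4 mballoc ones earlier) move pure statistics counters from atomic_t to atomic_unchecked_t. Under CONFIG_PAX_REFCOUNT, ordinary atomic_t operations are instrumented to detect overflow, since a wrapped reference count is usually an exploitable bug; counters that merely accumulate statistics are allowed to wrap, so they are switched to the unchecked variant to avoid false positives. The sketch below is a simplified userspace model of that distinction, not the kernel's implementation (the real check uses architecture-specific overflow-trapping code, and atomic_t holds a signed int; the model uses unsigned arithmetic so the wrap stays well defined in standard C):

/* Conceptual model: a "checked" increment aborts on wrap-around (how a
 * refcount overflow is treated), while the "unchecked" one just wraps,
 * which is acceptable for statistics such as fscache_n_acquires. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct counter { unsigned int v; };

static void inc_checked(struct counter *c)	/* models atomic_inc() under PAX_REFCOUNT */
{
	if (c->v == UINT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	c->v++;
}

static void inc_unchecked(struct counter *c)	/* models atomic_inc_unchecked() */
{
	c->v++;					/* wrap-around is harmless here */
}

int main(void)
{
	struct counter refs = { 1 }, stats = { UINT_MAX };

	inc_checked(&refs);			/* 1 -> 2, no overflow */
	inc_unchecked(&stats);			/* wraps to 0, by design */
	printf("refs=%u stats=%u\n", refs.v, stats.v);
	return 0;
}
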
42125 diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
42126 --- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42127 +++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42128 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
42129 /* update the object metadata on disk */
42130 case FSCACHE_OBJECT_UPDATING:
42131 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42132 - fscache_stat(&fscache_n_updates_run);
42133 + fscache_stat_unchecked(&fscache_n_updates_run);
42134 fscache_stat(&fscache_n_cop_update_object);
42135 object->cache->ops->update_object(object);
42136 fscache_stat_d(&fscache_n_cop_update_object);
42137 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
42138 spin_lock(&object->lock);
42139 object->state = FSCACHE_OBJECT_DEAD;
42140 spin_unlock(&object->lock);
42141 - fscache_stat(&fscache_n_object_dead);
42142 + fscache_stat_unchecked(&fscache_n_object_dead);
42143 goto terminal_transit;
42144
42145 /* handle the parent cache of this object being withdrawn from
42146 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
42147 spin_lock(&object->lock);
42148 object->state = FSCACHE_OBJECT_DEAD;
42149 spin_unlock(&object->lock);
42150 - fscache_stat(&fscache_n_object_dead);
42151 + fscache_stat_unchecked(&fscache_n_object_dead);
42152 goto terminal_transit;
42153
42154 /* complain about the object being woken up once it is
42155 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42156 parent->cookie->def->name, cookie->def->name,
42157 object->cache->tag->name);
42158
42159 - fscache_stat(&fscache_n_object_lookups);
42160 + fscache_stat_unchecked(&fscache_n_object_lookups);
42161 fscache_stat(&fscache_n_cop_lookup_object);
42162 ret = object->cache->ops->lookup_object(object);
42163 fscache_stat_d(&fscache_n_cop_lookup_object);
42164 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42165 if (ret == -ETIMEDOUT) {
42166 /* probably stuck behind another object, so move this one to
42167 * the back of the queue */
42168 - fscache_stat(&fscache_n_object_lookups_timed_out);
42169 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42170 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42171 }
42172
42173 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42174
42175 spin_lock(&object->lock);
42176 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42177 - fscache_stat(&fscache_n_object_lookups_negative);
42178 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42179
42180 /* transit here to allow write requests to begin stacking up
42181 * and read requests to begin returning ENODATA */
42182 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42183 * result, in which case there may be data available */
42184 spin_lock(&object->lock);
42185 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42186 - fscache_stat(&fscache_n_object_lookups_positive);
42187 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42188
42189 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42190
42191 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42192 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42193 } else {
42194 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42195 - fscache_stat(&fscache_n_object_created);
42196 + fscache_stat_unchecked(&fscache_n_object_created);
42197
42198 object->state = FSCACHE_OBJECT_AVAILABLE;
42199 spin_unlock(&object->lock);
42200 @@ -633,7 +633,7 @@ static void fscache_object_available(str
42201 fscache_enqueue_dependents(object);
42202
42203 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42204 - fscache_stat(&fscache_n_object_avail);
42205 + fscache_stat_unchecked(&fscache_n_object_avail);
42206
42207 _leave("");
42208 }
42209 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42210 enum fscache_checkaux result;
42211
42212 if (!object->cookie->def->check_aux) {
42213 - fscache_stat(&fscache_n_checkaux_none);
42214 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42215 return FSCACHE_CHECKAUX_OKAY;
42216 }
42217
42218 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42219 switch (result) {
42220 /* entry okay as is */
42221 case FSCACHE_CHECKAUX_OKAY:
42222 - fscache_stat(&fscache_n_checkaux_okay);
42223 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42224 break;
42225
42226 /* entry requires update */
42227 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42228 - fscache_stat(&fscache_n_checkaux_update);
42229 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42230 break;
42231
42232 /* entry requires deletion */
42233 case FSCACHE_CHECKAUX_OBSOLETE:
42234 - fscache_stat(&fscache_n_checkaux_obsolete);
42235 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42236 break;
42237
42238 default:
42239 diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
42240 --- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42241 +++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42242 @@ -16,7 +16,7 @@
42243 #include <linux/seq_file.h>
42244 #include "internal.h"
42245
42246 -atomic_t fscache_op_debug_id;
42247 +atomic_unchecked_t fscache_op_debug_id;
42248 EXPORT_SYMBOL(fscache_op_debug_id);
42249
42250 /**
42251 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42252 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42253 ASSERTCMP(atomic_read(&op->usage), >, 0);
42254
42255 - fscache_stat(&fscache_n_op_enqueue);
42256 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42257 switch (op->flags & FSCACHE_OP_TYPE) {
42258 case FSCACHE_OP_FAST:
42259 _debug("queue fast");
42260 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42261 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42262 if (op->processor)
42263 fscache_enqueue_operation(op);
42264 - fscache_stat(&fscache_n_op_run);
42265 + fscache_stat_unchecked(&fscache_n_op_run);
42266 }
42267
42268 /*
42269 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42270 if (object->n_ops > 0) {
42271 atomic_inc(&op->usage);
42272 list_add_tail(&op->pend_link, &object->pending_ops);
42273 - fscache_stat(&fscache_n_op_pend);
42274 + fscache_stat_unchecked(&fscache_n_op_pend);
42275 } else if (!list_empty(&object->pending_ops)) {
42276 atomic_inc(&op->usage);
42277 list_add_tail(&op->pend_link, &object->pending_ops);
42278 - fscache_stat(&fscache_n_op_pend);
42279 + fscache_stat_unchecked(&fscache_n_op_pend);
42280 fscache_start_operations(object);
42281 } else {
42282 ASSERTCMP(object->n_in_progress, ==, 0);
42283 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42284 object->n_exclusive++; /* reads and writes must wait */
42285 atomic_inc(&op->usage);
42286 list_add_tail(&op->pend_link, &object->pending_ops);
42287 - fscache_stat(&fscache_n_op_pend);
42288 + fscache_stat_unchecked(&fscache_n_op_pend);
42289 ret = 0;
42290 } else {
42291 /* not allowed to submit ops in any other state */
42292 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42293 if (object->n_exclusive > 0) {
42294 atomic_inc(&op->usage);
42295 list_add_tail(&op->pend_link, &object->pending_ops);
42296 - fscache_stat(&fscache_n_op_pend);
42297 + fscache_stat_unchecked(&fscache_n_op_pend);
42298 } else if (!list_empty(&object->pending_ops)) {
42299 atomic_inc(&op->usage);
42300 list_add_tail(&op->pend_link, &object->pending_ops);
42301 - fscache_stat(&fscache_n_op_pend);
42302 + fscache_stat_unchecked(&fscache_n_op_pend);
42303 fscache_start_operations(object);
42304 } else {
42305 ASSERTCMP(object->n_exclusive, ==, 0);
42306 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42307 object->n_ops++;
42308 atomic_inc(&op->usage);
42309 list_add_tail(&op->pend_link, &object->pending_ops);
42310 - fscache_stat(&fscache_n_op_pend);
42311 + fscache_stat_unchecked(&fscache_n_op_pend);
42312 ret = 0;
42313 } else if (object->state == FSCACHE_OBJECT_DYING ||
42314 object->state == FSCACHE_OBJECT_LC_DYING ||
42315 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42316 - fscache_stat(&fscache_n_op_rejected);
42317 + fscache_stat_unchecked(&fscache_n_op_rejected);
42318 ret = -ENOBUFS;
42319 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42320 fscache_report_unexpected_submission(object, op, ostate);
42321 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42322
42323 ret = -EBUSY;
42324 if (!list_empty(&op->pend_link)) {
42325 - fscache_stat(&fscache_n_op_cancelled);
42326 + fscache_stat_unchecked(&fscache_n_op_cancelled);
42327 list_del_init(&op->pend_link);
42328 object->n_ops--;
42329 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42330 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42331 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42332 BUG();
42333
42334 - fscache_stat(&fscache_n_op_release);
42335 + fscache_stat_unchecked(&fscache_n_op_release);
42336
42337 if (op->release) {
42338 op->release(op);
42339 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42340 * lock, and defer it otherwise */
42341 if (!spin_trylock(&object->lock)) {
42342 _debug("defer put");
42343 - fscache_stat(&fscache_n_op_deferred_release);
42344 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
42345
42346 cache = object->cache;
42347 spin_lock(&cache->op_gc_list_lock);
42348 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42349
42350 _debug("GC DEFERRED REL OBJ%x OP%x",
42351 object->debug_id, op->debug_id);
42352 - fscache_stat(&fscache_n_op_gc);
42353 + fscache_stat_unchecked(&fscache_n_op_gc);
42354
42355 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42356
42357 diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42358 --- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42359 +++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42360 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42361 val = radix_tree_lookup(&cookie->stores, page->index);
42362 if (!val) {
42363 rcu_read_unlock();
42364 - fscache_stat(&fscache_n_store_vmscan_not_storing);
42365 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42366 __fscache_uncache_page(cookie, page);
42367 return true;
42368 }
42369 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42370 spin_unlock(&cookie->stores_lock);
42371
42372 if (xpage) {
42373 - fscache_stat(&fscache_n_store_vmscan_cancelled);
42374 - fscache_stat(&fscache_n_store_radix_deletes);
42375 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42376 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42377 ASSERTCMP(xpage, ==, page);
42378 } else {
42379 - fscache_stat(&fscache_n_store_vmscan_gone);
42380 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42381 }
42382
42383 wake_up_bit(&cookie->flags, 0);
42384 @@ -106,7 +106,7 @@ page_busy:
42385 /* we might want to wait here, but that could deadlock the allocator as
42386 * the slow-work threads writing to the cache may all end up sleeping
42387 * on memory allocation */
42388 - fscache_stat(&fscache_n_store_vmscan_busy);
42389 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42390 return false;
42391 }
42392 EXPORT_SYMBOL(__fscache_maybe_release_page);
42393 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42394 FSCACHE_COOKIE_STORING_TAG);
42395 if (!radix_tree_tag_get(&cookie->stores, page->index,
42396 FSCACHE_COOKIE_PENDING_TAG)) {
42397 - fscache_stat(&fscache_n_store_radix_deletes);
42398 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42399 xpage = radix_tree_delete(&cookie->stores, page->index);
42400 }
42401 spin_unlock(&cookie->stores_lock);
42402 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42403
42404 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42405
42406 - fscache_stat(&fscache_n_attr_changed_calls);
42407 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42408
42409 if (fscache_object_is_active(object)) {
42410 fscache_set_op_state(op, "CallFS");
42411 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42412
42413 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42414
42415 - fscache_stat(&fscache_n_attr_changed);
42416 + fscache_stat_unchecked(&fscache_n_attr_changed);
42417
42418 op = kzalloc(sizeof(*op), GFP_KERNEL);
42419 if (!op) {
42420 - fscache_stat(&fscache_n_attr_changed_nomem);
42421 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42422 _leave(" = -ENOMEM");
42423 return -ENOMEM;
42424 }
42425 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42426 if (fscache_submit_exclusive_op(object, op) < 0)
42427 goto nobufs;
42428 spin_unlock(&cookie->lock);
42429 - fscache_stat(&fscache_n_attr_changed_ok);
42430 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42431 fscache_put_operation(op);
42432 _leave(" = 0");
42433 return 0;
42434 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42435 nobufs:
42436 spin_unlock(&cookie->lock);
42437 kfree(op);
42438 - fscache_stat(&fscache_n_attr_changed_nobufs);
42439 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42440 _leave(" = %d", -ENOBUFS);
42441 return -ENOBUFS;
42442 }
42443 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42444 /* allocate a retrieval operation and attempt to submit it */
42445 op = kzalloc(sizeof(*op), GFP_NOIO);
42446 if (!op) {
42447 - fscache_stat(&fscache_n_retrievals_nomem);
42448 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42449 return NULL;
42450 }
42451
42452 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42453 return 0;
42454 }
42455
42456 - fscache_stat(&fscache_n_retrievals_wait);
42457 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42458
42459 jif = jiffies;
42460 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42461 fscache_wait_bit_interruptible,
42462 TASK_INTERRUPTIBLE) != 0) {
42463 - fscache_stat(&fscache_n_retrievals_intr);
42464 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42465 _leave(" = -ERESTARTSYS");
42466 return -ERESTARTSYS;
42467 }
42468 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42469 */
42470 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42471 struct fscache_retrieval *op,
42472 - atomic_t *stat_op_waits,
42473 - atomic_t *stat_object_dead)
42474 + atomic_unchecked_t *stat_op_waits,
42475 + atomic_unchecked_t *stat_object_dead)
42476 {
42477 int ret;
42478
42479 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42480 goto check_if_dead;
42481
42482 _debug(">>> WT");
42483 - fscache_stat(stat_op_waits);
42484 + fscache_stat_unchecked(stat_op_waits);
42485 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42486 fscache_wait_bit_interruptible,
42487 TASK_INTERRUPTIBLE) < 0) {
42488 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42489
42490 check_if_dead:
42491 if (unlikely(fscache_object_is_dead(object))) {
42492 - fscache_stat(stat_object_dead);
42493 + fscache_stat_unchecked(stat_object_dead);
42494 return -ENOBUFS;
42495 }
42496 return 0;
42497 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42498
42499 _enter("%p,%p,,,", cookie, page);
42500
42501 - fscache_stat(&fscache_n_retrievals);
42502 + fscache_stat_unchecked(&fscache_n_retrievals);
42503
42504 if (hlist_empty(&cookie->backing_objects))
42505 goto nobufs;
42506 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42507 goto nobufs_unlock;
42508 spin_unlock(&cookie->lock);
42509
42510 - fscache_stat(&fscache_n_retrieval_ops);
42511 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42512
42513 /* pin the netfs read context in case we need to do the actual netfs
42514 * read because we've encountered a cache read failure */
42515 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42516
42517 error:
42518 if (ret == -ENOMEM)
42519 - fscache_stat(&fscache_n_retrievals_nomem);
42520 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42521 else if (ret == -ERESTARTSYS)
42522 - fscache_stat(&fscache_n_retrievals_intr);
42523 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42524 else if (ret == -ENODATA)
42525 - fscache_stat(&fscache_n_retrievals_nodata);
42526 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42527 else if (ret < 0)
42528 - fscache_stat(&fscache_n_retrievals_nobufs);
42529 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42530 else
42531 - fscache_stat(&fscache_n_retrievals_ok);
42532 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42533
42534 fscache_put_retrieval(op);
42535 _leave(" = %d", ret);
42536 @@ -453,7 +453,7 @@ nobufs_unlock:
42537 spin_unlock(&cookie->lock);
42538 kfree(op);
42539 nobufs:
42540 - fscache_stat(&fscache_n_retrievals_nobufs);
42541 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42542 _leave(" = -ENOBUFS");
42543 return -ENOBUFS;
42544 }
42545 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42546
42547 _enter("%p,,%d,,,", cookie, *nr_pages);
42548
42549 - fscache_stat(&fscache_n_retrievals);
42550 + fscache_stat_unchecked(&fscache_n_retrievals);
42551
42552 if (hlist_empty(&cookie->backing_objects))
42553 goto nobufs;
42554 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42555 goto nobufs_unlock;
42556 spin_unlock(&cookie->lock);
42557
42558 - fscache_stat(&fscache_n_retrieval_ops);
42559 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42560
42561 /* pin the netfs read context in case we need to do the actual netfs
42562 * read because we've encountered a cache read failure */
42563 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42564
42565 error:
42566 if (ret == -ENOMEM)
42567 - fscache_stat(&fscache_n_retrievals_nomem);
42568 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42569 else if (ret == -ERESTARTSYS)
42570 - fscache_stat(&fscache_n_retrievals_intr);
42571 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42572 else if (ret == -ENODATA)
42573 - fscache_stat(&fscache_n_retrievals_nodata);
42574 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42575 else if (ret < 0)
42576 - fscache_stat(&fscache_n_retrievals_nobufs);
42577 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42578 else
42579 - fscache_stat(&fscache_n_retrievals_ok);
42580 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42581
42582 fscache_put_retrieval(op);
42583 _leave(" = %d", ret);
42584 @@ -570,7 +570,7 @@ nobufs_unlock:
42585 spin_unlock(&cookie->lock);
42586 kfree(op);
42587 nobufs:
42588 - fscache_stat(&fscache_n_retrievals_nobufs);
42589 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42590 _leave(" = -ENOBUFS");
42591 return -ENOBUFS;
42592 }
42593 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42594
42595 _enter("%p,%p,,,", cookie, page);
42596
42597 - fscache_stat(&fscache_n_allocs);
42598 + fscache_stat_unchecked(&fscache_n_allocs);
42599
42600 if (hlist_empty(&cookie->backing_objects))
42601 goto nobufs;
42602 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42603 goto nobufs_unlock;
42604 spin_unlock(&cookie->lock);
42605
42606 - fscache_stat(&fscache_n_alloc_ops);
42607 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42608
42609 ret = fscache_wait_for_retrieval_activation(
42610 object, op,
42611 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42612
42613 error:
42614 if (ret == -ERESTARTSYS)
42615 - fscache_stat(&fscache_n_allocs_intr);
42616 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42617 else if (ret < 0)
42618 - fscache_stat(&fscache_n_allocs_nobufs);
42619 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42620 else
42621 - fscache_stat(&fscache_n_allocs_ok);
42622 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42623
42624 fscache_put_retrieval(op);
42625 _leave(" = %d", ret);
42626 @@ -651,7 +651,7 @@ nobufs_unlock:
42627 spin_unlock(&cookie->lock);
42628 kfree(op);
42629 nobufs:
42630 - fscache_stat(&fscache_n_allocs_nobufs);
42631 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42632 _leave(" = -ENOBUFS");
42633 return -ENOBUFS;
42634 }
42635 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42636
42637 spin_lock(&cookie->stores_lock);
42638
42639 - fscache_stat(&fscache_n_store_calls);
42640 + fscache_stat_unchecked(&fscache_n_store_calls);
42641
42642 /* find a page to store */
42643 page = NULL;
42644 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42645 page = results[0];
42646 _debug("gang %d [%lx]", n, page->index);
42647 if (page->index > op->store_limit) {
42648 - fscache_stat(&fscache_n_store_pages_over_limit);
42649 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42650 goto superseded;
42651 }
42652
42653 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42654
42655 if (page) {
42656 fscache_set_op_state(&op->op, "Store");
42657 - fscache_stat(&fscache_n_store_pages);
42658 + fscache_stat_unchecked(&fscache_n_store_pages);
42659 fscache_stat(&fscache_n_cop_write_page);
42660 ret = object->cache->ops->write_page(op, page);
42661 fscache_stat_d(&fscache_n_cop_write_page);
42662 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42663 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42664 ASSERT(PageFsCache(page));
42665
42666 - fscache_stat(&fscache_n_stores);
42667 + fscache_stat_unchecked(&fscache_n_stores);
42668
42669 op = kzalloc(sizeof(*op), GFP_NOIO);
42670 if (!op)
42671 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42672 spin_unlock(&cookie->stores_lock);
42673 spin_unlock(&object->lock);
42674
42675 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42676 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42677 op->store_limit = object->store_limit;
42678
42679 if (fscache_submit_op(object, &op->op) < 0)
42680 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42681
42682 spin_unlock(&cookie->lock);
42683 radix_tree_preload_end();
42684 - fscache_stat(&fscache_n_store_ops);
42685 - fscache_stat(&fscache_n_stores_ok);
42686 + fscache_stat_unchecked(&fscache_n_store_ops);
42687 + fscache_stat_unchecked(&fscache_n_stores_ok);
42688
42689 /* the slow work queue now carries its own ref on the object */
42690 fscache_put_operation(&op->op);
42691 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42692 return 0;
42693
42694 already_queued:
42695 - fscache_stat(&fscache_n_stores_again);
42696 + fscache_stat_unchecked(&fscache_n_stores_again);
42697 already_pending:
42698 spin_unlock(&cookie->stores_lock);
42699 spin_unlock(&object->lock);
42700 spin_unlock(&cookie->lock);
42701 radix_tree_preload_end();
42702 kfree(op);
42703 - fscache_stat(&fscache_n_stores_ok);
42704 + fscache_stat_unchecked(&fscache_n_stores_ok);
42705 _leave(" = 0");
42706 return 0;
42707
42708 @@ -886,14 +886,14 @@ nobufs:
42709 spin_unlock(&cookie->lock);
42710 radix_tree_preload_end();
42711 kfree(op);
42712 - fscache_stat(&fscache_n_stores_nobufs);
42713 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
42714 _leave(" = -ENOBUFS");
42715 return -ENOBUFS;
42716
42717 nomem_free:
42718 kfree(op);
42719 nomem:
42720 - fscache_stat(&fscache_n_stores_oom);
42721 + fscache_stat_unchecked(&fscache_n_stores_oom);
42722 _leave(" = -ENOMEM");
42723 return -ENOMEM;
42724 }
42725 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42726 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42727 ASSERTCMP(page, !=, NULL);
42728
42729 - fscache_stat(&fscache_n_uncaches);
42730 + fscache_stat_unchecked(&fscache_n_uncaches);
42731
42732 /* cache withdrawal may beat us to it */
42733 if (!PageFsCache(page))
42734 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42735 unsigned long loop;
42736
42737 #ifdef CONFIG_FSCACHE_STATS
42738 - atomic_add(pagevec->nr, &fscache_n_marks);
42739 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42740 #endif
42741
42742 for (loop = 0; loop < pagevec->nr; loop++) {
42743 diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42744 --- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42745 +++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42746 @@ -18,95 +18,95 @@
42747 /*
42748 * operation counters
42749 */
42750 -atomic_t fscache_n_op_pend;
42751 -atomic_t fscache_n_op_run;
42752 -atomic_t fscache_n_op_enqueue;
42753 -atomic_t fscache_n_op_requeue;
42754 -atomic_t fscache_n_op_deferred_release;
42755 -atomic_t fscache_n_op_release;
42756 -atomic_t fscache_n_op_gc;
42757 -atomic_t fscache_n_op_cancelled;
42758 -atomic_t fscache_n_op_rejected;
42759 -
42760 -atomic_t fscache_n_attr_changed;
42761 -atomic_t fscache_n_attr_changed_ok;
42762 -atomic_t fscache_n_attr_changed_nobufs;
42763 -atomic_t fscache_n_attr_changed_nomem;
42764 -atomic_t fscache_n_attr_changed_calls;
42765 -
42766 -atomic_t fscache_n_allocs;
42767 -atomic_t fscache_n_allocs_ok;
42768 -atomic_t fscache_n_allocs_wait;
42769 -atomic_t fscache_n_allocs_nobufs;
42770 -atomic_t fscache_n_allocs_intr;
42771 -atomic_t fscache_n_allocs_object_dead;
42772 -atomic_t fscache_n_alloc_ops;
42773 -atomic_t fscache_n_alloc_op_waits;
42774 -
42775 -atomic_t fscache_n_retrievals;
42776 -atomic_t fscache_n_retrievals_ok;
42777 -atomic_t fscache_n_retrievals_wait;
42778 -atomic_t fscache_n_retrievals_nodata;
42779 -atomic_t fscache_n_retrievals_nobufs;
42780 -atomic_t fscache_n_retrievals_intr;
42781 -atomic_t fscache_n_retrievals_nomem;
42782 -atomic_t fscache_n_retrievals_object_dead;
42783 -atomic_t fscache_n_retrieval_ops;
42784 -atomic_t fscache_n_retrieval_op_waits;
42785 -
42786 -atomic_t fscache_n_stores;
42787 -atomic_t fscache_n_stores_ok;
42788 -atomic_t fscache_n_stores_again;
42789 -atomic_t fscache_n_stores_nobufs;
42790 -atomic_t fscache_n_stores_oom;
42791 -atomic_t fscache_n_store_ops;
42792 -atomic_t fscache_n_store_calls;
42793 -atomic_t fscache_n_store_pages;
42794 -atomic_t fscache_n_store_radix_deletes;
42795 -atomic_t fscache_n_store_pages_over_limit;
42796 -
42797 -atomic_t fscache_n_store_vmscan_not_storing;
42798 -atomic_t fscache_n_store_vmscan_gone;
42799 -atomic_t fscache_n_store_vmscan_busy;
42800 -atomic_t fscache_n_store_vmscan_cancelled;
42801 -
42802 -atomic_t fscache_n_marks;
42803 -atomic_t fscache_n_uncaches;
42804 -
42805 -atomic_t fscache_n_acquires;
42806 -atomic_t fscache_n_acquires_null;
42807 -atomic_t fscache_n_acquires_no_cache;
42808 -atomic_t fscache_n_acquires_ok;
42809 -atomic_t fscache_n_acquires_nobufs;
42810 -atomic_t fscache_n_acquires_oom;
42811 -
42812 -atomic_t fscache_n_updates;
42813 -atomic_t fscache_n_updates_null;
42814 -atomic_t fscache_n_updates_run;
42815 -
42816 -atomic_t fscache_n_relinquishes;
42817 -atomic_t fscache_n_relinquishes_null;
42818 -atomic_t fscache_n_relinquishes_waitcrt;
42819 -atomic_t fscache_n_relinquishes_retire;
42820 -
42821 -atomic_t fscache_n_cookie_index;
42822 -atomic_t fscache_n_cookie_data;
42823 -atomic_t fscache_n_cookie_special;
42824 -
42825 -atomic_t fscache_n_object_alloc;
42826 -atomic_t fscache_n_object_no_alloc;
42827 -atomic_t fscache_n_object_lookups;
42828 -atomic_t fscache_n_object_lookups_negative;
42829 -atomic_t fscache_n_object_lookups_positive;
42830 -atomic_t fscache_n_object_lookups_timed_out;
42831 -atomic_t fscache_n_object_created;
42832 -atomic_t fscache_n_object_avail;
42833 -atomic_t fscache_n_object_dead;
42834 -
42835 -atomic_t fscache_n_checkaux_none;
42836 -atomic_t fscache_n_checkaux_okay;
42837 -atomic_t fscache_n_checkaux_update;
42838 -atomic_t fscache_n_checkaux_obsolete;
42839 +atomic_unchecked_t fscache_n_op_pend;
42840 +atomic_unchecked_t fscache_n_op_run;
42841 +atomic_unchecked_t fscache_n_op_enqueue;
42842 +atomic_unchecked_t fscache_n_op_requeue;
42843 +atomic_unchecked_t fscache_n_op_deferred_release;
42844 +atomic_unchecked_t fscache_n_op_release;
42845 +atomic_unchecked_t fscache_n_op_gc;
42846 +atomic_unchecked_t fscache_n_op_cancelled;
42847 +atomic_unchecked_t fscache_n_op_rejected;
42848 +
42849 +atomic_unchecked_t fscache_n_attr_changed;
42850 +atomic_unchecked_t fscache_n_attr_changed_ok;
42851 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
42852 +atomic_unchecked_t fscache_n_attr_changed_nomem;
42853 +atomic_unchecked_t fscache_n_attr_changed_calls;
42854 +
42855 +atomic_unchecked_t fscache_n_allocs;
42856 +atomic_unchecked_t fscache_n_allocs_ok;
42857 +atomic_unchecked_t fscache_n_allocs_wait;
42858 +atomic_unchecked_t fscache_n_allocs_nobufs;
42859 +atomic_unchecked_t fscache_n_allocs_intr;
42860 +atomic_unchecked_t fscache_n_allocs_object_dead;
42861 +atomic_unchecked_t fscache_n_alloc_ops;
42862 +atomic_unchecked_t fscache_n_alloc_op_waits;
42863 +
42864 +atomic_unchecked_t fscache_n_retrievals;
42865 +atomic_unchecked_t fscache_n_retrievals_ok;
42866 +atomic_unchecked_t fscache_n_retrievals_wait;
42867 +atomic_unchecked_t fscache_n_retrievals_nodata;
42868 +atomic_unchecked_t fscache_n_retrievals_nobufs;
42869 +atomic_unchecked_t fscache_n_retrievals_intr;
42870 +atomic_unchecked_t fscache_n_retrievals_nomem;
42871 +atomic_unchecked_t fscache_n_retrievals_object_dead;
42872 +atomic_unchecked_t fscache_n_retrieval_ops;
42873 +atomic_unchecked_t fscache_n_retrieval_op_waits;
42874 +
42875 +atomic_unchecked_t fscache_n_stores;
42876 +atomic_unchecked_t fscache_n_stores_ok;
42877 +atomic_unchecked_t fscache_n_stores_again;
42878 +atomic_unchecked_t fscache_n_stores_nobufs;
42879 +atomic_unchecked_t fscache_n_stores_oom;
42880 +atomic_unchecked_t fscache_n_store_ops;
42881 +atomic_unchecked_t fscache_n_store_calls;
42882 +atomic_unchecked_t fscache_n_store_pages;
42883 +atomic_unchecked_t fscache_n_store_radix_deletes;
42884 +atomic_unchecked_t fscache_n_store_pages_over_limit;
42885 +
42886 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42887 +atomic_unchecked_t fscache_n_store_vmscan_gone;
42888 +atomic_unchecked_t fscache_n_store_vmscan_busy;
42889 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42890 +
42891 +atomic_unchecked_t fscache_n_marks;
42892 +atomic_unchecked_t fscache_n_uncaches;
42893 +
42894 +atomic_unchecked_t fscache_n_acquires;
42895 +atomic_unchecked_t fscache_n_acquires_null;
42896 +atomic_unchecked_t fscache_n_acquires_no_cache;
42897 +atomic_unchecked_t fscache_n_acquires_ok;
42898 +atomic_unchecked_t fscache_n_acquires_nobufs;
42899 +atomic_unchecked_t fscache_n_acquires_oom;
42900 +
42901 +atomic_unchecked_t fscache_n_updates;
42902 +atomic_unchecked_t fscache_n_updates_null;
42903 +atomic_unchecked_t fscache_n_updates_run;
42904 +
42905 +atomic_unchecked_t fscache_n_relinquishes;
42906 +atomic_unchecked_t fscache_n_relinquishes_null;
42907 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42908 +atomic_unchecked_t fscache_n_relinquishes_retire;
42909 +
42910 +atomic_unchecked_t fscache_n_cookie_index;
42911 +atomic_unchecked_t fscache_n_cookie_data;
42912 +atomic_unchecked_t fscache_n_cookie_special;
42913 +
42914 +atomic_unchecked_t fscache_n_object_alloc;
42915 +atomic_unchecked_t fscache_n_object_no_alloc;
42916 +atomic_unchecked_t fscache_n_object_lookups;
42917 +atomic_unchecked_t fscache_n_object_lookups_negative;
42918 +atomic_unchecked_t fscache_n_object_lookups_positive;
42919 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
42920 +atomic_unchecked_t fscache_n_object_created;
42921 +atomic_unchecked_t fscache_n_object_avail;
42922 +atomic_unchecked_t fscache_n_object_dead;
42923 +
42924 +atomic_unchecked_t fscache_n_checkaux_none;
42925 +atomic_unchecked_t fscache_n_checkaux_okay;
42926 +atomic_unchecked_t fscache_n_checkaux_update;
42927 +atomic_unchecked_t fscache_n_checkaux_obsolete;
42928
42929 atomic_t fscache_n_cop_alloc_object;
42930 atomic_t fscache_n_cop_lookup_object;
42931 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42932 seq_puts(m, "FS-Cache statistics\n");
42933
42934 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42935 - atomic_read(&fscache_n_cookie_index),
42936 - atomic_read(&fscache_n_cookie_data),
42937 - atomic_read(&fscache_n_cookie_special));
42938 + atomic_read_unchecked(&fscache_n_cookie_index),
42939 + atomic_read_unchecked(&fscache_n_cookie_data),
42940 + atomic_read_unchecked(&fscache_n_cookie_special));
42941
42942 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42943 - atomic_read(&fscache_n_object_alloc),
42944 - atomic_read(&fscache_n_object_no_alloc),
42945 - atomic_read(&fscache_n_object_avail),
42946 - atomic_read(&fscache_n_object_dead));
42947 + atomic_read_unchecked(&fscache_n_object_alloc),
42948 + atomic_read_unchecked(&fscache_n_object_no_alloc),
42949 + atomic_read_unchecked(&fscache_n_object_avail),
42950 + atomic_read_unchecked(&fscache_n_object_dead));
42951 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42952 - atomic_read(&fscache_n_checkaux_none),
42953 - atomic_read(&fscache_n_checkaux_okay),
42954 - atomic_read(&fscache_n_checkaux_update),
42955 - atomic_read(&fscache_n_checkaux_obsolete));
42956 + atomic_read_unchecked(&fscache_n_checkaux_none),
42957 + atomic_read_unchecked(&fscache_n_checkaux_okay),
42958 + atomic_read_unchecked(&fscache_n_checkaux_update),
42959 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42960
42961 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42962 - atomic_read(&fscache_n_marks),
42963 - atomic_read(&fscache_n_uncaches));
42964 + atomic_read_unchecked(&fscache_n_marks),
42965 + atomic_read_unchecked(&fscache_n_uncaches));
42966
42967 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42968 " oom=%u\n",
42969 - atomic_read(&fscache_n_acquires),
42970 - atomic_read(&fscache_n_acquires_null),
42971 - atomic_read(&fscache_n_acquires_no_cache),
42972 - atomic_read(&fscache_n_acquires_ok),
42973 - atomic_read(&fscache_n_acquires_nobufs),
42974 - atomic_read(&fscache_n_acquires_oom));
42975 + atomic_read_unchecked(&fscache_n_acquires),
42976 + atomic_read_unchecked(&fscache_n_acquires_null),
42977 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
42978 + atomic_read_unchecked(&fscache_n_acquires_ok),
42979 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
42980 + atomic_read_unchecked(&fscache_n_acquires_oom));
42981
42982 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42983 - atomic_read(&fscache_n_object_lookups),
42984 - atomic_read(&fscache_n_object_lookups_negative),
42985 - atomic_read(&fscache_n_object_lookups_positive),
42986 - atomic_read(&fscache_n_object_lookups_timed_out),
42987 - atomic_read(&fscache_n_object_created));
42988 + atomic_read_unchecked(&fscache_n_object_lookups),
42989 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
42990 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
42991 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42992 + atomic_read_unchecked(&fscache_n_object_created));
42993
42994 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42995 - atomic_read(&fscache_n_updates),
42996 - atomic_read(&fscache_n_updates_null),
42997 - atomic_read(&fscache_n_updates_run));
42998 + atomic_read_unchecked(&fscache_n_updates),
42999 + atomic_read_unchecked(&fscache_n_updates_null),
43000 + atomic_read_unchecked(&fscache_n_updates_run));
43001
43002 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43003 - atomic_read(&fscache_n_relinquishes),
43004 - atomic_read(&fscache_n_relinquishes_null),
43005 - atomic_read(&fscache_n_relinquishes_waitcrt),
43006 - atomic_read(&fscache_n_relinquishes_retire));
43007 + atomic_read_unchecked(&fscache_n_relinquishes),
43008 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43009 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43010 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43011
43012 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43013 - atomic_read(&fscache_n_attr_changed),
43014 - atomic_read(&fscache_n_attr_changed_ok),
43015 - atomic_read(&fscache_n_attr_changed_nobufs),
43016 - atomic_read(&fscache_n_attr_changed_nomem),
43017 - atomic_read(&fscache_n_attr_changed_calls));
43018 + atomic_read_unchecked(&fscache_n_attr_changed),
43019 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43020 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43021 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43022 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43023
43024 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43025 - atomic_read(&fscache_n_allocs),
43026 - atomic_read(&fscache_n_allocs_ok),
43027 - atomic_read(&fscache_n_allocs_wait),
43028 - atomic_read(&fscache_n_allocs_nobufs),
43029 - atomic_read(&fscache_n_allocs_intr));
43030 + atomic_read_unchecked(&fscache_n_allocs),
43031 + atomic_read_unchecked(&fscache_n_allocs_ok),
43032 + atomic_read_unchecked(&fscache_n_allocs_wait),
43033 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43034 + atomic_read_unchecked(&fscache_n_allocs_intr));
43035 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43036 - atomic_read(&fscache_n_alloc_ops),
43037 - atomic_read(&fscache_n_alloc_op_waits),
43038 - atomic_read(&fscache_n_allocs_object_dead));
43039 + atomic_read_unchecked(&fscache_n_alloc_ops),
43040 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43041 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43042
43043 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43044 " int=%u oom=%u\n",
43045 - atomic_read(&fscache_n_retrievals),
43046 - atomic_read(&fscache_n_retrievals_ok),
43047 - atomic_read(&fscache_n_retrievals_wait),
43048 - atomic_read(&fscache_n_retrievals_nodata),
43049 - atomic_read(&fscache_n_retrievals_nobufs),
43050 - atomic_read(&fscache_n_retrievals_intr),
43051 - atomic_read(&fscache_n_retrievals_nomem));
43052 + atomic_read_unchecked(&fscache_n_retrievals),
43053 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43054 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43055 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43056 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43057 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43058 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43059 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43060 - atomic_read(&fscache_n_retrieval_ops),
43061 - atomic_read(&fscache_n_retrieval_op_waits),
43062 - atomic_read(&fscache_n_retrievals_object_dead));
43063 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43064 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43065 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43066
43067 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43068 - atomic_read(&fscache_n_stores),
43069 - atomic_read(&fscache_n_stores_ok),
43070 - atomic_read(&fscache_n_stores_again),
43071 - atomic_read(&fscache_n_stores_nobufs),
43072 - atomic_read(&fscache_n_stores_oom));
43073 + atomic_read_unchecked(&fscache_n_stores),
43074 + atomic_read_unchecked(&fscache_n_stores_ok),
43075 + atomic_read_unchecked(&fscache_n_stores_again),
43076 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43077 + atomic_read_unchecked(&fscache_n_stores_oom));
43078 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43079 - atomic_read(&fscache_n_store_ops),
43080 - atomic_read(&fscache_n_store_calls),
43081 - atomic_read(&fscache_n_store_pages),
43082 - atomic_read(&fscache_n_store_radix_deletes),
43083 - atomic_read(&fscache_n_store_pages_over_limit));
43084 + atomic_read_unchecked(&fscache_n_store_ops),
43085 + atomic_read_unchecked(&fscache_n_store_calls),
43086 + atomic_read_unchecked(&fscache_n_store_pages),
43087 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43088 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43089
43090 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43091 - atomic_read(&fscache_n_store_vmscan_not_storing),
43092 - atomic_read(&fscache_n_store_vmscan_gone),
43093 - atomic_read(&fscache_n_store_vmscan_busy),
43094 - atomic_read(&fscache_n_store_vmscan_cancelled));
43095 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43096 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43097 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43098 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43099
43100 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43101 - atomic_read(&fscache_n_op_pend),
43102 - atomic_read(&fscache_n_op_run),
43103 - atomic_read(&fscache_n_op_enqueue),
43104 - atomic_read(&fscache_n_op_cancelled),
43105 - atomic_read(&fscache_n_op_rejected));
43106 + atomic_read_unchecked(&fscache_n_op_pend),
43107 + atomic_read_unchecked(&fscache_n_op_run),
43108 + atomic_read_unchecked(&fscache_n_op_enqueue),
43109 + atomic_read_unchecked(&fscache_n_op_cancelled),
43110 + atomic_read_unchecked(&fscache_n_op_rejected));
43111 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43112 - atomic_read(&fscache_n_op_deferred_release),
43113 - atomic_read(&fscache_n_op_release),
43114 - atomic_read(&fscache_n_op_gc));
43115 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43116 + atomic_read_unchecked(&fscache_n_op_release),
43117 + atomic_read_unchecked(&fscache_n_op_gc));
43118
43119 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43120 atomic_read(&fscache_n_cop_alloc_object),
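The stats.c hunk above converts every FS-Cache statistics counter from atomic_t to atomic_unchecked_t, and the earlier object.c/operation.c/page.c hunks switch the corresponding call sites to fscache_stat_unchecked()/atomic_read_unchecked(); only the fscache_n_cop_* operation counters stay as plain atomic_t. The unchecked type is defined elsewhere in the patch, not in this section; the apparent intent is that PaX's reference-count overflow checking should not fire on pure statistics, which may legitimately wrap. The following is a minimal, compilable userspace model of that checked/unchecked split -- every name and the overflow check itself are illustrative assumptions, not the kernel's definitions.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked counter: models an atomic_t used as a reference count. */
typedef struct { volatile int counter; } atomic_t;
/* Unchecked counter: models atomic_unchecked_t, a statistic allowed to wrap. */
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	/* Simplified model of the hardened increment: trap instead of wrapping.
	 * (The real check is done atomically in the PaX asm, not like this.) */
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	__sync_fetch_and_add(&v->counter, 1);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* Statistics path: plain wrapping increment, no overflow detection. */
	__sync_fetch_and_add(&v->counter, 1);
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

/* Invented stand-in for one of the fscache_n_* statistics above. */
static atomic_unchecked_t n_object_lookups;

int main(void)
{
	atomic_t refs = { 1 };

	atomic_inc(&refs);				/* lifetime counter: checked */
	atomic_inc_unchecked(&n_object_lookups);	/* statistic: unchecked */
	printf("lookups=%d refs=%d\n",
	       atomic_read_unchecked(&n_object_lookups), refs.counter);
	return 0;
}

The design point the sketch tries to capture: counters that guard object lifetimes must never wrap silently, while counters that only feed /proc statistics safely can.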
43121 diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
43122 --- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43123 +++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43124 @@ -4,6 +4,7 @@
43125 #include <linux/path.h>
43126 #include <linux/slab.h>
43127 #include <linux/fs_struct.h>
43128 +#include <linux/grsecurity.h>
43129
43130 /*
43131 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43132 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43133 old_root = fs->root;
43134 fs->root = *path;
43135 path_get(path);
43136 + gr_set_chroot_entries(current, path);
43137 write_unlock(&fs->lock);
43138 if (old_root.dentry)
43139 path_put(&old_root);
43140 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43141 && fs->root.mnt == old_root->mnt) {
43142 path_get(new_root);
43143 fs->root = *new_root;
43144 + gr_set_chroot_entries(p, new_root);
43145 count++;
43146 }
43147 if (fs->pwd.dentry == old_root->dentry
43148 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43149 task_lock(tsk);
43150 write_lock(&fs->lock);
43151 tsk->fs = NULL;
43152 - kill = !--fs->users;
43153 + gr_clear_chroot_entries(tsk);
43154 + kill = !atomic_dec_return(&fs->users);
43155 write_unlock(&fs->lock);
43156 task_unlock(tsk);
43157 if (kill)
43158 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43159 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43160 /* We don't need to lock fs - think why ;-) */
43161 if (fs) {
43162 - fs->users = 1;
43163 + atomic_set(&fs->users, 1);
43164 fs->in_exec = 0;
43165 rwlock_init(&fs->lock);
43166 fs->umask = old->umask;
43167 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43168
43169 task_lock(current);
43170 write_lock(&fs->lock);
43171 - kill = !--fs->users;
43172 + kill = !atomic_dec_return(&fs->users);
43173 current->fs = new_fs;
43174 + gr_set_chroot_entries(current, &new_fs->root);
43175 write_unlock(&fs->lock);
43176 task_unlock(current);
43177
43178 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43179
43180 /* to be mentioned only in INIT_TASK */
43181 struct fs_struct init_fs = {
43182 - .users = 1,
43183 + .users = ATOMIC_INIT(1),
43184 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43185 .umask = 0022,
43186 };
43187 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43188 task_lock(current);
43189
43190 write_lock(&init_fs.lock);
43191 - init_fs.users++;
43192 + atomic_inc(&init_fs.users);
43193 write_unlock(&init_fs.lock);
43194
43195 write_lock(&fs->lock);
43196 current->fs = &init_fs;
43197 - kill = !--fs->users;
43198 + gr_set_chroot_entries(current, &current->fs->root);
43199 + kill = !atomic_dec_return(&fs->users);
43200 write_unlock(&fs->lock);
43201
43202 task_unlock(current);
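In the fs_struct.c hunks above, the plain integer reference count fs->users (previously bumped and dropped as fs->users++ / !--fs->users under fs->lock) becomes an atomic_t handled with atomic_set()/atomic_inc()/atomic_dec_return(), and gr_set_chroot_entries()/gr_clear_chroot_entries() record root changes for grsecurity's chroot checks. The core idiom is "free the object when the last reference drops", i.e. atomic_dec_return() reaching zero. Below is a small self-contained model of that idiom, with invented names and GCC atomic builtins standing in for the kernel's atomic ops.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_model {
	volatile int users;	/* plays the role of atomic_t users */
	int umask;
};

static struct fs_struct_model *fs_alloc(void)
{
	struct fs_struct_model *fs = calloc(1, sizeof(*fs));

	if (fs) {
		fs->users = 1;	/* atomic_set(&fs->users, 1) */
		fs->umask = 022;
	}
	return fs;
}

static void fs_get(struct fs_struct_model *fs)
{
	__sync_fetch_and_add(&fs->users, 1);	/* atomic_inc(&fs->users) */
}

static void fs_put(struct fs_struct_model *fs)
{
	/* kill = !atomic_dec_return(&fs->users): free only on the last put */
	bool kill = (__sync_sub_and_fetch(&fs->users, 1) == 0);

	if (kill)
		free(fs);
}

int main(void)
{
	struct fs_struct_model *fs = fs_alloc();

	if (!fs)
		return 1;
	fs_get(fs);	/* a second user, e.g. a task sharing the fs_struct */
	fs_put(fs);	/* first put: the object survives */
	fs_put(fs);	/* last put: the object is freed */
	puts("done");
	return 0;
}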
43203 diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
43204 --- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43205 +++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43206 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
43207 INIT_LIST_HEAD(&cuse_conntbl[i]);
43208
43209 /* inherit and extend fuse_dev_operations */
43210 - cuse_channel_fops = fuse_dev_operations;
43211 - cuse_channel_fops.owner = THIS_MODULE;
43212 - cuse_channel_fops.open = cuse_channel_open;
43213 - cuse_channel_fops.release = cuse_channel_release;
43214 + pax_open_kernel();
43215 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43216 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43217 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43218 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43219 + pax_close_kernel();
43220
43221 cuse_class = class_create(THIS_MODULE, "cuse");
43222 if (IS_ERR(cuse_class))
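The cuse_init() hunk above stops assigning to cuse_channel_fops field by field and instead memcpy()s the template and patches individual pointers through *(void **) casts, bracketed by pax_open_kernel()/pax_close_kernel(). This fits the constification theme seen elsewhere in this patch (gfs2, lockd, mbcache): once an ops structure lives in read-only memory, legitimate one-time initialization has to lift the write protection explicitly. The userspace sketch below illustrates the same unlock-patch-relock shape with an mprotect()ed page standing in for kernel .rodata; every name in it is an assumption, and it models the idea rather than the PaX macros themselves.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops_model {
	int (*open)(void);
	int (*release)(void);
};

static int generic_open(void) { return 0; }
static int special_open(void) { return 42; }

/* Template ops, analogous to the generic device operations being inherited. */
static const struct file_ops_model generic_ops = { .open = generic_open };

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	/* One read-only page plays the role of the constified channel fops. */
	struct file_ops_model *channel_ops =
		mmap(NULL, page, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (channel_ops == MAP_FAILED)
		return 1;

	mprotect(channel_ops, page, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()" */
	memcpy(channel_ops, &generic_ops, sizeof(generic_ops));	/* inherit the template */
	channel_ops->open = special_open;			/* patch one hook */
	mprotect(channel_ops, page, PROT_READ);			/* "pax_close_kernel()" */

	printf("open() -> %d\n", channel_ops->open());
	munmap(channel_ops, page);
	return 0;
}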
43223 diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
43224 --- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43225 +++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43226 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43227 {
43228 struct fuse_notify_inval_entry_out outarg;
43229 int err = -EINVAL;
43230 - char buf[FUSE_NAME_MAX+1];
43231 + char *buf = NULL;
43232 struct qstr name;
43233
43234 if (size < sizeof(outarg))
43235 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43236 if (outarg.namelen > FUSE_NAME_MAX)
43237 goto err;
43238
43239 + err = -ENOMEM;
43240 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43241 + if (!buf)
43242 + goto err;
43243 +
43244 name.name = buf;
43245 name.len = outarg.namelen;
43246 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43247 @@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43248
43249 down_read(&fc->killsb);
43250 err = -ENOENT;
43251 - if (!fc->sb)
43252 - goto err_unlock;
43253 -
43254 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43255 -
43256 -err_unlock:
43257 + if (fc->sb)
43258 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43259 up_read(&fc->killsb);
43260 + kfree(buf);
43261 return err;
43262
43263 err:
43264 fuse_copy_finish(cs);
43265 + kfree(buf);
43266 return err;
43267 }
43268
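The fuse_notify_inval_entry() hunks above replace a roughly 1 KiB name buffer on the kernel stack (char buf[FUSE_NAME_MAX+1]) with a kmalloc()ed buffer that is freed on every exit path, and fold the old err_unlock label into a plain conditional so a single kfree() covers both outcomes. Shrinking worst-case stack frames is the same concern the pax_track_stack() additions later in this section address. A compact plain-C sketch of the reshaped function follows; copy_name() and the other names are invented stand-ins, with malloc()/free() playing kmalloc()/kfree().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_MODEL 1024	/* plays the role of FUSE_NAME_MAX */

/* Invented stand-in for fuse_copy_one(): copy the name into buf. */
static int copy_name(char *buf, const char *src, size_t namelen)
{
	if (strlen(src) != namelen)
		return -EINVAL;
	memcpy(buf, src, namelen + 1);	/* includes the terminating NUL */
	return 0;
}

static int notify_inval_entry(const char *src, size_t namelen)
{
	char *buf;	/* heap buffer instead of char buf[NAME_MAX_MODEL + 1] on the stack */
	int err;

	if (namelen > NAME_MAX_MODEL)
		return -ENAMETOOLONG;

	buf = malloc(NAME_MAX_MODEL + 1);	/* kmalloc(FUSE_NAME_MAX + 1, GFP_KERNEL) */
	if (!buf)
		return -ENOMEM;

	err = copy_name(buf, src, namelen);
	if (!err)
		printf("invalidate entry \"%s\"\n", buf);

	free(buf);	/* one kfree() covers success and failure alike */
	return err;
}

int main(void)
{
	const char *name = "lost+found";

	return notify_inval_entry(name, strlen(name)) ? 1 : 0;
}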
43269 diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
43270 --- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43271 +++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43272 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43273 return link;
43274 }
43275
43276 -static void free_link(char *link)
43277 +static void free_link(const char *link)
43278 {
43279 if (!IS_ERR(link))
43280 free_page((unsigned long) link);
43281 diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
43282 --- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43283 +++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43284 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43285 unsigned int x;
43286 int error;
43287
43288 + pax_track_stack();
43289 +
43290 if (ndentry->d_inode) {
43291 nip = GFS2_I(ndentry->d_inode);
43292 if (ip == nip)
43293 diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
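This gfs2_rename() hunk, and many of the following ones (hfsplus, jbd, jffs2, lockd), only insert a pax_track_stack() call at the top of functions that keep large objects on the kernel stack (hfs_find_data, nlm_rqst, the jffs2 compression scratch arrays, and so on). The helper is defined elsewhere in the patch; judging by its name and placement it records how deeply the kernel stack has been used, which PaX's stack sanitization and overflow checking can then act on. The userspace approximation below merely tracks the deepest stack extent observed; every identifier in it is an assumption.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

/* Rough model of pax_track_stack(): remember the deepest stack extent seen. */
static void track_stack(void)
{
	char marker;
	uintptr_t sp = (uintptr_t)&marker;

	if (sp < lowest_sp)
		lowest_sp = sp;
}

/* The kind of frame the patch annotates: a large scratch buffer on the stack. */
static void big_frame_function(void)
{
	char scratch[8192];

	track_stack();
	memset(scratch, 0, sizeof(scratch));
}

int main(void)
{
	char marker;
	uintptr_t top = (uintptr_t)&marker;

	track_stack();
	big_frame_function();
	/* Assumes the usual downward-growing stack. */
	printf("deepest stack use observed: %lu bytes\n",
	       (unsigned long)(top - lowest_sp));
	return 0;
}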
43294 --- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43295 +++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43296 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43297 return a->store ? a->store(sdp, buf, len) : len;
43298 }
43299
43300 -static struct sysfs_ops gfs2_attr_ops = {
43301 +static const struct sysfs_ops gfs2_attr_ops = {
43302 .show = gfs2_attr_show,
43303 .store = gfs2_attr_store,
43304 };
43305 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43306 return 0;
43307 }
43308
43309 -static struct kset_uevent_ops gfs2_uevent_ops = {
43310 +static const struct kset_uevent_ops gfs2_uevent_ops = {
43311 .uevent = gfs2_uevent,
43312 };
43313
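The gfs2/sys.c hunks above only add const to gfs2_attr_ops and gfs2_uevent_ops, the same constification applied to nlmsvc_ops further down. A table of function pointers that is never written after build time can be placed in read-only memory, so neither a stray write nor an attacker can retarget its hooks at run time; the price is visible in the cuse.c and mbcache.c hunks, where legitimate late initialization now needs explicit casts or write-enable sequences. A minimal illustration of the declaration-site change, using model types rather than the kernel's:

#include <stdio.h>
#include <sys/types.h>

struct sysfs_ops_model {
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t len);
};

static ssize_t demo_show(char *buf)
{
	buf[0] = '\0';
	return 0;
}

static ssize_t demo_store(const char *buf, size_t len)
{
	(void)buf;
	return (ssize_t)len;
}

/* Before: "static struct sysfs_ops_model attr_ops = { ... };" -- writable hooks.
 * After: a const table the toolchain may place in read-only memory. */
static const struct sysfs_ops_model attr_ops = {
	.show  = demo_show,
	.store = demo_store,
};

int main(void)
{
	char buf[16];

	/* attr_ops.store = NULL;   <- now rejected at compile time */
	printf("stored %zd bytes\n", attr_ops.store("x", 1));
	return (int)attr_ops.show(buf);
}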
43314 diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
43315 --- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43316 +++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43317 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43318 int err;
43319 u16 type;
43320
43321 + pax_track_stack();
43322 +
43323 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43324 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43325 if (err)
43326 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43327 int entry_size;
43328 int err;
43329
43330 + pax_track_stack();
43331 +
43332 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43333 sb = dir->i_sb;
43334 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43335 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43336 int entry_size, type;
43337 int err = 0;
43338
43339 + pax_track_stack();
43340 +
43341 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43342 dst_dir->i_ino, dst_name->name);
43343 sb = src_dir->i_sb;
43344 diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43345 --- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43346 +++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43347 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43348 struct hfsplus_readdir_data *rd;
43349 u16 type;
43350
43351 + pax_track_stack();
43352 +
43353 if (filp->f_pos >= inode->i_size)
43354 return 0;
43355
43356 diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43357 --- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43358 +++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43359 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43360 int res = 0;
43361 u16 type;
43362
43363 + pax_track_stack();
43364 +
43365 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43366
43367 HFSPLUS_I(inode).dev = 0;
43368 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43369 struct hfs_find_data fd;
43370 hfsplus_cat_entry entry;
43371
43372 + pax_track_stack();
43373 +
43374 if (HFSPLUS_IS_RSRC(inode))
43375 main_inode = HFSPLUS_I(inode).rsrc_inode;
43376
43377 diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43378 --- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43379 +++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43380 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43381 struct hfsplus_cat_file *file;
43382 int res;
43383
43384 + pax_track_stack();
43385 +
43386 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43387 return -EOPNOTSUPP;
43388
43389 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43390 struct hfsplus_cat_file *file;
43391 ssize_t res = 0;
43392
43393 + pax_track_stack();
43394 +
43395 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43396 return -EOPNOTSUPP;
43397
43398 diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43399 --- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43400 +++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43401 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43402 struct nls_table *nls = NULL;
43403 int err = -EINVAL;
43404
43405 + pax_track_stack();
43406 +
43407 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43408 if (!sbi)
43409 return -ENOMEM;
43410 diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43411 --- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43412 +++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43413 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43414 .kill_sb = kill_litter_super,
43415 };
43416
43417 -static struct vfsmount *hugetlbfs_vfsmount;
43418 +struct vfsmount *hugetlbfs_vfsmount;
43419
43420 static int can_do_hugetlb_shm(void)
43421 {
43422 diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43423 --- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43424 +++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43425 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43426 u64 phys, u64 len, u32 flags)
43427 {
43428 struct fiemap_extent extent;
43429 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
43430 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43431
43432 /* only count the extents */
43433 if (fieinfo->fi_extents_max == 0) {
43434 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43435
43436 fieinfo.fi_flags = fiemap.fm_flags;
43437 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43438 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43439 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43440
43441 if (fiemap.fm_extent_count != 0 &&
43442 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43443 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43444 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43445 fiemap.fm_flags = fieinfo.fi_flags;
43446 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43447 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43448 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43449 error = -EFAULT;
43450
43451 return error;
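The ioctl.c hunks above are annotation-only: fi_extents_start and the copy_to_user() destination gain __user (plus a __force cast) so that sparse's address-space checking can distinguish user pointers from kernel pointers and flag direct dereferences of the former; for an ordinary compile __user expands to nothing. The sketch below is modelled on the kernel's compiler.h definitions together with a toy copy_to_user(); the struct and helper names are invented.

#include <stdio.h>
#include <string.h>

/* Modelled on the kernel's compiler.h: the attributes only bite under sparse. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

struct fiemap_model {
	unsigned int fm_mapped_extents;
};

/* Toy copy_to_user(): here just a memcpy, but the __user marking on 'to'
 * is what lets sparse flag any direct dereference of a user pointer. */
static unsigned long copy_to_user_model(void __user *to, const void *from,
					unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}

int main(void)
{
	struct fiemap_model kernel_copy = { .fm_mapped_extents = 3 };
	struct fiemap_model userbuf;	/* stands in for the userspace ioctl arg */
	struct fiemap_model __user *arg = (struct fiemap_model __user *)&userbuf;

	if (copy_to_user_model(arg, &kernel_copy, sizeof(kernel_copy)))
		return 1;
	printf("mapped extents: %u\n", userbuf.fm_mapped_extents);
	return 0;
}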
43452 diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43453 --- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43454 +++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43455 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43456 tid_t this_tid;
43457 int result;
43458
43459 + pax_track_stack();
43460 +
43461 jbd_debug(1, "Start checkpoint\n");
43462
43463 /*
43464 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43465 --- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43466 +++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43467 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43468 int outpos = 0;
43469 int pos=0;
43470
43471 + pax_track_stack();
43472 +
43473 memset(positions,0,sizeof(positions));
43474
43475 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43476 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43477 int outpos = 0;
43478 int pos=0;
43479
43480 + pax_track_stack();
43481 +
43482 memset(positions,0,sizeof(positions));
43483
43484 while (outpos<destlen) {
43485 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43486 --- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43487 +++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43488 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43489 int ret;
43490 uint32_t mysrclen, mydstlen;
43491
43492 + pax_track_stack();
43493 +
43494 mysrclen = *sourcelen;
43495 mydstlen = *dstlen - 8;
43496
43497 diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43498 --- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43499 +++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43500 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43501 struct jffs2_unknown_node marker = {
43502 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43503 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43504 - .totlen = cpu_to_je32(c->cleanmarker_size)
43505 + .totlen = cpu_to_je32(c->cleanmarker_size),
43506 + .hdr_crc = cpu_to_je32(0)
43507 };
43508
43509 jffs2_prealloc_raw_node_refs(c, jeb, 1);
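The erase.c initializer above (and the wbuf.c one after it) gains an explicit .hdr_crc = cpu_to_je32(0) member. Members omitted from a designated initializer are zero-initialized by the language anyway, so the on-media cleanmarker does not change; the added line spells the deliberate zero CRC out and keeps missing-field-initializer style checkers quiet. A trivial standalone illustration follows; the constants below merely mimic the JFFS2 ones.

#include <stdint.h>
#include <stdio.h>

/* Rough shape of the node header initialized in the hunks above. */
struct unknown_node_model {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
};

int main(void)
{
	/* Members omitted from a designated initializer are zero-initialized... */
	struct unknown_node_model implicit = {
		.magic    = 0x1985,
		.nodetype = 0x2003,
		.totlen   = 8,
	};
	/* ...the patch simply makes the zero CRC explicit. */
	struct unknown_node_model explicit_crc = {
		.magic    = 0x1985,
		.nodetype = 0x2003,
		.totlen   = 8,
		.hdr_crc  = 0,
	};

	printf("implicit hdr_crc=%u explicit hdr_crc=%u\n",
	       (unsigned)implicit.hdr_crc, (unsigned)explicit_crc.hdr_crc);
	return 0;
}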
43510 diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43511 --- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43512 +++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43513 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43514 {
43515 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43516 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43517 - .totlen = constant_cpu_to_je32(8)
43518 + .totlen = constant_cpu_to_je32(8),
43519 + .hdr_crc = constant_cpu_to_je32(0)
43520 };
43521
43522 /*
43523 diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43524 --- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43525 +++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43526 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43527
43528 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43529
43530 + pax_track_stack();
43531 +
43532 /* Phase.1 : Merge same xref */
43533 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43534 xref_tmphash[i] = NULL;
43535 diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43536 --- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43537 +++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43538 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43539
43540 jfs_inode_cachep =
43541 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43542 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43543 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43544 init_once);
43545 if (jfs_inode_cachep == NULL)
43546 return -ENOMEM;
43547 diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43548 --- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43549 +++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43550 @@ -86,7 +86,7 @@ config HAVE_AOUT
43551
43552 config BINFMT_AOUT
43553 tristate "Kernel support for a.out and ECOFF binaries"
43554 - depends on HAVE_AOUT
43555 + depends on HAVE_AOUT && BROKEN
43556 ---help---
43557 A.out (Assembler.OUTput) is a set of formats for libraries and
43558 executables used in the earliest versions of UNIX. Linux used
43559 diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43560 --- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43561 +++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43562 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43563
43564 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43565 struct dentry *next;
43566 + char d_name[sizeof(next->d_iname)];
43567 + const unsigned char *name;
43568 +
43569 next = list_entry(p, struct dentry, d_u.d_child);
43570 if (d_unhashed(next) || !next->d_inode)
43571 continue;
43572
43573 spin_unlock(&dcache_lock);
43574 - if (filldir(dirent, next->d_name.name,
43575 + name = next->d_name.name;
43576 + if (name == next->d_iname) {
43577 + memcpy(d_name, name, next->d_name.len);
43578 + name = d_name;
43579 + }
43580 + if (filldir(dirent, name,
43581 next->d_name.len, filp->f_pos,
43582 next->d_inode->i_ino,
43583 dt_type(next->d_inode)) < 0)
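The dcache_readdir() hunk above targets names stored inline in the dentry (d_name.name == d_iname): dcache_lock is dropped before filldir() runs, and a concurrent rename may rewrite the inline bytes under the reader, so the patch snapshots the name into an on-stack buffer while it is still stable and passes the copy instead. The plain-C sketch below reduces that defensive copy to its shape; the structure and field names are simplified stand-ins.

#include <stdio.h>
#include <string.h>

#define INLINE_NAME_LEN 36	/* plays the role of sizeof(dentry->d_iname) */

struct dentry_model {
	const char *name;		/* d_name.name: may point into iname */
	size_t len;			/* d_name.len */
	char iname[INLINE_NAME_LEN];	/* d_iname: inline short-name storage */
};

static int filldir_model(const char *name, size_t len)
{
	printf("%.*s\n", (int)len, name);
	return 0;
}

static int emit_entry(const struct dentry_model *d)
{
	char d_name[INLINE_NAME_LEN];
	const char *name = d->name;

	/* If the name lives inline in the dentry, snapshot it while it is
	 * still stable; once the lock is dropped a rename could rewrite it. */
	if (name == d->iname) {
		memcpy(d_name, name, d->len);
		name = d_name;
	}
	/* spin_unlock(&dcache_lock) would happen here, then: */
	return filldir_model(name, d->len);
}

int main(void)
{
	struct dentry_model d = { .len = 5 };

	memcpy(d.iname, "hello", 5);
	d.name = d.iname;
	return emit_entry(&d);
}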
43584 diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43585 --- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43586 +++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43587 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43588 /*
43589 * Cookie counter for NLM requests
43590 */
43591 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43592 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43593
43594 void nlmclnt_next_cookie(struct nlm_cookie *c)
43595 {
43596 - u32 cookie = atomic_inc_return(&nlm_cookie);
43597 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43598
43599 memcpy(c->data, &cookie, 4);
43600 c->len=4;
43601 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43602 struct nlm_rqst reqst, *req;
43603 int status;
43604
43605 + pax_track_stack();
43606 +
43607 req = &reqst;
43608 memset(req, 0, sizeof(*req));
43609 locks_init_lock(&req->a_args.lock.fl);
43610 diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43611 --- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43612 +++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43613 @@ -43,7 +43,7 @@
43614
43615 static struct svc_program nlmsvc_program;
43616
43617 -struct nlmsvc_binding * nlmsvc_ops;
43618 +const struct nlmsvc_binding * nlmsvc_ops;
43619 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43620
43621 static DEFINE_MUTEX(nlmsvc_mutex);
43622 diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43623 --- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43624 +++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43625 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43626
43627 static struct kmem_cache *filelock_cache __read_mostly;
43628
43629 +static void locks_init_lock_always(struct file_lock *fl)
43630 +{
43631 + fl->fl_next = NULL;
43632 + fl->fl_fasync = NULL;
43633 + fl->fl_owner = NULL;
43634 + fl->fl_pid = 0;
43635 + fl->fl_nspid = NULL;
43636 + fl->fl_file = NULL;
43637 + fl->fl_flags = 0;
43638 + fl->fl_type = 0;
43639 + fl->fl_start = fl->fl_end = 0;
43640 +}
43641 +
43642 /* Allocate an empty lock structure. */
43643 static struct file_lock *locks_alloc_lock(void)
43644 {
43645 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43646 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43647 +
43648 + if (fl)
43649 + locks_init_lock_always(fl);
43650 +
43651 + return fl;
43652 }
43653
43654 void locks_release_private(struct file_lock *fl)
43655 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43656 INIT_LIST_HEAD(&fl->fl_link);
43657 INIT_LIST_HEAD(&fl->fl_block);
43658 init_waitqueue_head(&fl->fl_wait);
43659 - fl->fl_next = NULL;
43660 - fl->fl_fasync = NULL;
43661 - fl->fl_owner = NULL;
43662 - fl->fl_pid = 0;
43663 - fl->fl_nspid = NULL;
43664 - fl->fl_file = NULL;
43665 - fl->fl_flags = 0;
43666 - fl->fl_type = 0;
43667 - fl->fl_start = fl->fl_end = 0;
43668 fl->fl_ops = NULL;
43669 fl->fl_lmops = NULL;
43670 + locks_init_lock_always(fl);
43671 }
43672
43673 EXPORT_SYMBOL(locks_init_lock);
43674 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43675 return;
43676
43677 if (filp->f_op && filp->f_op->flock) {
43678 - struct file_lock fl = {
43679 + struct file_lock flock = {
43680 .fl_pid = current->tgid,
43681 .fl_file = filp,
43682 .fl_flags = FL_FLOCK,
43683 .fl_type = F_UNLCK,
43684 .fl_end = OFFSET_MAX,
43685 };
43686 - filp->f_op->flock(filp, F_SETLKW, &fl);
43687 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
43688 - fl.fl_ops->fl_release_private(&fl);
43689 + filp->f_op->flock(filp, F_SETLKW, &flock);
43690 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
43691 + flock.fl_ops->fl_release_private(&flock);
43692 }
43693
43694 lock_kernel();
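
The fs/locks.c hunk pulls the field-zeroing half of locks_init_lock() out into locks_init_lock_always() and also calls it from locks_alloc_lock(), so a file_lock obtained from the slab cache never carries stale values from its previous occupant; the rename of the on-stack lock in locks_remove_flock() from fl to flock looks purely cosmetic, apparently to stop it shadowing the fl name used elsewhere in that function. A minimal sketch of the allocate-then-always-initialize pattern, with malloc() standing in for kmem_cache_alloc() and a heavily cut-down structure:

#include <stdio.h>
#include <stdlib.h>

/* Cut-down stand-in for struct file_lock; the real structure has many more fields. */
struct file_lock {
    struct file_lock *fl_next;
    void *fl_owner;
    int   fl_pid;
    unsigned int fl_flags;
    long  fl_start, fl_end;
};

/* Shared by the full init path and the allocation path, mirroring locks_init_lock_always(). */
static void lock_init_always(struct file_lock *fl)
{
    fl->fl_next  = NULL;
    fl->fl_owner = NULL;
    fl->fl_pid   = 0;
    fl->fl_flags = 0;
    fl->fl_start = fl->fl_end = 0;
}

/* Allocation never hands back uninitialized memory, even if a caller
 * forgets to run the full init routine afterwards. */
static struct file_lock *lock_alloc(void)
{
    struct file_lock *fl = malloc(sizeof(*fl));

    if (fl)
        lock_init_always(fl);
    return fl;
}

int main(void)
{
    struct file_lock *fl = lock_alloc();
    if (!fl)
        return 1;
    printf("flags=%u start=%ld\n", fl->fl_flags, fl->fl_start);
    free(fl);
    return 0;
}
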
43695 diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43696 --- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43697 +++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43698 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43699 if (!cache)
43700 goto fail;
43701 cache->c_name = name;
43702 - cache->c_op.free = NULL;
43703 + *(void **)&cache->c_op.free = NULL;
43704 if (cache_op)
43705 - cache->c_op.free = cache_op->free;
43706 + *(void **)&cache->c_op.free = cache_op->free;
43707 atomic_set(&cache->c_entry_count, 0);
43708 cache->c_bucket_bits = bucket_bits;
43709 #ifdef MB_CACHE_INDEXES_COUNT
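
The mbcache hunk is the other half of the constification story: once a structure like c_op is const in type (the grsecurity constify plugin does this to function-pointer-only structs), the single legitimate store at cache-creation time has to be written through a *(void **)& cast so the compiler accepts it while still rejecting stray writes elsewhere. A sketch of the idiom on an ordinary writable object; in the kernel, the surrounding machinery ensures the target memory really is writable at this point:

#include <stdio.h>
#include <stdlib.h>

struct cache_ops {
    void (*free)(void *entry);
};

struct cache {
    const char *name;
    struct cache_ops c_op;   /* the constify plugin would make this member's type const */
};

static void my_free(void *entry) { free(entry); }

int main(void)
{
    struct cache c = { .name = "demo" };

    /* Install the hook through a void ** alias of the member, the same shape
     * as "*(void **)&cache->c_op.free = cache_op->free;" in the hunk above. */
    *(void **)&c.c_op.free = (void *)my_free;

    void *e = malloc(16);
    if (c.c_op.free)
        c.c_op.free(e);
    printf("%s: free hook installed\n", c.name);
    return 0;
}
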
43710 diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43711 --- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43712 +++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43713 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43714 return ret;
43715
43716 /*
43717 - * Read/write DACs are always overridable.
43718 - * Executable DACs are overridable if at least one exec bit is set.
43719 - */
43720 - if (!(mask & MAY_EXEC) || execute_ok(inode))
43721 - if (capable(CAP_DAC_OVERRIDE))
43722 - return 0;
43723 -
43724 - /*
43725 * Searching includes executable on directories, else just read.
43726 */
43727 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43728 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43729 if (capable(CAP_DAC_READ_SEARCH))
43730 return 0;
43731
43732 + /*
43733 + * Read/write DACs are always overridable.
43734 + * Executable DACs are overridable if at least one exec bit is set.
43735 + */
43736 + if (!(mask & MAY_EXEC) || execute_ok(inode))
43737 + if (capable(CAP_DAC_OVERRIDE))
43738 + return 0;
43739 +
43740 return -EACCES;
43741 }
43742
43743 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43744 if (!ret)
43745 goto ok;
43746
43747 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43748 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43749 + capable(CAP_DAC_OVERRIDE))
43750 goto ok;
43751
43752 return ret;
43753 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43754 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43755 error = PTR_ERR(cookie);
43756 if (!IS_ERR(cookie)) {
43757 - char *s = nd_get_link(nd);
43758 + const char *s = nd_get_link(nd);
43759 error = 0;
43760 if (s)
43761 error = __vfs_follow_link(nd, s);
43762 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43763 err = security_inode_follow_link(path->dentry, nd);
43764 if (err)
43765 goto loop;
43766 +
43767 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43768 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43769 + err = -EACCES;
43770 + goto loop;
43771 + }
43772 +
43773 current->link_count++;
43774 current->total_link_count++;
43775 nd->depth++;
43776 @@ -1016,11 +1024,18 @@ return_reval:
43777 break;
43778 }
43779 return_base:
43780 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43781 + path_put(&nd->path);
43782 + return -ENOENT;
43783 + }
43784 return 0;
43785 out_dput:
43786 path_put_conditional(&next, nd);
43787 break;
43788 }
43789 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43790 + err = -ENOENT;
43791 +
43792 path_put(&nd->path);
43793 return_err:
43794 return err;
43795 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43796 int retval = path_init(dfd, name, flags, nd);
43797 if (!retval)
43798 retval = path_walk(name, nd);
43799 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43800 - nd->path.dentry->d_inode))
43801 - audit_inode(name, nd->path.dentry);
43802 +
43803 + if (likely(!retval)) {
43804 + if (nd->path.dentry && nd->path.dentry->d_inode) {
43805 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43806 + retval = -ENOENT;
43807 + if (!audit_dummy_context())
43808 + audit_inode(name, nd->path.dentry);
43809 + }
43810 + }
43811 if (nd->root.mnt) {
43812 path_put(&nd->root);
43813 nd->root.mnt = NULL;
43814 }
43815 +
43816 return retval;
43817 }
43818
43819 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43820 if (error)
43821 goto err_out;
43822
43823 +
43824 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43825 + error = -EPERM;
43826 + goto err_out;
43827 + }
43828 + if (gr_handle_rawio(inode)) {
43829 + error = -EPERM;
43830 + goto err_out;
43831 + }
43832 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43833 + error = -EACCES;
43834 + goto err_out;
43835 + }
43836 +
43837 if (flag & O_TRUNC) {
43838 error = get_write_access(inode);
43839 if (error)
43840 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43841 int error;
43842 struct dentry *dir = nd->path.dentry;
43843
43844 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43845 + error = -EACCES;
43846 + goto out_unlock;
43847 + }
43848 +
43849 if (!IS_POSIXACL(dir->d_inode))
43850 mode &= ~current_umask();
43851 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43852 if (error)
43853 goto out_unlock;
43854 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43855 + if (!error)
43856 + gr_handle_create(path->dentry, nd->path.mnt);
43857 out_unlock:
43858 mutex_unlock(&dir->d_inode->i_mutex);
43859 dput(nd->path.dentry);
43860 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43861 &nd, flag);
43862 if (error)
43863 return ERR_PTR(error);
43864 +
43865 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43866 + error = -EPERM;
43867 + goto exit;
43868 + }
43869 +
43870 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43871 + error = -EPERM;
43872 + goto exit;
43873 + }
43874 +
43875 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43876 + error = -EACCES;
43877 + goto exit;
43878 + }
43879 +
43880 goto ok;
43881 }
43882
43883 @@ -1795,6 +1854,14 @@ do_last:
43884 /*
43885 * It already exists.
43886 */
43887 +
43888 + /* only check if O_CREAT is specified, all other checks need
43889 + to go into may_open */
43890 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43891 + error = -EACCES;
43892 + goto exit_mutex_unlock;
43893 + }
43894 +
43895 mutex_unlock(&dir->d_inode->i_mutex);
43896 audit_inode(pathname, path.dentry);
43897
43898 @@ -1887,6 +1954,13 @@ do_link:
43899 error = security_inode_follow_link(path.dentry, &nd);
43900 if (error)
43901 goto exit_dput;
43902 +
43903 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43904 + path.dentry, nd.path.mnt)) {
43905 + error = -EACCES;
43906 + goto exit_dput;
43907 + }
43908 +
43909 error = __do_follow_link(&path, &nd);
43910 if (error) {
43911 /* Does someone understand code flow here? Or it is only
43912 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43913 error = may_mknod(mode);
43914 if (error)
43915 goto out_dput;
43916 +
43917 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43918 + error = -EPERM;
43919 + goto out_dput;
43920 + }
43921 +
43922 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43923 + error = -EACCES;
43924 + goto out_dput;
43925 + }
43926 +
43927 error = mnt_want_write(nd.path.mnt);
43928 if (error)
43929 goto out_dput;
43930 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43931 }
43932 out_drop_write:
43933 mnt_drop_write(nd.path.mnt);
43934 +
43935 + if (!error)
43936 + gr_handle_create(dentry, nd.path.mnt);
43937 out_dput:
43938 dput(dentry);
43939 out_unlock:
43940 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43941 if (IS_ERR(dentry))
43942 goto out_unlock;
43943
43944 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43945 + error = -EACCES;
43946 + goto out_dput;
43947 + }
43948 +
43949 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43950 mode &= ~current_umask();
43951 error = mnt_want_write(nd.path.mnt);
43952 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43953 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43954 out_drop_write:
43955 mnt_drop_write(nd.path.mnt);
43956 +
43957 + if (!error)
43958 + gr_handle_create(dentry, nd.path.mnt);
43959 +
43960 out_dput:
43961 dput(dentry);
43962 out_unlock:
43963 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43964 char * name;
43965 struct dentry *dentry;
43966 struct nameidata nd;
43967 + ino_t saved_ino = 0;
43968 + dev_t saved_dev = 0;
43969
43970 error = user_path_parent(dfd, pathname, &nd, &name);
43971 if (error)
43972 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43973 error = PTR_ERR(dentry);
43974 if (IS_ERR(dentry))
43975 goto exit2;
43976 +
43977 + if (dentry->d_inode != NULL) {
43978 + if (dentry->d_inode->i_nlink <= 1) {
43979 + saved_ino = dentry->d_inode->i_ino;
43980 + saved_dev = gr_get_dev_from_dentry(dentry);
43981 + }
43982 +
43983 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43984 + error = -EACCES;
43985 + goto exit3;
43986 + }
43987 + }
43988 +
43989 error = mnt_want_write(nd.path.mnt);
43990 if (error)
43991 goto exit3;
43992 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43993 if (error)
43994 goto exit4;
43995 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43996 + if (!error && (saved_dev || saved_ino))
43997 + gr_handle_delete(saved_ino, saved_dev);
43998 exit4:
43999 mnt_drop_write(nd.path.mnt);
44000 exit3:
44001 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
44002 struct dentry *dentry;
44003 struct nameidata nd;
44004 struct inode *inode = NULL;
44005 + ino_t saved_ino = 0;
44006 + dev_t saved_dev = 0;
44007
44008 error = user_path_parent(dfd, pathname, &nd, &name);
44009 if (error)
44010 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
44011 if (nd.last.name[nd.last.len])
44012 goto slashes;
44013 inode = dentry->d_inode;
44014 - if (inode)
44015 + if (inode) {
44016 + if (inode->i_nlink <= 1) {
44017 + saved_ino = inode->i_ino;
44018 + saved_dev = gr_get_dev_from_dentry(dentry);
44019 + }
44020 +
44021 atomic_inc(&inode->i_count);
44022 +
44023 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44024 + error = -EACCES;
44025 + goto exit2;
44026 + }
44027 + }
44028 error = mnt_want_write(nd.path.mnt);
44029 if (error)
44030 goto exit2;
44031 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
44032 if (error)
44033 goto exit3;
44034 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44035 + if (!error && (saved_ino || saved_dev))
44036 + gr_handle_delete(saved_ino, saved_dev);
44037 exit3:
44038 mnt_drop_write(nd.path.mnt);
44039 exit2:
44040 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44041 if (IS_ERR(dentry))
44042 goto out_unlock;
44043
44044 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44045 + error = -EACCES;
44046 + goto out_dput;
44047 + }
44048 +
44049 error = mnt_want_write(nd.path.mnt);
44050 if (error)
44051 goto out_dput;
44052 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44053 if (error)
44054 goto out_drop_write;
44055 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44056 + if (!error)
44057 + gr_handle_create(dentry, nd.path.mnt);
44058 out_drop_write:
44059 mnt_drop_write(nd.path.mnt);
44060 out_dput:
44061 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44062 error = PTR_ERR(new_dentry);
44063 if (IS_ERR(new_dentry))
44064 goto out_unlock;
44065 +
44066 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44067 + old_path.dentry->d_inode,
44068 + old_path.dentry->d_inode->i_mode, to)) {
44069 + error = -EACCES;
44070 + goto out_dput;
44071 + }
44072 +
44073 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44074 + old_path.dentry, old_path.mnt, to)) {
44075 + error = -EACCES;
44076 + goto out_dput;
44077 + }
44078 +
44079 error = mnt_want_write(nd.path.mnt);
44080 if (error)
44081 goto out_dput;
44082 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44083 if (error)
44084 goto out_drop_write;
44085 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44086 + if (!error)
44087 + gr_handle_create(new_dentry, nd.path.mnt);
44088 out_drop_write:
44089 mnt_drop_write(nd.path.mnt);
44090 out_dput:
44091 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44092 char *to;
44093 int error;
44094
44095 + pax_track_stack();
44096 +
44097 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44098 if (error)
44099 goto exit;
44100 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44101 if (new_dentry == trap)
44102 goto exit5;
44103
44104 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44105 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44106 + to);
44107 + if (error)
44108 + goto exit5;
44109 +
44110 error = mnt_want_write(oldnd.path.mnt);
44111 if (error)
44112 goto exit5;
44113 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44114 goto exit6;
44115 error = vfs_rename(old_dir->d_inode, old_dentry,
44116 new_dir->d_inode, new_dentry);
44117 + if (!error)
44118 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44119 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44120 exit6:
44121 mnt_drop_write(oldnd.path.mnt);
44122 exit5:
44123 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44124
44125 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44126 {
44127 + char tmpbuf[64];
44128 + const char *newlink;
44129 int len;
44130
44131 len = PTR_ERR(link);
44132 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44133 len = strlen(link);
44134 if (len > (unsigned) buflen)
44135 len = buflen;
44136 - if (copy_to_user(buffer, link, len))
44137 +
44138 + if (len < sizeof(tmpbuf)) {
44139 + memcpy(tmpbuf, link, len);
44140 + newlink = tmpbuf;
44141 + } else
44142 + newlink = link;
44143 +
44144 + if (copy_to_user(buffer, newlink, len))
44145 len = -EFAULT;
44146 out:
44147 return len;
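
Most of the fs/namei.c churn is grsecurity policy hooks (gr_handle_follow_link, gr_acl_handle_open, gr_handle_fifo, gr_acl_handle_mkdir/rmdir/unlink/rename and friends) inserted at the points where the VFS commits to an operation, each able to veto it with -EACCES or -EPERM. Two smaller changes ride along: generic_permission() now tries CAP_DAC_READ_SEARCH before falling back to CAP_DAC_OVERRIDE, so a plain read or directory search that CAP_DAC_READ_SEARCH already allows no longer exercises the stronger capability first, and vfs_readlink() stages link targets shorter than 64 bytes through an on-stack buffer before copy_to_user(). A runnable sketch of that staging pattern, with memcpy() standing in for copy_to_user() and the 64-byte size taken from the hunk:

#include <stdio.h>
#include <string.h>

/* Copy 'link' into 'buffer' (capacity buflen), staging short strings through
 * a fixed stack buffer first, as in the vfs_readlink() hunk above. */
static int readlink_copy(char *buffer, unsigned int buflen, const char *link)
{
    char tmpbuf[64];
    const char *newlink;
    unsigned int len = (unsigned int)strlen(link);

    if (len > buflen)
        len = buflen;

    if (len < sizeof(tmpbuf)) {
        memcpy(tmpbuf, link, len);
        newlink = tmpbuf;
    } else {
        newlink = link;
    }

    memcpy(buffer, newlink, len);   /* stands in for copy_to_user() */
    return (int)len;
}

int main(void)
{
    char out[128];
    int n = readlink_copy(out, sizeof(out), "/tmp/target");
    printf("%.*s (%d bytes)\n", n, out, n);
    return 0;
}
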
44148 diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
44149 --- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44150 +++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44151 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44152 if (!(sb->s_flags & MS_RDONLY))
44153 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44154 up_write(&sb->s_umount);
44155 +
44156 + gr_log_remount(mnt->mnt_devname, retval);
44157 +
44158 return retval;
44159 }
44160
44161 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44162 security_sb_umount_busy(mnt);
44163 up_write(&namespace_sem);
44164 release_mounts(&umount_list);
44165 +
44166 + gr_log_unmount(mnt->mnt_devname, retval);
44167 +
44168 return retval;
44169 }
44170
44171 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44172 if (retval)
44173 goto dput_out;
44174
44175 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44176 + retval = -EPERM;
44177 + goto dput_out;
44178 + }
44179 +
44180 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44181 + retval = -EPERM;
44182 + goto dput_out;
44183 + }
44184 +
44185 if (flags & MS_REMOUNT)
44186 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44187 data_page);
44188 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44189 dev_name, data_page);
44190 dput_out:
44191 path_put(&path);
44192 +
44193 + gr_log_mount(dev_name, dir_name, retval);
44194 +
44195 return retval;
44196 }
44197
44198 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44199 goto out1;
44200 }
44201
44202 + if (gr_handle_chroot_pivot()) {
44203 + error = -EPERM;
44204 + path_put(&old);
44205 + goto out1;
44206 + }
44207 +
44208 read_lock(&current->fs->lock);
44209 root = current->fs->root;
44210 path_get(&current->fs->root);
44211 diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
44212 --- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44213 +++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44214 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44215 int res, val = 0, len;
44216 __u8 __name[NCP_MAXPATHLEN + 1];
44217
44218 + pax_track_stack();
44219 +
44220 parent = dget_parent(dentry);
44221 dir = parent->d_inode;
44222
44223 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44224 int error, res, len;
44225 __u8 __name[NCP_MAXPATHLEN + 1];
44226
44227 + pax_track_stack();
44228 +
44229 lock_kernel();
44230 error = -EIO;
44231 if (!ncp_conn_valid(server))
44232 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44233 int error, result, len;
44234 int opmode;
44235 __u8 __name[NCP_MAXPATHLEN + 1];
44236 -
44237 +
44238 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44239 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44240
44241 + pax_track_stack();
44242 +
44243 error = -EIO;
44244 lock_kernel();
44245 if (!ncp_conn_valid(server))
44246 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44247 int error, len;
44248 __u8 __name[NCP_MAXPATHLEN + 1];
44249
44250 + pax_track_stack();
44251 +
44252 DPRINTK("ncp_mkdir: making %s/%s\n",
44253 dentry->d_parent->d_name.name, dentry->d_name.name);
44254
44255 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44256 if (!ncp_conn_valid(server))
44257 goto out;
44258
44259 + pax_track_stack();
44260 +
44261 ncp_age_dentry(server, dentry);
44262 len = sizeof(__name);
44263 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44264 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44265 int old_len, new_len;
44266 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44267
44268 + pax_track_stack();
44269 +
44270 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44271 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44272 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44273 diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
44274 --- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44275 +++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44276 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44277 #endif
44278 struct ncp_entry_info finfo;
44279
44280 + pax_track_stack();
44281 +
44282 data.wdog_pid = NULL;
44283 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44284 if (!server)
44285 diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
44286 --- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44287 +++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44288 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44289 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44290 nfsi->attrtimeo_timestamp = jiffies;
44291
44292 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44293 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44294 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44296 else
44297 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44298 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44299 }
44300
44301 -static atomic_long_t nfs_attr_generation_counter;
44302 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44303
44304 static unsigned long nfs_read_attr_generation_counter(void)
44305 {
44306 - return atomic_long_read(&nfs_attr_generation_counter);
44307 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44308 }
44309
44310 unsigned long nfs_inc_attr_generation_counter(void)
44311 {
44312 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44313 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44314 }
44315
44316 void nfs_fattr_init(struct nfs_fattr *fattr)
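
Beyond the by-now familiar *_unchecked conversion of the attribute-generation counter, the first fs/nfs/inode.c hunk fixes a genuine bug: in this tree NFS_COOKIEVERF(inode) appears to evaluate to a pointer (otherwise the change would be a no-op), so sizeof() gave the pointer size instead of the size of the cookieverf array and the memset cleared only part of the verifier; taking sizeof(NFS_I(inode)->cookieverf) measures the array itself. The classic sizeof-a-pointer-versus-sizeof-an-array trap, in miniature:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

struct nfs_inode_like {
    uint32_t cookieverf[2];         /* 8 bytes that should all be cleared */
};

/* Mimics an accessor that returns a pointer to the array. */
static uint32_t *cookieverf_of(struct nfs_inode_like *n)
{
    return n->cookieverf;
}

int main(void)
{
    struct nfs_inode_like n = { { 0xdeadbeef, 0xcafebabe } };

    /* Pointer-valued expression: sizeof is 4 or 8 depending on the ABI. */
    printf("sizeof(cookieverf_of(&n)) = %zu\n", sizeof(cookieverf_of(&n)));
    /* Array member: sizeof is always the full 8 bytes. */
    printf("sizeof(n.cookieverf)      = %zu\n", sizeof(n.cookieverf));

    memset(n.cookieverf, 0, sizeof(n.cookieverf));
    printf("cleared: %08" PRIx32 " %08" PRIx32 "\n", n.cookieverf[0], n.cookieverf[1]);
    return 0;
}
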
44317 diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
44318 --- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44319 +++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44320 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44321 fput(filp);
44322 }
44323
44324 -static struct nlmsvc_binding nfsd_nlm_ops = {
44325 +static const struct nlmsvc_binding nfsd_nlm_ops = {
44326 .fopen = nlm_fopen, /* open file for locking */
44327 .fclose = nlm_fclose, /* close file */
44328 };
44329 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44330 --- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44331 +++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44332 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44333 unsigned int cmd;
44334 int err;
44335
44336 + pax_track_stack();
44337 +
44338 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44339 (long long) lock->lk_offset,
44340 (long long) lock->lk_length);
44341 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44342 --- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44343 +++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44344 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44345 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44346 u32 minorversion = resp->cstate.minorversion;
44347
44348 + pax_track_stack();
44349 +
44350 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44351 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44352 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44353 diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44354 --- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44355 +++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44356 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44357 } else {
44358 oldfs = get_fs();
44359 set_fs(KERNEL_DS);
44360 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44361 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44362 set_fs(oldfs);
44363 }
44364
44365 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44366
44367 /* Write the data. */
44368 oldfs = get_fs(); set_fs(KERNEL_DS);
44369 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44370 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44371 set_fs(oldfs);
44372 if (host_err < 0)
44373 goto out_nfserr;
44374 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44375 */
44376
44377 oldfs = get_fs(); set_fs(KERNEL_DS);
44378 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44379 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44380 set_fs(oldfs);
44381
44382 if (host_err < 0)
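
The nfsd_vfs_read/write/readlink hunks only add sparse annotations: these call sites deliberately pass kernel buffers where a __user pointer is expected, under set_fs(KERNEL_DS), and the __force cast tells sparse that the address-space mismatch is intentional rather than a bug. The annotations compile away entirely outside a sparse run; a minimal sketch of how such markers are typically defined (the stub macros here are illustrative, the kernel's real ones live in include/linux/compiler.h):

#include <stdio.h>
#include <string.h>

/* Outside a sparse (__CHECKER__) build these annotations expand to nothing,
 * so the cast below is an ordinary no-op cast at compile time. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Pretend syscall-style helper that normally takes a userspace buffer. */
static long fake_readv(const char __user *buf, size_t len)
{
    (void)buf;          /* a real implementation would copy_from_user() here */
    return (long)len;
}

int main(void)
{
    char kernel_buf[16] = "kernel data";

    /* With sparse, passing kernel_buf directly would warn about an address
     * space mismatch; the __force cast documents that it is intentional. */
    long n = fake_readv((__force const char __user *)kernel_buf, strlen(kernel_buf));
    printf("read %ld bytes\n", n);
    return 0;
}
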
44383 diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44384 --- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44385 +++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44386 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44387 unsigned int cmd, void __user *argp)
44388 {
44389 struct nilfs_argv argv[5];
44390 - const static size_t argsz[5] = {
44391 + static const size_t argsz[5] = {
44392 sizeof(struct nilfs_vdesc),
44393 sizeof(struct nilfs_period),
44394 sizeof(__u64),
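
The nilfs2 hunk is a pure style fix: "const static" and "static const" declare the same object, but the standard treats a storage-class specifier that is not first as obsolescent and gcc's -Wold-style-declaration flags the original spelling, so the patch simply reorders the specifiers. For illustration:

#include <stdio.h>

/* Same object either way; keeping "static" first is the conventional spelling
 * and avoids the old-style-declaration warning. */
static const unsigned long argsz[3] = { sizeof(char), sizeof(long), sizeof(double) };

int main(void)
{
    printf("%lu %lu %lu\n", argsz[0], argsz[1], argsz[2]);
    return 0;
}
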
44395 diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44396 --- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44397 +++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44398 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44399 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44400 }
44401
44402 -static struct fsnotify_ops dnotify_fsnotify_ops = {
44403 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
44404 .handle_event = dnotify_handle_event,
44405 .should_send_event = dnotify_should_send_event,
44406 .free_group_priv = NULL,
44407 diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44408 --- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44409 +++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44410 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44411 * get set to 0 so it will never get 'freed'
44412 */
44413 static struct fsnotify_event q_overflow_event;
44414 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44415 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44416
44417 /**
44418 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44419 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44420 */
44421 u32 fsnotify_get_cookie(void)
44422 {
44423 - return atomic_inc_return(&fsnotify_sync_cookie);
44424 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44425 }
44426 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44427
44428 diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44429 --- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44430 +++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44431 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
44432 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44433 ~(s64)(ndir->itype.index.block_size - 1)));
44434 /* Bounds checks. */
44435 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44436 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44437 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44438 "inode 0x%lx or driver bug.", vdir->i_ino);
44439 goto err_out;
44440 diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44441 --- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44442 +++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44443 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44444 #endif /* NTFS_RW */
44445 };
44446
44447 -const struct file_operations ntfs_empty_file_ops = {};
44448 +const struct file_operations ntfs_empty_file_ops __read_only;
44449
44450 -const struct inode_operations ntfs_empty_inode_ops = {};
44451 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44452 diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44453 --- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44454 +++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44455 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44456 return mlog_mask_store(mlog_attr->mask, buf, count);
44457 }
44458
44459 -static struct sysfs_ops mlog_attr_ops = {
44460 +static const struct sysfs_ops mlog_attr_ops = {
44461 .show = mlog_show,
44462 .store = mlog_store,
44463 };
44464 diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44465 --- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44466 +++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44467 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44468 goto bail;
44469 }
44470
44471 - atomic_inc(&osb->alloc_stats.moves);
44472 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44473
44474 status = 0;
44475 bail:
44476 diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44477 --- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44478 +++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44479 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44480 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44481 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44482
44483 + pax_track_stack();
44484 +
44485 /* At some point it might be nice to break this function up a
44486 * bit. */
44487
44488 diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44489 --- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44490 +++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44491 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
44492
44493 struct ocfs2_alloc_stats
44494 {
44495 - atomic_t moves;
44496 - atomic_t local_data;
44497 - atomic_t bitmap_data;
44498 - atomic_t bg_allocs;
44499 - atomic_t bg_extends;
44500 + atomic_unchecked_t moves;
44501 + atomic_unchecked_t local_data;
44502 + atomic_unchecked_t bitmap_data;
44503 + atomic_unchecked_t bg_allocs;
44504 + atomic_unchecked_t bg_extends;
44505 };
44506
44507 enum ocfs2_local_alloc_state
44508 diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44509 --- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44510 +++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44511 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44512 mlog_errno(status);
44513 goto bail;
44514 }
44515 - atomic_inc(&osb->alloc_stats.bg_extends);
44516 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44517
44518 /* You should never ask for this much metadata */
44519 BUG_ON(bits_wanted >
44520 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44521 mlog_errno(status);
44522 goto bail;
44523 }
44524 - atomic_inc(&osb->alloc_stats.bg_allocs);
44525 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44526
44527 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44528 ac->ac_bits_given += (*num_bits);
44529 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44530 mlog_errno(status);
44531 goto bail;
44532 }
44533 - atomic_inc(&osb->alloc_stats.bg_allocs);
44534 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44535
44536 BUG_ON(num_bits != 1);
44537
44538 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44539 cluster_start,
44540 num_clusters);
44541 if (!status)
44542 - atomic_inc(&osb->alloc_stats.local_data);
44543 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44544 } else {
44545 if (min_clusters > (osb->bitmap_cpg - 1)) {
44546 /* The only paths asking for contiguousness
44547 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44548 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44549 bg_blkno,
44550 bg_bit_off);
44551 - atomic_inc(&osb->alloc_stats.bitmap_data);
44552 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44553 }
44554 }
44555 if (status < 0) {
44556 diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44557 --- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44558 +++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44559 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44560 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44561 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44562 "Stats",
44563 - atomic_read(&osb->alloc_stats.bitmap_data),
44564 - atomic_read(&osb->alloc_stats.local_data),
44565 - atomic_read(&osb->alloc_stats.bg_allocs),
44566 - atomic_read(&osb->alloc_stats.moves),
44567 - atomic_read(&osb->alloc_stats.bg_extends));
44568 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44569 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44570 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44571 + atomic_read_unchecked(&osb->alloc_stats.moves),
44572 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44573
44574 out += snprintf(buf + out, len - out,
44575 "%10s => State: %u Descriptor: %llu Size: %u bits "
44576 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44577 spin_lock_init(&osb->osb_xattr_lock);
44578 ocfs2_init_inode_steal_slot(osb);
44579
44580 - atomic_set(&osb->alloc_stats.moves, 0);
44581 - atomic_set(&osb->alloc_stats.local_data, 0);
44582 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44583 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44584 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44585 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44586 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44587 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44588 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44589 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44590
44591 /* Copy the blockcheck stats from the superblock probe */
44592 osb->osb_ecc_stats = *stats;
44593 diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44594 --- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44595 +++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44596 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44597 error = locks_verify_truncate(inode, NULL, length);
44598 if (!error)
44599 error = security_path_truncate(&path, length, 0);
44600 +
44601 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44602 + error = -EACCES;
44603 +
44604 if (!error) {
44605 vfs_dq_init(inode);
44606 error = do_truncate(path.dentry, length, 0, NULL);
44607 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44608 if (__mnt_is_readonly(path.mnt))
44609 res = -EROFS;
44610
44611 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44612 + res = -EACCES;
44613 +
44614 out_path_release:
44615 path_put(&path);
44616 out:
44617 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44618 if (error)
44619 goto dput_and_out;
44620
44621 + gr_log_chdir(path.dentry, path.mnt);
44622 +
44623 set_fs_pwd(current->fs, &path);
44624
44625 dput_and_out:
44626 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44627 goto out_putf;
44628
44629 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44630 +
44631 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44632 + error = -EPERM;
44633 +
44634 + if (!error)
44635 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44636 +
44637 if (!error)
44638 set_fs_pwd(current->fs, &file->f_path);
44639 out_putf:
44640 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44641 if (!capable(CAP_SYS_CHROOT))
44642 goto dput_and_out;
44643
44644 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44645 + goto dput_and_out;
44646 +
44647 + if (gr_handle_chroot_caps(&path)) {
44648 + error = -ENOMEM;
44649 + goto dput_and_out;
44650 + }
44651 +
44652 set_fs_root(current->fs, &path);
44653 +
44654 + gr_handle_chroot_chdir(&path);
44655 +
44656 error = 0;
44657 dput_and_out:
44658 path_put(&path);
44659 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44660 err = mnt_want_write_file(file);
44661 if (err)
44662 goto out_putf;
44663 +
44664 mutex_lock(&inode->i_mutex);
44665 +
44666 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44667 + err = -EACCES;
44668 + goto out_unlock;
44669 + }
44670 +
44671 if (mode == (mode_t) -1)
44672 mode = inode->i_mode;
44673 +
44674 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44675 + err = -EPERM;
44676 + goto out_unlock;
44677 + }
44678 +
44679 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44680 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44681 err = notify_change(dentry, &newattrs);
44682 +
44683 +out_unlock:
44684 mutex_unlock(&inode->i_mutex);
44685 mnt_drop_write(file->f_path.mnt);
44686 out_putf:
44687 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44688 error = mnt_want_write(path.mnt);
44689 if (error)
44690 goto dput_and_out;
44691 +
44692 mutex_lock(&inode->i_mutex);
44693 +
44694 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44695 + error = -EACCES;
44696 + goto out_unlock;
44697 + }
44698 +
44699 if (mode == (mode_t) -1)
44700 mode = inode->i_mode;
44701 +
44702 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44703 + error = -EACCES;
44704 + goto out_unlock;
44705 + }
44706 +
44707 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44708 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44709 error = notify_change(path.dentry, &newattrs);
44710 +
44711 +out_unlock:
44712 mutex_unlock(&inode->i_mutex);
44713 mnt_drop_write(path.mnt);
44714 dput_and_out:
44715 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44716 return sys_fchmodat(AT_FDCWD, filename, mode);
44717 }
44718
44719 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44720 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44721 {
44722 struct inode *inode = dentry->d_inode;
44723 int error;
44724 struct iattr newattrs;
44725
44726 + if (!gr_acl_handle_chown(dentry, mnt))
44727 + return -EACCES;
44728 +
44729 newattrs.ia_valid = ATTR_CTIME;
44730 if (user != (uid_t) -1) {
44731 newattrs.ia_valid |= ATTR_UID;
44732 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44733 error = mnt_want_write(path.mnt);
44734 if (error)
44735 goto out_release;
44736 - error = chown_common(path.dentry, user, group);
44737 + error = chown_common(path.dentry, user, group, path.mnt);
44738 mnt_drop_write(path.mnt);
44739 out_release:
44740 path_put(&path);
44741 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44742 error = mnt_want_write(path.mnt);
44743 if (error)
44744 goto out_release;
44745 - error = chown_common(path.dentry, user, group);
44746 + error = chown_common(path.dentry, user, group, path.mnt);
44747 mnt_drop_write(path.mnt);
44748 out_release:
44749 path_put(&path);
44750 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44751 error = mnt_want_write(path.mnt);
44752 if (error)
44753 goto out_release;
44754 - error = chown_common(path.dentry, user, group);
44755 + error = chown_common(path.dentry, user, group, path.mnt);
44756 mnt_drop_write(path.mnt);
44757 out_release:
44758 path_put(&path);
44759 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44760 goto out_fput;
44761 dentry = file->f_path.dentry;
44762 audit_inode(NULL, dentry);
44763 - error = chown_common(dentry, user, group);
44764 + error = chown_common(dentry, user, group, file->f_path.mnt);
44765 mnt_drop_write(file->f_path.mnt);
44766 out_fput:
44767 fput(file);
44768 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44769 if (!IS_ERR(tmp)) {
44770 fd = get_unused_fd_flags(flags);
44771 if (fd >= 0) {
44772 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44773 + struct file *f;
44774 + /* don't allow to be set by userland */
44775 + flags &= ~FMODE_GREXEC;
44776 + f = do_filp_open(dfd, tmp, flags, mode, 0);
44777 if (IS_ERR(f)) {
44778 put_unused_fd(fd);
44779 fd = PTR_ERR(f);
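
The fs/open.c changes follow the same hook pattern as namei.c (gr_acl/gr_handle checks that can veto truncate, access, chdir, chroot, chmod and chown), thread a struct vfsmount through chown_common() so the chown hook can see which mount the dentry was reached on, and make do_sys_open() clear FMODE_GREXEC from the user-supplied flags so userland cannot smuggle in a kernel-internal flag bit. That last pattern, masking internal-only bits out of caller-controlled flags at the boundary, is worth a minimal sketch (the flag names and values below are invented):

#include <stdio.h>

#define O_RDONLY_DEMO   0x0001   /* bits userland may legitimately set */
#define O_CREAT_DEMO    0x0040
#define FMODE_INTERNAL  0x2000   /* kernel-internal bit: must never come from userland */

static unsigned int sanitize_open_flags(unsigned int user_flags)
{
    /* Strip internal-only bits before the flags reach the open path. */
    return user_flags & ~FMODE_INTERNAL;
}

int main(void)
{
    unsigned int evil = O_RDONLY_DEMO | O_CREAT_DEMO | FMODE_INTERNAL;
    printf("before: %#x after: %#x\n", evil, sanitize_open_flags(evil));
    return 0;
}
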
44780 diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44781 --- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44782 +++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44783 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44784 ldm_error ("A VBLK claims to have %d parts.", num);
44785 return false;
44786 }
44787 +
44788 if (rec >= num) {
44789 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44790 return false;
44791 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44792 goto found;
44793 }
44794
44795 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44796 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44797 if (!f) {
44798 ldm_crit ("Out of memory.");
44799 return false;
44800 diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44801 --- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44802 +++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44803 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44804 return 0; /* not a MacOS disk */
44805 }
44806 blocks_in_map = be32_to_cpu(part->map_count);
44807 + printk(" [mac]");
44808 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44809 put_dev_sector(sect);
44810 return 0;
44811 }
44812 - printk(" [mac]");
44813 for (slot = 1; slot <= blocks_in_map; ++slot) {
44814 int pos = slot * secsize;
44815 put_dev_sector(sect);
44816 diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44817 --- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44818 +++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44819 @@ -401,9 +401,9 @@ redo:
44820 }
44821 if (bufs) /* More to do? */
44822 continue;
44823 - if (!pipe->writers)
44824 + if (!atomic_read(&pipe->writers))
44825 break;
44826 - if (!pipe->waiting_writers) {
44827 + if (!atomic_read(&pipe->waiting_writers)) {
44828 /* syscall merging: Usually we must not sleep
44829 * if O_NONBLOCK is set, or if we got some data.
44830 * But if a writer sleeps in kernel space, then
44831 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44832 mutex_lock(&inode->i_mutex);
44833 pipe = inode->i_pipe;
44834
44835 - if (!pipe->readers) {
44836 + if (!atomic_read(&pipe->readers)) {
44837 send_sig(SIGPIPE, current, 0);
44838 ret = -EPIPE;
44839 goto out;
44840 @@ -511,7 +511,7 @@ redo1:
44841 for (;;) {
44842 int bufs;
44843
44844 - if (!pipe->readers) {
44845 + if (!atomic_read(&pipe->readers)) {
44846 send_sig(SIGPIPE, current, 0);
44847 if (!ret)
44848 ret = -EPIPE;
44849 @@ -597,9 +597,9 @@ redo2:
44850 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44851 do_wakeup = 0;
44852 }
44853 - pipe->waiting_writers++;
44854 + atomic_inc(&pipe->waiting_writers);
44855 pipe_wait(pipe);
44856 - pipe->waiting_writers--;
44857 + atomic_dec(&pipe->waiting_writers);
44858 }
44859 out:
44860 mutex_unlock(&inode->i_mutex);
44861 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44862 mask = 0;
44863 if (filp->f_mode & FMODE_READ) {
44864 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44865 - if (!pipe->writers && filp->f_version != pipe->w_counter)
44866 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44867 mask |= POLLHUP;
44868 }
44869
44870 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44871 * Most Unices do not set POLLERR for FIFOs but on Linux they
44872 * behave exactly like pipes for poll().
44873 */
44874 - if (!pipe->readers)
44875 + if (!atomic_read(&pipe->readers))
44876 mask |= POLLERR;
44877 }
44878
44879 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44880
44881 mutex_lock(&inode->i_mutex);
44882 pipe = inode->i_pipe;
44883 - pipe->readers -= decr;
44884 - pipe->writers -= decw;
44885 + atomic_sub(decr, &pipe->readers);
44886 + atomic_sub(decw, &pipe->writers);
44887
44888 - if (!pipe->readers && !pipe->writers) {
44889 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44890 free_pipe_info(inode);
44891 } else {
44892 wake_up_interruptible_sync(&pipe->wait);
44893 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44894
44895 if (inode->i_pipe) {
44896 ret = 0;
44897 - inode->i_pipe->readers++;
44898 + atomic_inc(&inode->i_pipe->readers);
44899 }
44900
44901 mutex_unlock(&inode->i_mutex);
44902 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44903
44904 if (inode->i_pipe) {
44905 ret = 0;
44906 - inode->i_pipe->writers++;
44907 + atomic_inc(&inode->i_pipe->writers);
44908 }
44909
44910 mutex_unlock(&inode->i_mutex);
44911 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44912 if (inode->i_pipe) {
44913 ret = 0;
44914 if (filp->f_mode & FMODE_READ)
44915 - inode->i_pipe->readers++;
44916 + atomic_inc(&inode->i_pipe->readers);
44917 if (filp->f_mode & FMODE_WRITE)
44918 - inode->i_pipe->writers++;
44919 + atomic_inc(&inode->i_pipe->writers);
44920 }
44921
44922 mutex_unlock(&inode->i_mutex);
44923 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44924 inode->i_pipe = NULL;
44925 }
44926
44927 -static struct vfsmount *pipe_mnt __read_mostly;
44928 +struct vfsmount *pipe_mnt __read_mostly;
44929 static int pipefs_delete_dentry(struct dentry *dentry)
44930 {
44931 /*
44932 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44933 goto fail_iput;
44934 inode->i_pipe = pipe;
44935
44936 - pipe->readers = pipe->writers = 1;
44937 + atomic_set(&pipe->readers, 1);
44938 + atomic_set(&pipe->writers, 1);
44939 inode->i_fop = &rdwr_pipefifo_fops;
44940
44941 /*
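
In fs/pipe.c the readers, writers and waiting_writers counts move from plain integers to atomic_t, with every ++/--/read becoming atomic_inc/atomic_dec/atomic_read; this presupposes the matching struct pipe_inode_info change elsewhere in the patch and lets the counts be updated and examined with atomic operations rather than relying solely on i_mutex. A minimal sketch of the counting pattern with C11 atomics (the pipe structure here is a stand-in, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct pipe_like {
    atomic_int readers;
    atomic_int writers;
};

static void open_for_read(struct pipe_like *p)  { atomic_fetch_add(&p->readers, 1); }
static void open_for_write(struct pipe_like *p) { atomic_fetch_add(&p->writers, 1); }

/* Mirrors pipe_release(): drop the given counts, free when both hit zero. */
static void release(struct pipe_like *p, int decr, int decw)
{
    atomic_fetch_sub(&p->readers, decr);
    atomic_fetch_sub(&p->writers, decw);
    if (atomic_load(&p->readers) == 0 && atomic_load(&p->writers) == 0)
        printf("last user gone: free the pipe\n");
}

int main(void)
{
    struct pipe_like p;
    atomic_init(&p.readers, 0);
    atomic_init(&p.writers, 0);

    open_for_read(&p);
    open_for_write(&p);
    release(&p, 1, 0);
    release(&p, 0, 1);   /* prints the "last user gone" message */
    return 0;
}
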
44942 diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44943 --- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44944 +++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44945 @@ -60,6 +60,7 @@
44946 #include <linux/tty.h>
44947 #include <linux/string.h>
44948 #include <linux/mman.h>
44949 +#include <linux/grsecurity.h>
44950 #include <linux/proc_fs.h>
44951 #include <linux/ioport.h>
44952 #include <linux/uaccess.h>
44953 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
44954 p->nivcsw);
44955 }
44956
44957 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44958 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
44959 +{
44960 + if (p->mm)
44961 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44962 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44963 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44964 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44965 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44966 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44967 + else
44968 + seq_printf(m, "PaX:\t-----\n");
44969 +}
44970 +#endif
44971 +
44972 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44973 struct pid *pid, struct task_struct *task)
44974 {
44975 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44976 task_cap(m, task);
44977 cpuset_task_status_allowed(m, task);
44978 task_context_switch_counts(m, task);
44979 +
44980 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44981 + task_pax(m, task);
44982 +#endif
44983 +
44984 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44985 + task_grsec_rbac(m, task);
44986 +#endif
44987 +
44988 return 0;
44989 }
44990
44991 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44992 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44993 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44994 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44995 +#endif
44996 +
44997 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44998 struct pid *pid, struct task_struct *task, int whole)
44999 {
45000 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
45001 cputime_t cutime, cstime, utime, stime;
45002 cputime_t cgtime, gtime;
45003 unsigned long rsslim = 0;
45004 - char tcomm[sizeof(task->comm)];
45005 + char tcomm[sizeof(task->comm)] = { 0 };
45006 unsigned long flags;
45007
45008 + pax_track_stack();
45009 +
45010 state = *get_task_state(task);
45011 vsize = eip = esp = 0;
45012 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45013 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
45014 gtime = task_gtime(task);
45015 }
45016
45017 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45018 + if (PAX_RAND_FLAGS(mm)) {
45019 + eip = 0;
45020 + esp = 0;
45021 + wchan = 0;
45022 + }
45023 +#endif
45024 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45025 + wchan = 0;
45026 + eip =0;
45027 + esp =0;
45028 +#endif
45029 +
45030 /* scale priority and nice values from timeslices to -20..20 */
45031 /* to make it look like a "normal" Unix priority/nice value */
45032 priority = task_prio(task);
45033 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
45034 vsize,
45035 mm ? get_mm_rss(mm) : 0,
45036 rsslim,
45037 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45038 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45039 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45040 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45041 +#else
45042 mm ? (permitted ? mm->start_code : 1) : 0,
45043 mm ? (permitted ? mm->end_code : 1) : 0,
45044 (permitted && mm) ? mm->start_stack : 0,
45045 +#endif
45046 esp,
45047 eip,
45048 /* The signal information here is obsolete.
45049 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
45050
45051 return 0;
45052 }
45053 +
45054 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45055 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45056 +{
45057 + u32 curr_ip = 0;
45058 + unsigned long flags;
45059 +
45060 + if (lock_task_sighand(task, &flags)) {
45061 + curr_ip = task->signal->curr_ip;
45062 + unlock_task_sighand(task, &flags);
45063 + }
45064 +
45065 + return sprintf(buffer, "%pI4\n", &curr_ip);
45066 +}
45067 +#endif
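
fs/proc/array.c gains a "PaX:" line in /proc/<pid>/status showing which PaX features are active for the task's mm (uppercase letter for enabled, lowercase for disabled), and under CONFIG_GRKERNSEC_PROC_MEMMAP / GRKERNSEC_HIDESYM it zeroes eip, esp, wchan and the code/stack addresses reported in /proc/<pid>/stat so the randomized layout of other processes is not exposed. A minimal sketch of the flag-letter formatting (the bit values below are invented; the real MF_PAX_* constants live in the mm headers):

#include <stdio.h>

/* Illustrative bit values; the real MF_PAX_* definitions differ. */
#define MF_PAGEEXEC 0x01
#define MF_EMUTRAMP 0x02
#define MF_MPROTECT 0x04
#define MF_RANDMMAP 0x08
#define MF_SEGMEXEC 0x10

static void print_pax_line(unsigned long flags)
{
    /* Uppercase when the bit is set, lowercase otherwise (same scheme as the hunk). */
    printf("PaX:\t%c%c%c%c%c\n",
           flags & MF_PAGEEXEC ? 'P' : 'p',
           flags & MF_EMUTRAMP ? 'E' : 'e',
           flags & MF_MPROTECT ? 'M' : 'm',
           flags & MF_RANDMMAP ? 'R' : 'r',
           flags & MF_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
    print_pax_line(MF_PAGEEXEC | MF_MPROTECT | MF_RANDMMAP);   /* prints PeMRs */
    print_pax_line(0);                                         /* prints pemrs */
    return 0;
}
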
45068 diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
45069 --- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
45070 +++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
45071 @@ -102,6 +102,22 @@ struct pid_entry {
45072 union proc_op op;
45073 };
45074
45075 +struct getdents_callback {
45076 + struct linux_dirent __user * current_dir;
45077 + struct linux_dirent __user * previous;
45078 + struct file * file;
45079 + int count;
45080 + int error;
45081 +};
45082 +
45083 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45084 + loff_t offset, u64 ino, unsigned int d_type)
45085 +{
45086 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45087 + buf->error = -EINVAL;
45088 + return 0;
45089 +}
45090 +
45091 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45092 .name = (NAME), \
45093 .len = sizeof(NAME) - 1, \
45094 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45095 if (task == current)
45096 return 0;
45097
45098 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45099 + return -EPERM;
45100 +
45101 /*
45102 * If current is actively ptrace'ing, and would also be
45103 * permitted to freshly attach with ptrace now, permit it.
45104 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45105 if (!mm->arg_end)
45106 goto out_mm; /* Shh! No looking before we're done */
45107
45108 + if (gr_acl_handle_procpidmem(task))
45109 + goto out_mm;
45110 +
45111 len = mm->arg_end - mm->arg_start;
45112
45113 if (len > PAGE_SIZE)
45114 @@ -287,12 +309,28 @@ out:
45115 return res;
45116 }
45117
45118 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45119 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45120 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45121 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45122 +#endif
45123 +
45124 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45125 {
45126 int res = 0;
45127 struct mm_struct *mm = get_task_mm(task);
45128 if (mm) {
45129 unsigned int nwords = 0;
45130 +
45131 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45132 + /* allow if we're currently ptracing this task */
45133 + if (PAX_RAND_FLAGS(mm) &&
45134 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45135 + mmput(mm);
45136 + return res;
45137 + }
45138 +#endif
45139 +
45140 do {
45141 nwords += 2;
45142 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45143 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45144 }
45145
45146
45147 -#ifdef CONFIG_KALLSYMS
45148 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45149 /*
45150 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45151 * Returns the resolved symbol. If that fails, simply return the address.
45152 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45153 }
45154 #endif /* CONFIG_KALLSYMS */
45155
45156 -#ifdef CONFIG_STACKTRACE
45157 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45158
45159 #define MAX_STACK_TRACE_DEPTH 64
45160
45161 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45162 return count;
45163 }
45164
45165 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45166 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45167 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45168 {
45169 long nr;
45170 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45171 /************************************************************************/
45172
45173 /* permission checks */
45174 -static int proc_fd_access_allowed(struct inode *inode)
45175 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45176 {
45177 struct task_struct *task;
45178 int allowed = 0;
45179 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45180 */
45181 task = get_proc_task(inode);
45182 if (task) {
45183 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45184 + if (log)
45185 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45186 + else
45187 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45188 put_task_struct(task);
45189 }
45190 return allowed;
45191 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45192 if (!task)
45193 goto out_no_task;
45194
45195 + if (gr_acl_handle_procpidmem(task))
45196 + goto out;
45197 +
45198 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45199 goto out;
45200
45201 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45202 path_put(&nd->path);
45203
45204 /* Are we allowed to snoop on the tasks file descriptors? */
45205 - if (!proc_fd_access_allowed(inode))
45206 + if (!proc_fd_access_allowed(inode,0))
45207 goto out;
45208
45209 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45210 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45211 struct path path;
45212
45213 /* Are we allowed to snoop on the tasks file descriptors? */
45214 - if (!proc_fd_access_allowed(inode))
45215 - goto out;
45216 + /* logging this is needed for learning on chromium to work properly,
45217 + but we don't want to flood the logs from 'ps' which does a readlink
45218 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45219 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45220 + */
45221 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45222 + if (!proc_fd_access_allowed(inode,0))
45223 + goto out;
45224 + } else {
45225 + if (!proc_fd_access_allowed(inode,1))
45226 + goto out;
45227 + }
45228
45229 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45230 if (error)
45231 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45232 rcu_read_lock();
45233 cred = __task_cred(task);
45234 inode->i_uid = cred->euid;
45235 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45236 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45237 +#else
45238 inode->i_gid = cred->egid;
45239 +#endif
45240 rcu_read_unlock();
45241 }
45242 security_task_to_inode(task, inode);
45243 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45244 struct inode *inode = dentry->d_inode;
45245 struct task_struct *task;
45246 const struct cred *cred;
45247 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45248 + const struct cred *tmpcred = current_cred();
45249 +#endif
45250
45251 generic_fillattr(inode, stat);
45252
45253 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45254 stat->uid = 0;
45255 stat->gid = 0;
45256 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45257 +
45258 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45259 + rcu_read_unlock();
45260 + return -ENOENT;
45261 + }
45262 +
45263 if (task) {
45264 + cred = __task_cred(task);
45265 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45266 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45267 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45268 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45269 +#endif
45270 + ) {
45271 +#endif
45272 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45273 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45274 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45275 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45276 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45277 +#endif
45278 task_dumpable(task)) {
45279 - cred = __task_cred(task);
45280 stat->uid = cred->euid;
45281 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45282 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45283 +#else
45284 stat->gid = cred->egid;
45285 +#endif
45286 }
45287 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45288 + } else {
45289 + rcu_read_unlock();
45290 + return -ENOENT;
45291 + }
45292 +#endif
45293 }
45294 rcu_read_unlock();
45295 return 0;
45296 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45297
45298 if (task) {
45299 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45300 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45301 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45302 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45303 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45304 +#endif
45305 task_dumpable(task)) {
45306 rcu_read_lock();
45307 cred = __task_cred(task);
45308 inode->i_uid = cred->euid;
45309 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45310 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45311 +#else
45312 inode->i_gid = cred->egid;
45313 +#endif
45314 rcu_read_unlock();
45315 } else {
45316 inode->i_uid = 0;
45317 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45318 int fd = proc_fd(inode);
45319
45320 if (task) {
45321 - files = get_files_struct(task);
45322 + if (!gr_acl_handle_procpidmem(task))
45323 + files = get_files_struct(task);
45324 put_task_struct(task);
45325 }
45326 if (files) {
45327 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
45328 static int proc_fd_permission(struct inode *inode, int mask)
45329 {
45330 int rv;
45331 + struct task_struct *task;
45332
45333 rv = generic_permission(inode, mask, NULL);
45334 - if (rv == 0)
45335 - return 0;
45336 +
45337 if (task_pid(current) == proc_pid(inode))
45338 rv = 0;
45339 +
45340 + task = get_proc_task(inode);
45341 + if (task == NULL)
45342 + return rv;
45343 +
45344 + if (gr_acl_handle_procpidmem(task))
45345 + rv = -EACCES;
45346 +
45347 + put_task_struct(task);
45348 +
45349 return rv;
45350 }
45351
45352 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45353 if (!task)
45354 goto out_no_task;
45355
45356 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45357 + goto out;
45358 +
45359 /*
45360 * Yes, it does not scale. And it should not. Don't add
45361 * new entries into /proc/<tgid>/ without very good reasons.
45362 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45363 if (!task)
45364 goto out_no_task;
45365
45366 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45367 + goto out;
45368 +
45369 ret = 0;
45370 i = filp->f_pos;
45371 switch (i) {
45372 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45373 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45374 void *cookie)
45375 {
45376 - char *s = nd_get_link(nd);
45377 + const char *s = nd_get_link(nd);
45378 if (!IS_ERR(s))
45379 __putname(s);
45380 }
45381 @@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45382 #ifdef CONFIG_SCHED_DEBUG
45383 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45384 #endif
45385 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45386 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45387 INF("syscall", S_IRUSR, proc_pid_syscall),
45388 #endif
45389 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45390 @@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45391 #ifdef CONFIG_SECURITY
45392 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45393 #endif
45394 -#ifdef CONFIG_KALLSYMS
45395 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45396 INF("wchan", S_IRUGO, proc_pid_wchan),
45397 #endif
45398 -#ifdef CONFIG_STACKTRACE
45399 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45400 ONE("stack", S_IRUSR, proc_pid_stack),
45401 #endif
45402 #ifdef CONFIG_SCHEDSTATS
45403 @@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45404 #ifdef CONFIG_TASK_IO_ACCOUNTING
45405 INF("io", S_IRUSR, proc_tgid_io_accounting),
45406 #endif
45407 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45408 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45409 +#endif
45410 };
45411
45412 static int proc_tgid_base_readdir(struct file * filp,
45413 @@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45414 if (!inode)
45415 goto out;
45416
45417 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45418 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45419 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45420 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45421 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45422 +#else
45423 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45424 +#endif
45425 inode->i_op = &proc_tgid_base_inode_operations;
45426 inode->i_fop = &proc_tgid_base_operations;
45427 inode->i_flags|=S_IMMUTABLE;
45428 @@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45429 if (!task)
45430 goto out;
45431
45432 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45433 + goto out_put_task;
45434 +
45435 result = proc_pid_instantiate(dir, dentry, task, NULL);
45436 +out_put_task:
45437 put_task_struct(task);
45438 out:
45439 return result;
45440 @@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45441 {
45442 unsigned int nr;
45443 struct task_struct *reaper;
45444 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45445 + const struct cred *tmpcred = current_cred();
45446 + const struct cred *itercred;
45447 +#endif
45448 + filldir_t __filldir = filldir;
45449 struct tgid_iter iter;
45450 struct pid_namespace *ns;
45451
45452 @@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45453 for (iter = next_tgid(ns, iter);
45454 iter.task;
45455 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45456 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45457 + rcu_read_lock();
45458 + itercred = __task_cred(iter.task);
45459 +#endif
45460 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45461 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45462 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45463 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45464 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45465 +#endif
45466 + )
45467 +#endif
45468 + )
45469 + __filldir = &gr_fake_filldir;
45470 + else
45471 + __filldir = filldir;
45472 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45473 + rcu_read_unlock();
45474 +#endif
45475 filp->f_pos = iter.tgid + TGID_OFFSET;
45476 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45477 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45478 put_task_struct(iter.task);
45479 goto out;
45480 }
45481 @@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45482 #ifdef CONFIG_SCHED_DEBUG
45483 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45484 #endif
45485 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45486 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45487 INF("syscall", S_IRUSR, proc_pid_syscall),
45488 #endif
45489 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45490 @@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45491 #ifdef CONFIG_SECURITY
45492 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45493 #endif
45494 -#ifdef CONFIG_KALLSYMS
45495 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45496 INF("wchan", S_IRUGO, proc_pid_wchan),
45497 #endif
45498 -#ifdef CONFIG_STACKTRACE
45499 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45500 ONE("stack", S_IRUSR, proc_pid_stack),
45501 #endif
45502 #ifdef CONFIG_SCHEDSTATS
45503 diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45504 --- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45505 +++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45506 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45507
45508 static int __init proc_cmdline_init(void)
45509 {
45510 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45511 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45512 +#else
45513 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45514 +#endif
45515 return 0;
45516 }
45517 module_init(proc_cmdline_init);
45518 diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45519 --- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45520 +++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45521 @@ -64,7 +64,11 @@ static const struct file_operations proc
45522
45523 static int __init proc_devices_init(void)
45524 {
45525 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45526 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45527 +#else
45528 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45529 +#endif
45530 return 0;
45531 }
45532 module_init(proc_devices_init);
45533 diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45534 --- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45535 +++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45536 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45537 if (de->mode) {
45538 inode->i_mode = de->mode;
45539 inode->i_uid = de->uid;
45540 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45541 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45542 +#else
45543 inode->i_gid = de->gid;
45544 +#endif
45545 }
45546 if (de->size)
45547 inode->i_size = de->size;
45548 diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45549 --- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45550 +++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45551 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45552 struct pid *pid, struct task_struct *task);
45553 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45554 struct pid *pid, struct task_struct *task);
45555 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45556 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45557 +#endif
45558 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45559
45560 extern const struct file_operations proc_maps_operations;
45561 diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45562 --- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45563 +++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45564 @@ -30,12 +30,12 @@ config PROC_FS
45565
45566 config PROC_KCORE
45567 bool "/proc/kcore support" if !ARM
45568 - depends on PROC_FS && MMU
45569 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45570
45571 config PROC_VMCORE
45572 bool "/proc/vmcore support (EXPERIMENTAL)"
45573 - depends on PROC_FS && CRASH_DUMP
45574 - default y
45575 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45576 + default n
45577 help
45578 Exports the dump image of crashed kernel in ELF format.
45579
45580 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45581 limited in memory.
45582
45583 config PROC_PAGE_MONITOR
45584 - default y
45585 - depends on PROC_FS && MMU
45586 + default n
45587 + depends on PROC_FS && MMU && !GRKERNSEC
45588 bool "Enable /proc page monitoring" if EMBEDDED
45589 help
45590 Various /proc files exist to monitor process memory utilization:
45591 diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45592 --- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45593 +++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45594 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45595 off_t offset = 0;
45596 struct kcore_list *m;
45597
45598 + pax_track_stack();
45599 +
45600 /* setup ELF header */
45601 elf = (struct elfhdr *) bufp;
45602 bufp += sizeof(struct elfhdr);
45603 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45604 * the addresses in the elf_phdr on our list.
45605 */
45606 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45607 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45608 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45609 + if (tsz > buflen)
45610 tsz = buflen;
45611 -
45612 +
45613 while (buflen) {
45614 struct kcore_list *m;
45615
45616 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45617 kfree(elf_buf);
45618 } else {
45619 if (kern_addr_valid(start)) {
45620 - unsigned long n;
45621 + char *elf_buf;
45622 + mm_segment_t oldfs;
45623
45624 - n = copy_to_user(buffer, (char *)start, tsz);
45625 - /*
45626 - * We cannot distingush between fault on source
45627 - * and fault on destination. When this happens
45628 - * we clear too and hope it will trigger the
45629 - * EFAULT again.
45630 - */
45631 - if (n) {
45632 - if (clear_user(buffer + tsz - n,
45633 - n))
45634 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45635 + if (!elf_buf)
45636 + return -ENOMEM;
45637 + oldfs = get_fs();
45638 + set_fs(KERNEL_DS);
45639 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45640 + set_fs(oldfs);
45641 + if (copy_to_user(buffer, elf_buf, tsz)) {
45642 + kfree(elf_buf);
45643 return -EFAULT;
45644 + }
45645 }
45646 + set_fs(oldfs);
45647 + kfree(elf_buf);
45648 } else {
45649 if (clear_user(buffer, tsz))
45650 return -EFAULT;
45651 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45652
45653 static int open_kcore(struct inode *inode, struct file *filp)
45654 {
45655 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45656 + return -EPERM;
45657 +#endif
45658 if (!capable(CAP_SYS_RAWIO))
45659 return -EPERM;
45660 if (kcore_need_update)
45661 diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45662 --- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45663 +++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45664 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45665 unsigned long pages[NR_LRU_LISTS];
45666 int lru;
45667
45668 + pax_track_stack();
45669 +
45670 /*
45671 * display in kilobytes.
45672 */
45673 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45674 vmi.used >> 10,
45675 vmi.largest_chunk >> 10
45676 #ifdef CONFIG_MEMORY_FAILURE
45677 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45678 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45679 #endif
45680 );
45681
45682 diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45683 --- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45684 +++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45685 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45686 if (len < 1)
45687 len = 1;
45688 seq_printf(m, "%*c", len, ' ');
45689 - seq_path(m, &file->f_path, "");
45690 + seq_path(m, &file->f_path, "\n\\");
45691 }
45692
45693 seq_putc(m, '\n');
45694 diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45695 --- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45696 +++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45697 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45698 struct task_struct *task;
45699 struct nsproxy *ns;
45700 struct net *net = NULL;
45701 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45702 + const struct cred *cred = current_cred();
45703 +#endif
45704 +
45705 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45706 + if (cred->fsuid)
45707 + return net;
45708 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45709 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45710 + return net;
45711 +#endif
45712
45713 rcu_read_lock();
45714 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45715 diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45716 --- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45717 +++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45718 @@ -7,6 +7,8 @@
45719 #include <linux/security.h>
45720 #include "internal.h"
45721
45722 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45723 +
45724 static const struct dentry_operations proc_sys_dentry_operations;
45725 static const struct file_operations proc_sys_file_operations;
45726 static const struct inode_operations proc_sys_inode_operations;
45727 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45728 if (!p)
45729 goto out;
45730
45731 + if (gr_handle_sysctl(p, MAY_EXEC))
45732 + goto out;
45733 +
45734 err = ERR_PTR(-ENOMEM);
45735 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45736 if (h)
45737 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45738 if (*pos < file->f_pos)
45739 continue;
45740
45741 + if (gr_handle_sysctl(table, 0))
45742 + continue;
45743 +
45744 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45745 if (res)
45746 return res;
45747 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45748 if (IS_ERR(head))
45749 return PTR_ERR(head);
45750
45751 + if (table && gr_handle_sysctl(table, MAY_EXEC))
45752 + return -ENOENT;
45753 +
45754 generic_fillattr(inode, stat);
45755 if (table)
45756 stat->mode = (stat->mode & S_IFMT) | table->mode;
45757 diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45758 --- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45759 +++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45760 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
45761 #ifdef CONFIG_PROC_DEVICETREE
45762 proc_device_tree_init();
45763 #endif
45764 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45765 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45766 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45767 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45768 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45769 +#endif
45770 +#else
45771 proc_mkdir("bus", NULL);
45772 +#endif
45773 proc_sys_init();
45774 }
45775
45776 diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45777 --- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45778 +++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45779 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45780 "VmStk:\t%8lu kB\n"
45781 "VmExe:\t%8lu kB\n"
45782 "VmLib:\t%8lu kB\n"
45783 - "VmPTE:\t%8lu kB\n",
45784 - hiwater_vm << (PAGE_SHIFT-10),
45785 + "VmPTE:\t%8lu kB\n"
45786 +
45787 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45788 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45789 +#endif
45790 +
45791 + ,hiwater_vm << (PAGE_SHIFT-10),
45792 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45793 mm->locked_vm << (PAGE_SHIFT-10),
45794 hiwater_rss << (PAGE_SHIFT-10),
45795 total_rss << (PAGE_SHIFT-10),
45796 data << (PAGE_SHIFT-10),
45797 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45798 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45799 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45800 +
45801 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45802 + , mm->context.user_cs_base, mm->context.user_cs_limit
45803 +#endif
45804 +
45805 + );
45806 }
45807
45808 unsigned long task_vsize(struct mm_struct *mm)
45809 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45810 struct proc_maps_private *priv = m->private;
45811 struct vm_area_struct *vma = v;
45812
45813 - vma_stop(priv, vma);
45814 + if (!IS_ERR(vma))
45815 + vma_stop(priv, vma);
45816 if (priv->task)
45817 put_task_struct(priv->task);
45818 }
45819 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45820 return ret;
45821 }
45822
45823 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45824 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45825 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45826 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45827 +#endif
45828 +
45829 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45830 {
45831 struct mm_struct *mm = vma->vm_mm;
45832 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45833 int flags = vma->vm_flags;
45834 unsigned long ino = 0;
45835 unsigned long long pgoff = 0;
45836 - unsigned long start;
45837 dev_t dev = 0;
45838 int len;
45839
45840 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45841 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45842 }
45843
45844 - /* We don't show the stack guard page in /proc/maps */
45845 - start = vma->vm_start;
45846 - if (vma->vm_flags & VM_GROWSDOWN)
45847 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45848 - start += PAGE_SIZE;
45849 -
45850 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45851 - start,
45852 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45853 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45854 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45855 +#else
45856 + vma->vm_start,
45857 vma->vm_end,
45858 +#endif
45859 flags & VM_READ ? 'r' : '-',
45860 flags & VM_WRITE ? 'w' : '-',
45861 flags & VM_EXEC ? 'x' : '-',
45862 flags & VM_MAYSHARE ? 's' : 'p',
45863 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45864 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45865 +#else
45866 pgoff,
45867 +#endif
45868 MAJOR(dev), MINOR(dev), ino, &len);
45869
45870 /*
45871 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45872 */
45873 if (file) {
45874 pad_len_spaces(m, len);
45875 - seq_path(m, &file->f_path, "\n");
45876 + seq_path(m, &file->f_path, "\n\\");
45877 } else {
45878 const char *name = arch_vma_name(vma);
45879 if (!name) {
45880 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45881 if (vma->vm_start <= mm->brk &&
45882 vma->vm_end >= mm->start_brk) {
45883 name = "[heap]";
45884 - } else if (vma->vm_start <= mm->start_stack &&
45885 - vma->vm_end >= mm->start_stack) {
45886 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45887 + (vma->vm_start <= mm->start_stack &&
45888 + vma->vm_end >= mm->start_stack)) {
45889 name = "[stack]";
45890 }
45891 } else {
45892 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45893 };
45894
45895 memset(&mss, 0, sizeof mss);
45896 - mss.vma = vma;
45897 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45898 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45899 +
45900 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45901 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45902 +#endif
45903 + mss.vma = vma;
45904 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45905 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45906 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45907 + }
45908 +#endif
45909
45910 show_map_vma(m, vma);
45911
45912 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45913 "Swap: %8lu kB\n"
45914 "KernelPageSize: %8lu kB\n"
45915 "MMUPageSize: %8lu kB\n",
45916 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45917 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45918 +#else
45919 (vma->vm_end - vma->vm_start) >> 10,
45920 +#endif
45921 mss.resident >> 10,
45922 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45923 mss.shared_clean >> 10,
45924 diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45925 --- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45926 +++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45927 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45928 else
45929 bytes += kobjsize(mm);
45930
45931 - if (current->fs && current->fs->users > 1)
45932 + if (current->fs && atomic_read(&current->fs->users) > 1)
45933 sbytes += kobjsize(current->fs);
45934 else
45935 bytes += kobjsize(current->fs);
45936 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45937 if (len < 1)
45938 len = 1;
45939 seq_printf(m, "%*c", len, ' ');
45940 - seq_path(m, &file->f_path, "");
45941 + seq_path(m, &file->f_path, "\n\\");
45942 }
45943
45944 seq_putc(m, '\n');
45945 diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45946 --- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45947 +++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45948 @@ -16,6 +16,7 @@
45949 #include <linux/security.h>
45950 #include <linux/syscalls.h>
45951 #include <linux/unistd.h>
45952 +#include <linux/namei.h>
45953
45954 #include <asm/uaccess.h>
45955
45956 @@ -67,6 +68,7 @@ struct old_linux_dirent {
45957
45958 struct readdir_callback {
45959 struct old_linux_dirent __user * dirent;
45960 + struct file * file;
45961 int result;
45962 };
45963
45964 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45965 buf->result = -EOVERFLOW;
45966 return -EOVERFLOW;
45967 }
45968 +
45969 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45970 + return 0;
45971 +
45972 buf->result++;
45973 dirent = buf->dirent;
45974 if (!access_ok(VERIFY_WRITE, dirent,
45975 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45976
45977 buf.result = 0;
45978 buf.dirent = dirent;
45979 + buf.file = file;
45980
45981 error = vfs_readdir(file, fillonedir, &buf);
45982 if (buf.result)
45983 @@ -142,6 +149,7 @@ struct linux_dirent {
45984 struct getdents_callback {
45985 struct linux_dirent __user * current_dir;
45986 struct linux_dirent __user * previous;
45987 + struct file * file;
45988 int count;
45989 int error;
45990 };
45991 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45992 buf->error = -EOVERFLOW;
45993 return -EOVERFLOW;
45994 }
45995 +
45996 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45997 + return 0;
45998 +
45999 dirent = buf->previous;
46000 if (dirent) {
46001 if (__put_user(offset, &dirent->d_off))
46002 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46003 buf.previous = NULL;
46004 buf.count = count;
46005 buf.error = 0;
46006 + buf.file = file;
46007
46008 error = vfs_readdir(file, filldir, &buf);
46009 if (error >= 0)
46010 @@ -228,6 +241,7 @@ out:
46011 struct getdents_callback64 {
46012 struct linux_dirent64 __user * current_dir;
46013 struct linux_dirent64 __user * previous;
46014 + struct file *file;
46015 int count;
46016 int error;
46017 };
46018 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
46019 buf->error = -EINVAL; /* only used if we fail.. */
46020 if (reclen > buf->count)
46021 return -EINVAL;
46022 +
46023 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46024 + return 0;
46025 +
46026 dirent = buf->previous;
46027 if (dirent) {
46028 if (__put_user(offset, &dirent->d_off))
46029 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46030
46031 buf.current_dir = dirent;
46032 buf.previous = NULL;
46033 + buf.file = file;
46034 buf.count = count;
46035 buf.error = 0;
46036
46037 diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
46038 --- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
46039 +++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
46040 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46041 struct reiserfs_dir_entry de;
46042 int ret = 0;
46043
46044 + pax_track_stack();
46045 +
46046 reiserfs_write_lock(inode->i_sb);
46047
46048 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46049 diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
46050 --- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
46051 +++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
46052 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
46053 return;
46054 }
46055
46056 - atomic_inc(&(fs_generation(tb->tb_sb)));
46057 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46058 do_balance_starts(tb);
46059
46060 /* balance leaf returns 0 except if combining L R and S into
46061 diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
46062 --- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
46063 +++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
46064 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
46065 vi->vi_index, vi->vi_type, vi->vi_ih);
46066 }
46067
46068 -static struct item_operations stat_data_ops = {
46069 +static const struct item_operations stat_data_ops = {
46070 .bytes_number = sd_bytes_number,
46071 .decrement_key = sd_decrement_key,
46072 .is_left_mergeable = sd_is_left_mergeable,
46073 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46074 vi->vi_index, vi->vi_type, vi->vi_ih);
46075 }
46076
46077 -static struct item_operations direct_ops = {
46078 +static const struct item_operations direct_ops = {
46079 .bytes_number = direct_bytes_number,
46080 .decrement_key = direct_decrement_key,
46081 .is_left_mergeable = direct_is_left_mergeable,
46082 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46083 vi->vi_index, vi->vi_type, vi->vi_ih);
46084 }
46085
46086 -static struct item_operations indirect_ops = {
46087 +static const struct item_operations indirect_ops = {
46088 .bytes_number = indirect_bytes_number,
46089 .decrement_key = indirect_decrement_key,
46090 .is_left_mergeable = indirect_is_left_mergeable,
46091 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46092 printk("\n");
46093 }
46094
46095 -static struct item_operations direntry_ops = {
46096 +static const struct item_operations direntry_ops = {
46097 .bytes_number = direntry_bytes_number,
46098 .decrement_key = direntry_decrement_key,
46099 .is_left_mergeable = direntry_is_left_mergeable,
46100 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46101 "Invalid item type observed, run fsck ASAP");
46102 }
46103
46104 -static struct item_operations errcatch_ops = {
46105 +static const struct item_operations errcatch_ops = {
46106 errcatch_bytes_number,
46107 errcatch_decrement_key,
46108 errcatch_is_left_mergeable,
46109 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46110 #error Item types must use disk-format assigned values.
46111 #endif
46112
46113 -struct item_operations *item_ops[TYPE_ANY + 1] = {
46114 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46115 &stat_data_ops,
46116 &indirect_ops,
46117 &direct_ops,
46118 diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
46119 --- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46120 +++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46121 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46122 struct buffer_head *bh;
46123 int i, j;
46124
46125 + pax_track_stack();
46126 +
46127 bh = __getblk(dev, block, bufsize);
46128 if (buffer_uptodate(bh))
46129 return (bh);
46130 diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
46131 --- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46132 +++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46133 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46134 unsigned long savelink = 1;
46135 struct timespec ctime;
46136
46137 + pax_track_stack();
46138 +
46139 /* three balancings: (1) old name removal, (2) new name insertion
46140 and (3) maybe "save" link insertion
46141 stat data updates: (1) old directory,
46142 diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
46143 --- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46144 +++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46145 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46146 "SMALL_TAILS " : "NO_TAILS ",
46147 replay_only(sb) ? "REPLAY_ONLY " : "",
46148 convert_reiserfs(sb) ? "CONV " : "",
46149 - atomic_read(&r->s_generation_counter),
46150 + atomic_read_unchecked(&r->s_generation_counter),
46151 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46152 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46153 SF(s_good_search_by_key_reada), SF(s_bmaps),
46154 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46155 struct journal_params *jp = &rs->s_v1.s_journal;
46156 char b[BDEVNAME_SIZE];
46157
46158 + pax_track_stack();
46159 +
46160 seq_printf(m, /* on-disk fields */
46161 "jp_journal_1st_block: \t%i\n"
46162 "jp_journal_dev: \t%s[%x]\n"
46163 diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
46164 --- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46165 +++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46166 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46167 int iter = 0;
46168 #endif
46169
46170 + pax_track_stack();
46171 +
46172 BUG_ON(!th->t_trans_id);
46173
46174 init_tb_struct(th, &s_del_balance, sb, path,
46175 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46176 int retval;
46177 int quota_cut_bytes = 0;
46178
46179 + pax_track_stack();
46180 +
46181 BUG_ON(!th->t_trans_id);
46182
46183 le_key2cpu_key(&cpu_key, key);
46184 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46185 int quota_cut_bytes;
46186 loff_t tail_pos = 0;
46187
46188 + pax_track_stack();
46189 +
46190 BUG_ON(!th->t_trans_id);
46191
46192 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46193 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46194 int retval;
46195 int fs_gen;
46196
46197 + pax_track_stack();
46198 +
46199 BUG_ON(!th->t_trans_id);
46200
46201 fs_gen = get_generation(inode->i_sb);
46202 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46203 int fs_gen = 0;
46204 int quota_bytes = 0;
46205
46206 + pax_track_stack();
46207 +
46208 BUG_ON(!th->t_trans_id);
46209
46210 if (inode) { /* Do we count quotas for item? */
46211 diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
46212 --- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46213 +++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46214 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46215 {.option_name = NULL}
46216 };
46217
46218 + pax_track_stack();
46219 +
46220 *blocks = 0;
46221 if (!options || !*options)
46222 /* use default configuration: create tails, journaling on, no
46223 diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
46224 --- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46225 +++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46226 @@ -20,6 +20,7 @@
46227 #include <linux/module.h>
46228 #include <linux/slab.h>
46229 #include <linux/poll.h>
46230 +#include <linux/security.h>
46231 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46232 #include <linux/file.h>
46233 #include <linux/fdtable.h>
46234 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46235 int retval, i, timed_out = 0;
46236 unsigned long slack = 0;
46237
46238 + pax_track_stack();
46239 +
46240 rcu_read_lock();
46241 retval = max_select_fd(n, fds);
46242 rcu_read_unlock();
46243 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46244 /* Allocate small arguments on the stack to save memory and be faster */
46245 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46246
46247 + pax_track_stack();
46248 +
46249 ret = -EINVAL;
46250 if (n < 0)
46251 goto out_nofds;
46252 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46253 struct poll_list *walk = head;
46254 unsigned long todo = nfds;
46255
46256 + pax_track_stack();
46257 +
46258 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46259 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46260 return -EINVAL;
46261
46262 diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
46263 --- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46264 +++ linux-2.6.32.45/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46265 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46266 return 0;
46267 }
46268 if (!m->buf) {
46269 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46270 + m->size = PAGE_SIZE;
46271 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46272 if (!m->buf)
46273 return -ENOMEM;
46274 }
46275 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46276 Eoverflow:
46277 m->op->stop(m, p);
46278 kfree(m->buf);
46279 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46280 + m->size <<= 1;
46281 + m->buf = kmalloc(m->size, GFP_KERNEL);
46282 return !m->buf ? -ENOMEM : -EAGAIN;
46283 }
46284
46285 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46286 m->version = file->f_version;
46287 /* grab buffer if we didn't have one */
46288 if (!m->buf) {
46289 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46290 + m->size = PAGE_SIZE;
46291 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46292 if (!m->buf)
46293 goto Enomem;
46294 }
46295 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46296 goto Fill;
46297 m->op->stop(m, p);
46298 kfree(m->buf);
46299 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46300 + m->size <<= 1;
46301 + m->buf = kmalloc(m->size, GFP_KERNEL);
46302 if (!m->buf)
46303 goto Enomem;
46304 m->count = 0;
46305 @@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46306 int res = -ENOMEM;
46307
46308 if (op) {
46309 - op->start = single_start;
46310 - op->next = single_next;
46311 - op->stop = single_stop;
46312 - op->show = show;
46313 + *(void **)&op->start = single_start;
46314 + *(void **)&op->next = single_next;
46315 + *(void **)&op->stop = single_stop;
46316 + *(void **)&op->show = show;
46317 res = seq_open(file, op);
46318 if (!res)
46319 ((struct seq_file *)file->private_data)->private = data;
46320 diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
46321 --- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46322 +++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46323 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46324
46325 out:
46326 if (server->local_nls != NULL && server->remote_nls != NULL)
46327 - server->ops->convert = convert_cp;
46328 + *(void **)&server->ops->convert = convert_cp;
46329 else
46330 - server->ops->convert = convert_memcpy;
46331 + *(void **)&server->ops->convert = convert_memcpy;
46332
46333 smb_unlock_server(server);
46334 return n;
46335 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46336
46337 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46338 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46339 - server->ops->getattr = smb_proc_getattr_core;
46340 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
46341 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46342 - server->ops->getattr = smb_proc_getattr_ff;
46343 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46344 }
46345
46346 /* Decode server capabilities */
46347 @@ -3439,7 +3439,7 @@ out:
46348 static void
46349 install_ops(struct smb_ops *dst, struct smb_ops *src)
46350 {
46351 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46352 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46353 }
46354
46355 /* < LANMAN2 */
46356 diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46357 --- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46358 +++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46359 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46360
46361 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46362 {
46363 - char *s = nd_get_link(nd);
46364 + const char *s = nd_get_link(nd);
46365 if (!IS_ERR(s))
46366 __putname(s);
46367 }
46368 diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46369 --- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46370 +++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46371 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46372 pipe_lock(pipe);
46373
46374 for (;;) {
46375 - if (!pipe->readers) {
46376 + if (!atomic_read(&pipe->readers)) {
46377 send_sig(SIGPIPE, current, 0);
46378 if (!ret)
46379 ret = -EPIPE;
46380 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46381 do_wakeup = 0;
46382 }
46383
46384 - pipe->waiting_writers++;
46385 + atomic_inc(&pipe->waiting_writers);
46386 pipe_wait(pipe);
46387 - pipe->waiting_writers--;
46388 + atomic_dec(&pipe->waiting_writers);
46389 }
46390
46391 pipe_unlock(pipe);
46392 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46393 .spd_release = spd_release_page,
46394 };
46395
46396 + pax_track_stack();
46397 +
46398 index = *ppos >> PAGE_CACHE_SHIFT;
46399 loff = *ppos & ~PAGE_CACHE_MASK;
46400 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46401 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46402 old_fs = get_fs();
46403 set_fs(get_ds());
46404 /* The cast to a user pointer is valid due to the set_fs() */
46405 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46406 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46407 set_fs(old_fs);
46408
46409 return res;
46410 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46411 old_fs = get_fs();
46412 set_fs(get_ds());
46413 /* The cast to a user pointer is valid due to the set_fs() */
46414 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46415 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46416 set_fs(old_fs);
46417
46418 return res;
46419 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46420 .spd_release = spd_release_page,
46421 };
46422
46423 + pax_track_stack();
46424 +
46425 index = *ppos >> PAGE_CACHE_SHIFT;
46426 offset = *ppos & ~PAGE_CACHE_MASK;
46427 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46428 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46429 goto err;
46430
46431 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46432 - vec[i].iov_base = (void __user *) page_address(page);
46433 + vec[i].iov_base = (__force void __user *) page_address(page);
46434 vec[i].iov_len = this_len;
46435 pages[i] = page;
46436 spd.nr_pages++;
46437 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46438 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46439 {
46440 while (!pipe->nrbufs) {
46441 - if (!pipe->writers)
46442 + if (!atomic_read(&pipe->writers))
46443 return 0;
46444
46445 - if (!pipe->waiting_writers && sd->num_spliced)
46446 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46447 return 0;
46448
46449 if (sd->flags & SPLICE_F_NONBLOCK)
46450 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46451 * out of the pipe right after the splice_to_pipe(). So set
46452 * PIPE_READERS appropriately.
46453 */
46454 - pipe->readers = 1;
46455 + atomic_set(&pipe->readers, 1);
46456
46457 current->splice_pipe = pipe;
46458 }
46459 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46460 .spd_release = spd_release_page,
46461 };
46462
46463 + pax_track_stack();
46464 +
46465 pipe = pipe_info(file->f_path.dentry->d_inode);
46466 if (!pipe)
46467 return -EBADF;
46468 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46469 ret = -ERESTARTSYS;
46470 break;
46471 }
46472 - if (!pipe->writers)
46473 + if (!atomic_read(&pipe->writers))
46474 break;
46475 - if (!pipe->waiting_writers) {
46476 + if (!atomic_read(&pipe->waiting_writers)) {
46477 if (flags & SPLICE_F_NONBLOCK) {
46478 ret = -EAGAIN;
46479 break;
46480 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46481 pipe_lock(pipe);
46482
46483 while (pipe->nrbufs >= PIPE_BUFFERS) {
46484 - if (!pipe->readers) {
46485 + if (!atomic_read(&pipe->readers)) {
46486 send_sig(SIGPIPE, current, 0);
46487 ret = -EPIPE;
46488 break;
46489 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46490 ret = -ERESTARTSYS;
46491 break;
46492 }
46493 - pipe->waiting_writers++;
46494 + atomic_inc(&pipe->waiting_writers);
46495 pipe_wait(pipe);
46496 - pipe->waiting_writers--;
46497 + atomic_dec(&pipe->waiting_writers);
46498 }
46499
46500 pipe_unlock(pipe);
46501 @@ -1785,14 +1791,14 @@ retry:
46502 pipe_double_lock(ipipe, opipe);
46503
46504 do {
46505 - if (!opipe->readers) {
46506 + if (!atomic_read(&opipe->readers)) {
46507 send_sig(SIGPIPE, current, 0);
46508 if (!ret)
46509 ret = -EPIPE;
46510 break;
46511 }
46512
46513 - if (!ipipe->nrbufs && !ipipe->writers)
46514 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46515 break;
46516
46517 /*
46518 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46519 pipe_double_lock(ipipe, opipe);
46520
46521 do {
46522 - if (!opipe->readers) {
46523 + if (!atomic_read(&opipe->readers)) {
46524 send_sig(SIGPIPE, current, 0);
46525 if (!ret)
46526 ret = -EPIPE;
46527 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46528 * return EAGAIN if we have the potential of some data in the
46529 * future, otherwise just return 0
46530 */
46531 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46532 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46533 ret = -EAGAIN;
46534
46535 pipe_unlock(ipipe);
46536 diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46537 --- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46538 +++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46539 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46540
46541 struct sysfs_open_dirent {
46542 atomic_t refcnt;
46543 - atomic_t event;
46544 + atomic_unchecked_t event;
46545 wait_queue_head_t poll;
46546 struct list_head buffers; /* goes through sysfs_buffer.list */
46547 };
46548 @@ -53,7 +53,7 @@ struct sysfs_buffer {
46549 size_t count;
46550 loff_t pos;
46551 char * page;
46552 - struct sysfs_ops * ops;
46553 + const struct sysfs_ops * ops;
46554 struct mutex mutex;
46555 int needs_read_fill;
46556 int event;
46557 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46558 {
46559 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46560 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46561 - struct sysfs_ops * ops = buffer->ops;
46562 + const struct sysfs_ops * ops = buffer->ops;
46563 int ret = 0;
46564 ssize_t count;
46565
46566 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46567 if (!sysfs_get_active_two(attr_sd))
46568 return -ENODEV;
46569
46570 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46571 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46572 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46573
46574 sysfs_put_active_two(attr_sd);
46575 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46576 {
46577 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46578 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46579 - struct sysfs_ops * ops = buffer->ops;
46580 + const struct sysfs_ops * ops = buffer->ops;
46581 int rc;
46582
46583 /* need attr_sd for attr and ops, its parent for kobj */
46584 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46585 return -ENOMEM;
46586
46587 atomic_set(&new_od->refcnt, 0);
46588 - atomic_set(&new_od->event, 1);
46589 + atomic_set_unchecked(&new_od->event, 1);
46590 init_waitqueue_head(&new_od->poll);
46591 INIT_LIST_HEAD(&new_od->buffers);
46592 goto retry;
46593 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46594 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46595 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46596 struct sysfs_buffer *buffer;
46597 - struct sysfs_ops *ops;
46598 + const struct sysfs_ops *ops;
46599 int error = -EACCES;
46600 char *p;
46601
46602 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46603
46604 sysfs_put_active_two(attr_sd);
46605
46606 - if (buffer->event != atomic_read(&od->event))
46607 + if (buffer->event != atomic_read_unchecked(&od->event))
46608 goto trigger;
46609
46610 return DEFAULT_POLLMASK;
46611 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46612
46613 od = sd->s_attr.open;
46614 if (od) {
46615 - atomic_inc(&od->event);
46616 + atomic_inc_unchecked(&od->event);
46617 wake_up_interruptible(&od->poll);
46618 }
46619
46620 diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46621 --- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46622 +++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46623 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46624 .s_name = "",
46625 .s_count = ATOMIC_INIT(1),
46626 .s_flags = SYSFS_DIR,
46627 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46628 + .s_mode = S_IFDIR | S_IRWXU,
46629 +#else
46630 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46631 +#endif
46632 .s_ino = 1,
46633 };
46634
46635 diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46636 --- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46637 +++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46638 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46639
46640 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46641 {
46642 - char *page = nd_get_link(nd);
46643 + const char *page = nd_get_link(nd);
46644 if (!IS_ERR(page))
46645 free_page((unsigned long)page);
46646 }
46647 diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46648 --- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46649 +++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46650 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46651
46652 mutex_lock(&sbi->s_alloc_mutex);
46653 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46654 - if (bloc->logicalBlockNum < 0 ||
46655 - (bloc->logicalBlockNum + count) >
46656 - partmap->s_partition_len) {
46657 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46658 udf_debug("%d < %d || %d + %d > %d\n",
46659 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46660 count, partmap->s_partition_len);
46661 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46662
46663 mutex_lock(&sbi->s_alloc_mutex);
46664 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46665 - if (bloc->logicalBlockNum < 0 ||
46666 - (bloc->logicalBlockNum + count) >
46667 - partmap->s_partition_len) {
46668 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46669 udf_debug("%d < %d || %d + %d > %d\n",
46670 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46671 partmap->s_partition_len);
46672 diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46673 --- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46674 +++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46675 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46676 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46677 int lastblock = 0;
46678
46679 + pax_track_stack();
46680 +
46681 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46682 prev_epos.block = iinfo->i_location;
46683 prev_epos.bh = NULL;
46684 diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46685 --- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46686 +++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46687 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46688
46689 u8 udf_tag_checksum(const struct tag *t)
46690 {
46691 - u8 *data = (u8 *)t;
46692 + const u8 *data = (const u8 *)t;
46693 u8 checksum = 0;
46694 int i;
46695 for (i = 0; i < sizeof(struct tag); ++i)
46696 diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46697 --- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46698 +++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46699 @@ -1,6 +1,7 @@
46700 #include <linux/compiler.h>
46701 #include <linux/file.h>
46702 #include <linux/fs.h>
46703 +#include <linux/security.h>
46704 #include <linux/linkage.h>
46705 #include <linux/mount.h>
46706 #include <linux/namei.h>
46707 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46708 goto mnt_drop_write_and_out;
46709 }
46710 }
46711 +
46712 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46713 + error = -EACCES;
46714 + goto mnt_drop_write_and_out;
46715 + }
46716 +
46717 mutex_lock(&inode->i_mutex);
46718 error = notify_change(path->dentry, &newattrs);
46719 mutex_unlock(&inode->i_mutex);
46720 diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46721 --- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46722 +++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46723 @@ -17,8 +17,8 @@
46724 struct posix_acl *
46725 posix_acl_from_xattr(const void *value, size_t size)
46726 {
46727 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46728 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46729 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46730 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46731 int count;
46732 struct posix_acl *acl;
46733 struct posix_acl_entry *acl_e;
46734 diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46735 --- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46736 +++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46737 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46738 * Extended attribute SET operations
46739 */
46740 static long
46741 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46742 +setxattr(struct path *path, const char __user *name, const void __user *value,
46743 size_t size, int flags)
46744 {
46745 int error;
46746 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46747 return PTR_ERR(kvalue);
46748 }
46749
46750 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46751 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46752 + error = -EACCES;
46753 + goto out;
46754 + }
46755 +
46756 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46757 +out:
46758 kfree(kvalue);
46759 return error;
46760 }
46761 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46762 return error;
46763 error = mnt_want_write(path.mnt);
46764 if (!error) {
46765 - error = setxattr(path.dentry, name, value, size, flags);
46766 + error = setxattr(&path, name, value, size, flags);
46767 mnt_drop_write(path.mnt);
46768 }
46769 path_put(&path);
46770 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46771 return error;
46772 error = mnt_want_write(path.mnt);
46773 if (!error) {
46774 - error = setxattr(path.dentry, name, value, size, flags);
46775 + error = setxattr(&path, name, value, size, flags);
46776 mnt_drop_write(path.mnt);
46777 }
46778 path_put(&path);
46779 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46780 const void __user *,value, size_t, size, int, flags)
46781 {
46782 struct file *f;
46783 - struct dentry *dentry;
46784 int error = -EBADF;
46785
46786 f = fget(fd);
46787 if (!f)
46788 return error;
46789 - dentry = f->f_path.dentry;
46790 - audit_inode(NULL, dentry);
46791 + audit_inode(NULL, f->f_path.dentry);
46792 error = mnt_want_write_file(f);
46793 if (!error) {
46794 - error = setxattr(dentry, name, value, size, flags);
46795 + error = setxattr(&f->f_path, name, value, size, flags);
46796 mnt_drop_write(f->f_path.mnt);
46797 }
46798 fput(f);
46799 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46800 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46801 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46802 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46803 xfs_fsop_geom_t fsgeo;
46804 int error;
46805
46806 + memset(&fsgeo, 0, sizeof(fsgeo));
46807 error = xfs_fs_geometry(mp, &fsgeo, 3);
46808 if (error)
46809 return -error;
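xfs_compat_ioc_fsgeometry_v1() above gains a memset() of the on-stack geometry struct before it is filled in and copied out, so padding or fields the fill routine skips cannot leak stale stack bytes. The sketch below shows the same zero-then-fill-then-copy order, with memcpy standing in for copy_to_user().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct geometry {
    uint32_t block_size;
    uint16_t version;
    /* the compiler may insert padding here; it must not carry old data */
    uint64_t block_count;
};

/* Fill only some members, as the real geometry helper might. */
static void fill_geometry(struct geometry *g)
{
    g->block_size = 4096;
    g->block_count = 1 << 20;
    /* version (and any padding) deliberately left untouched */
}

static void export_geometry(void *user_buf)
{
    struct geometry g;

    memset(&g, 0, sizeof(g));        /* zero everything, padding included */
    fill_geometry(&g);
    memcpy(user_buf, &g, sizeof(g)); /* stands in for copy_to_user() */
}

int main(void)
{
    unsigned char buf[sizeof(struct geometry)];
    export_geometry(buf);
    printf("exported %zu bytes with no uninitialized content\n", sizeof(buf));
    return 0;
}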
46810 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46811 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46812 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46813 @@ -134,7 +134,7 @@ xfs_find_handle(
46814 }
46815
46816 error = -EFAULT;
46817 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46818 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46819 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46820 goto out_put;
46821
46822 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46823 if (IS_ERR(dentry))
46824 return PTR_ERR(dentry);
46825
46826 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46827 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46828 if (!kbuf)
46829 goto out_dput;
46830
46831 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46832 xfs_mount_t *mp,
46833 void __user *arg)
46834 {
46835 - xfs_fsop_geom_t fsgeo;
46836 + xfs_fsop_geom_t fsgeo;
46837 int error;
46838
46839 error = xfs_fs_geometry(mp, &fsgeo, 3);
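The xfs_ioctl.c hunks above add two related hardenings: the caller-supplied handle size is rejected when it exceeds sizeof(handle) before copy_to_user(), and the attribute-list buffer is allocated with kzalloc() so unwritten tail bytes copy out as zeros. A small sketch of the size clamp is below, again with memcpy standing in for copy_to_user().

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct handle { unsigned char bytes[24]; };

/* Refuse to copy more than the kernel-side object actually holds. */
static int copy_handle_out(void *dst, size_t requested,
                           const struct handle *src)
{
    if (requested > sizeof(*src))
        return -EFAULT;              /* same rejection as the patched check */
    memcpy(dst, src, requested);     /* stands in for copy_to_user() */
    return 0;
}

int main(void)
{
    struct handle h = { { 0 } };
    unsigned char out[64];

    printf("ok copy:   %d\n", copy_handle_out(out, sizeof(h), &h));
    printf("oversized: %d\n", copy_handle_out(out, sizeof(out), &h));
    return 0;
}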
46840 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46841 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46842 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46843 @@ -468,7 +468,7 @@ xfs_vn_put_link(
46844 struct nameidata *nd,
46845 void *p)
46846 {
46847 - char *s = nd_get_link(nd);
46848 + const char *s = nd_get_link(nd);
46849
46850 if (!IS_ERR(s))
46851 kfree(s);
46852 diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46853 --- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46854 +++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46855 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46856 int nmap,
46857 int ret_nmap);
46858 #else
46859 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46860 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46861 #endif /* DEBUG */
46862
46863 #if defined(XFS_RW_TRACE)
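Replacing the empty xfs_bmap_validate_ret() definition with do {} while (0) above is the conventional way to give a no-op macro statement semantics: the call site still needs its trailing semicolon, it nests in unbraced if/else the same way a function call would, and it avoids leaving a bare null statement behind that some compilers warn about. A minimal compile-time illustration:

#include <stdio.h>

/* Debug builds would define this to a real checker; other builds get a
   statement-shaped no-op rather than nothing at all. */
#define validate_ret(bno, len) do { } while (0)

int main(void)
{
    int debug = 0;

    if (debug)
        validate_ret(1, 2);   /* expands to a real (empty) statement */
    else
        printf("validation compiled out\n");

    return 0;
}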
46864 diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46865 --- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46866 +++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46867 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46868 }
46869
46870 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46871 - if (filldir(dirent, sfep->name, sfep->namelen,
46872 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46873 + char name[sfep->namelen];
46874 + memcpy(name, sfep->name, sfep->namelen);
46875 + if (filldir(dirent, name, sfep->namelen,
46876 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46877 + *offset = off & 0x7fffffff;
46878 + return 0;
46879 + }
46880 + } else if (filldir(dirent, sfep->name, sfep->namelen,
46881 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46882 *offset = off & 0x7fffffff;
46883 return 0;
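In the xfs_dir2_sf_getdents() hunk above, when the entry name lives inside the inode's inline-data area it is first copied into an on-stack buffer and that copy is handed to filldir(), rather than pointing the callback directly at the inline region. A generic bounce-buffer sketch of that pattern follows; the callback type and the fixed size limit are illustrative (the patch itself sizes the buffer to the name length).

#include <stdio.h>
#include <string.h>

typedef int (*fill_fn)(void *ctx, const char *name, size_t namelen);

/* Hand the callback a private copy of the name instead of a pointer into
   the original storage. */
static int emit_entry(fill_fn fill, void *ctx,
                      const char *name, size_t namelen)
{
    char bounce[256];                 /* illustrative fixed bound */

    if (namelen >= sizeof(bounce))
        return -1;
    memcpy(bounce, name, namelen);
    bounce[namelen] = '\0';
    return fill(ctx, bounce, namelen);
}

static int print_entry(void *ctx, const char *name, size_t namelen)
{
    (void)ctx;
    printf("entry: %.*s\n", (int)namelen, name);
    return 0;
}

int main(void)
{
    return emit_entry(print_entry, NULL, "README", 6);
}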
46884 diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46885 --- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46886 +++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46887 @@ -0,0 +1,105 @@
46888 +#include <linux/kernel.h>
46889 +#include <linux/mm.h>
46890 +#include <linux/slab.h>
46891 +#include <linux/vmalloc.h>
46892 +#include <linux/gracl.h>
46893 +#include <linux/grsecurity.h>
46894 +
46895 +static unsigned long alloc_stack_next = 1;
46896 +static unsigned long alloc_stack_size = 1;
46897 +static void **alloc_stack;
46898 +
46899 +static __inline__ int
46900 +alloc_pop(void)
46901 +{
46902 + if (alloc_stack_next == 1)
46903 + return 0;
46904 +
46905 + kfree(alloc_stack[alloc_stack_next - 2]);
46906 +
46907 + alloc_stack_next--;
46908 +
46909 + return 1;
46910 +}
46911 +
46912 +static __inline__ int
46913 +alloc_push(void *buf)
46914 +{
46915 + if (alloc_stack_next >= alloc_stack_size)
46916 + return 1;
46917 +
46918 + alloc_stack[alloc_stack_next - 1] = buf;
46919 +
46920 + alloc_stack_next++;
46921 +
46922 + return 0;
46923 +}
46924 +
46925 +void *
46926 +acl_alloc(unsigned long len)
46927 +{
46928 + void *ret = NULL;
46929 +
46930 + if (!len || len > PAGE_SIZE)
46931 + goto out;
46932 +
46933 + ret = kmalloc(len, GFP_KERNEL);
46934 +
46935 + if (ret) {
46936 + if (alloc_push(ret)) {
46937 + kfree(ret);
46938 + ret = NULL;
46939 + }
46940 + }
46941 +
46942 +out:
46943 + return ret;
46944 +}
46945 +
46946 +void *
46947 +acl_alloc_num(unsigned long num, unsigned long len)
46948 +{
46949 + if (!len || (num > (PAGE_SIZE / len)))
46950 + return NULL;
46951 +
46952 + return acl_alloc(num * len);
46953 +}
46954 +
46955 +void
46956 +acl_free_all(void)
46957 +{
46958 + if (gr_acl_is_enabled() || !alloc_stack)
46959 + return;
46960 +
46961 + while (alloc_pop()) ;
46962 +
46963 + if (alloc_stack) {
46964 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46965 + kfree(alloc_stack);
46966 + else
46967 + vfree(alloc_stack);
46968 + }
46969 +
46970 + alloc_stack = NULL;
46971 + alloc_stack_size = 1;
46972 + alloc_stack_next = 1;
46973 +
46974 + return;
46975 +}
46976 +
46977 +int
46978 +acl_alloc_stack_init(unsigned long size)
46979 +{
46980 + if ((size * sizeof (void *)) <= PAGE_SIZE)
46981 + alloc_stack =
46982 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46983 + else
46984 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
46985 +
46986 + alloc_stack_size = size;
46987 +
46988 + if (!alloc_stack)
46989 + return 0;
46990 + else
46991 + return 1;
46992 +}
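gracl_alloc.c above implements a simple allocation tracker: every acl_alloc() result is pushed onto a pre-sized pointer stack so the whole policy can later be torn down with a single acl_free_all() pass. Below is a user-space sketch of the same push-then-bulk-free idea, under the assumption that the capacity is known up front.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void **track_stack;
static size_t track_next, track_cap;

/* Size the tracker once, before any tracked allocation is made. */
static int track_init(size_t capacity)
{
    track_stack = calloc(capacity, sizeof(void *));
    track_cap = capacity;
    track_next = 0;
    return track_stack != NULL;
}

/* Allocate and remember the pointer; fail cleanly if the stack is full. */
static void *tracked_alloc(size_t len)
{
    void *p;

    if (track_next >= track_cap)
        return NULL;
    p = malloc(len);
    if (p)
        track_stack[track_next++] = p;
    return p;
}

/* Release every tracked allocation, then the tracker itself. */
static void tracked_free_all(void)
{
    while (track_next)
        free(track_stack[--track_next]);
    free(track_stack);
    track_stack = NULL;
    track_cap = 0;
}

int main(void)
{
    char *msg;

    if (!track_init(8))
        return 1;
    msg = tracked_alloc(16);
    if (msg)
        strcpy(msg, "policy object");
    tracked_alloc(64);
    tracked_free_all();               /* one call frees everything */
    puts("all tracked allocations released");
    return 0;
}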
46993 diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46994 --- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46995 +++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46996 @@ -0,0 +1,4082 @@
46997 +#include <linux/kernel.h>
46998 +#include <linux/module.h>
46999 +#include <linux/sched.h>
47000 +#include <linux/mm.h>
47001 +#include <linux/file.h>
47002 +#include <linux/fs.h>
47003 +#include <linux/namei.h>
47004 +#include <linux/mount.h>
47005 +#include <linux/tty.h>
47006 +#include <linux/proc_fs.h>
47007 +#include <linux/smp_lock.h>
47008 +#include <linux/slab.h>
47009 +#include <linux/vmalloc.h>
47010 +#include <linux/types.h>
47011 +#include <linux/sysctl.h>
47012 +#include <linux/netdevice.h>
47013 +#include <linux/ptrace.h>
47014 +#include <linux/gracl.h>
47015 +#include <linux/gralloc.h>
47016 +#include <linux/grsecurity.h>
47017 +#include <linux/grinternal.h>
47018 +#include <linux/pid_namespace.h>
47019 +#include <linux/fdtable.h>
47020 +#include <linux/percpu.h>
47021 +
47022 +#include <asm/uaccess.h>
47023 +#include <asm/errno.h>
47024 +#include <asm/mman.h>
47025 +
47026 +static struct acl_role_db acl_role_set;
47027 +static struct name_db name_set;
47028 +static struct inodev_db inodev_set;
47029 +
47030 +/* for keeping track of userspace pointers used for subjects, so we
47031 + can share references in the kernel as well
47032 +*/
47033 +
47034 +static struct dentry *real_root;
47035 +static struct vfsmount *real_root_mnt;
47036 +
47037 +static struct acl_subj_map_db subj_map_set;
47038 +
47039 +static struct acl_role_label *default_role;
47040 +
47041 +static struct acl_role_label *role_list;
47042 +
47043 +static u16 acl_sp_role_value;
47044 +
47045 +extern char *gr_shared_page[4];
47046 +static DEFINE_MUTEX(gr_dev_mutex);
47047 +DEFINE_RWLOCK(gr_inode_lock);
47048 +
47049 +struct gr_arg *gr_usermode;
47050 +
47051 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
47052 +
47053 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47054 +extern void gr_clear_learn_entries(void);
47055 +
47056 +#ifdef CONFIG_GRKERNSEC_RESLOG
47057 +extern void gr_log_resource(const struct task_struct *task,
47058 + const int res, const unsigned long wanted, const int gt);
47059 +#endif
47060 +
47061 +unsigned char *gr_system_salt;
47062 +unsigned char *gr_system_sum;
47063 +
47064 +static struct sprole_pw **acl_special_roles = NULL;
47065 +static __u16 num_sprole_pws = 0;
47066 +
47067 +static struct acl_role_label *kernel_role = NULL;
47068 +
47069 +static unsigned int gr_auth_attempts = 0;
47070 +static unsigned long gr_auth_expires = 0UL;
47071 +
47072 +#ifdef CONFIG_NET
47073 +extern struct vfsmount *sock_mnt;
47074 +#endif
47075 +extern struct vfsmount *pipe_mnt;
47076 +extern struct vfsmount *shm_mnt;
47077 +#ifdef CONFIG_HUGETLBFS
47078 +extern struct vfsmount *hugetlbfs_vfsmount;
47079 +#endif
47080 +
47081 +static struct acl_object_label *fakefs_obj_rw;
47082 +static struct acl_object_label *fakefs_obj_rwx;
47083 +
47084 +extern int gr_init_uidset(void);
47085 +extern void gr_free_uidset(void);
47086 +extern void gr_remove_uid(uid_t uid);
47087 +extern int gr_find_uid(uid_t uid);
47088 +
47089 +__inline__ int
47090 +gr_acl_is_enabled(void)
47091 +{
47092 + return (gr_status & GR_READY);
47093 +}
47094 +
47095 +#ifdef CONFIG_BTRFS_FS
47096 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47097 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47098 +#endif
47099 +
47100 +static inline dev_t __get_dev(const struct dentry *dentry)
47101 +{
47102 +#ifdef CONFIG_BTRFS_FS
47103 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47104 + return get_btrfs_dev_from_inode(dentry->d_inode);
47105 + else
47106 +#endif
47107 + return dentry->d_inode->i_sb->s_dev;
47108 +}
47109 +
47110 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47111 +{
47112 + return __get_dev(dentry);
47113 +}
47114 +
47115 +static char gr_task_roletype_to_char(struct task_struct *task)
47116 +{
47117 + switch (task->role->roletype &
47118 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47119 + GR_ROLE_SPECIAL)) {
47120 + case GR_ROLE_DEFAULT:
47121 + return 'D';
47122 + case GR_ROLE_USER:
47123 + return 'U';
47124 + case GR_ROLE_GROUP:
47125 + return 'G';
47126 + case GR_ROLE_SPECIAL:
47127 + return 'S';
47128 + }
47129 +
47130 + return 'X';
47131 +}
47132 +
47133 +char gr_roletype_to_char(void)
47134 +{
47135 + return gr_task_roletype_to_char(current);
47136 +}
47137 +
47138 +__inline__ int
47139 +gr_acl_tpe_check(void)
47140 +{
47141 + if (unlikely(!(gr_status & GR_READY)))
47142 + return 0;
47143 + if (current->role->roletype & GR_ROLE_TPE)
47144 + return 1;
47145 + else
47146 + return 0;
47147 +}
47148 +
47149 +int
47150 +gr_handle_rawio(const struct inode *inode)
47151 +{
47152 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47153 + if (inode && S_ISBLK(inode->i_mode) &&
47154 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47155 + !capable(CAP_SYS_RAWIO))
47156 + return 1;
47157 +#endif
47158 + return 0;
47159 +}
47160 +
47161 +static int
47162 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47163 +{
47164 + if (likely(lena != lenb))
47165 + return 0;
47166 +
47167 + return !memcmp(a, b, lena);
47168 +}
47169 +
47170 +/* this must be called with vfsmount_lock and dcache_lock held */
47171 +
47172 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47173 + struct dentry *root, struct vfsmount *rootmnt,
47174 + char *buffer, int buflen)
47175 +{
47176 + char * end = buffer+buflen;
47177 + char * retval;
47178 + int namelen;
47179 +
47180 + *--end = '\0';
47181 + buflen--;
47182 +
47183 + if (buflen < 1)
47184 + goto Elong;
47185 + /* Get '/' right */
47186 + retval = end-1;
47187 + *retval = '/';
47188 +
47189 + for (;;) {
47190 + struct dentry * parent;
47191 +
47192 + if (dentry == root && vfsmnt == rootmnt)
47193 + break;
47194 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47195 + /* Global root? */
47196 + if (vfsmnt->mnt_parent == vfsmnt)
47197 + goto global_root;
47198 + dentry = vfsmnt->mnt_mountpoint;
47199 + vfsmnt = vfsmnt->mnt_parent;
47200 + continue;
47201 + }
47202 + parent = dentry->d_parent;
47203 + prefetch(parent);
47204 + namelen = dentry->d_name.len;
47205 + buflen -= namelen + 1;
47206 + if (buflen < 0)
47207 + goto Elong;
47208 + end -= namelen;
47209 + memcpy(end, dentry->d_name.name, namelen);
47210 + *--end = '/';
47211 + retval = end;
47212 + dentry = parent;
47213 + }
47214 +
47215 +out:
47216 + return retval;
47217 +
47218 +global_root:
47219 + namelen = dentry->d_name.len;
47220 + buflen -= namelen;
47221 + if (buflen < 0)
47222 + goto Elong;
47223 + retval -= namelen-1; /* hit the slash */
47224 + memcpy(retval, dentry->d_name.name, namelen);
47225 + goto out;
47226 +Elong:
47227 + retval = ERR_PTR(-ENAMETOOLONG);
47228 + goto out;
47229 +}
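__our_d_path() above assembles the pathname by walking from the dentry up through d_parent and mount parents, writing each component backwards from the end of the caller's buffer. The standalone sketch below builds a path the same right-to-left way from an array of components ordered leaf-first, which is the order a parent walk produces.

#include <stdio.h>
#include <string.h>

/* Build "/a/b/c" into buf by writing components from the end backwards.
   comps[] is leaf-first, e.g. { "c", "b", "a" }. Returns NULL on overflow. */
static char *build_path(char *buf, size_t buflen,
                        const char *const comps[], size_t n)
{
    char *end = buf + buflen;
    char *p;
    size_t i;

    if (buflen < 2)
        return NULL;
    *--end = '\0';
    p = end;

    if (n == 0) {                     /* the root itself: just "/" */
        *--p = '/';
        return p;
    }

    for (i = 0; i < n; i++) {
        size_t len = strlen(comps[i]);

        if ((size_t)(p - buf) < len + 1)
            return NULL;              /* would not fit: path too long */
        p -= len;
        memcpy(p, comps[i], len);
        *--p = '/';
    }
    return p;
}

int main(void)
{
    const char *leaf_first[] = { "passwd", "etc" };   /* -> /etc/passwd */
    char buf[64];
    char *path = build_path(buf, sizeof(buf), leaf_first, 2);

    printf("%s\n", path ? path : "<path too long>");
    return 0;
}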
47230 +
47231 +static char *
47232 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47233 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47234 +{
47235 + char *retval;
47236 +
47237 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47238 + if (unlikely(IS_ERR(retval)))
47239 + retval = strcpy(buf, "<path too long>");
47240 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47241 + retval[1] = '\0';
47242 +
47243 + return retval;
47244 +}
47245 +
47246 +static char *
47247 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47248 + char *buf, int buflen)
47249 +{
47250 + char *res;
47251 +
47252 + /* we can use real_root, real_root_mnt, because this is only called
47253 + by the RBAC system */
47254 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47255 +
47256 + return res;
47257 +}
47258 +
47259 +static char *
47260 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47261 + char *buf, int buflen)
47262 +{
47263 + char *res;
47264 + struct dentry *root;
47265 + struct vfsmount *rootmnt;
47266 + struct task_struct *reaper = &init_task;
47267 +
47268 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47269 + read_lock(&reaper->fs->lock);
47270 + root = dget(reaper->fs->root.dentry);
47271 + rootmnt = mntget(reaper->fs->root.mnt);
47272 + read_unlock(&reaper->fs->lock);
47273 +
47274 + spin_lock(&dcache_lock);
47275 + spin_lock(&vfsmount_lock);
47276 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47277 + spin_unlock(&vfsmount_lock);
47278 + spin_unlock(&dcache_lock);
47279 +
47280 + dput(root);
47281 + mntput(rootmnt);
47282 + return res;
47283 +}
47284 +
47285 +static char *
47286 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47287 +{
47288 + char *ret;
47289 + spin_lock(&dcache_lock);
47290 + spin_lock(&vfsmount_lock);
47291 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47292 + PAGE_SIZE);
47293 + spin_unlock(&vfsmount_lock);
47294 + spin_unlock(&dcache_lock);
47295 + return ret;
47296 +}
47297 +
47298 +char *
47299 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47300 +{
47301 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47302 + PAGE_SIZE);
47303 +}
47304 +
47305 +char *
47306 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47307 +{
47308 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47309 + PAGE_SIZE);
47310 +}
47311 +
47312 +char *
47313 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47314 +{
47315 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47316 + PAGE_SIZE);
47317 +}
47318 +
47319 +char *
47320 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47321 +{
47322 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47323 + PAGE_SIZE);
47324 +}
47325 +
47326 +char *
47327 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47328 +{
47329 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47330 + PAGE_SIZE);
47331 +}
47332 +
47333 +__inline__ __u32
47334 +to_gr_audit(const __u32 reqmode)
47335 +{
47336 + /* masks off auditable permission flags, then shifts them to create
47337 + auditing flags, and adds the special case of append auditing if
47338 + we're requesting write */
47339 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47340 +}
47341 +
47342 +struct acl_subject_label *
47343 +lookup_subject_map(const struct acl_subject_label *userp)
47344 +{
47345 + unsigned int index = shash(userp, subj_map_set.s_size);
47346 + struct subject_map *match;
47347 +
47348 + match = subj_map_set.s_hash[index];
47349 +
47350 + while (match && match->user != userp)
47351 + match = match->next;
47352 +
47353 + if (match != NULL)
47354 + return match->kernel;
47355 + else
47356 + return NULL;
47357 +}
47358 +
47359 +static void
47360 +insert_subj_map_entry(struct subject_map *subjmap)
47361 +{
47362 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47363 + struct subject_map **curr;
47364 +
47365 + subjmap->prev = NULL;
47366 +
47367 + curr = &subj_map_set.s_hash[index];
47368 + if (*curr != NULL)
47369 + (*curr)->prev = subjmap;
47370 +
47371 + subjmap->next = *curr;
47372 + *curr = subjmap;
47373 +
47374 + return;
47375 +}
47376 +
47377 +static struct acl_role_label *
47378 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47379 + const gid_t gid)
47380 +{
47381 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47382 + struct acl_role_label *match;
47383 + struct role_allowed_ip *ipp;
47384 + unsigned int x;
47385 + u32 curr_ip = task->signal->curr_ip;
47386 +
47387 + task->signal->saved_ip = curr_ip;
47388 +
47389 + match = acl_role_set.r_hash[index];
47390 +
47391 + while (match) {
47392 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47393 + for (x = 0; x < match->domain_child_num; x++) {
47394 + if (match->domain_children[x] == uid)
47395 + goto found;
47396 + }
47397 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47398 + break;
47399 + match = match->next;
47400 + }
47401 +found:
47402 + if (match == NULL) {
47403 + try_group:
47404 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47405 + match = acl_role_set.r_hash[index];
47406 +
47407 + while (match) {
47408 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47409 + for (x = 0; x < match->domain_child_num; x++) {
47410 + if (match->domain_children[x] == gid)
47411 + goto found2;
47412 + }
47413 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47414 + break;
47415 + match = match->next;
47416 + }
47417 +found2:
47418 + if (match == NULL)
47419 + match = default_role;
47420 + if (match->allowed_ips == NULL)
47421 + return match;
47422 + else {
47423 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47424 + if (likely
47425 + ((ntohl(curr_ip) & ipp->netmask) ==
47426 + (ntohl(ipp->addr) & ipp->netmask)))
47427 + return match;
47428 + }
47429 + match = default_role;
47430 + }
47431 + } else if (match->allowed_ips == NULL) {
47432 + return match;
47433 + } else {
47434 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47435 + if (likely
47436 + ((ntohl(curr_ip) & ipp->netmask) ==
47437 + (ntohl(ipp->addr) & ipp->netmask)))
47438 + return match;
47439 + }
47440 + goto try_group;
47441 + }
47442 +
47443 + return match;
47444 +}
47445 +
47446 +struct acl_subject_label *
47447 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47448 + const struct acl_role_label *role)
47449 +{
47450 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47451 + struct acl_subject_label *match;
47452 +
47453 + match = role->subj_hash[index];
47454 +
47455 + while (match && (match->inode != ino || match->device != dev ||
47456 + (match->mode & GR_DELETED))) {
47457 + match = match->next;
47458 + }
47459 +
47460 + if (match && !(match->mode & GR_DELETED))
47461 + return match;
47462 + else
47463 + return NULL;
47464 +}
47465 +
47466 +struct acl_subject_label *
47467 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47468 + const struct acl_role_label *role)
47469 +{
47470 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47471 + struct acl_subject_label *match;
47472 +
47473 + match = role->subj_hash[index];
47474 +
47475 + while (match && (match->inode != ino || match->device != dev ||
47476 + !(match->mode & GR_DELETED))) {
47477 + match = match->next;
47478 + }
47479 +
47480 + if (match && (match->mode & GR_DELETED))
47481 + return match;
47482 + else
47483 + return NULL;
47484 +}
47485 +
47486 +static struct acl_object_label *
47487 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47488 + const struct acl_subject_label *subj)
47489 +{
47490 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47491 + struct acl_object_label *match;
47492 +
47493 + match = subj->obj_hash[index];
47494 +
47495 + while (match && (match->inode != ino || match->device != dev ||
47496 + (match->mode & GR_DELETED))) {
47497 + match = match->next;
47498 + }
47499 +
47500 + if (match && !(match->mode & GR_DELETED))
47501 + return match;
47502 + else
47503 + return NULL;
47504 +}
47505 +
47506 +static struct acl_object_label *
47507 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47508 + const struct acl_subject_label *subj)
47509 +{
47510 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47511 + struct acl_object_label *match;
47512 +
47513 + match = subj->obj_hash[index];
47514 +
47515 + while (match && (match->inode != ino || match->device != dev ||
47516 + !(match->mode & GR_DELETED))) {
47517 + match = match->next;
47518 + }
47519 +
47520 + if (match && (match->mode & GR_DELETED))
47521 + return match;
47522 +
47523 + match = subj->obj_hash[index];
47524 +
47525 + while (match && (match->inode != ino || match->device != dev ||
47526 + (match->mode & GR_DELETED))) {
47527 + match = match->next;
47528 + }
47529 +
47530 + if (match && !(match->mode & GR_DELETED))
47531 + return match;
47532 + else
47533 + return NULL;
47534 +}
47535 +
47536 +static struct name_entry *
47537 +lookup_name_entry(const char *name)
47538 +{
47539 + unsigned int len = strlen(name);
47540 + unsigned int key = full_name_hash(name, len);
47541 + unsigned int index = key % name_set.n_size;
47542 + struct name_entry *match;
47543 +
47544 + match = name_set.n_hash[index];
47545 +
47546 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47547 + match = match->next;
47548 +
47549 + return match;
47550 +}
47551 +
47552 +static struct name_entry *
47553 +lookup_name_entry_create(const char *name)
47554 +{
47555 + unsigned int len = strlen(name);
47556 + unsigned int key = full_name_hash(name, len);
47557 + unsigned int index = key % name_set.n_size;
47558 + struct name_entry *match;
47559 +
47560 + match = name_set.n_hash[index];
47561 +
47562 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47563 + !match->deleted))
47564 + match = match->next;
47565 +
47566 + if (match && match->deleted)
47567 + return match;
47568 +
47569 + match = name_set.n_hash[index];
47570 +
47571 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47572 + match->deleted))
47573 + match = match->next;
47574 +
47575 + if (match && !match->deleted)
47576 + return match;
47577 + else
47578 + return NULL;
47579 +}
47580 +
47581 +static struct inodev_entry *
47582 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47583 +{
47584 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47585 + struct inodev_entry *match;
47586 +
47587 + match = inodev_set.i_hash[index];
47588 +
47589 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47590 + match = match->next;
47591 +
47592 + return match;
47593 +}
47594 +
47595 +static void
47596 +insert_inodev_entry(struct inodev_entry *entry)
47597 +{
47598 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47599 + inodev_set.i_size);
47600 + struct inodev_entry **curr;
47601 +
47602 + entry->prev = NULL;
47603 +
47604 + curr = &inodev_set.i_hash[index];
47605 + if (*curr != NULL)
47606 + (*curr)->prev = entry;
47607 +
47608 + entry->next = *curr;
47609 + *curr = entry;
47610 +
47611 + return;
47612 +}
47613 +
47614 +static void
47615 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47616 +{
47617 + unsigned int index =
47618 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47619 + struct acl_role_label **curr;
47620 + struct acl_role_label *tmp;
47621 +
47622 + curr = &acl_role_set.r_hash[index];
47623 +
47624 + /* if role was already inserted due to domains and already has
47625 + a role in the same bucket as it attached, then we need to
47626 + combine these two buckets
47627 + */
47628 + if (role->next) {
47629 + tmp = role->next;
47630 + while (tmp->next)
47631 + tmp = tmp->next;
47632 + tmp->next = *curr;
47633 + } else
47634 + role->next = *curr;
47635 + *curr = role;
47636 +
47637 + return;
47638 +}
47639 +
47640 +static void
47641 +insert_acl_role_label(struct acl_role_label *role)
47642 +{
47643 + int i;
47644 +
47645 + if (role_list == NULL) {
47646 + role_list = role;
47647 + role->prev = NULL;
47648 + } else {
47649 + role->prev = role_list;
47650 + role_list = role;
47651 + }
47652 +
47653 + /* used for hash chains */
47654 + role->next = NULL;
47655 +
47656 + if (role->roletype & GR_ROLE_DOMAIN) {
47657 + for (i = 0; i < role->domain_child_num; i++)
47658 + __insert_acl_role_label(role, role->domain_children[i]);
47659 + } else
47660 + __insert_acl_role_label(role, role->uidgid);
47661 +}
47662 +
47663 +static int
47664 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47665 +{
47666 + struct name_entry **curr, *nentry;
47667 + struct inodev_entry *ientry;
47668 + unsigned int len = strlen(name);
47669 + unsigned int key = full_name_hash(name, len);
47670 + unsigned int index = key % name_set.n_size;
47671 +
47672 + curr = &name_set.n_hash[index];
47673 +
47674 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47675 + curr = &((*curr)->next);
47676 +
47677 + if (*curr != NULL)
47678 + return 1;
47679 +
47680 + nentry = acl_alloc(sizeof (struct name_entry));
47681 + if (nentry == NULL)
47682 + return 0;
47683 + ientry = acl_alloc(sizeof (struct inodev_entry));
47684 + if (ientry == NULL)
47685 + return 0;
47686 + ientry->nentry = nentry;
47687 +
47688 + nentry->key = key;
47689 + nentry->name = name;
47690 + nentry->inode = inode;
47691 + nentry->device = device;
47692 + nentry->len = len;
47693 + nentry->deleted = deleted;
47694 +
47695 + nentry->prev = NULL;
47696 + curr = &name_set.n_hash[index];
47697 + if (*curr != NULL)
47698 + (*curr)->prev = nentry;
47699 + nentry->next = *curr;
47700 + *curr = nentry;
47701 +
47702 + /* insert us into the table searchable by inode/dev */
47703 + insert_inodev_entry(ientry);
47704 +
47705 + return 1;
47706 +}
47707 +
47708 +static void
47709 +insert_acl_obj_label(struct acl_object_label *obj,
47710 + struct acl_subject_label *subj)
47711 +{
47712 + unsigned int index =
47713 + fhash(obj->inode, obj->device, subj->obj_hash_size);
47714 + struct acl_object_label **curr;
47715 +
47716 +
47717 + obj->prev = NULL;
47718 +
47719 + curr = &subj->obj_hash[index];
47720 + if (*curr != NULL)
47721 + (*curr)->prev = obj;
47722 +
47723 + obj->next = *curr;
47724 + *curr = obj;
47725 +
47726 + return;
47727 +}
47728 +
47729 +static void
47730 +insert_acl_subj_label(struct acl_subject_label *obj,
47731 + struct acl_role_label *role)
47732 +{
47733 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47734 + struct acl_subject_label **curr;
47735 +
47736 + obj->prev = NULL;
47737 +
47738 + curr = &role->subj_hash[index];
47739 + if (*curr != NULL)
47740 + (*curr)->prev = obj;
47741 +
47742 + obj->next = *curr;
47743 + *curr = obj;
47744 +
47745 + return;
47746 +}
47747 +
47748 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47749 +
47750 +static void *
47751 +create_table(__u32 * len, int elementsize)
47752 +{
47753 + unsigned int table_sizes[] = {
47754 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47755 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47756 + 4194301, 8388593, 16777213, 33554393, 67108859
47757 + };
47758 + void *newtable = NULL;
47759 + unsigned int pwr = 0;
47760 +
47761 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47762 + table_sizes[pwr] <= *len)
47763 + pwr++;
47764 +
47765 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47766 + return newtable;
47767 +
47768 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47769 + newtable =
47770 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47771 + else
47772 + newtable = vmalloc(table_sizes[pwr] * elementsize);
47773 +
47774 + *len = table_sizes[pwr];
47775 +
47776 + return newtable;
47777 +}
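create_table() above rounds the requested element count up to the next prime from a fixed list, so each chained hash table ends up with a load factor near one (the "lambda ~ 1" note earlier in the file). The sketch below isolates just that size-selection step with a shortened prime list.

#include <stdio.h>

/* Pick the smallest prime strictly larger than the requested count, or 0
   if the request exceeds the largest supported size. */
static unsigned int pick_table_size(unsigned int requested)
{
    static const unsigned int primes[] = {
        7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
    };
    size_t i;

    for (i = 0; i < sizeof(primes) / sizeof(primes[0]); i++)
        if (primes[i] > requested)
            return primes[i];
    return 0;
}

int main(void)
{
    printf("for 100 entries use %u buckets\n", pick_table_size(100));   /* 127 */
    printf("for 5000 entries use %u buckets\n", pick_table_size(5000)); /* 8191 */
    return 0;
}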
47778 +
47779 +static int
47780 +init_variables(const struct gr_arg *arg)
47781 +{
47782 + struct task_struct *reaper = &init_task;
47783 + unsigned int stacksize;
47784 +
47785 + subj_map_set.s_size = arg->role_db.num_subjects;
47786 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47787 + name_set.n_size = arg->role_db.num_objects;
47788 + inodev_set.i_size = arg->role_db.num_objects;
47789 +
47790 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
47791 + !name_set.n_size || !inodev_set.i_size)
47792 + return 1;
47793 +
47794 + if (!gr_init_uidset())
47795 + return 1;
47796 +
47797 + /* set up the stack that holds allocation info */
47798 +
47799 + stacksize = arg->role_db.num_pointers + 5;
47800 +
47801 + if (!acl_alloc_stack_init(stacksize))
47802 + return 1;
47803 +
47804 + /* grab reference for the real root dentry and vfsmount */
47805 + read_lock(&reaper->fs->lock);
47806 + real_root = dget(reaper->fs->root.dentry);
47807 + real_root_mnt = mntget(reaper->fs->root.mnt);
47808 + read_unlock(&reaper->fs->lock);
47809 +
47810 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47811 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47812 +#endif
47813 +
47814 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47815 + if (fakefs_obj_rw == NULL)
47816 + return 1;
47817 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47818 +
47819 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47820 + if (fakefs_obj_rwx == NULL)
47821 + return 1;
47822 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47823 +
47824 + subj_map_set.s_hash =
47825 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47826 + acl_role_set.r_hash =
47827 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47828 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47829 + inodev_set.i_hash =
47830 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47831 +
47832 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47833 + !name_set.n_hash || !inodev_set.i_hash)
47834 + return 1;
47835 +
47836 + memset(subj_map_set.s_hash, 0,
47837 + sizeof(struct subject_map *) * subj_map_set.s_size);
47838 + memset(acl_role_set.r_hash, 0,
47839 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
47840 + memset(name_set.n_hash, 0,
47841 + sizeof (struct name_entry *) * name_set.n_size);
47842 + memset(inodev_set.i_hash, 0,
47843 + sizeof (struct inodev_entry *) * inodev_set.i_size);
47844 +
47845 + return 0;
47846 +}
47847 +
47848 +/* free information not needed after startup
47849 + currently contains user->kernel pointer mappings for subjects
47850 +*/
47851 +
47852 +static void
47853 +free_init_variables(void)
47854 +{
47855 + __u32 i;
47856 +
47857 + if (subj_map_set.s_hash) {
47858 + for (i = 0; i < subj_map_set.s_size; i++) {
47859 + if (subj_map_set.s_hash[i]) {
47860 + kfree(subj_map_set.s_hash[i]);
47861 + subj_map_set.s_hash[i] = NULL;
47862 + }
47863 + }
47864 +
47865 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47866 + PAGE_SIZE)
47867 + kfree(subj_map_set.s_hash);
47868 + else
47869 + vfree(subj_map_set.s_hash);
47870 + }
47871 +
47872 + return;
47873 +}
47874 +
47875 +static void
47876 +free_variables(void)
47877 +{
47878 + struct acl_subject_label *s;
47879 + struct acl_role_label *r;
47880 + struct task_struct *task, *task2;
47881 + unsigned int x;
47882 +
47883 + gr_clear_learn_entries();
47884 +
47885 + read_lock(&tasklist_lock);
47886 + do_each_thread(task2, task) {
47887 + task->acl_sp_role = 0;
47888 + task->acl_role_id = 0;
47889 + task->acl = NULL;
47890 + task->role = NULL;
47891 + } while_each_thread(task2, task);
47892 + read_unlock(&tasklist_lock);
47893 +
47894 + /* release the reference to the real root dentry and vfsmount */
47895 + if (real_root)
47896 + dput(real_root);
47897 + real_root = NULL;
47898 + if (real_root_mnt)
47899 + mntput(real_root_mnt);
47900 + real_root_mnt = NULL;
47901 +
47902 + /* free all object hash tables */
47903 +
47904 + FOR_EACH_ROLE_START(r)
47905 + if (r->subj_hash == NULL)
47906 + goto next_role;
47907 + FOR_EACH_SUBJECT_START(r, s, x)
47908 + if (s->obj_hash == NULL)
47909 + break;
47910 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47911 + kfree(s->obj_hash);
47912 + else
47913 + vfree(s->obj_hash);
47914 + FOR_EACH_SUBJECT_END(s, x)
47915 + FOR_EACH_NESTED_SUBJECT_START(r, s)
47916 + if (s->obj_hash == NULL)
47917 + break;
47918 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47919 + kfree(s->obj_hash);
47920 + else
47921 + vfree(s->obj_hash);
47922 + FOR_EACH_NESTED_SUBJECT_END(s)
47923 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47924 + kfree(r->subj_hash);
47925 + else
47926 + vfree(r->subj_hash);
47927 + r->subj_hash = NULL;
47928 +next_role:
47929 + FOR_EACH_ROLE_END(r)
47930 +
47931 + acl_free_all();
47932 +
47933 + if (acl_role_set.r_hash) {
47934 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47935 + PAGE_SIZE)
47936 + kfree(acl_role_set.r_hash);
47937 + else
47938 + vfree(acl_role_set.r_hash);
47939 + }
47940 + if (name_set.n_hash) {
47941 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
47942 + PAGE_SIZE)
47943 + kfree(name_set.n_hash);
47944 + else
47945 + vfree(name_set.n_hash);
47946 + }
47947 +
47948 + if (inodev_set.i_hash) {
47949 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47950 + PAGE_SIZE)
47951 + kfree(inodev_set.i_hash);
47952 + else
47953 + vfree(inodev_set.i_hash);
47954 + }
47955 +
47956 + gr_free_uidset();
47957 +
47958 + memset(&name_set, 0, sizeof (struct name_db));
47959 + memset(&inodev_set, 0, sizeof (struct inodev_db));
47960 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47961 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47962 +
47963 + default_role = NULL;
47964 + role_list = NULL;
47965 +
47966 + return;
47967 +}
47968 +
47969 +static __u32
47970 +count_user_objs(struct acl_object_label *userp)
47971 +{
47972 + struct acl_object_label o_tmp;
47973 + __u32 num = 0;
47974 +
47975 + while (userp) {
47976 + if (copy_from_user(&o_tmp, userp,
47977 + sizeof (struct acl_object_label)))
47978 + break;
47979 +
47980 + userp = o_tmp.prev;
47981 + num++;
47982 + }
47983 +
47984 + return num;
47985 +}
47986 +
47987 +static struct acl_subject_label *
47988 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47989 +
47990 +static int
47991 +copy_user_glob(struct acl_object_label *obj)
47992 +{
47993 + struct acl_object_label *g_tmp, **guser;
47994 + unsigned int len;
47995 + char *tmp;
47996 +
47997 + if (obj->globbed == NULL)
47998 + return 0;
47999 +
48000 + guser = &obj->globbed;
48001 + while (*guser) {
48002 + g_tmp = (struct acl_object_label *)
48003 + acl_alloc(sizeof (struct acl_object_label));
48004 + if (g_tmp == NULL)
48005 + return -ENOMEM;
48006 +
48007 + if (copy_from_user(g_tmp, *guser,
48008 + sizeof (struct acl_object_label)))
48009 + return -EFAULT;
48010 +
48011 + len = strnlen_user(g_tmp->filename, PATH_MAX);
48012 +
48013 + if (!len || len >= PATH_MAX)
48014 + return -EINVAL;
48015 +
48016 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48017 + return -ENOMEM;
48018 +
48019 + if (copy_from_user(tmp, g_tmp->filename, len))
48020 + return -EFAULT;
48021 + tmp[len-1] = '\0';
48022 + g_tmp->filename = tmp;
48023 +
48024 + *guser = g_tmp;
48025 + guser = &(g_tmp->next);
48026 + }
48027 +
48028 + return 0;
48029 +}
48030 +
48031 +static int
48032 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48033 + struct acl_role_label *role)
48034 +{
48035 + struct acl_object_label *o_tmp;
48036 + unsigned int len;
48037 + int ret;
48038 + char *tmp;
48039 +
48040 + while (userp) {
48041 + if ((o_tmp = (struct acl_object_label *)
48042 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
48043 + return -ENOMEM;
48044 +
48045 + if (copy_from_user(o_tmp, userp,
48046 + sizeof (struct acl_object_label)))
48047 + return -EFAULT;
48048 +
48049 + userp = o_tmp->prev;
48050 +
48051 + len = strnlen_user(o_tmp->filename, PATH_MAX);
48052 +
48053 + if (!len || len >= PATH_MAX)
48054 + return -EINVAL;
48055 +
48056 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48057 + return -ENOMEM;
48058 +
48059 + if (copy_from_user(tmp, o_tmp->filename, len))
48060 + return -EFAULT;
48061 + tmp[len-1] = '\0';
48062 + o_tmp->filename = tmp;
48063 +
48064 + insert_acl_obj_label(o_tmp, subj);
48065 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48066 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48067 + return -ENOMEM;
48068 +
48069 + ret = copy_user_glob(o_tmp);
48070 + if (ret)
48071 + return ret;
48072 +
48073 + if (o_tmp->nested) {
48074 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48075 + if (IS_ERR(o_tmp->nested))
48076 + return PTR_ERR(o_tmp->nested);
48077 +
48078 + /* insert into nested subject list */
48079 + o_tmp->nested->next = role->hash->first;
48080 + role->hash->first = o_tmp->nested;
48081 + }
48082 + }
48083 +
48084 + return 0;
48085 +}
48086 +
48087 +static __u32
48088 +count_user_subjs(struct acl_subject_label *userp)
48089 +{
48090 + struct acl_subject_label s_tmp;
48091 + __u32 num = 0;
48092 +
48093 + while (userp) {
48094 + if (copy_from_user(&s_tmp, userp,
48095 + sizeof (struct acl_subject_label)))
48096 + break;
48097 +
48098 + userp = s_tmp.prev;
48099 + /* do not count nested subjects against this count, since
48100 + they are not included in the hash table, but are
48101 + attached to objects. We have already counted
48102 + the subjects in userspace for the allocation
48103 + stack
48104 + */
48105 + if (!(s_tmp.mode & GR_NESTED))
48106 + num++;
48107 + }
48108 +
48109 + return num;
48110 +}
48111 +
48112 +static int
48113 +copy_user_allowedips(struct acl_role_label *rolep)
48114 +{
48115 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48116 +
48117 + ruserip = rolep->allowed_ips;
48118 +
48119 + while (ruserip) {
48120 + rlast = rtmp;
48121 +
48122 + if ((rtmp = (struct role_allowed_ip *)
48123 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48124 + return -ENOMEM;
48125 +
48126 + if (copy_from_user(rtmp, ruserip,
48127 + sizeof (struct role_allowed_ip)))
48128 + return -EFAULT;
48129 +
48130 + ruserip = rtmp->prev;
48131 +
48132 + if (!rlast) {
48133 + rtmp->prev = NULL;
48134 + rolep->allowed_ips = rtmp;
48135 + } else {
48136 + rlast->next = rtmp;
48137 + rtmp->prev = rlast;
48138 + }
48139 +
48140 + if (!ruserip)
48141 + rtmp->next = NULL;
48142 + }
48143 +
48144 + return 0;
48145 +}
48146 +
48147 +static int
48148 +copy_user_transitions(struct acl_role_label *rolep)
48149 +{
48150 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
48151 +
48152 + unsigned int len;
48153 + char *tmp;
48154 +
48155 + rusertp = rolep->transitions;
48156 +
48157 + while (rusertp) {
48158 + rlast = rtmp;
48159 +
48160 + if ((rtmp = (struct role_transition *)
48161 + acl_alloc(sizeof (struct role_transition))) == NULL)
48162 + return -ENOMEM;
48163 +
48164 + if (copy_from_user(rtmp, rusertp,
48165 + sizeof (struct role_transition)))
48166 + return -EFAULT;
48167 +
48168 + rusertp = rtmp->prev;
48169 +
48170 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48171 +
48172 + if (!len || len >= GR_SPROLE_LEN)
48173 + return -EINVAL;
48174 +
48175 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48176 + return -ENOMEM;
48177 +
48178 + if (copy_from_user(tmp, rtmp->rolename, len))
48179 + return -EFAULT;
48180 + tmp[len-1] = '\0';
48181 + rtmp->rolename = tmp;
48182 +
48183 + if (!rlast) {
48184 + rtmp->prev = NULL;
48185 + rolep->transitions = rtmp;
48186 + } else {
48187 + rlast->next = rtmp;
48188 + rtmp->prev = rlast;
48189 + }
48190 +
48191 + if (!rusertp)
48192 + rtmp->next = NULL;
48193 + }
48194 +
48195 + return 0;
48196 +}
48197 +
48198 +static struct acl_subject_label *
48199 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48200 +{
48201 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48202 + unsigned int len;
48203 + char *tmp;
48204 + __u32 num_objs;
48205 + struct acl_ip_label **i_tmp, *i_utmp2;
48206 + struct gr_hash_struct ghash;
48207 + struct subject_map *subjmap;
48208 + unsigned int i_num;
48209 + int err;
48210 +
48211 + s_tmp = lookup_subject_map(userp);
48212 +
48213 + /* we've already copied this subject into the kernel, just return
48214 + the reference to it, and don't copy it over again
48215 + */
48216 + if (s_tmp)
48217 + return(s_tmp);
48218 +
48219 + if ((s_tmp = (struct acl_subject_label *)
48220 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48221 + return ERR_PTR(-ENOMEM);
48222 +
48223 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48224 + if (subjmap == NULL)
48225 + return ERR_PTR(-ENOMEM);
48226 +
48227 + subjmap->user = userp;
48228 + subjmap->kernel = s_tmp;
48229 + insert_subj_map_entry(subjmap);
48230 +
48231 + if (copy_from_user(s_tmp, userp,
48232 + sizeof (struct acl_subject_label)))
48233 + return ERR_PTR(-EFAULT);
48234 +
48235 + len = strnlen_user(s_tmp->filename, PATH_MAX);
48236 +
48237 + if (!len || len >= PATH_MAX)
48238 + return ERR_PTR(-EINVAL);
48239 +
48240 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48241 + return ERR_PTR(-ENOMEM);
48242 +
48243 + if (copy_from_user(tmp, s_tmp->filename, len))
48244 + return ERR_PTR(-EFAULT);
48245 + tmp[len-1] = '\0';
48246 + s_tmp->filename = tmp;
48247 +
48248 + if (!strcmp(s_tmp->filename, "/"))
48249 + role->root_label = s_tmp;
48250 +
48251 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48252 + return ERR_PTR(-EFAULT);
48253 +
48254 + /* copy user and group transition tables */
48255 +
48256 + if (s_tmp->user_trans_num) {
48257 + uid_t *uidlist;
48258 +
48259 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48260 + if (uidlist == NULL)
48261 + return ERR_PTR(-ENOMEM);
48262 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48263 + return ERR_PTR(-EFAULT);
48264 +
48265 + s_tmp->user_transitions = uidlist;
48266 + }
48267 +
48268 + if (s_tmp->group_trans_num) {
48269 + gid_t *gidlist;
48270 +
48271 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48272 + if (gidlist == NULL)
48273 + return ERR_PTR(-ENOMEM);
48274 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48275 + return ERR_PTR(-EFAULT);
48276 +
48277 + s_tmp->group_transitions = gidlist;
48278 + }
48279 +
48280 + /* set up object hash table */
48281 + num_objs = count_user_objs(ghash.first);
48282 +
48283 + s_tmp->obj_hash_size = num_objs;
48284 + s_tmp->obj_hash =
48285 + (struct acl_object_label **)
48286 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48287 +
48288 + if (!s_tmp->obj_hash)
48289 + return ERR_PTR(-ENOMEM);
48290 +
48291 + memset(s_tmp->obj_hash, 0,
48292 + s_tmp->obj_hash_size *
48293 + sizeof (struct acl_object_label *));
48294 +
48295 + /* add in objects */
48296 + err = copy_user_objs(ghash.first, s_tmp, role);
48297 +
48298 + if (err)
48299 + return ERR_PTR(err);
48300 +
48301 + /* set pointer for parent subject */
48302 + if (s_tmp->parent_subject) {
48303 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48304 +
48305 + if (IS_ERR(s_tmp2))
48306 + return s_tmp2;
48307 +
48308 + s_tmp->parent_subject = s_tmp2;
48309 + }
48310 +
48311 + /* add in ip acls */
48312 +
48313 + if (!s_tmp->ip_num) {
48314 + s_tmp->ips = NULL;
48315 + goto insert;
48316 + }
48317 +
48318 + i_tmp =
48319 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48320 + sizeof (struct acl_ip_label *));
48321 +
48322 + if (!i_tmp)
48323 + return ERR_PTR(-ENOMEM);
48324 +
48325 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48326 + *(i_tmp + i_num) =
48327 + (struct acl_ip_label *)
48328 + acl_alloc(sizeof (struct acl_ip_label));
48329 + if (!*(i_tmp + i_num))
48330 + return ERR_PTR(-ENOMEM);
48331 +
48332 + if (copy_from_user
48333 + (&i_utmp2, s_tmp->ips + i_num,
48334 + sizeof (struct acl_ip_label *)))
48335 + return ERR_PTR(-EFAULT);
48336 +
48337 + if (copy_from_user
48338 + (*(i_tmp + i_num), i_utmp2,
48339 + sizeof (struct acl_ip_label)))
48340 + return ERR_PTR(-EFAULT);
48341 +
48342 + if ((*(i_tmp + i_num))->iface == NULL)
48343 + continue;
48344 +
48345 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48346 + if (!len || len >= IFNAMSIZ)
48347 + return ERR_PTR(-EINVAL);
48348 + tmp = acl_alloc(len);
48349 + if (tmp == NULL)
48350 + return ERR_PTR(-ENOMEM);
48351 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48352 + return ERR_PTR(-EFAULT);
48353 + (*(i_tmp + i_num))->iface = tmp;
48354 + }
48355 +
48356 + s_tmp->ips = i_tmp;
48357 +
48358 +insert:
48359 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48360 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48361 + return ERR_PTR(-ENOMEM);
48362 +
48363 + return s_tmp;
48364 +}
48365 +
48366 +static int
48367 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48368 +{
48369 + struct acl_subject_label s_pre;
48370 + struct acl_subject_label * ret;
48371 + int err;
48372 +
48373 + while (userp) {
48374 + if (copy_from_user(&s_pre, userp,
48375 + sizeof (struct acl_subject_label)))
48376 + return -EFAULT;
48377 +
48378 + /* do not add nested subjects here, add
48379 + while parsing objects
48380 + */
48381 +
48382 + if (s_pre.mode & GR_NESTED) {
48383 + userp = s_pre.prev;
48384 + continue;
48385 + }
48386 +
48387 + ret = do_copy_user_subj(userp, role);
48388 +
48389 + err = PTR_ERR(ret);
48390 + if (IS_ERR(ret))
48391 + return err;
48392 +
48393 + insert_acl_subj_label(ret, role);
48394 +
48395 + userp = s_pre.prev;
48396 + }
48397 +
48398 + return 0;
48399 +}
48400 +
48401 +static int
48402 +copy_user_acl(struct gr_arg *arg)
48403 +{
48404 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48405 + struct sprole_pw *sptmp;
48406 + struct gr_hash_struct *ghash;
48407 + uid_t *domainlist;
48408 + unsigned int r_num;
48409 + unsigned int len;
48410 + char *tmp;
48411 + int err = 0;
48412 + __u16 i;
48413 + __u32 num_subjs;
48414 +
48415 + /* we need a default and kernel role */
48416 + if (arg->role_db.num_roles < 2)
48417 + return -EINVAL;
48418 +
48419 + /* copy special role authentication info from userspace */
48420 +
48421 + num_sprole_pws = arg->num_sprole_pws;
48422 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48423 +
48424 + if (!acl_special_roles) {
48425 + err = -ENOMEM;
48426 + goto cleanup;
48427 + }
48428 +
48429 + for (i = 0; i < num_sprole_pws; i++) {
48430 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48431 + if (!sptmp) {
48432 + err = -ENOMEM;
48433 + goto cleanup;
48434 + }
48435 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48436 + sizeof (struct sprole_pw))) {
48437 + err = -EFAULT;
48438 + goto cleanup;
48439 + }
48440 +
48441 + len =
48442 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48443 +
48444 + if (!len || len >= GR_SPROLE_LEN) {
48445 + err = -EINVAL;
48446 + goto cleanup;
48447 + }
48448 +
48449 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48450 + err = -ENOMEM;
48451 + goto cleanup;
48452 + }
48453 +
48454 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48455 + err = -EFAULT;
48456 + goto cleanup;
48457 + }
48458 + tmp[len-1] = '\0';
48459 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48460 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48461 +#endif
48462 + sptmp->rolename = tmp;
48463 + acl_special_roles[i] = sptmp;
48464 + }
48465 +
48466 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48467 +
48468 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48469 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48470 +
48471 + if (!r_tmp) {
48472 + err = -ENOMEM;
48473 + goto cleanup;
48474 + }
48475 +
48476 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48477 + sizeof (struct acl_role_label *))) {
48478 + err = -EFAULT;
48479 + goto cleanup;
48480 + }
48481 +
48482 + if (copy_from_user(r_tmp, r_utmp2,
48483 + sizeof (struct acl_role_label))) {
48484 + err = -EFAULT;
48485 + goto cleanup;
48486 + }
48487 +
48488 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48489 +
48490 + if (!len || len >= PATH_MAX) {
48491 + err = -EINVAL;
48492 + goto cleanup;
48493 + }
48494 +
48495 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48496 + err = -ENOMEM;
48497 + goto cleanup;
48498 + }
48499 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48500 + err = -EFAULT;
48501 + goto cleanup;
48502 + }
48503 + tmp[len-1] = '\0';
48504 + r_tmp->rolename = tmp;
48505 +
48506 + if (!strcmp(r_tmp->rolename, "default")
48507 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48508 + default_role = r_tmp;
48509 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48510 + kernel_role = r_tmp;
48511 + }
48512 +
48513 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48514 + err = -ENOMEM;
48515 + goto cleanup;
48516 + }
48517 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48518 + err = -EFAULT;
48519 + goto cleanup;
48520 + }
48521 +
48522 + r_tmp->hash = ghash;
48523 +
48524 + num_subjs = count_user_subjs(r_tmp->hash->first);
48525 +
48526 + r_tmp->subj_hash_size = num_subjs;
48527 + r_tmp->subj_hash =
48528 + (struct acl_subject_label **)
48529 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48530 +
48531 + if (!r_tmp->subj_hash) {
48532 + err = -ENOMEM;
48533 + goto cleanup;
48534 + }
48535 +
48536 + err = copy_user_allowedips(r_tmp);
48537 + if (err)
48538 + goto cleanup;
48539 +
48540 + /* copy domain info */
48541 + if (r_tmp->domain_children != NULL) {
48542 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48543 + if (domainlist == NULL) {
48544 + err = -ENOMEM;
48545 + goto cleanup;
48546 + }
48547 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48548 + err = -EFAULT;
48549 + goto cleanup;
48550 + }
48551 + r_tmp->domain_children = domainlist;
48552 + }
48553 +
48554 + err = copy_user_transitions(r_tmp);
48555 + if (err)
48556 + goto cleanup;
48557 +
48558 + memset(r_tmp->subj_hash, 0,
48559 + r_tmp->subj_hash_size *
48560 + sizeof (struct acl_subject_label *));
48561 +
48562 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48563 +
48564 + if (err)
48565 + goto cleanup;
48566 +
48567 + /* set nested subject list to null */
48568 + r_tmp->hash->first = NULL;
48569 +
48570 + insert_acl_role_label(r_tmp);
48571 + }
48572 +
48573 + goto return_err;
48574 + cleanup:
48575 + free_variables();
48576 + return_err:
48577 + return err;
48578 +
48579 +}
48580 +
48581 +static int
48582 +gracl_init(struct gr_arg *args)
48583 +{
48584 + int error = 0;
48585 +
48586 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48587 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48588 +
48589 + if (init_variables(args)) {
48590 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48591 + error = -ENOMEM;
48592 + free_variables();
48593 + goto out;
48594 + }
48595 +
48596 + error = copy_user_acl(args);
48597 + free_init_variables();
48598 + if (error) {
48599 + free_variables();
48600 + goto out;
48601 + }
48602 +
48603 + if ((error = gr_set_acls(0))) {
48604 + free_variables();
48605 + goto out;
48606 + }
48607 +
48608 + pax_open_kernel();
48609 + gr_status |= GR_READY;
48610 + pax_close_kernel();
48611 +
48612 + out:
48613 + return error;
48614 +}
48615 +
48616 +/* derived from glibc fnmatch() 0: match, 1: no match*/
48617 +
48618 +static int
48619 +glob_match(const char *p, const char *n)
48620 +{
48621 + char c;
48622 +
48623 + while ((c = *p++) != '\0') {
48624 + switch (c) {
48625 + case '?':
48626 + if (*n == '\0')
48627 + return 1;
48628 + else if (*n == '/')
48629 + return 1;
48630 + break;
48631 + case '\\':
48632 + if (*n != c)
48633 + return 1;
48634 + break;
48635 + case '*':
48636 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
48637 + if (*n == '/')
48638 + return 1;
48639 + else if (c == '?') {
48640 + if (*n == '\0')
48641 + return 1;
48642 + else
48643 + ++n;
48644 + }
48645 + }
48646 + if (c == '\0') {
48647 + return 0;
48648 + } else {
48649 + const char *endp;
48650 +
48651 + if ((endp = strchr(n, '/')) == NULL)
48652 + endp = n + strlen(n);
48653 +
48654 + if (c == '[') {
48655 + for (--p; n < endp; ++n)
48656 + if (!glob_match(p, n))
48657 + return 0;
48658 + } else if (c == '/') {
48659 + while (*n != '\0' && *n != '/')
48660 + ++n;
48661 + if (*n == '/' && !glob_match(p, n + 1))
48662 + return 0;
48663 + } else {
48664 + for (--p; n < endp; ++n)
48665 + if (*n == c && !glob_match(p, n))
48666 + return 0;
48667 + }
48668 +
48669 + return 1;
48670 + }
48671 + case '[':
48672 + {
48673 + int not;
48674 + char cold;
48675 +
48676 + if (*n == '\0' || *n == '/')
48677 + return 1;
48678 +
48679 + not = (*p == '!' || *p == '^');
48680 + if (not)
48681 + ++p;
48682 +
48683 + c = *p++;
48684 + for (;;) {
48685 + unsigned char fn = (unsigned char)*n;
48686 +
48687 + if (c == '\0')
48688 + return 1;
48689 + else {
48690 + if (c == fn)
48691 + goto matched;
48692 + cold = c;
48693 + c = *p++;
48694 +
48695 + if (c == '-' && *p != ']') {
48696 + unsigned char cend = *p++;
48697 +
48698 + if (cend == '\0')
48699 + return 1;
48700 +
48701 + if (cold <= fn && fn <= cend)
48702 + goto matched;
48703 +
48704 + c = *p++;
48705 + }
48706 + }
48707 +
48708 + if (c == ']')
48709 + break;
48710 + }
48711 + if (!not)
48712 + return 1;
48713 + break;
48714 + matched:
48715 + while (c != ']') {
48716 + if (c == '\0')
48717 + return 1;
48718 +
48719 + c = *p++;
48720 + }
48721 + if (not)
48722 + return 1;
48723 + }
48724 + break;
48725 + default:
48726 + if (c != *n)
48727 + return 1;
48728 + }
48729 +
48730 + ++n;
48731 + }
48732 +
48733 + if (*n == '\0')
48734 + return 0;
48735 +
48736 + if (*n == '/')
48737 + return 0;
48738 +
48739 + return 1;
48740 +}
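glob_match() above follows the glibc fnmatch() convention it is derived from: 0 means the pattern matched, non-zero means it did not, and its '*' and '?' wildcards stop at '/' component boundaries, much like fnmatch() with FNM_PATHNAME. The quick check below uses POSIX fnmatch() itself to show the same return convention and slash behaviour.

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
    /* 0 = match, FNM_NOMATCH = no match; FNM_PATHNAME keeps wildcards
       from matching across '/'. */
    printf("*.c vs foo.c       -> %d\n", fnmatch("*.c", "foo.c", FNM_PATHNAME));
    printf("/etc/* vs /etc/pw  -> %d\n", fnmatch("/etc/*", "/etc/pw", FNM_PATHNAME));
    printf("/etc/* vs /etc/a/b -> %d\n", fnmatch("/etc/*", "/etc/a/b", FNM_PATHNAME));
    return 0;
}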
48741 +
48742 +static struct acl_object_label *
48743 +chk_glob_label(struct acl_object_label *globbed,
48744 + struct dentry *dentry, struct vfsmount *mnt, char **path)
48745 +{
48746 + struct acl_object_label *tmp;
48747 +
48748 + if (*path == NULL)
48749 + *path = gr_to_filename_nolock(dentry, mnt);
48750 +
48751 + tmp = globbed;
48752 +
48753 + while (tmp) {
48754 + if (!glob_match(tmp->filename, *path))
48755 + return tmp;
48756 + tmp = tmp->next;
48757 + }
48758 +
48759 + return NULL;
48760 +}
48761 +
48762 +static struct acl_object_label *
48763 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48764 + const ino_t curr_ino, const dev_t curr_dev,
48765 + const struct acl_subject_label *subj, char **path, const int checkglob)
48766 +{
48767 + struct acl_subject_label *tmpsubj;
48768 + struct acl_object_label *retval;
48769 + struct acl_object_label *retval2;
48770 +
48771 + tmpsubj = (struct acl_subject_label *) subj;
48772 + read_lock(&gr_inode_lock);
48773 + do {
48774 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48775 + if (retval) {
48776 + if (checkglob && retval->globbed) {
48777 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48778 + (struct vfsmount *)orig_mnt, path);
48779 + if (retval2)
48780 + retval = retval2;
48781 + }
48782 + break;
48783 + }
48784 + } while ((tmpsubj = tmpsubj->parent_subject));
48785 + read_unlock(&gr_inode_lock);
48786 +
48787 + return retval;
48788 +}
48789 +
48790 +static __inline__ struct acl_object_label *
48791 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48792 + const struct dentry *curr_dentry,
48793 + const struct acl_subject_label *subj, char **path, const int checkglob)
48794 +{
48795 + int newglob = checkglob;
48796 +
48797 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
48798 + as we don't want a / * rule to match instead of the / object
48799 + don't do this for create lookups that call this function though, since they're looking up
48800 + on the parent and thus need globbing checks on all paths
48801 + */
48802 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48803 + newglob = GR_NO_GLOB;
48804 +
48805 + return __full_lookup(orig_dentry, orig_mnt,
48806 + curr_dentry->d_inode->i_ino,
48807 + __get_dev(curr_dentry), subj, path, newglob);
48808 +}
48809 +
48810 +static struct acl_object_label *
48811 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48812 + const struct acl_subject_label *subj, char *path, const int checkglob)
48813 +{
48814 + struct dentry *dentry = (struct dentry *) l_dentry;
48815 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48816 + struct acl_object_label *retval;
48817 +
48818 + spin_lock(&dcache_lock);
48819 + spin_lock(&vfsmount_lock);
48820 +
48821 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48822 +#ifdef CONFIG_NET
48823 + mnt == sock_mnt ||
48824 +#endif
48825 +#ifdef CONFIG_HUGETLBFS
48826 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48827 +#endif
48828 + /* ignore Eric Biederman */
48829 + IS_PRIVATE(l_dentry->d_inode))) {
48830 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48831 + goto out;
48832 + }
48833 +
48834 + for (;;) {
48835 + if (dentry == real_root && mnt == real_root_mnt)
48836 + break;
48837 +
48838 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48839 + if (mnt->mnt_parent == mnt)
48840 + break;
48841 +
48842 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48843 + if (retval != NULL)
48844 + goto out;
48845 +
48846 + dentry = mnt->mnt_mountpoint;
48847 + mnt = mnt->mnt_parent;
48848 + continue;
48849 + }
48850 +
48851 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48852 + if (retval != NULL)
48853 + goto out;
48854 +
48855 + dentry = dentry->d_parent;
48856 + }
48857 +
48858 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48859 +
48860 + if (retval == NULL)
48861 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48862 +out:
48863 + spin_unlock(&vfsmount_lock);
48864 + spin_unlock(&dcache_lock);
48865 +
48866 + BUG_ON(retval == NULL);
48867 +
48868 + return retval;
48869 +}
48870 +
48871 +static __inline__ struct acl_object_label *
48872 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48873 + const struct acl_subject_label *subj)
48874 +{
48875 + char *path = NULL;
48876 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48877 +}
48878 +
48879 +static __inline__ struct acl_object_label *
48880 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48881 + const struct acl_subject_label *subj)
48882 +{
48883 + char *path = NULL;
48884 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48885 +}
48886 +
48887 +static __inline__ struct acl_object_label *
48888 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48889 + const struct acl_subject_label *subj, char *path)
48890 +{
48891 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48892 +}
48893 +
48894 +static struct acl_subject_label *
48895 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48896 + const struct acl_role_label *role)
48897 +{
48898 + struct dentry *dentry = (struct dentry *) l_dentry;
48899 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48900 + struct acl_subject_label *retval;
48901 +
48902 + spin_lock(&dcache_lock);
48903 + spin_lock(&vfsmount_lock);
48904 +
48905 + for (;;) {
48906 + if (dentry == real_root && mnt == real_root_mnt)
48907 + break;
48908 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48909 + if (mnt->mnt_parent == mnt)
48910 + break;
48911 +
48912 + read_lock(&gr_inode_lock);
48913 + retval =
48914 + lookup_acl_subj_label(dentry->d_inode->i_ino,
48915 + __get_dev(dentry), role);
48916 + read_unlock(&gr_inode_lock);
48917 + if (retval != NULL)
48918 + goto out;
48919 +
48920 + dentry = mnt->mnt_mountpoint;
48921 + mnt = mnt->mnt_parent;
48922 + continue;
48923 + }
48924 +
48925 + read_lock(&gr_inode_lock);
48926 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48927 + __get_dev(dentry), role);
48928 + read_unlock(&gr_inode_lock);
48929 + if (retval != NULL)
48930 + goto out;
48931 +
48932 + dentry = dentry->d_parent;
48933 + }
48934 +
48935 + read_lock(&gr_inode_lock);
48936 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48937 + __get_dev(dentry), role);
48938 + read_unlock(&gr_inode_lock);
48939 +
48940 + if (unlikely(retval == NULL)) {
48941 + read_lock(&gr_inode_lock);
48942 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48943 + __get_dev(real_root), role);
48944 + read_unlock(&gr_inode_lock);
48945 + }
48946 +out:
48947 + spin_unlock(&vfsmount_lock);
48948 + spin_unlock(&dcache_lock);
48949 +
48950 + BUG_ON(retval == NULL);
48951 +
48952 + return retval;
48953 +}
48954 +
48955 +static void
48956 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48957 +{
48958 + struct task_struct *task = current;
48959 + const struct cred *cred = current_cred();
48960 +
48961 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48962 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48963 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48964 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48965 +
48966 + return;
48967 +}
48968 +
48969 +static void
48970 +gr_log_learn_sysctl(const char *path, const __u32 mode)
48971 +{
48972 + struct task_struct *task = current;
48973 + const struct cred *cred = current_cred();
48974 +
48975 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48976 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48977 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48978 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48979 +
48980 + return;
48981 +}
48982 +
48983 +static void
48984 +gr_log_learn_id_change(const char type, const unsigned int real,
48985 + const unsigned int effective, const unsigned int fs)
48986 +{
48987 + struct task_struct *task = current;
48988 + const struct cred *cred = current_cred();
48989 +
48990 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48991 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48992 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48993 + type, real, effective, fs, &task->signal->saved_ip);
48994 +
48995 + return;
48996 +}
48997 +
48998 +__u32
48999 +gr_check_link(const struct dentry * new_dentry,
49000 + const struct dentry * parent_dentry,
49001 + const struct vfsmount * parent_mnt,
49002 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49003 +{
49004 + struct acl_object_label *obj;
49005 + __u32 oldmode, newmode;
49006 + __u32 needmode;
49007 +
49008 + if (unlikely(!(gr_status & GR_READY)))
49009 + return (GR_CREATE | GR_LINK);
49010 +
49011 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49012 + oldmode = obj->mode;
49013 +
49014 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49015 + oldmode |= (GR_CREATE | GR_LINK);
49016 +
49017 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
49018 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49019 + needmode |= GR_SETID | GR_AUDIT_SETID;
49020 +
49021 + newmode =
49022 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
49023 + oldmode | needmode);
49024 +
49025 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
49026 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
49027 + GR_INHERIT | GR_AUDIT_INHERIT);
49028 +
49029 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
49030 + goto bad;
49031 +
49032 + if ((oldmode & needmode) != needmode)
49033 + goto bad;
49034 +
49035 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49036 + if ((newmode & needmode) != needmode)
49037 + goto bad;
49038 +
49039 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49040 + return newmode;
49041 +bad:
49042 + needmode = oldmode;
49043 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49044 + needmode |= GR_SETID;
49045 +
49046 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49047 + gr_log_learn(old_dentry, old_mnt, needmode);
49048 + return (GR_CREATE | GR_LINK);
49049 + } else if (newmode & GR_SUPPRESS)
49050 + return GR_SUPPRESS;
49051 + else
49052 + return 0;
49053 +}
49054 +
49055 +__u32
49056 +gr_search_file(const struct dentry * dentry, const __u32 mode,
49057 + const struct vfsmount * mnt)
49058 +{
49059 + __u32 retval = mode;
49060 + struct acl_subject_label *curracl;
49061 + struct acl_object_label *currobj;
49062 +
49063 + if (unlikely(!(gr_status & GR_READY)))
49064 + return (mode & ~GR_AUDITS);
49065 +
49066 + curracl = current->acl;
49067 +
49068 + currobj = chk_obj_label(dentry, mnt, curracl);
49069 + retval = currobj->mode & mode;
49070 +
49071 + /* if we're opening a specified transfer file for writing
49072 + (e.g. /dev/initctl), then transfer our role to init
49073 + */
49074 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49075 + current->role->roletype & GR_ROLE_PERSIST)) {
49076 + struct task_struct *task = init_pid_ns.child_reaper;
49077 +
49078 + if (task->role != current->role) {
49079 + task->acl_sp_role = 0;
49080 + task->acl_role_id = current->acl_role_id;
49081 + task->role = current->role;
49082 + rcu_read_lock();
49083 + read_lock(&grsec_exec_file_lock);
49084 + gr_apply_subject_to_task(task);
49085 + read_unlock(&grsec_exec_file_lock);
49086 + rcu_read_unlock();
49087 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49088 + }
49089 + }
49090 +
49091 + if (unlikely
49092 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49093 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49094 + __u32 new_mode = mode;
49095 +
49096 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49097 +
49098 + retval = new_mode;
49099 +
49100 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49101 + new_mode |= GR_INHERIT;
49102 +
49103 + if (!(mode & GR_NOLEARN))
49104 + gr_log_learn(dentry, mnt, new_mode);
49105 + }
49106 +
49107 + return retval;
49108 +}
49109 +
49110 +__u32
49111 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49112 + const struct vfsmount * mnt, const __u32 mode)
49113 +{
49114 + struct name_entry *match;
49115 + struct acl_object_label *matchpo;
49116 + struct acl_subject_label *curracl;
49117 + char *path;
49118 + __u32 retval;
49119 +
49120 + if (unlikely(!(gr_status & GR_READY)))
49121 + return (mode & ~GR_AUDITS);
49122 +
49123 + preempt_disable();
49124 + path = gr_to_filename_rbac(new_dentry, mnt);
49125 + match = lookup_name_entry_create(path);
49126 +
49127 + if (!match)
49128 + goto check_parent;
49129 +
49130 + curracl = current->acl;
49131 +
49132 + read_lock(&gr_inode_lock);
49133 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49134 + read_unlock(&gr_inode_lock);
49135 +
49136 + if (matchpo) {
49137 + if ((matchpo->mode & mode) !=
49138 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
49139 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49140 + __u32 new_mode = mode;
49141 +
49142 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49143 +
49144 + gr_log_learn(new_dentry, mnt, new_mode);
49145 +
49146 + preempt_enable();
49147 + return new_mode;
49148 + }
49149 + preempt_enable();
49150 + return (matchpo->mode & mode);
49151 + }
49152 +
49153 + check_parent:
49154 + curracl = current->acl;
49155 +
49156 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49157 + retval = matchpo->mode & mode;
49158 +
49159 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49160 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49161 + __u32 new_mode = mode;
49162 +
49163 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49164 +
49165 + gr_log_learn(new_dentry, mnt, new_mode);
49166 + preempt_enable();
49167 + return new_mode;
49168 + }
49169 +
49170 + preempt_enable();
49171 + return retval;
49172 +}
49173 +
49174 +int
49175 +gr_check_hidden_task(const struct task_struct *task)
49176 +{
49177 + if (unlikely(!(gr_status & GR_READY)))
49178 + return 0;
49179 +
49180 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49181 + return 1;
49182 +
49183 + return 0;
49184 +}
49185 +
49186 +int
49187 +gr_check_protected_task(const struct task_struct *task)
49188 +{
49189 + if (unlikely(!(gr_status & GR_READY) || !task))
49190 + return 0;
49191 +
49192 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49193 + task->acl != current->acl)
49194 + return 1;
49195 +
49196 + return 0;
49197 +}
49198 +
49199 +int
49200 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49201 +{
49202 + struct task_struct *p;
49203 + int ret = 0;
49204 +
49205 + if (unlikely(!(gr_status & GR_READY) || !pid))
49206 + return ret;
49207 +
49208 + read_lock(&tasklist_lock);
49209 + do_each_pid_task(pid, type, p) {
49210 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49211 + p->acl != current->acl) {
49212 + ret = 1;
49213 + goto out;
49214 + }
49215 + } while_each_pid_task(pid, type, p);
49216 +out:
49217 + read_unlock(&tasklist_lock);
49218 +
49219 + return ret;
49220 +}
49221 +
49222 +void
49223 +gr_copy_label(struct task_struct *tsk)
49224 +{
49225 + tsk->signal->used_accept = 0;
49226 + tsk->acl_sp_role = 0;
49227 + tsk->acl_role_id = current->acl_role_id;
49228 + tsk->acl = current->acl;
49229 + tsk->role = current->role;
49230 + tsk->signal->curr_ip = current->signal->curr_ip;
49231 + tsk->signal->saved_ip = current->signal->saved_ip;
49232 + if (current->exec_file)
49233 + get_file(current->exec_file);
49234 + tsk->exec_file = current->exec_file;
49235 + tsk->is_writable = current->is_writable;
49236 + if (unlikely(current->signal->used_accept)) {
49237 + current->signal->curr_ip = 0;
49238 + current->signal->saved_ip = 0;
49239 + }
49240 +
49241 + return;
49242 +}
49243 +
49244 +static void
49245 +gr_set_proc_res(struct task_struct *task)
49246 +{
49247 + struct acl_subject_label *proc;
49248 + unsigned short i;
49249 +
49250 + proc = task->acl;
49251 +
49252 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49253 + return;
49254 +
49255 + for (i = 0; i < RLIM_NLIMITS; i++) {
49256 + if (!(proc->resmask & (1 << i)))
49257 + continue;
49258 +
49259 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49260 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49261 + }
49262 +
49263 + return;
49264 +}
49265 +
49266 +extern int __gr_process_user_ban(struct user_struct *user);
49267 +
49268 +int
49269 +gr_check_user_change(int real, int effective, int fs)
49270 +{
49271 + unsigned int i;
49272 + __u16 num;
49273 + uid_t *uidlist;
49274 + int curuid;
49275 + int realok = 0;
49276 + int effectiveok = 0;
49277 + int fsok = 0;
49278 +
49279 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49280 + struct user_struct *user;
49281 +
49282 + if (real == -1)
49283 + goto skipit;
49284 +
49285 + user = find_user(real);
49286 + if (user == NULL)
49287 + goto skipit;
49288 +
49289 + if (__gr_process_user_ban(user)) {
49290 + /* for find_user */
49291 + free_uid(user);
49292 + return 1;
49293 + }
49294 +
49295 + /* for find_user */
49296 + free_uid(user);
49297 +
49298 +skipit:
49299 +#endif
49300 +
49301 + if (unlikely(!(gr_status & GR_READY)))
49302 + return 0;
49303 +
49304 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49305 + gr_log_learn_id_change('u', real, effective, fs);
49306 +
49307 + num = current->acl->user_trans_num;
49308 + uidlist = current->acl->user_transitions;
49309 +
49310 + if (uidlist == NULL)
49311 + return 0;
49312 +
49313 + if (real == -1)
49314 + realok = 1;
49315 + if (effective == -1)
49316 + effectiveok = 1;
49317 + if (fs == -1)
49318 + fsok = 1;
49319 +
49320 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
49321 + for (i = 0; i < num; i++) {
49322 + curuid = (int)uidlist[i];
49323 + if (real == curuid)
49324 + realok = 1;
49325 + if (effective == curuid)
49326 + effectiveok = 1;
49327 + if (fs == curuid)
49328 + fsok = 1;
49329 + }
49330 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
49331 + for (i = 0; i < num; i++) {
49332 + curuid = (int)uidlist[i];
49333 + if (real == curuid)
49334 + break;
49335 + if (effective == curuid)
49336 + break;
49337 + if (fs == curuid)
49338 + break;
49339 + }
49340 + /* not in deny list */
49341 + if (i == num) {
49342 + realok = 1;
49343 + effectiveok = 1;
49344 + fsok = 1;
49345 + }
49346 + }
49347 +
49348 + if (realok && effectiveok && fsok)
49349 + return 0;
49350 + else {
49351 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49352 + return 1;
49353 + }
49354 +}
49355 +
49356 +int
49357 +gr_check_group_change(int real, int effective, int fs)
49358 +{
49359 + unsigned int i;
49360 + __u16 num;
49361 + gid_t *gidlist;
49362 + int curgid;
49363 + int realok = 0;
49364 + int effectiveok = 0;
49365 + int fsok = 0;
49366 +
49367 + if (unlikely(!(gr_status & GR_READY)))
49368 + return 0;
49369 +
49370 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49371 + gr_log_learn_id_change('g', real, effective, fs);
49372 +
49373 + num = current->acl->group_trans_num;
49374 + gidlist = current->acl->group_transitions;
49375 +
49376 + if (gidlist == NULL)
49377 + return 0;
49378 +
49379 + if (real == -1)
49380 + realok = 1;
49381 + if (effective == -1)
49382 + effectiveok = 1;
49383 + if (fs == -1)
49384 + fsok = 1;
49385 +
49386 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49387 + for (i = 0; i < num; i++) {
49388 + curgid = (int)gidlist[i];
49389 + if (real == curgid)
49390 + realok = 1;
49391 + if (effective == curgid)
49392 + effectiveok = 1;
49393 + if (fs == curgid)
49394 + fsok = 1;
49395 + }
49396 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49397 + for (i = 0; i < num; i++) {
49398 + curgid = (int)gidlist[i];
49399 + if (real == curgid)
49400 + break;
49401 + if (effective == curgid)
49402 + break;
49403 + if (fs == curgid)
49404 + break;
49405 + }
49406 + /* not in deny list */
49407 + if (i == num) {
49408 + realok = 1;
49409 + effectiveok = 1;
49410 + fsok = 1;
49411 + }
49412 + }
49413 +
49414 + if (realok && effectiveok && fsok)
49415 + return 0;
49416 + else {
49417 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49418 + return 1;
49419 + }
49420 +}
49421 +
49422 +void
49423 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49424 +{
49425 + struct acl_role_label *role = task->role;
49426 + struct acl_subject_label *subj = NULL;
49427 + struct acl_object_label *obj;
49428 + struct file *filp;
49429 +
49430 + if (unlikely(!(gr_status & GR_READY)))
49431 + return;
49432 +
49433 + filp = task->exec_file;
49434 +
49435 + /* kernel process, we'll give them the kernel role */
49436 + if (unlikely(!filp)) {
49437 + task->role = kernel_role;
49438 + task->acl = kernel_role->root_label;
49439 + return;
49440 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49441 + role = lookup_acl_role_label(task, uid, gid);
49442 +
49443 + /* perform subject lookup in possibly new role
49444 + we can use this result below in the case where role == task->role
49445 + */
49446 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49447 +
49448 +	/* if we changed uid/gid but ended up in the same role
49449 +	   and are using inheritance, don't lose the inherited subject;
49450 +	   if the current subject is other than what a normal lookup
49451 +	   would result in, we arrived via inheritance, so don't
49452 +	   lose that subject
49453 + */
49454 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49455 + (subj == task->acl)))
49456 + task->acl = subj;
49457 +
49458 + task->role = role;
49459 +
49460 + task->is_writable = 0;
49461 +
49462 + /* ignore additional mmap checks for processes that are writable
49463 + by the default ACL */
49464 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49465 + if (unlikely(obj->mode & GR_WRITE))
49466 + task->is_writable = 1;
49467 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49468 + if (unlikely(obj->mode & GR_WRITE))
49469 + task->is_writable = 1;
49470 +
49471 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49472 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49473 +#endif
49474 +
49475 + gr_set_proc_res(task);
49476 +
49477 + return;
49478 +}
49479 +
49480 +int
49481 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49482 + const int unsafe_share)
49483 +{
49484 + struct task_struct *task = current;
49485 + struct acl_subject_label *newacl;
49486 + struct acl_object_label *obj;
49487 + __u32 retmode;
49488 +
49489 + if (unlikely(!(gr_status & GR_READY)))
49490 + return 0;
49491 +
49492 + newacl = chk_subj_label(dentry, mnt, task->role);
49493 +
49494 + task_lock(task);
49495 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49496 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49497 + !(task->role->roletype & GR_ROLE_GOD) &&
49498 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49499 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49500 + task_unlock(task);
49501 + if (unsafe_share)
49502 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49503 + else
49504 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49505 + return -EACCES;
49506 + }
49507 + task_unlock(task);
49508 +
49509 + obj = chk_obj_label(dentry, mnt, task->acl);
49510 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49511 +
49512 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49513 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49514 + if (obj->nested)
49515 + task->acl = obj->nested;
49516 + else
49517 + task->acl = newacl;
49518 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49519 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49520 +
49521 + task->is_writable = 0;
49522 +
49523 + /* ignore additional mmap checks for processes that are writable
49524 + by the default ACL */
49525 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49526 + if (unlikely(obj->mode & GR_WRITE))
49527 + task->is_writable = 1;
49528 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49529 + if (unlikely(obj->mode & GR_WRITE))
49530 + task->is_writable = 1;
49531 +
49532 + gr_set_proc_res(task);
49533 +
49534 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49535 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49536 +#endif
49537 + return 0;
49538 +}
49539 +
49540 +/* always called with valid inodev ptr */
49541 +static void
49542 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49543 +{
49544 + struct acl_object_label *matchpo;
49545 + struct acl_subject_label *matchps;
49546 + struct acl_subject_label *subj;
49547 + struct acl_role_label *role;
49548 + unsigned int x;
49549 +
49550 + FOR_EACH_ROLE_START(role)
49551 + FOR_EACH_SUBJECT_START(role, subj, x)
49552 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49553 + matchpo->mode |= GR_DELETED;
49554 + FOR_EACH_SUBJECT_END(subj,x)
49555 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49556 + if (subj->inode == ino && subj->device == dev)
49557 + subj->mode |= GR_DELETED;
49558 + FOR_EACH_NESTED_SUBJECT_END(subj)
49559 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49560 + matchps->mode |= GR_DELETED;
49561 + FOR_EACH_ROLE_END(role)
49562 +
49563 + inodev->nentry->deleted = 1;
49564 +
49565 + return;
49566 +}
49567 +
49568 +void
49569 +gr_handle_delete(const ino_t ino, const dev_t dev)
49570 +{
49571 + struct inodev_entry *inodev;
49572 +
49573 + if (unlikely(!(gr_status & GR_READY)))
49574 + return;
49575 +
49576 + write_lock(&gr_inode_lock);
49577 + inodev = lookup_inodev_entry(ino, dev);
49578 + if (inodev != NULL)
49579 + do_handle_delete(inodev, ino, dev);
49580 + write_unlock(&gr_inode_lock);
49581 +
49582 + return;
49583 +}
49584 +
49585 +static void
49586 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49587 + const ino_t newinode, const dev_t newdevice,
49588 + struct acl_subject_label *subj)
49589 +{
49590 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49591 + struct acl_object_label *match;
49592 +
49593 + match = subj->obj_hash[index];
49594 +
49595 + while (match && (match->inode != oldinode ||
49596 + match->device != olddevice ||
49597 + !(match->mode & GR_DELETED)))
49598 + match = match->next;
49599 +
49600 + if (match && (match->inode == oldinode)
49601 + && (match->device == olddevice)
49602 + && (match->mode & GR_DELETED)) {
49603 + if (match->prev == NULL) {
49604 + subj->obj_hash[index] = match->next;
49605 + if (match->next != NULL)
49606 + match->next->prev = NULL;
49607 + } else {
49608 + match->prev->next = match->next;
49609 + if (match->next != NULL)
49610 + match->next->prev = match->prev;
49611 + }
49612 + match->prev = NULL;
49613 + match->next = NULL;
49614 + match->inode = newinode;
49615 + match->device = newdevice;
49616 + match->mode &= ~GR_DELETED;
49617 +
49618 + insert_acl_obj_label(match, subj);
49619 + }
49620 +
49621 + return;
49622 +}
49623 +
49624 +static void
49625 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49626 + const ino_t newinode, const dev_t newdevice,
49627 + struct acl_role_label *role)
49628 +{
49629 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49630 + struct acl_subject_label *match;
49631 +
49632 + match = role->subj_hash[index];
49633 +
49634 + while (match && (match->inode != oldinode ||
49635 + match->device != olddevice ||
49636 + !(match->mode & GR_DELETED)))
49637 + match = match->next;
49638 +
49639 + if (match && (match->inode == oldinode)
49640 + && (match->device == olddevice)
49641 + && (match->mode & GR_DELETED)) {
49642 + if (match->prev == NULL) {
49643 + role->subj_hash[index] = match->next;
49644 + if (match->next != NULL)
49645 + match->next->prev = NULL;
49646 + } else {
49647 + match->prev->next = match->next;
49648 + if (match->next != NULL)
49649 + match->next->prev = match->prev;
49650 + }
49651 + match->prev = NULL;
49652 + match->next = NULL;
49653 + match->inode = newinode;
49654 + match->device = newdevice;
49655 + match->mode &= ~GR_DELETED;
49656 +
49657 + insert_acl_subj_label(match, role);
49658 + }
49659 +
49660 + return;
49661 +}
49662 +
49663 +static void
49664 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49665 + const ino_t newinode, const dev_t newdevice)
49666 +{
49667 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49668 + struct inodev_entry *match;
49669 +
49670 + match = inodev_set.i_hash[index];
49671 +
49672 + while (match && (match->nentry->inode != oldinode ||
49673 + match->nentry->device != olddevice || !match->nentry->deleted))
49674 + match = match->next;
49675 +
49676 + if (match && (match->nentry->inode == oldinode)
49677 + && (match->nentry->device == olddevice) &&
49678 + match->nentry->deleted) {
49679 + if (match->prev == NULL) {
49680 + inodev_set.i_hash[index] = match->next;
49681 + if (match->next != NULL)
49682 + match->next->prev = NULL;
49683 + } else {
49684 + match->prev->next = match->next;
49685 + if (match->next != NULL)
49686 + match->next->prev = match->prev;
49687 + }
49688 + match->prev = NULL;
49689 + match->next = NULL;
49690 + match->nentry->inode = newinode;
49691 + match->nentry->device = newdevice;
49692 + match->nentry->deleted = 0;
49693 +
49694 + insert_inodev_entry(match);
49695 + }
49696 +
49697 + return;
49698 +}
49699 +
49700 +static void
49701 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49702 + const struct vfsmount *mnt)
49703 +{
49704 + struct acl_subject_label *subj;
49705 + struct acl_role_label *role;
49706 + unsigned int x;
49707 + ino_t inode = dentry->d_inode->i_ino;
49708 + dev_t dev = __get_dev(dentry);
49709 +
49710 + FOR_EACH_ROLE_START(role)
49711 + update_acl_subj_label(matchn->inode, matchn->device,
49712 + inode, dev, role);
49713 +
49714 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49715 + if ((subj->inode == inode) && (subj->device == dev)) {
49716 + subj->inode = inode;
49717 + subj->device = dev;
49718 + }
49719 + FOR_EACH_NESTED_SUBJECT_END(subj)
49720 + FOR_EACH_SUBJECT_START(role, subj, x)
49721 + update_acl_obj_label(matchn->inode, matchn->device,
49722 + inode, dev, subj);
49723 + FOR_EACH_SUBJECT_END(subj,x)
49724 + FOR_EACH_ROLE_END(role)
49725 +
49726 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49727 +
49728 + return;
49729 +}
49730 +
49731 +void
49732 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49733 +{
49734 + struct name_entry *matchn;
49735 +
49736 + if (unlikely(!(gr_status & GR_READY)))
49737 + return;
49738 +
49739 + preempt_disable();
49740 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49741 +
49742 + if (unlikely((unsigned long)matchn)) {
49743 + write_lock(&gr_inode_lock);
49744 + do_handle_create(matchn, dentry, mnt);
49745 + write_unlock(&gr_inode_lock);
49746 + }
49747 + preempt_enable();
49748 +
49749 + return;
49750 +}
49751 +
49752 +void
49753 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49754 + struct dentry *old_dentry,
49755 + struct dentry *new_dentry,
49756 + struct vfsmount *mnt, const __u8 replace)
49757 +{
49758 + struct name_entry *matchn;
49759 + struct inodev_entry *inodev;
49760 + ino_t oldinode = old_dentry->d_inode->i_ino;
49761 + dev_t olddev = __get_dev(old_dentry);
49762 +
49763 + /* vfs_rename swaps the name and parent link for old_dentry and
49764 + new_dentry
49765 + at this point, old_dentry has the new name, parent link, and inode
49766 + for the renamed file
49767 + if a file is being replaced by a rename, new_dentry has the inode
49768 + and name for the replaced file
49769 + */
49770 +
49771 + if (unlikely(!(gr_status & GR_READY)))
49772 + return;
49773 +
49774 + preempt_disable();
49775 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49776 +
49777 + /* we wouldn't have to check d_inode if it weren't for
49778 + NFS silly-renaming
49779 + */
49780 +
49781 + write_lock(&gr_inode_lock);
49782 + if (unlikely(replace && new_dentry->d_inode)) {
49783 + ino_t newinode = new_dentry->d_inode->i_ino;
49784 + dev_t newdev = __get_dev(new_dentry);
49785 + inodev = lookup_inodev_entry(newinode, newdev);
49786 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49787 + do_handle_delete(inodev, newinode, newdev);
49788 + }
49789 +
49790 + inodev = lookup_inodev_entry(oldinode, olddev);
49791 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49792 + do_handle_delete(inodev, oldinode, olddev);
49793 +
49794 + if (unlikely((unsigned long)matchn))
49795 + do_handle_create(matchn, old_dentry, mnt);
49796 +
49797 + write_unlock(&gr_inode_lock);
49798 + preempt_enable();
49799 +
49800 + return;
49801 +}
49802 +
49803 +static int
49804 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49805 + unsigned char **sum)
49806 +{
49807 + struct acl_role_label *r;
49808 + struct role_allowed_ip *ipp;
49809 + struct role_transition *trans;
49810 + unsigned int i;
49811 + int found = 0;
49812 + u32 curr_ip = current->signal->curr_ip;
49813 +
49814 + current->signal->saved_ip = curr_ip;
49815 +
49816 + /* check transition table */
49817 +
49818 + for (trans = current->role->transitions; trans; trans = trans->next) {
49819 + if (!strcmp(rolename, trans->rolename)) {
49820 + found = 1;
49821 + break;
49822 + }
49823 + }
49824 +
49825 + if (!found)
49826 + return 0;
49827 +
49828 + /* handle special roles that do not require authentication
49829 + and check ip */
49830 +
49831 + FOR_EACH_ROLE_START(r)
49832 + if (!strcmp(rolename, r->rolename) &&
49833 + (r->roletype & GR_ROLE_SPECIAL)) {
49834 + found = 0;
49835 + if (r->allowed_ips != NULL) {
49836 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49837 + if ((ntohl(curr_ip) & ipp->netmask) ==
49838 + (ntohl(ipp->addr) & ipp->netmask))
49839 + found = 1;
49840 + }
49841 + } else
49842 + found = 2;
49843 + if (!found)
49844 + return 0;
49845 +
49846 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49847 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49848 + *salt = NULL;
49849 + *sum = NULL;
49850 + return 1;
49851 + }
49852 + }
49853 + FOR_EACH_ROLE_END(r)
49854 +
49855 + for (i = 0; i < num_sprole_pws; i++) {
49856 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49857 + *salt = acl_special_roles[i]->salt;
49858 + *sum = acl_special_roles[i]->sum;
49859 + return 1;
49860 + }
49861 + }
49862 +
49863 + return 0;
49864 +}
49865 +
49866 +static void
49867 +assign_special_role(char *rolename)
49868 +{
49869 + struct acl_object_label *obj;
49870 + struct acl_role_label *r;
49871 + struct acl_role_label *assigned = NULL;
49872 + struct task_struct *tsk;
49873 + struct file *filp;
49874 +
49875 + FOR_EACH_ROLE_START(r)
49876 + if (!strcmp(rolename, r->rolename) &&
49877 + (r->roletype & GR_ROLE_SPECIAL)) {
49878 + assigned = r;
49879 + break;
49880 + }
49881 + FOR_EACH_ROLE_END(r)
49882 +
49883 + if (!assigned)
49884 + return;
49885 +
49886 + read_lock(&tasklist_lock);
49887 + read_lock(&grsec_exec_file_lock);
49888 +
49889 + tsk = current->real_parent;
49890 + if (tsk == NULL)
49891 + goto out_unlock;
49892 +
49893 + filp = tsk->exec_file;
49894 + if (filp == NULL)
49895 + goto out_unlock;
49896 +
49897 + tsk->is_writable = 0;
49898 +
49899 + tsk->acl_sp_role = 1;
49900 + tsk->acl_role_id = ++acl_sp_role_value;
49901 + tsk->role = assigned;
49902 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49903 +
49904 + /* ignore additional mmap checks for processes that are writable
49905 + by the default ACL */
49906 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49907 + if (unlikely(obj->mode & GR_WRITE))
49908 + tsk->is_writable = 1;
49909 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49910 + if (unlikely(obj->mode & GR_WRITE))
49911 + tsk->is_writable = 1;
49912 +
49913 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49914 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49915 +#endif
49916 +
49917 +out_unlock:
49918 + read_unlock(&grsec_exec_file_lock);
49919 + read_unlock(&tasklist_lock);
49920 + return;
49921 +}
49922 +
49923 +int gr_check_secure_terminal(struct task_struct *task)
49924 +{
49925 + struct task_struct *p, *p2, *p3;
49926 + struct files_struct *files;
49927 + struct fdtable *fdt;
49928 + struct file *our_file = NULL, *file;
49929 + int i;
49930 +
49931 + if (task->signal->tty == NULL)
49932 + return 1;
49933 +
49934 + files = get_files_struct(task);
49935 + if (files != NULL) {
49936 + rcu_read_lock();
49937 + fdt = files_fdtable(files);
49938 + for (i=0; i < fdt->max_fds; i++) {
49939 + file = fcheck_files(files, i);
49940 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49941 + get_file(file);
49942 + our_file = file;
49943 + }
49944 + }
49945 + rcu_read_unlock();
49946 + put_files_struct(files);
49947 + }
49948 +
49949 + if (our_file == NULL)
49950 + return 1;
49951 +
49952 + read_lock(&tasklist_lock);
49953 + do_each_thread(p2, p) {
49954 + files = get_files_struct(p);
49955 + if (files == NULL ||
49956 + (p->signal && p->signal->tty == task->signal->tty)) {
49957 + if (files != NULL)
49958 + put_files_struct(files);
49959 + continue;
49960 + }
49961 + rcu_read_lock();
49962 + fdt = files_fdtable(files);
49963 + for (i=0; i < fdt->max_fds; i++) {
49964 + file = fcheck_files(files, i);
49965 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49966 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49967 + p3 = task;
49968 + while (p3->pid > 0) {
49969 + if (p3 == p)
49970 + break;
49971 + p3 = p3->real_parent;
49972 + }
49973 + if (p3 == p)
49974 + break;
49975 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49976 + gr_handle_alertkill(p);
49977 + rcu_read_unlock();
49978 + put_files_struct(files);
49979 + read_unlock(&tasklist_lock);
49980 + fput(our_file);
49981 + return 0;
49982 + }
49983 + }
49984 + rcu_read_unlock();
49985 + put_files_struct(files);
49986 + } while_each_thread(p2, p);
49987 + read_unlock(&tasklist_lock);
49988 +
49989 + fput(our_file);
49990 + return 1;
49991 +}
49992 +
49993 +ssize_t
49994 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49995 +{
49996 + struct gr_arg_wrapper uwrap;
49997 + unsigned char *sprole_salt = NULL;
49998 + unsigned char *sprole_sum = NULL;
49999 + int error = sizeof (struct gr_arg_wrapper);
50000 + int error2 = 0;
50001 +
50002 + mutex_lock(&gr_dev_mutex);
50003 +
50004 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50005 + error = -EPERM;
50006 + goto out;
50007 + }
50008 +
50009 + if (count != sizeof (struct gr_arg_wrapper)) {
50010 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50011 + error = -EINVAL;
50012 + goto out;
50013 + }
50014 +
50015 +
50016 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50017 + gr_auth_expires = 0;
50018 + gr_auth_attempts = 0;
50019 + }
50020 +
50021 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50022 + error = -EFAULT;
50023 + goto out;
50024 + }
50025 +
50026 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50027 + error = -EINVAL;
50028 + goto out;
50029 + }
50030 +
50031 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50032 + error = -EFAULT;
50033 + goto out;
50034 + }
50035 +
50036 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50037 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50038 + time_after(gr_auth_expires, get_seconds())) {
50039 + error = -EBUSY;
50040 + goto out;
50041 + }
50042 +
50043 +	/* if non-root is trying to do anything other than use a special role,
50044 +	   do not attempt authentication and do not count towards authentication
50045 + locking
50046 + */
50047 +
50048 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50049 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50050 + current_uid()) {
50051 + error = -EPERM;
50052 + goto out;
50053 + }
50054 +
50055 + /* ensure pw and special role name are null terminated */
50056 +
50057 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50058 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50059 +
50060 + /* Okay.
50061 +	 * We have enough of the argument structure (we have yet
50062 +	 * to copy_from_user the tables themselves). Copy the tables
50063 + * only if we need them, i.e. for loading operations. */
50064 +
50065 + switch (gr_usermode->mode) {
50066 + case GR_STATUS:
50067 + if (gr_status & GR_READY) {
50068 + error = 1;
50069 + if (!gr_check_secure_terminal(current))
50070 + error = 3;
50071 + } else
50072 + error = 2;
50073 + goto out;
50074 + case GR_SHUTDOWN:
50075 + if ((gr_status & GR_READY)
50076 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50077 + pax_open_kernel();
50078 + gr_status &= ~GR_READY;
50079 + pax_close_kernel();
50080 +
50081 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50082 + free_variables();
50083 + memset(gr_usermode, 0, sizeof (struct gr_arg));
50084 + memset(gr_system_salt, 0, GR_SALT_LEN);
50085 + memset(gr_system_sum, 0, GR_SHA_LEN);
50086 + } else if (gr_status & GR_READY) {
50087 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50088 + error = -EPERM;
50089 + } else {
50090 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50091 + error = -EAGAIN;
50092 + }
50093 + break;
50094 + case GR_ENABLE:
50095 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50096 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50097 + else {
50098 + if (gr_status & GR_READY)
50099 + error = -EAGAIN;
50100 + else
50101 + error = error2;
50102 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50103 + }
50104 + break;
50105 + case GR_RELOAD:
50106 + if (!(gr_status & GR_READY)) {
50107 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50108 + error = -EAGAIN;
50109 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50110 + lock_kernel();
50111 +
50112 + pax_open_kernel();
50113 + gr_status &= ~GR_READY;
50114 + pax_close_kernel();
50115 +
50116 + free_variables();
50117 + if (!(error2 = gracl_init(gr_usermode))) {
50118 + unlock_kernel();
50119 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50120 + } else {
50121 + unlock_kernel();
50122 + error = error2;
50123 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50124 + }
50125 + } else {
50126 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50127 + error = -EPERM;
50128 + }
50129 + break;
50130 + case GR_SEGVMOD:
50131 + if (unlikely(!(gr_status & GR_READY))) {
50132 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50133 + error = -EAGAIN;
50134 + break;
50135 + }
50136 +
50137 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50138 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50139 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50140 + struct acl_subject_label *segvacl;
50141 + segvacl =
50142 + lookup_acl_subj_label(gr_usermode->segv_inode,
50143 + gr_usermode->segv_device,
50144 + current->role);
50145 + if (segvacl) {
50146 + segvacl->crashes = 0;
50147 + segvacl->expires = 0;
50148 + }
50149 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50150 + gr_remove_uid(gr_usermode->segv_uid);
50151 + }
50152 + } else {
50153 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50154 + error = -EPERM;
50155 + }
50156 + break;
50157 + case GR_SPROLE:
50158 + case GR_SPROLEPAM:
50159 + if (unlikely(!(gr_status & GR_READY))) {
50160 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50161 + error = -EAGAIN;
50162 + break;
50163 + }
50164 +
50165 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50166 + current->role->expires = 0;
50167 + current->role->auth_attempts = 0;
50168 + }
50169 +
50170 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50171 + time_after(current->role->expires, get_seconds())) {
50172 + error = -EBUSY;
50173 + goto out;
50174 + }
50175 +
50176 + if (lookup_special_role_auth
50177 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50178 + && ((!sprole_salt && !sprole_sum)
50179 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50180 + char *p = "";
50181 + assign_special_role(gr_usermode->sp_role);
50182 + read_lock(&tasklist_lock);
50183 + if (current->real_parent)
50184 + p = current->real_parent->role->rolename;
50185 + read_unlock(&tasklist_lock);
50186 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50187 + p, acl_sp_role_value);
50188 + } else {
50189 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50190 + error = -EPERM;
50191 + if(!(current->role->auth_attempts++))
50192 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50193 +
50194 + goto out;
50195 + }
50196 + break;
50197 + case GR_UNSPROLE:
50198 + if (unlikely(!(gr_status & GR_READY))) {
50199 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50200 + error = -EAGAIN;
50201 + break;
50202 + }
50203 +
50204 + if (current->role->roletype & GR_ROLE_SPECIAL) {
50205 + char *p = "";
50206 + int i = 0;
50207 +
50208 + read_lock(&tasklist_lock);
50209 + if (current->real_parent) {
50210 + p = current->real_parent->role->rolename;
50211 + i = current->real_parent->acl_role_id;
50212 + }
50213 + read_unlock(&tasklist_lock);
50214 +
50215 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50216 + gr_set_acls(1);
50217 + } else {
50218 + error = -EPERM;
50219 + goto out;
50220 + }
50221 + break;
50222 + default:
50223 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50224 + error = -EINVAL;
50225 + break;
50226 + }
50227 +
50228 + if (error != -EPERM)
50229 + goto out;
50230 +
50231 + if(!(gr_auth_attempts++))
50232 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50233 +
50234 + out:
50235 + mutex_unlock(&gr_dev_mutex);
50236 + return error;
50237 +}
50238 +
50239 +/* must be called with
50240 + rcu_read_lock();
50241 + read_lock(&tasklist_lock);
50242 + read_lock(&grsec_exec_file_lock);
50243 +*/
50244 +int gr_apply_subject_to_task(struct task_struct *task)
50245 +{
50246 + struct acl_object_label *obj;
50247 + char *tmpname;
50248 + struct acl_subject_label *tmpsubj;
50249 + struct file *filp;
50250 + struct name_entry *nmatch;
50251 +
50252 + filp = task->exec_file;
50253 + if (filp == NULL)
50254 + return 0;
50255 +
50256 +	/* the following applies the correct subject
50257 +	   to binaries already running when the RBAC system
50258 +	   is enabled, in case those binaries have been
50259 +	   replaced or deleted since their execution
50260 + -----
50261 + when the RBAC system starts, the inode/dev
50262 + from exec_file will be one the RBAC system
50263 + is unaware of. It only knows the inode/dev
50264 + of the present file on disk, or the absence
50265 + of it.
50266 + */
50267 + preempt_disable();
50268 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50269 +
50270 + nmatch = lookup_name_entry(tmpname);
50271 + preempt_enable();
50272 + tmpsubj = NULL;
50273 + if (nmatch) {
50274 + if (nmatch->deleted)
50275 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50276 + else
50277 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50278 + if (tmpsubj != NULL)
50279 + task->acl = tmpsubj;
50280 + }
50281 + if (tmpsubj == NULL)
50282 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50283 + task->role);
50284 + if (task->acl) {
50285 + task->is_writable = 0;
50286 + /* ignore additional mmap checks for processes that are writable
50287 + by the default ACL */
50288 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50289 + if (unlikely(obj->mode & GR_WRITE))
50290 + task->is_writable = 1;
50291 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50292 + if (unlikely(obj->mode & GR_WRITE))
50293 + task->is_writable = 1;
50294 +
50295 + gr_set_proc_res(task);
50296 +
50297 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50298 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50299 +#endif
50300 + } else {
50301 + return 1;
50302 + }
50303 +
50304 + return 0;
50305 +}
50306 +
50307 +int
50308 +gr_set_acls(const int type)
50309 +{
50310 + struct task_struct *task, *task2;
50311 + struct acl_role_label *role = current->role;
50312 + __u16 acl_role_id = current->acl_role_id;
50313 + const struct cred *cred;
50314 + int ret;
50315 +
50316 + rcu_read_lock();
50317 + read_lock(&tasklist_lock);
50318 + read_lock(&grsec_exec_file_lock);
50319 + do_each_thread(task2, task) {
50320 + /* check to see if we're called from the exit handler,
50321 + if so, only replace ACLs that have inherited the admin
50322 + ACL */
50323 +
50324 + if (type && (task->role != role ||
50325 + task->acl_role_id != acl_role_id))
50326 + continue;
50327 +
50328 + task->acl_role_id = 0;
50329 + task->acl_sp_role = 0;
50330 +
50331 + if (task->exec_file) {
50332 + cred = __task_cred(task);
50333 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50334 +
50335 + ret = gr_apply_subject_to_task(task);
50336 + if (ret) {
50337 + read_unlock(&grsec_exec_file_lock);
50338 + read_unlock(&tasklist_lock);
50339 + rcu_read_unlock();
50340 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50341 + return ret;
50342 + }
50343 + } else {
50344 + // it's a kernel process
50345 + task->role = kernel_role;
50346 + task->acl = kernel_role->root_label;
50347 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50348 + task->acl->mode &= ~GR_PROCFIND;
50349 +#endif
50350 + }
50351 + } while_each_thread(task2, task);
50352 + read_unlock(&grsec_exec_file_lock);
50353 + read_unlock(&tasklist_lock);
50354 + rcu_read_unlock();
50355 +
50356 + return 0;
50357 +}
50358 +
50359 +void
50360 +gr_learn_resource(const struct task_struct *task,
50361 + const int res, const unsigned long wanted, const int gt)
50362 +{
50363 + struct acl_subject_label *acl;
50364 + const struct cred *cred;
50365 +
50366 + if (unlikely((gr_status & GR_READY) &&
50367 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50368 + goto skip_reslog;
50369 +
50370 +#ifdef CONFIG_GRKERNSEC_RESLOG
50371 + gr_log_resource(task, res, wanted, gt);
50372 +#endif
50373 + skip_reslog:
50374 +
50375 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50376 + return;
50377 +
50378 + acl = task->acl;
50379 +
50380 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50381 + !(acl->resmask & (1 << (unsigned short) res))))
50382 + return;
50383 +
50384 + if (wanted >= acl->res[res].rlim_cur) {
50385 + unsigned long res_add;
50386 +
50387 + res_add = wanted;
50388 + switch (res) {
50389 + case RLIMIT_CPU:
50390 + res_add += GR_RLIM_CPU_BUMP;
50391 + break;
50392 + case RLIMIT_FSIZE:
50393 + res_add += GR_RLIM_FSIZE_BUMP;
50394 + break;
50395 + case RLIMIT_DATA:
50396 + res_add += GR_RLIM_DATA_BUMP;
50397 + break;
50398 + case RLIMIT_STACK:
50399 + res_add += GR_RLIM_STACK_BUMP;
50400 + break;
50401 + case RLIMIT_CORE:
50402 + res_add += GR_RLIM_CORE_BUMP;
50403 + break;
50404 + case RLIMIT_RSS:
50405 + res_add += GR_RLIM_RSS_BUMP;
50406 + break;
50407 + case RLIMIT_NPROC:
50408 + res_add += GR_RLIM_NPROC_BUMP;
50409 + break;
50410 + case RLIMIT_NOFILE:
50411 + res_add += GR_RLIM_NOFILE_BUMP;
50412 + break;
50413 + case RLIMIT_MEMLOCK:
50414 + res_add += GR_RLIM_MEMLOCK_BUMP;
50415 + break;
50416 + case RLIMIT_AS:
50417 + res_add += GR_RLIM_AS_BUMP;
50418 + break;
50419 + case RLIMIT_LOCKS:
50420 + res_add += GR_RLIM_LOCKS_BUMP;
50421 + break;
50422 + case RLIMIT_SIGPENDING:
50423 + res_add += GR_RLIM_SIGPENDING_BUMP;
50424 + break;
50425 + case RLIMIT_MSGQUEUE:
50426 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50427 + break;
50428 + case RLIMIT_NICE:
50429 + res_add += GR_RLIM_NICE_BUMP;
50430 + break;
50431 + case RLIMIT_RTPRIO:
50432 + res_add += GR_RLIM_RTPRIO_BUMP;
50433 + break;
50434 + case RLIMIT_RTTIME:
50435 + res_add += GR_RLIM_RTTIME_BUMP;
50436 + break;
50437 + }
50438 +
50439 + acl->res[res].rlim_cur = res_add;
50440 +
50441 + if (wanted > acl->res[res].rlim_max)
50442 + acl->res[res].rlim_max = res_add;
50443 +
50444 + /* only log the subject filename, since resource logging is supported for
50445 + single-subject learning only */
50446 + rcu_read_lock();
50447 + cred = __task_cred(task);
50448 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50449 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50450 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50451 + "", (unsigned long) res, &task->signal->saved_ip);
50452 + rcu_read_unlock();
50453 + }
50454 +
50455 + return;
50456 +}
50457 +
50458 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50459 +void
50460 +pax_set_initial_flags(struct linux_binprm *bprm)
50461 +{
50462 + struct task_struct *task = current;
50463 + struct acl_subject_label *proc;
50464 + unsigned long flags;
50465 +
50466 + if (unlikely(!(gr_status & GR_READY)))
50467 + return;
50468 +
50469 + flags = pax_get_flags(task);
50470 +
50471 + proc = task->acl;
50472 +
50473 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50474 + flags &= ~MF_PAX_PAGEEXEC;
50475 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50476 + flags &= ~MF_PAX_SEGMEXEC;
50477 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50478 + flags &= ~MF_PAX_RANDMMAP;
50479 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50480 + flags &= ~MF_PAX_EMUTRAMP;
50481 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50482 + flags &= ~MF_PAX_MPROTECT;
50483 +
50484 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50485 + flags |= MF_PAX_PAGEEXEC;
50486 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50487 + flags |= MF_PAX_SEGMEXEC;
50488 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50489 + flags |= MF_PAX_RANDMMAP;
50490 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50491 + flags |= MF_PAX_EMUTRAMP;
50492 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50493 + flags |= MF_PAX_MPROTECT;
50494 +
50495 + pax_set_flags(task, flags);
50496 +
50497 + return;
50498 +}
50499 +#endif
50500 +
50501 +#ifdef CONFIG_SYSCTL
50502 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50503 + system to save 35kb of memory */
50504 +
50505 +/* we modify the passed in filename, but adjust it back before returning */
50506 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50507 +{
50508 + struct name_entry *nmatch;
50509 + char *p, *lastp = NULL;
50510 + struct acl_object_label *obj = NULL, *tmp;
50511 + struct acl_subject_label *tmpsubj;
50512 + char c = '\0';
50513 +
50514 + read_lock(&gr_inode_lock);
50515 +
50516 + p = name + len - 1;
50517 + do {
50518 + nmatch = lookup_name_entry(name);
50519 + if (lastp != NULL)
50520 + *lastp = c;
50521 +
50522 + if (nmatch == NULL)
50523 + goto next_component;
50524 + tmpsubj = current->acl;
50525 + do {
50526 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50527 + if (obj != NULL) {
50528 + tmp = obj->globbed;
50529 + while (tmp) {
50530 + if (!glob_match(tmp->filename, name)) {
50531 + obj = tmp;
50532 + goto found_obj;
50533 + }
50534 + tmp = tmp->next;
50535 + }
50536 + goto found_obj;
50537 + }
50538 + } while ((tmpsubj = tmpsubj->parent_subject));
50539 +next_component:
50540 + /* end case */
50541 + if (p == name)
50542 + break;
50543 +
50544 + while (*p != '/')
50545 + p--;
50546 + if (p == name)
50547 + lastp = p + 1;
50548 + else {
50549 + lastp = p;
50550 + p--;
50551 + }
50552 + c = *lastp;
50553 + *lastp = '\0';
50554 + } while (1);
50555 +found_obj:
50556 + read_unlock(&gr_inode_lock);
50557 + /* obj returned will always be non-null */
50558 + return obj;
50559 +}
50560 +
50561 +/* returns 0 when allowing, non-zero on error
50562 + op of 0 is used for readdir, so we don't log the names of hidden files
50563 +*/
50564 +__u32
50565 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50566 +{
50567 + ctl_table *tmp;
50568 + const char *proc_sys = "/proc/sys";
50569 + char *path;
50570 + struct acl_object_label *obj;
50571 + unsigned short len = 0, pos = 0, depth = 0, i;
50572 + __u32 err = 0;
50573 + __u32 mode = 0;
50574 +
50575 + if (unlikely(!(gr_status & GR_READY)))
50576 + return 0;
50577 +
50578 + /* for now, ignore operations on non-sysctl entries if it's not a
50579 + readdir*/
50580 + if (table->child != NULL && op != 0)
50581 + return 0;
50582 +
50583 + mode |= GR_FIND;
50584 + /* it's only a read if it's an entry, read on dirs is for readdir */
50585 + if (op & MAY_READ)
50586 + mode |= GR_READ;
50587 + if (op & MAY_WRITE)
50588 + mode |= GR_WRITE;
50589 +
50590 + preempt_disable();
50591 +
50592 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50593 +
50594 + /* it's only a read/write if it's an actual entry, not a dir
50595 + (which are opened for readdir)
50596 + */
50597 +
50598 + /* convert the requested sysctl entry into a pathname */
50599 +
50600 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50601 + len += strlen(tmp->procname);
50602 + len++;
50603 + depth++;
50604 + }
50605 +
50606 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50607 + /* deny */
50608 + goto out;
50609 + }
50610 +
50611 + memset(path, 0, PAGE_SIZE);
50612 +
50613 + memcpy(path, proc_sys, strlen(proc_sys));
50614 +
50615 + pos += strlen(proc_sys);
50616 +
50617 + for (; depth > 0; depth--) {
50618 + path[pos] = '/';
50619 + pos++;
50620 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50621 + if (depth == i) {
50622 + memcpy(path + pos, tmp->procname,
50623 + strlen(tmp->procname));
50624 + pos += strlen(tmp->procname);
50625 + }
50626 + i++;
50627 + }
50628 + }
50629 +
50630 + obj = gr_lookup_by_name(path, pos);
50631 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50632 +
50633 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50634 + ((err & mode) != mode))) {
50635 + __u32 new_mode = mode;
50636 +
50637 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50638 +
50639 + err = 0;
50640 + gr_log_learn_sysctl(path, new_mode);
50641 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50642 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50643 + err = -ENOENT;
50644 + } else if (!(err & GR_FIND)) {
50645 + err = -ENOENT;
50646 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50647 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50648 + path, (mode & GR_READ) ? " reading" : "",
50649 + (mode & GR_WRITE) ? " writing" : "");
50650 + err = -EACCES;
50651 + } else if ((err & mode) != mode) {
50652 + err = -EACCES;
50653 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50654 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50655 + path, (mode & GR_READ) ? " reading" : "",
50656 + (mode & GR_WRITE) ? " writing" : "");
50657 + err = 0;
50658 + } else
50659 + err = 0;
50660 +
50661 + out:
50662 + preempt_enable();
50663 +
50664 + return err;
50665 +}
50666 +#endif
50667 +
50668 +int
50669 +gr_handle_proc_ptrace(struct task_struct *task)
50670 +{
50671 + struct file *filp;
50672 + struct task_struct *tmp = task;
50673 + struct task_struct *curtemp = current;
50674 + __u32 retmode;
50675 +
50676 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50677 + if (unlikely(!(gr_status & GR_READY)))
50678 + return 0;
50679 +#endif
50680 +
50681 + read_lock(&tasklist_lock);
50682 + read_lock(&grsec_exec_file_lock);
50683 + filp = task->exec_file;
50684 +
50685 + while (tmp->pid > 0) {
50686 + if (tmp == curtemp)
50687 + break;
50688 + tmp = tmp->real_parent;
50689 + }
50690 +
50691 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50692 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50693 + read_unlock(&grsec_exec_file_lock);
50694 + read_unlock(&tasklist_lock);
50695 + return 1;
50696 + }
50697 +
50698 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50699 + if (!(gr_status & GR_READY)) {
50700 + read_unlock(&grsec_exec_file_lock);
50701 + read_unlock(&tasklist_lock);
50702 + return 0;
50703 + }
50704 +#endif
50705 +
50706 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50707 + read_unlock(&grsec_exec_file_lock);
50708 + read_unlock(&tasklist_lock);
50709 +
50710 + if (retmode & GR_NOPTRACE)
50711 + return 1;
50712 +
50713 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50714 + && (current->acl != task->acl || (current->acl != current->role->root_label
50715 + && current->pid != task->pid)))
50716 + return 1;
50717 +
50718 + return 0;
50719 +}
50720 +
50721 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50722 +{
50723 + if (unlikely(!(gr_status & GR_READY)))
50724 + return;
50725 +
50726 + if (!(current->role->roletype & GR_ROLE_GOD))
50727 + return;
50728 +
50729 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50730 + p->role->rolename, gr_task_roletype_to_char(p),
50731 + p->acl->filename);
50732 +}
50733 +
50734 +int
50735 +gr_handle_ptrace(struct task_struct *task, const long request)
50736 +{
50737 + struct task_struct *tmp = task;
50738 + struct task_struct *curtemp = current;
50739 + __u32 retmode;
50740 +
50741 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50742 + if (unlikely(!(gr_status & GR_READY)))
50743 + return 0;
50744 +#endif
50745 +
50746 + read_lock(&tasklist_lock);
50747 + while (tmp->pid > 0) {
50748 + if (tmp == curtemp)
50749 + break;
50750 + tmp = tmp->real_parent;
50751 + }
50752 +
50753 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50754 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50755 + read_unlock(&tasklist_lock);
50756 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50757 + return 1;
50758 + }
50759 + read_unlock(&tasklist_lock);
50760 +
50761 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50762 + if (!(gr_status & GR_READY))
50763 + return 0;
50764 +#endif
50765 +
50766 + read_lock(&grsec_exec_file_lock);
50767 + if (unlikely(!task->exec_file)) {
50768 + read_unlock(&grsec_exec_file_lock);
50769 + return 0;
50770 + }
50771 +
50772 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50773 + read_unlock(&grsec_exec_file_lock);
50774 +
50775 + if (retmode & GR_NOPTRACE) {
50776 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50777 + return 1;
50778 + }
50779 +
50780 + if (retmode & GR_PTRACERD) {
50781 + switch (request) {
50782 + case PTRACE_POKETEXT:
50783 + case PTRACE_POKEDATA:
50784 + case PTRACE_POKEUSR:
50785 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50786 + case PTRACE_SETREGS:
50787 + case PTRACE_SETFPREGS:
50788 +#endif
50789 +#ifdef CONFIG_X86
50790 + case PTRACE_SETFPXREGS:
50791 +#endif
50792 +#ifdef CONFIG_ALTIVEC
50793 + case PTRACE_SETVRREGS:
50794 +#endif
50795 + return 1;
50796 + default:
50797 + return 0;
50798 + }
50799 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
50800 + !(current->role->roletype & GR_ROLE_GOD) &&
50801 + (current->acl != task->acl)) {
50802 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50803 + return 1;
50804 + }
50805 +
50806 + return 0;
50807 +}
50808 +
50809 +static int is_writable_mmap(const struct file *filp)
50810 +{
50811 + struct task_struct *task = current;
50812 + struct acl_object_label *obj, *obj2;
50813 +
50814 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50815 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50816 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50817 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50818 + task->role->root_label);
50819 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50820 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50821 + return 1;
50822 + }
50823 + }
50824 + return 0;
50825 +}
50826 +
50827 +int
50828 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50829 +{
50830 + __u32 mode;
50831 +
50832 + if (unlikely(!file || !(prot & PROT_EXEC)))
50833 + return 1;
50834 +
50835 + if (is_writable_mmap(file))
50836 + return 0;
50837 +
50838 + mode =
50839 + gr_search_file(file->f_path.dentry,
50840 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50841 + file->f_path.mnt);
50842 +
50843 + if (!gr_tpe_allow(file))
50844 + return 0;
50845 +
50846 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50847 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50848 + return 0;
50849 + } else if (unlikely(!(mode & GR_EXEC))) {
50850 + return 0;
50851 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50852 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50853 + return 1;
50854 + }
50855 +
50856 + return 1;
50857 +}
50858 +
50859 +int
50860 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50861 +{
50862 + __u32 mode;
50863 +
50864 + if (unlikely(!file || !(prot & PROT_EXEC)))
50865 + return 1;
50866 +
50867 + if (is_writable_mmap(file))
50868 + return 0;
50869 +
50870 + mode =
50871 + gr_search_file(file->f_path.dentry,
50872 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50873 + file->f_path.mnt);
50874 +
50875 + if (!gr_tpe_allow(file))
50876 + return 0;
50877 +
50878 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50879 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50880 + return 0;
50881 + } else if (unlikely(!(mode & GR_EXEC))) {
50882 + return 0;
50883 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50884 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50885 + return 1;
50886 + }
50887 +
50888 + return 1;
50889 +}
50890 +
50891 +void
50892 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50893 +{
50894 + unsigned long runtime;
50895 + unsigned long cputime;
50896 + unsigned int wday, cday;
50897 + __u8 whr, chr;
50898 + __u8 wmin, cmin;
50899 + __u8 wsec, csec;
50900 + struct timespec timeval;
50901 +
50902 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50903 + !(task->acl->mode & GR_PROCACCT)))
50904 + return;
50905 +
50906 + do_posix_clock_monotonic_gettime(&timeval);
50907 + runtime = timeval.tv_sec - task->start_time.tv_sec;
50908 + wday = runtime / (3600 * 24);
50909 + runtime -= wday * (3600 * 24);
50910 + whr = runtime / 3600;
50911 + runtime -= whr * 3600;
50912 + wmin = runtime / 60;
50913 + runtime -= wmin * 60;
50914 + wsec = runtime;
50915 +
50916 + cputime = (task->utime + task->stime) / HZ;
50917 + cday = cputime / (3600 * 24);
50918 + cputime -= cday * (3600 * 24);
50919 + chr = cputime / 3600;
50920 + cputime -= chr * 3600;
50921 + cmin = cputime / 60;
50922 + cputime -= cmin * 60;
50923 + csec = cputime;
50924 +
50925 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50926 +
50927 + return;
50928 +}
50929 +
50930 +void gr_set_kernel_label(struct task_struct *task)
50931 +{
50932 + if (gr_status & GR_READY) {
50933 + task->role = kernel_role;
50934 + task->acl = kernel_role->root_label;
50935 + }
50936 + return;
50937 +}
50938 +
50939 +#ifdef CONFIG_TASKSTATS
50940 +int gr_is_taskstats_denied(int pid)
50941 +{
50942 + struct task_struct *task;
50943 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50944 + const struct cred *cred;
50945 +#endif
50946 + int ret = 0;
50947 +
50948 + /* restrict taskstats viewing to un-chrooted root users
50949 + who have the 'view' subject flag if the RBAC system is enabled
50950 + */
50951 +
50952 + rcu_read_lock();
50953 + read_lock(&tasklist_lock);
50954 + task = find_task_by_vpid(pid);
50955 + if (task) {
50956 +#ifdef CONFIG_GRKERNSEC_CHROOT
50957 + if (proc_is_chrooted(task))
50958 + ret = -EACCES;
50959 +#endif
50960 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50961 + cred = __task_cred(task);
50962 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50963 + if (cred->uid != 0)
50964 + ret = -EACCES;
50965 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50966 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50967 + ret = -EACCES;
50968 +#endif
50969 +#endif
50970 + if (gr_status & GR_READY) {
50971 + if (!(task->acl->mode & GR_VIEW))
50972 + ret = -EACCES;
50973 + }
50974 + } else
50975 + ret = -ENOENT;
50976 +
50977 + read_unlock(&tasklist_lock);
50978 + rcu_read_unlock();
50979 +
50980 + return ret;
50981 +}
50982 +#endif
50983 +
50984 +/* AUXV entries are filled via a descendant of search_binary_handler
50985 + after we've already applied the subject for the target
50986 +*/
50987 +int gr_acl_enable_at_secure(void)
50988 +{
50989 + if (unlikely(!(gr_status & GR_READY)))
50990 + return 0;
50991 +
50992 + if (current->acl->mode & GR_ATSECURE)
50993 + return 1;
50994 +
50995 + return 0;
50996 +}
50997 +
50998 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50999 +{
51000 + struct task_struct *task = current;
51001 + struct dentry *dentry = file->f_path.dentry;
51002 + struct vfsmount *mnt = file->f_path.mnt;
51003 + struct acl_object_label *obj, *tmp;
51004 + struct acl_subject_label *subj;
51005 + unsigned int bufsize;
51006 + int is_not_root;
51007 + char *path;
51008 + dev_t dev = __get_dev(dentry);
51009 +
51010 + if (unlikely(!(gr_status & GR_READY)))
51011 + return 1;
51012 +
51013 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51014 + return 1;
51015 +
51016 + /* ignore Eric Biederman */
51017 + if (IS_PRIVATE(dentry->d_inode))
51018 + return 1;
51019 +
51020 + subj = task->acl;
51021 + do {
51022 + obj = lookup_acl_obj_label(ino, dev, subj);
51023 + if (obj != NULL)
51024 + return (obj->mode & GR_FIND) ? 1 : 0;
51025 + } while ((subj = subj->parent_subject));
51026 +
51027 + /* this is purely an optimization since we're looking for an object
51028 + for the directory we're doing a readdir on
51029 + if it's possible for any globbed object to match the entry we're
51030 + filling into the directory, then the object we find here will be
51031 + an anchor point with attached globbed objects
51032 + */
51033 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51034 + if (obj->globbed == NULL)
51035 + return (obj->mode & GR_FIND) ? 1 : 0;
51036 +
51037 + is_not_root = ((obj->filename[0] == '/') &&
51038 + (obj->filename[1] == '\0')) ? 0 : 1;
51039 + bufsize = PAGE_SIZE - namelen - is_not_root;
51040 +
51041 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
51042 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51043 + return 1;
51044 +
51045 + preempt_disable();
51046 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51047 + bufsize);
51048 +
51049 + bufsize = strlen(path);
51050 +
51051 + /* if base is "/", don't append an additional slash */
51052 + if (is_not_root)
51053 + *(path + bufsize) = '/';
51054 + memcpy(path + bufsize + is_not_root, name, namelen);
51055 + *(path + bufsize + namelen + is_not_root) = '\0';
51056 +
51057 + tmp = obj->globbed;
51058 + while (tmp) {
51059 + if (!glob_match(tmp->filename, path)) {
51060 + preempt_enable();
51061 + return (tmp->mode & GR_FIND) ? 1 : 0;
51062 + }
51063 + tmp = tmp->next;
51064 + }
51065 + preempt_enable();
51066 + return (obj->mode & GR_FIND) ? 1 : 0;
51067 +}
51068 +
51069 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51070 +EXPORT_SYMBOL(gr_acl_is_enabled);
51071 +#endif
51072 +EXPORT_SYMBOL(gr_learn_resource);
51073 +EXPORT_SYMBOL(gr_set_kernel_label);
51074 +#ifdef CONFIG_SECURITY
51075 +EXPORT_SYMBOL(gr_check_user_change);
51076 +EXPORT_SYMBOL(gr_check_group_change);
51077 +#endif
51078 +
51079 diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
51080 --- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51081 +++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51082 @@ -0,0 +1,138 @@
51083 +#include <linux/kernel.h>
51084 +#include <linux/module.h>
51085 +#include <linux/sched.h>
51086 +#include <linux/gracl.h>
51087 +#include <linux/grsecurity.h>
51088 +#include <linux/grinternal.h>
51089 +
51090 +static const char *captab_log[] = {
51091 + "CAP_CHOWN",
51092 + "CAP_DAC_OVERRIDE",
51093 + "CAP_DAC_READ_SEARCH",
51094 + "CAP_FOWNER",
51095 + "CAP_FSETID",
51096 + "CAP_KILL",
51097 + "CAP_SETGID",
51098 + "CAP_SETUID",
51099 + "CAP_SETPCAP",
51100 + "CAP_LINUX_IMMUTABLE",
51101 + "CAP_NET_BIND_SERVICE",
51102 + "CAP_NET_BROADCAST",
51103 + "CAP_NET_ADMIN",
51104 + "CAP_NET_RAW",
51105 + "CAP_IPC_LOCK",
51106 + "CAP_IPC_OWNER",
51107 + "CAP_SYS_MODULE",
51108 + "CAP_SYS_RAWIO",
51109 + "CAP_SYS_CHROOT",
51110 + "CAP_SYS_PTRACE",
51111 + "CAP_SYS_PACCT",
51112 + "CAP_SYS_ADMIN",
51113 + "CAP_SYS_BOOT",
51114 + "CAP_SYS_NICE",
51115 + "CAP_SYS_RESOURCE",
51116 + "CAP_SYS_TIME",
51117 + "CAP_SYS_TTY_CONFIG",
51118 + "CAP_MKNOD",
51119 + "CAP_LEASE",
51120 + "CAP_AUDIT_WRITE",
51121 + "CAP_AUDIT_CONTROL",
51122 + "CAP_SETFCAP",
51123 + "CAP_MAC_OVERRIDE",
51124 + "CAP_MAC_ADMIN"
51125 +};
51126 +
51127 +EXPORT_SYMBOL(gr_is_capable);
51128 +EXPORT_SYMBOL(gr_is_capable_nolog);
51129 +
51130 +int
51131 +gr_is_capable(const int cap)
51132 +{
51133 + struct task_struct *task = current;
51134 + const struct cred *cred = current_cred();
51135 + struct acl_subject_label *curracl;
51136 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51137 + kernel_cap_t cap_audit = __cap_empty_set;
51138 +
51139 + if (!gr_acl_is_enabled())
51140 + return 1;
51141 +
51142 + curracl = task->acl;
51143 +
51144 + cap_drop = curracl->cap_lower;
51145 + cap_mask = curracl->cap_mask;
51146 + cap_audit = curracl->cap_invert_audit;
51147 +
51148 + while ((curracl = curracl->parent_subject)) {
51149 + /* if the cap isn't specified in the current computed mask but is specified in the
51150 + current level subject, and is lowered in the current level subject, then add
51151 + it to the set of dropped capabilities
51152 + otherwise, add the current level subject's mask to the current computed mask
51153 + */
51154 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51155 + cap_raise(cap_mask, cap);
51156 + if (cap_raised(curracl->cap_lower, cap))
51157 + cap_raise(cap_drop, cap);
51158 + if (cap_raised(curracl->cap_invert_audit, cap))
51159 + cap_raise(cap_audit, cap);
51160 + }
51161 + }
51162 +
51163 + if (!cap_raised(cap_drop, cap)) {
51164 + if (cap_raised(cap_audit, cap))
51165 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51166 + return 1;
51167 + }
51168 +
51169 + curracl = task->acl;
51170 +
51171 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51172 + && cap_raised(cred->cap_effective, cap)) {
51173 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51174 + task->role->roletype, cred->uid,
51175 + cred->gid, task->exec_file ?
51176 + gr_to_filename(task->exec_file->f_path.dentry,
51177 + task->exec_file->f_path.mnt) : curracl->filename,
51178 + curracl->filename, 0UL,
51179 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51180 + return 1;
51181 + }
51182 +
51183 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51184 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51185 + return 0;
51186 +}
51187 +
51188 +int
51189 +gr_is_capable_nolog(const int cap)
51190 +{
51191 + struct acl_subject_label *curracl;
51192 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51193 +
51194 + if (!gr_acl_is_enabled())
51195 + return 1;
51196 +
51197 + curracl = current->acl;
51198 +
51199 + cap_drop = curracl->cap_lower;
51200 + cap_mask = curracl->cap_mask;
51201 +
51202 + while ((curracl = curracl->parent_subject)) {
51203 + /* if the cap isn't specified in the current computed mask but is specified in the
51204 + current level subject, and is lowered in the current level subject, then add
51205 + it to the set of dropped capabilities
51206 + otherwise, add the current level subject's mask to the current computed mask
51207 + */
51208 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51209 + cap_raise(cap_mask, cap);
51210 + if (cap_raised(curracl->cap_lower, cap))
51211 + cap_raise(cap_drop, cap);
51212 + }
51213 + }
51214 +
51215 + if (!cap_raised(cap_drop, cap))
51216 + return 1;
51217 +
51218 + return 0;
51219 +}
51220 +
51221 diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
51222 --- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51223 +++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51224 @@ -0,0 +1,431 @@
51225 +#include <linux/kernel.h>
51226 +#include <linux/sched.h>
51227 +#include <linux/types.h>
51228 +#include <linux/fs.h>
51229 +#include <linux/file.h>
51230 +#include <linux/stat.h>
51231 +#include <linux/grsecurity.h>
51232 +#include <linux/grinternal.h>
51233 +#include <linux/gracl.h>
51234 +
51235 +__u32
51236 +gr_acl_handle_hidden_file(const struct dentry * dentry,
51237 + const struct vfsmount * mnt)
51238 +{
51239 + __u32 mode;
51240 +
51241 + if (unlikely(!dentry->d_inode))
51242 + return GR_FIND;
51243 +
51244 + mode =
51245 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51246 +
51247 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51248 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51249 + return mode;
51250 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51251 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51252 + return 0;
51253 + } else if (unlikely(!(mode & GR_FIND)))
51254 + return 0;
51255 +
51256 + return GR_FIND;
51257 +}
51258 +
51259 +__u32
51260 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51261 + const int fmode)
51262 +{
51263 + __u32 reqmode = GR_FIND;
51264 + __u32 mode;
51265 +
51266 + if (unlikely(!dentry->d_inode))
51267 + return reqmode;
51268 +
51269 + if (unlikely(fmode & O_APPEND))
51270 + reqmode |= GR_APPEND;
51271 + else if (unlikely(fmode & FMODE_WRITE))
51272 + reqmode |= GR_WRITE;
51273 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51274 + reqmode |= GR_READ;
51275 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51276 + reqmode &= ~GR_READ;
51277 + mode =
51278 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51279 + mnt);
51280 +
51281 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51282 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51283 + reqmode & GR_READ ? " reading" : "",
51284 + reqmode & GR_WRITE ? " writing" : reqmode &
51285 + GR_APPEND ? " appending" : "");
51286 + return reqmode;
51287 + } else
51288 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51289 + {
51290 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51291 + reqmode & GR_READ ? " reading" : "",
51292 + reqmode & GR_WRITE ? " writing" : reqmode &
51293 + GR_APPEND ? " appending" : "");
51294 + return 0;
51295 + } else if (unlikely((mode & reqmode) != reqmode))
51296 + return 0;
51297 +
51298 + return reqmode;
51299 +}
51300 +
51301 +__u32
51302 +gr_acl_handle_creat(const struct dentry * dentry,
51303 + const struct dentry * p_dentry,
51304 + const struct vfsmount * p_mnt, const int fmode,
51305 + const int imode)
51306 +{
51307 + __u32 reqmode = GR_WRITE | GR_CREATE;
51308 + __u32 mode;
51309 +
51310 + if (unlikely(fmode & O_APPEND))
51311 + reqmode |= GR_APPEND;
51312 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51313 + reqmode |= GR_READ;
51314 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51315 + reqmode |= GR_SETID;
51316 +
51317 + mode =
51318 + gr_check_create(dentry, p_dentry, p_mnt,
51319 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51320 +
51321 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51322 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51323 + reqmode & GR_READ ? " reading" : "",
51324 + reqmode & GR_WRITE ? " writing" : reqmode &
51325 + GR_APPEND ? " appending" : "");
51326 + return reqmode;
51327 + } else
51328 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51329 + {
51330 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51331 + reqmode & GR_READ ? " reading" : "",
51332 + reqmode & GR_WRITE ? " writing" : reqmode &
51333 + GR_APPEND ? " appending" : "");
51334 + return 0;
51335 + } else if (unlikely((mode & reqmode) != reqmode))
51336 + return 0;
51337 +
51338 + return reqmode;
51339 +}
51340 +
51341 +__u32
51342 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51343 + const int fmode)
51344 +{
51345 + __u32 mode, reqmode = GR_FIND;
51346 +
51347 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51348 + reqmode |= GR_EXEC;
51349 + if (fmode & S_IWOTH)
51350 + reqmode |= GR_WRITE;
51351 + if (fmode & S_IROTH)
51352 + reqmode |= GR_READ;
51353 +
51354 + mode =
51355 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51356 + mnt);
51357 +
51358 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51359 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51360 + reqmode & GR_READ ? " reading" : "",
51361 + reqmode & GR_WRITE ? " writing" : "",
51362 + reqmode & GR_EXEC ? " executing" : "");
51363 + return reqmode;
51364 + } else
51365 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51366 + {
51367 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51368 + reqmode & GR_READ ? " reading" : "",
51369 + reqmode & GR_WRITE ? " writing" : "",
51370 + reqmode & GR_EXEC ? " executing" : "");
51371 + return 0;
51372 + } else if (unlikely((mode & reqmode) != reqmode))
51373 + return 0;
51374 +
51375 + return reqmode;
51376 +}
51377 +
51378 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51379 +{
51380 + __u32 mode;
51381 +
51382 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51383 +
51384 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51385 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51386 + return mode;
51387 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51388 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51389 + return 0;
51390 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51391 + return 0;
51392 +
51393 + return (reqmode);
51394 +}
51395 +
51396 +__u32
51397 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51398 +{
51399 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51400 +}
51401 +
51402 +__u32
51403 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51404 +{
51405 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51406 +}
51407 +
51408 +__u32
51409 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51410 +{
51411 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51412 +}
51413 +
51414 +__u32
51415 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51416 +{
51417 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51418 +}
51419 +
51420 +__u32
51421 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51422 + mode_t mode)
51423 +{
51424 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51425 + return 1;
51426 +
51427 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51428 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51429 + GR_FCHMOD_ACL_MSG);
51430 + } else {
51431 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51432 + }
51433 +}
51434 +
51435 +__u32
51436 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51437 + mode_t mode)
51438 +{
51439 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51440 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51441 + GR_CHMOD_ACL_MSG);
51442 + } else {
51443 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51444 + }
51445 +}
51446 +
51447 +__u32
51448 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51449 +{
51450 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51451 +}
51452 +
51453 +__u32
51454 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51455 +{
51456 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51457 +}
51458 +
51459 +__u32
51460 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51461 +{
51462 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51463 +}
51464 +
51465 +__u32
51466 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51467 +{
51468 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51469 + GR_UNIXCONNECT_ACL_MSG);
51470 +}
51471 +
51472 +/* hardlinks require at minimum create permission,
51473 + any additional privilege required is based on the
51474 + privilege of the file being linked to
51475 +*/
51476 +__u32
51477 +gr_acl_handle_link(const struct dentry * new_dentry,
51478 + const struct dentry * parent_dentry,
51479 + const struct vfsmount * parent_mnt,
51480 + const struct dentry * old_dentry,
51481 + const struct vfsmount * old_mnt, const char *to)
51482 +{
51483 + __u32 mode;
51484 + __u32 needmode = GR_CREATE | GR_LINK;
51485 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51486 +
51487 + mode =
51488 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51489 + old_mnt);
51490 +
51491 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51492 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51493 + return mode;
51494 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51495 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51496 + return 0;
51497 + } else if (unlikely((mode & needmode) != needmode))
51498 + return 0;
51499 +
51500 + return 1;
51501 +}
51502 +
51503 +__u32
51504 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51505 + const struct dentry * parent_dentry,
51506 + const struct vfsmount * parent_mnt, const char *from)
51507 +{
51508 + __u32 needmode = GR_WRITE | GR_CREATE;
51509 + __u32 mode;
51510 +
51511 + mode =
51512 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51513 + GR_CREATE | GR_AUDIT_CREATE |
51514 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51515 +
51516 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51517 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51518 + return mode;
51519 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51520 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51521 + return 0;
51522 + } else if (unlikely((mode & needmode) != needmode))
51523 + return 0;
51524 +
51525 + return (GR_WRITE | GR_CREATE);
51526 +}
51527 +
51528 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51529 +{
51530 + __u32 mode;
51531 +
51532 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51533 +
51534 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51535 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51536 + return mode;
51537 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51538 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51539 + return 0;
51540 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51541 + return 0;
51542 +
51543 + return (reqmode);
51544 +}
51545 +
51546 +__u32
51547 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51548 + const struct dentry * parent_dentry,
51549 + const struct vfsmount * parent_mnt,
51550 + const int mode)
51551 +{
51552 + __u32 reqmode = GR_WRITE | GR_CREATE;
51553 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51554 + reqmode |= GR_SETID;
51555 +
51556 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51557 + reqmode, GR_MKNOD_ACL_MSG);
51558 +}
51559 +
51560 +__u32
51561 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51562 + const struct dentry *parent_dentry,
51563 + const struct vfsmount *parent_mnt)
51564 +{
51565 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51566 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51567 +}
51568 +
51569 +#define RENAME_CHECK_SUCCESS(old, new) \
51570 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51571 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51572 +
51573 +int
51574 +gr_acl_handle_rename(struct dentry *new_dentry,
51575 + struct dentry *parent_dentry,
51576 + const struct vfsmount *parent_mnt,
51577 + struct dentry *old_dentry,
51578 + struct inode *old_parent_inode,
51579 + struct vfsmount *old_mnt, const char *newname)
51580 +{
51581 + __u32 comp1, comp2;
51582 + int error = 0;
51583 +
51584 + if (unlikely(!gr_acl_is_enabled()))
51585 + return 0;
51586 +
51587 + if (!new_dentry->d_inode) {
51588 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51589 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51590 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51591 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51592 + GR_DELETE | GR_AUDIT_DELETE |
51593 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51594 + GR_SUPPRESS, old_mnt);
51595 + } else {
51596 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51597 + GR_CREATE | GR_DELETE |
51598 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51599 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51600 + GR_SUPPRESS, parent_mnt);
51601 + comp2 =
51602 + gr_search_file(old_dentry,
51603 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51604 + GR_DELETE | GR_AUDIT_DELETE |
51605 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51606 + }
51607 +
51608 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51609 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51610 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51611 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51612 + && !(comp2 & GR_SUPPRESS)) {
51613 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51614 + error = -EACCES;
51615 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51616 + error = -EACCES;
51617 +
51618 + return error;
51619 +}
51620 +
51621 +void
51622 +gr_acl_handle_exit(void)
51623 +{
51624 + u16 id;
51625 + char *rolename;
51626 + struct file *exec_file;
51627 +
51628 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51629 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51630 + id = current->acl_role_id;
51631 + rolename = current->role->rolename;
51632 + gr_set_acls(1);
51633 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51634 + }
51635 +
51636 + write_lock(&grsec_exec_file_lock);
51637 + exec_file = current->exec_file;
51638 + current->exec_file = NULL;
51639 + write_unlock(&grsec_exec_file_lock);
51640 +
51641 + if (exec_file)
51642 + fput(exec_file);
51643 +}
51644 +
51645 +int
51646 +gr_acl_handle_procpidmem(const struct task_struct *task)
51647 +{
51648 + if (unlikely(!gr_acl_is_enabled()))
51649 + return 0;
51650 +
51651 + if (task != current && task->acl->mode & GR_PROTPROCFD)
51652 + return -EACCES;
51653 +
51654 + return 0;
51655 +}
51656 diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51657 --- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51658 +++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51659 @@ -0,0 +1,382 @@
51660 +#include <linux/kernel.h>
51661 +#include <asm/uaccess.h>
51662 +#include <asm/errno.h>
51663 +#include <net/sock.h>
51664 +#include <linux/file.h>
51665 +#include <linux/fs.h>
51666 +#include <linux/net.h>
51667 +#include <linux/in.h>
51668 +#include <linux/skbuff.h>
51669 +#include <linux/ip.h>
51670 +#include <linux/udp.h>
51671 +#include <linux/smp_lock.h>
51672 +#include <linux/types.h>
51673 +#include <linux/sched.h>
51674 +#include <linux/netdevice.h>
51675 +#include <linux/inetdevice.h>
51676 +#include <linux/gracl.h>
51677 +#include <linux/grsecurity.h>
51678 +#include <linux/grinternal.h>
51679 +
51680 +#define GR_BIND 0x01
51681 +#define GR_CONNECT 0x02
51682 +#define GR_INVERT 0x04
51683 +#define GR_BINDOVERRIDE 0x08
51684 +#define GR_CONNECTOVERRIDE 0x10
51685 +#define GR_SOCK_FAMILY 0x20
51686 +
51687 +static const char * gr_protocols[IPPROTO_MAX] = {
51688 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51689 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51690 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51691 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51692 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51693 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51694 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51695 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51696 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51697 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51698 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51699 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51700 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51701 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51702 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51703 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51704 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51705 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51706 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51707 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51708 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51709 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51710 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51711 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51712 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51713 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51714 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51715 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51716 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51717 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51718 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51719 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51720 + };
51721 +
51722 +static const char * gr_socktypes[SOCK_MAX] = {
51723 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51724 + "unknown:7", "unknown:8", "unknown:9", "packet"
51725 + };
51726 +
51727 +static const char * gr_sockfamilies[AF_MAX+1] = {
51728 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51729 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51730 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51731 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51732 + };
51733 +
51734 +const char *
51735 +gr_proto_to_name(unsigned char proto)
51736 +{
51737 + return gr_protocols[proto];
51738 +}
51739 +
51740 +const char *
51741 +gr_socktype_to_name(unsigned char type)
51742 +{
51743 + return gr_socktypes[type];
51744 +}
51745 +
51746 +const char *
51747 +gr_sockfamily_to_name(unsigned char family)
51748 +{
51749 + return gr_sockfamilies[family];
51750 +}
51751 +
51752 +int
51753 +gr_search_socket(const int domain, const int type, const int protocol)
51754 +{
51755 + struct acl_subject_label *curr;
51756 + const struct cred *cred = current_cred();
51757 +
51758 + if (unlikely(!gr_acl_is_enabled()))
51759 + goto exit;
51760 +
51761 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
51762 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51763 + goto exit; // let the kernel handle it
51764 +
51765 + curr = current->acl;
51766 +
51767 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51768 + /* the family is allowed, if this is PF_INET allow it only if
51769 + the extra sock type/protocol checks pass */
51770 + if (domain == PF_INET)
51771 + goto inet_check;
51772 + goto exit;
51773 + } else {
51774 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51775 + __u32 fakeip = 0;
51776 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51777 + current->role->roletype, cred->uid,
51778 + cred->gid, current->exec_file ?
51779 + gr_to_filename(current->exec_file->f_path.dentry,
51780 + current->exec_file->f_path.mnt) :
51781 + curr->filename, curr->filename,
51782 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51783 + &current->signal->saved_ip);
51784 + goto exit;
51785 + }
51786 + goto exit_fail;
51787 + }
51788 +
51789 +inet_check:
51790 + /* the rest of this checking is for IPv4 only */
51791 + if (!curr->ips)
51792 + goto exit;
51793 +
51794 + if ((curr->ip_type & (1 << type)) &&
51795 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51796 + goto exit;
51797 +
51798 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51799 + /* we don't place acls on raw sockets , and sometimes
51800 + dgram/ip sockets are opened for ioctl and not
51801 + bind/connect, so we'll fake a bind learn log */
51802 + if (type == SOCK_RAW || type == SOCK_PACKET) {
51803 + __u32 fakeip = 0;
51804 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51805 + current->role->roletype, cred->uid,
51806 + cred->gid, current->exec_file ?
51807 + gr_to_filename(current->exec_file->f_path.dentry,
51808 + current->exec_file->f_path.mnt) :
51809 + curr->filename, curr->filename,
51810 + &fakeip, 0, type,
51811 + protocol, GR_CONNECT, &current->signal->saved_ip);
51812 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51813 + __u32 fakeip = 0;
51814 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51815 + current->role->roletype, cred->uid,
51816 + cred->gid, current->exec_file ?
51817 + gr_to_filename(current->exec_file->f_path.dentry,
51818 + current->exec_file->f_path.mnt) :
51819 + curr->filename, curr->filename,
51820 + &fakeip, 0, type,
51821 + protocol, GR_BIND, &current->signal->saved_ip);
51822 + }
51823 + /* we'll log when they use connect or bind */
51824 + goto exit;
51825 + }
51826 +
51827 +exit_fail:
51828 + if (domain == PF_INET)
51829 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51830 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
51831 + else
51832 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51833 + gr_socktype_to_name(type), protocol);
51834 +
51835 + return 0;
51836 +exit:
51837 + return 1;
51838 +}
51839 +
51840 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51841 +{
51842 + if ((ip->mode & mode) &&
51843 + (ip_port >= ip->low) &&
51844 + (ip_port <= ip->high) &&
51845 + ((ntohl(ip_addr) & our_netmask) ==
51846 + (ntohl(our_addr) & our_netmask))
51847 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51848 + && (ip->type & (1 << type))) {
51849 + if (ip->mode & GR_INVERT)
51850 + return 2; // specifically denied
51851 + else
51852 + return 1; // allowed
51853 + }
51854 +
51855 + return 0; // not specifically allowed, may continue parsing
51856 +}
51857 +
51858 +static int
51859 +gr_search_connectbind(const int full_mode, struct sock *sk,
51860 + struct sockaddr_in *addr, const int type)
51861 +{
51862 + char iface[IFNAMSIZ] = {0};
51863 + struct acl_subject_label *curr;
51864 + struct acl_ip_label *ip;
51865 + struct inet_sock *isk;
51866 + struct net_device *dev;
51867 + struct in_device *idev;
51868 + unsigned long i;
51869 + int ret;
51870 + int mode = full_mode & (GR_BIND | GR_CONNECT);
51871 + __u32 ip_addr = 0;
51872 + __u32 our_addr;
51873 + __u32 our_netmask;
51874 + char *p;
51875 + __u16 ip_port = 0;
51876 + const struct cred *cred = current_cred();
51877 +
51878 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51879 + return 0;
51880 +
51881 + curr = current->acl;
51882 + isk = inet_sk(sk);
51883 +
51884 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51885 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51886 + addr->sin_addr.s_addr = curr->inaddr_any_override;
51887 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51888 + struct sockaddr_in saddr;
51889 + int err;
51890 +
51891 + saddr.sin_family = AF_INET;
51892 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
51893 + saddr.sin_port = isk->sport;
51894 +
51895 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51896 + if (err)
51897 + return err;
51898 +
51899 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51900 + if (err)
51901 + return err;
51902 + }
51903 +
51904 + if (!curr->ips)
51905 + return 0;
51906 +
51907 + ip_addr = addr->sin_addr.s_addr;
51908 + ip_port = ntohs(addr->sin_port);
51909 +
51910 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51911 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51912 + current->role->roletype, cred->uid,
51913 + cred->gid, current->exec_file ?
51914 + gr_to_filename(current->exec_file->f_path.dentry,
51915 + current->exec_file->f_path.mnt) :
51916 + curr->filename, curr->filename,
51917 + &ip_addr, ip_port, type,
51918 + sk->sk_protocol, mode, &current->signal->saved_ip);
51919 + return 0;
51920 + }
51921 +
51922 + for (i = 0; i < curr->ip_num; i++) {
51923 + ip = *(curr->ips + i);
51924 + if (ip->iface != NULL) {
51925 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
51926 + p = strchr(iface, ':');
51927 + if (p != NULL)
51928 + *p = '\0';
51929 + dev = dev_get_by_name(sock_net(sk), iface);
51930 + if (dev == NULL)
51931 + continue;
51932 + idev = in_dev_get(dev);
51933 + if (idev == NULL) {
51934 + dev_put(dev);
51935 + continue;
51936 + }
51937 + rcu_read_lock();
51938 + for_ifa(idev) {
51939 + if (!strcmp(ip->iface, ifa->ifa_label)) {
51940 + our_addr = ifa->ifa_address;
51941 + our_netmask = 0xffffffff;
51942 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51943 + if (ret == 1) {
51944 + rcu_read_unlock();
51945 + in_dev_put(idev);
51946 + dev_put(dev);
51947 + return 0;
51948 + } else if (ret == 2) {
51949 + rcu_read_unlock();
51950 + in_dev_put(idev);
51951 + dev_put(dev);
51952 + goto denied;
51953 + }
51954 + }
51955 + } endfor_ifa(idev);
51956 + rcu_read_unlock();
51957 + in_dev_put(idev);
51958 + dev_put(dev);
51959 + } else {
51960 + our_addr = ip->addr;
51961 + our_netmask = ip->netmask;
51962 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51963 + if (ret == 1)
51964 + return 0;
51965 + else if (ret == 2)
51966 + goto denied;
51967 + }
51968 + }
51969 +
51970 +denied:
51971 + if (mode == GR_BIND)
51972 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51973 + else if (mode == GR_CONNECT)
51974 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51975 +
51976 + return -EACCES;
51977 +}
51978 +
51979 +int
51980 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51981 +{
51982 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51983 +}
51984 +
51985 +int
51986 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51987 +{
51988 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51989 +}
51990 +
51991 +int gr_search_listen(struct socket *sock)
51992 +{
51993 + struct sock *sk = sock->sk;
51994 + struct sockaddr_in addr;
51995 +
51996 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51997 + addr.sin_port = inet_sk(sk)->sport;
51998 +
51999 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52000 +}
52001 +
52002 +int gr_search_accept(struct socket *sock)
52003 +{
52004 + struct sock *sk = sock->sk;
52005 + struct sockaddr_in addr;
52006 +
52007 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52008 + addr.sin_port = inet_sk(sk)->sport;
52009 +
52010 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52011 +}
52012 +
52013 +int
52014 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52015 +{
52016 + if (addr)
52017 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52018 + else {
52019 + struct sockaddr_in sin;
52020 + const struct inet_sock *inet = inet_sk(sk);
52021 +
52022 + sin.sin_addr.s_addr = inet->daddr;
52023 + sin.sin_port = inet->dport;
52024 +
52025 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52026 + }
52027 +}
52028 +
52029 +int
52030 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52031 +{
52032 + struct sockaddr_in sin;
52033 +
52034 + if (unlikely(skb->len < sizeof (struct udphdr)))
52035 + return 0; // skip this packet
52036 +
52037 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52038 + sin.sin_port = udp_hdr(skb)->source;
52039 +
52040 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52041 +}
52042 diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
52043 --- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52044 +++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
52045 @@ -0,0 +1,208 @@
52046 +#include <linux/kernel.h>
52047 +#include <linux/mm.h>
52048 +#include <linux/sched.h>
52049 +#include <linux/poll.h>
52050 +#include <linux/smp_lock.h>
52051 +#include <linux/string.h>
52052 +#include <linux/file.h>
52053 +#include <linux/types.h>
52054 +#include <linux/vmalloc.h>
52055 +#include <linux/grinternal.h>
52056 +
52057 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52058 + size_t count, loff_t *ppos);
52059 +extern int gr_acl_is_enabled(void);
52060 +
52061 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52062 +static int gr_learn_attached;
52063 +
52064 +/* use a 512k buffer */
52065 +#define LEARN_BUFFER_SIZE (512 * 1024)
52066 +
52067 +static DEFINE_SPINLOCK(gr_learn_lock);
52068 +static DEFINE_MUTEX(gr_learn_user_mutex);
52069 +
52070 +/* we need to maintain two buffers, so that the kernel context of grlearn
52071 + uses a semaphore around the userspace copying, and the other kernel contexts
52072 + use a spinlock when copying into the buffer, since they cannot sleep
52073 +*/
52074 +static char *learn_buffer;
52075 +static char *learn_buffer_user;
52076 +static int learn_buffer_len;
52077 +static int learn_buffer_user_len;
52078 +
52079 +static ssize_t
52080 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52081 +{
52082 + DECLARE_WAITQUEUE(wait, current);
52083 + ssize_t retval = 0;
52084 +
52085 + add_wait_queue(&learn_wait, &wait);
52086 + set_current_state(TASK_INTERRUPTIBLE);
52087 + do {
52088 + mutex_lock(&gr_learn_user_mutex);
52089 + spin_lock(&gr_learn_lock);
52090 + if (learn_buffer_len)
52091 + break;
52092 + spin_unlock(&gr_learn_lock);
52093 + mutex_unlock(&gr_learn_user_mutex);
52094 + if (file->f_flags & O_NONBLOCK) {
52095 + retval = -EAGAIN;
52096 + goto out;
52097 + }
52098 + if (signal_pending(current)) {
52099 + retval = -ERESTARTSYS;
52100 + goto out;
52101 + }
52102 +
52103 + schedule();
52104 + } while (1);
52105 +
52106 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52107 + learn_buffer_user_len = learn_buffer_len;
52108 + retval = learn_buffer_len;
52109 + learn_buffer_len = 0;
52110 +
52111 + spin_unlock(&gr_learn_lock);
52112 +
52113 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52114 + retval = -EFAULT;
52115 +
52116 + mutex_unlock(&gr_learn_user_mutex);
52117 +out:
52118 + set_current_state(TASK_RUNNING);
52119 + remove_wait_queue(&learn_wait, &wait);
52120 + return retval;
52121 +}
52122 +
52123 +static unsigned int
52124 +poll_learn(struct file * file, poll_table * wait)
52125 +{
52126 + poll_wait(file, &learn_wait, wait);
52127 +
52128 + if (learn_buffer_len)
52129 + return (POLLIN | POLLRDNORM);
52130 +
52131 + return 0;
52132 +}
52133 +
52134 +void
52135 +gr_clear_learn_entries(void)
52136 +{
52137 + char *tmp;
52138 +
52139 + mutex_lock(&gr_learn_user_mutex);
52140 + spin_lock(&gr_learn_lock);
52141 + tmp = learn_buffer;
52142 + learn_buffer = NULL;
52143 + spin_unlock(&gr_learn_lock);
52144 + if (tmp)
52145 + vfree(tmp);
52146 + if (learn_buffer_user != NULL) {
52147 + vfree(learn_buffer_user);
52148 + learn_buffer_user = NULL;
52149 + }
52150 + learn_buffer_len = 0;
52151 + mutex_unlock(&gr_learn_user_mutex);
52152 +
52153 + return;
52154 +}
52155 +
52156 +void
52157 +gr_add_learn_entry(const char *fmt, ...)
52158 +{
52159 + va_list args;
52160 + unsigned int len;
52161 +
52162 + if (!gr_learn_attached)
52163 + return;
52164 +
52165 + spin_lock(&gr_learn_lock);
52166 +
52167 + /* leave a gap at the end so we know when it's "full" but don't have to
52168 + compute the exact length of the string we're trying to append
52169 + */
52170 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52171 + spin_unlock(&gr_learn_lock);
52172 + wake_up_interruptible(&learn_wait);
52173 + return;
52174 + }
52175 + if (learn_buffer == NULL) {
52176 + spin_unlock(&gr_learn_lock);
52177 + return;
52178 + }
52179 +
52180 + va_start(args, fmt);
52181 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52182 + va_end(args);
52183 +
52184 + learn_buffer_len += len + 1;
52185 +
52186 + spin_unlock(&gr_learn_lock);
52187 + wake_up_interruptible(&learn_wait);
52188 +
52189 + return;
52190 +}
52191 +
52192 +static int
52193 +open_learn(struct inode *inode, struct file *file)
52194 +{
52195 + if (file->f_mode & FMODE_READ && gr_learn_attached)
52196 + return -EBUSY;
52197 + if (file->f_mode & FMODE_READ) {
52198 + int retval = 0;
52199 + mutex_lock(&gr_learn_user_mutex);
52200 + if (learn_buffer == NULL)
52201 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52202 + if (learn_buffer_user == NULL)
52203 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52204 + if (learn_buffer == NULL) {
52205 + retval = -ENOMEM;
52206 + goto out_error;
52207 + }
52208 + if (learn_buffer_user == NULL) {
52209 + retval = -ENOMEM;
52210 + goto out_error;
52211 + }
52212 + learn_buffer_len = 0;
52213 + learn_buffer_user_len = 0;
52214 + gr_learn_attached = 1;
52215 +out_error:
52216 + mutex_unlock(&gr_learn_user_mutex);
52217 + return retval;
52218 + }
52219 + return 0;
52220 +}
52221 +
52222 +static int
52223 +close_learn(struct inode *inode, struct file *file)
52224 +{
52225 + if (file->f_mode & FMODE_READ) {
52226 + char *tmp = NULL;
52227 + mutex_lock(&gr_learn_user_mutex);
52228 + spin_lock(&gr_learn_lock);
52229 + tmp = learn_buffer;
52230 + learn_buffer = NULL;
52231 + spin_unlock(&gr_learn_lock);
52232 + if (tmp)
52233 + vfree(tmp);
52234 + if (learn_buffer_user != NULL) {
52235 + vfree(learn_buffer_user);
52236 + learn_buffer_user = NULL;
52237 + }
52238 + learn_buffer_len = 0;
52239 + learn_buffer_user_len = 0;
52240 + gr_learn_attached = 0;
52241 + mutex_unlock(&gr_learn_user_mutex);
52242 + }
52243 +
52244 + return 0;
52245 +}
52246 +
52247 +const struct file_operations grsec_fops = {
52248 + .read = read_learn,
52249 + .write = write_grsec_handler,
52250 + .open = open_learn,
52251 + .release = close_learn,
52252 + .poll = poll_learn,
52253 +};
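
[Editorial aside, not part of the patch: the hunk above implements the learn-mode interface as a blocking read plus poll on a character device. A minimal userspace reader, sketched under the assumption that the device is exposed as /dev/grsec (the node name is not shown in this hunk) and with a locally chosen buffer size rather than the kernel's LEARN_BUFFER_SIZE, could look like this.]

/* learn_reader.c -- illustrative sketch only, not part of the patch. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define READ_BUF_SIZE (128 * 1024)	/* local choice, not the kernel's LEARN_BUFFER_SIZE */

int main(void)
{
	char *buf = malloc(READ_BUF_SIZE);
	int fd = open("/dev/grsec", O_RDONLY);	/* assumed device node */
	struct pollfd pfd;
	ssize_t n;

	if (fd < 0 || buf == NULL) {
		perror("setup");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;

	for (;;) {
		/* poll_learn() reports POLLIN | POLLRDNORM once learn_buffer_len != 0 */
		if (poll(&pfd, 1, -1) < 0) {
			perror("poll");
			break;
		}
		/* read_learn() copies the kernel buffer out and resets its length */
		n = read(fd, buf, READ_BUF_SIZE);
		if (n <= 0)
			break;
		/* entries are NUL-terminated strings packed back to back */
		for (char *p = buf; p < buf + n; p += strlen(p) + 1)
			if (*p)
				printf("%s\n", p);
	}
	free(buf);
	close(fd);
	return 0;
}

[With O_NONBLOCK, read_learn() returns -EAGAIN instead of sleeping, which is what the early goto out in the hunk above implements.]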
52254 diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
52255 --- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52256 +++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52257 @@ -0,0 +1,67 @@
52258 +#include <linux/kernel.h>
52259 +#include <linux/sched.h>
52260 +#include <linux/gracl.h>
52261 +#include <linux/grinternal.h>
52262 +
52263 +static const char *restab_log[] = {
52264 + [RLIMIT_CPU] = "RLIMIT_CPU",
52265 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52266 + [RLIMIT_DATA] = "RLIMIT_DATA",
52267 + [RLIMIT_STACK] = "RLIMIT_STACK",
52268 + [RLIMIT_CORE] = "RLIMIT_CORE",
52269 + [RLIMIT_RSS] = "RLIMIT_RSS",
52270 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
52271 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52272 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52273 + [RLIMIT_AS] = "RLIMIT_AS",
52274 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52275 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52276 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52277 + [RLIMIT_NICE] = "RLIMIT_NICE",
52278 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52279 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52280 + [GR_CRASH_RES] = "RLIMIT_CRASH"
52281 +};
52282 +
52283 +void
52284 +gr_log_resource(const struct task_struct *task,
52285 + const int res, const unsigned long wanted, const int gt)
52286 +{
52287 + const struct cred *cred;
52288 + unsigned long rlim;
52289 +
52290 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
52291 + return;
52292 +
52293 +	// resource not yet supported in restab_log
52294 + if (unlikely(!restab_log[res]))
52295 + return;
52296 +
52297 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52298 + rlim = task->signal->rlim[res].rlim_max;
52299 + else
52300 + rlim = task->signal->rlim[res].rlim_cur;
52301 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52302 + return;
52303 +
52304 + rcu_read_lock();
52305 + cred = __task_cred(task);
52306 +
52307 + if (res == RLIMIT_NPROC &&
52308 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52309 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52310 + goto out_rcu_unlock;
52311 + else if (res == RLIMIT_MEMLOCK &&
52312 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52313 + goto out_rcu_unlock;
52314 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52315 + goto out_rcu_unlock;
52316 + rcu_read_unlock();
52317 +
52318 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52319 +
52320 + return;
52321 +out_rcu_unlock:
52322 + rcu_read_unlock();
52323 + return;
52324 +}
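
[Editorial aside, not part of the patch: gr_log_resource() above only logs when the request really exceeds the relevant limit -- with gt set the logging condition is wanted > rlim, otherwise wanted >= rlim, and RLIM_INFINITY never logs. A tiny standalone check with the same shape, using illustrative names and values, is sketched below.]

/* reslog_check.c -- the exceed-test from gr_log_resource(), in isolation. */
#include <stdio.h>
#include <sys/resource.h>

/* Returns 1 if the request should be logged, i.e. the inverse of the patch's
 * "(rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)"
 * early return. */
static int should_log(unsigned long wanted, rlim_t rlim, int gt)
{
	if (rlim == RLIM_INFINITY)
		return 0;
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	struct rlimit r;

	if (getrlimit(RLIMIT_NOFILE, &r))
		return 1;
	printf("soft RLIMIT_NOFILE = %llu -> exceeding it %s be logged\n",
	       (unsigned long long)r.rlim_cur,
	       should_log((unsigned long)r.rlim_cur + 1, r.rlim_cur, 0) ?
	       "would" : "would not");
	return 0;
}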
52325 diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
52326 --- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52327 +++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52328 @@ -0,0 +1,284 @@
52329 +#include <linux/kernel.h>
52330 +#include <linux/mm.h>
52331 +#include <asm/uaccess.h>
52332 +#include <asm/errno.h>
52333 +#include <asm/mman.h>
52334 +#include <net/sock.h>
52335 +#include <linux/file.h>
52336 +#include <linux/fs.h>
52337 +#include <linux/net.h>
52338 +#include <linux/in.h>
52339 +#include <linux/smp_lock.h>
52340 +#include <linux/slab.h>
52341 +#include <linux/types.h>
52342 +#include <linux/sched.h>
52343 +#include <linux/timer.h>
52344 +#include <linux/gracl.h>
52345 +#include <linux/grsecurity.h>
52346 +#include <linux/grinternal.h>
52347 +
52348 +static struct crash_uid *uid_set;
52349 +static unsigned short uid_used;
52350 +static DEFINE_SPINLOCK(gr_uid_lock);
52351 +extern rwlock_t gr_inode_lock;
52352 +extern struct acl_subject_label *
52353 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52354 + struct acl_role_label *role);
52355 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52356 +
52357 +int
52358 +gr_init_uidset(void)
52359 +{
52360 + uid_set =
52361 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52362 + uid_used = 0;
52363 +
52364 + return uid_set ? 1 : 0;
52365 +}
52366 +
52367 +void
52368 +gr_free_uidset(void)
52369 +{
52370 + if (uid_set)
52371 + kfree(uid_set);
52372 +
52373 + return;
52374 +}
52375 +
52376 +int
52377 +gr_find_uid(const uid_t uid)
52378 +{
52379 + struct crash_uid *tmp = uid_set;
52380 + uid_t buid;
52381 + int low = 0, high = uid_used - 1, mid;
52382 +
52383 + while (high >= low) {
52384 + mid = (low + high) >> 1;
52385 + buid = tmp[mid].uid;
52386 + if (buid == uid)
52387 + return mid;
52388 + if (buid > uid)
52389 + high = mid - 1;
52390 + if (buid < uid)
52391 + low = mid + 1;
52392 + }
52393 +
52394 + return -1;
52395 +}
52396 +
52397 +static __inline__ void
52398 +gr_insertsort(void)
52399 +{
52400 + unsigned short i, j;
52401 + struct crash_uid index;
52402 +
52403 + for (i = 1; i < uid_used; i++) {
52404 + index = uid_set[i];
52405 + j = i;
52406 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52407 + uid_set[j] = uid_set[j - 1];
52408 + j--;
52409 + }
52410 + uid_set[j] = index;
52411 + }
52412 +
52413 + return;
52414 +}
52415 +
52416 +static __inline__ void
52417 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52418 +{
52419 + int loc;
52420 +
52421 + if (uid_used == GR_UIDTABLE_MAX)
52422 + return;
52423 +
52424 + loc = gr_find_uid(uid);
52425 +
52426 + if (loc >= 0) {
52427 + uid_set[loc].expires = expires;
52428 + return;
52429 + }
52430 +
52431 + uid_set[uid_used].uid = uid;
52432 + uid_set[uid_used].expires = expires;
52433 + uid_used++;
52434 +
52435 + gr_insertsort();
52436 +
52437 + return;
52438 +}
52439 +
52440 +void
52441 +gr_remove_uid(const unsigned short loc)
52442 +{
52443 + unsigned short i;
52444 +
52445 + for (i = loc + 1; i < uid_used; i++)
52446 + uid_set[i - 1] = uid_set[i];
52447 +
52448 + uid_used--;
52449 +
52450 + return;
52451 +}
52452 +
52453 +int
52454 +gr_check_crash_uid(const uid_t uid)
52455 +{
52456 + int loc;
52457 + int ret = 0;
52458 +
52459 + if (unlikely(!gr_acl_is_enabled()))
52460 + return 0;
52461 +
52462 + spin_lock(&gr_uid_lock);
52463 + loc = gr_find_uid(uid);
52464 +
52465 + if (loc < 0)
52466 + goto out_unlock;
52467 +
52468 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52469 + gr_remove_uid(loc);
52470 + else
52471 + ret = 1;
52472 +
52473 +out_unlock:
52474 + spin_unlock(&gr_uid_lock);
52475 + return ret;
52476 +}
52477 +
52478 +static __inline__ int
52479 +proc_is_setxid(const struct cred *cred)
52480 +{
52481 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52482 + cred->uid != cred->fsuid)
52483 + return 1;
52484 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52485 + cred->gid != cred->fsgid)
52486 + return 1;
52487 +
52488 + return 0;
52489 +}
52490 +
52491 +void
52492 +gr_handle_crash(struct task_struct *task, const int sig)
52493 +{
52494 + struct acl_subject_label *curr;
52495 + struct acl_subject_label *curr2;
52496 + struct task_struct *tsk, *tsk2;
52497 + const struct cred *cred;
52498 + const struct cred *cred2;
52499 +
52500 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52501 + return;
52502 +
52503 + if (unlikely(!gr_acl_is_enabled()))
52504 + return;
52505 +
52506 + curr = task->acl;
52507 +
52508 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52509 + return;
52510 +
52511 + if (time_before_eq(curr->expires, get_seconds())) {
52512 + curr->expires = 0;
52513 + curr->crashes = 0;
52514 + }
52515 +
52516 + curr->crashes++;
52517 +
52518 + if (!curr->expires)
52519 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52520 +
52521 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52522 + time_after(curr->expires, get_seconds())) {
52523 + rcu_read_lock();
52524 + cred = __task_cred(task);
52525 + if (cred->uid && proc_is_setxid(cred)) {
52526 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52527 + spin_lock(&gr_uid_lock);
52528 + gr_insert_uid(cred->uid, curr->expires);
52529 + spin_unlock(&gr_uid_lock);
52530 + curr->expires = 0;
52531 + curr->crashes = 0;
52532 + read_lock(&tasklist_lock);
52533 + do_each_thread(tsk2, tsk) {
52534 + cred2 = __task_cred(tsk);
52535 + if (tsk != task && cred2->uid == cred->uid)
52536 + gr_fake_force_sig(SIGKILL, tsk);
52537 + } while_each_thread(tsk2, tsk);
52538 + read_unlock(&tasklist_lock);
52539 + } else {
52540 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52541 + read_lock(&tasklist_lock);
52542 + do_each_thread(tsk2, tsk) {
52543 + if (likely(tsk != task)) {
52544 + curr2 = tsk->acl;
52545 +
52546 + if (curr2->device == curr->device &&
52547 + curr2->inode == curr->inode)
52548 + gr_fake_force_sig(SIGKILL, tsk);
52549 + }
52550 + } while_each_thread(tsk2, tsk);
52551 + read_unlock(&tasklist_lock);
52552 + }
52553 + rcu_read_unlock();
52554 + }
52555 +
52556 + return;
52557 +}
52558 +
52559 +int
52560 +gr_check_crash_exec(const struct file *filp)
52561 +{
52562 + struct acl_subject_label *curr;
52563 +
52564 + if (unlikely(!gr_acl_is_enabled()))
52565 + return 0;
52566 +
52567 + read_lock(&gr_inode_lock);
52568 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52569 + filp->f_path.dentry->d_inode->i_sb->s_dev,
52570 + current->role);
52571 + read_unlock(&gr_inode_lock);
52572 +
52573 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52574 + (!curr->crashes && !curr->expires))
52575 + return 0;
52576 +
52577 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52578 + time_after(curr->expires, get_seconds()))
52579 + return 1;
52580 + else if (time_before_eq(curr->expires, get_seconds())) {
52581 + curr->crashes = 0;
52582 + curr->expires = 0;
52583 + }
52584 +
52585 + return 0;
52586 +}
52587 +
52588 +void
52589 +gr_handle_alertkill(struct task_struct *task)
52590 +{
52591 + struct acl_subject_label *curracl;
52592 + __u32 curr_ip;
52593 + struct task_struct *p, *p2;
52594 +
52595 + if (unlikely(!gr_acl_is_enabled()))
52596 + return;
52597 +
52598 + curracl = task->acl;
52599 + curr_ip = task->signal->curr_ip;
52600 +
52601 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52602 + read_lock(&tasklist_lock);
52603 + do_each_thread(p2, p) {
52604 + if (p->signal->curr_ip == curr_ip)
52605 + gr_fake_force_sig(SIGKILL, p);
52606 + } while_each_thread(p2, p);
52607 + read_unlock(&tasklist_lock);
52608 + } else if (curracl->mode & GR_KILLPROC)
52609 + gr_fake_force_sig(SIGKILL, task);
52610 +
52611 + return;
52612 +}
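
[Editorial aside, not part of the patch: the crash-UID tracking in gracl_segv.c above keeps a small sorted array -- binary search for lookups, append plus insertion sort for inserts, shift-down for removal, entries expiring by timestamp. A minimal userspace sketch of the same bookkeeping follows; names and the table size are illustrative, not taken from the patch.]

/* uid_table.c -- standalone illustration of the sorted crash-UID table idea. */
#include <stdio.h>
#include <time.h>

#define TABLE_MAX 64	/* illustrative; the patch uses GR_UIDTABLE_MAX */

struct crash_entry {
	unsigned int uid;
	time_t expires;
};

static struct crash_entry table[TABLE_MAX];
static int used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = used - 1;

	while (low <= high) {
		int mid = (low + high) / 2;
		if (table[mid].uid == uid)
			return mid;
		if (table[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, time_t expires)
{
	int i, loc = find_uid(uid);

	if (loc >= 0) {			/* already present: just refresh the expiry */
		table[loc].expires = expires;
		return;
	}
	if (used == TABLE_MAX)
		return;
	table[used].uid = uid;
	table[used].expires = expires;
	used++;
	/* insertion sort keeps the array ordered for the binary search */
	for (i = 1; i < used; i++) {
		struct crash_entry key = table[i];
		int j = i;

		while (j > 0 && table[j - 1].uid > key.uid) {
			table[j] = table[j - 1];
			j--;
		}
		table[j] = key;
	}
}

int main(void)
{
	insert_uid(1000, time(NULL) + 30);
	insert_uid(33, time(NULL) + 30);
	insert_uid(1000, time(NULL) + 60);	/* refresh, no duplicate entry */
	printf("entries: %d, uid 33 at index %d\n", used, find_uid(33));
	return 0;
}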
52613 diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52614 --- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52615 +++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52616 @@ -0,0 +1,40 @@
52617 +#include <linux/kernel.h>
52618 +#include <linux/mm.h>
52619 +#include <linux/sched.h>
52620 +#include <linux/file.h>
52621 +#include <linux/ipc.h>
52622 +#include <linux/gracl.h>
52623 +#include <linux/grsecurity.h>
52624 +#include <linux/grinternal.h>
52625 +
52626 +int
52627 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52628 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52629 +{
52630 + struct task_struct *task;
52631 +
52632 + if (!gr_acl_is_enabled())
52633 + return 1;
52634 +
52635 + rcu_read_lock();
52636 + read_lock(&tasklist_lock);
52637 +
52638 + task = find_task_by_vpid(shm_cprid);
52639 +
52640 + if (unlikely(!task))
52641 + task = find_task_by_vpid(shm_lapid);
52642 +
52643 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52644 + (task->pid == shm_lapid)) &&
52645 + (task->acl->mode & GR_PROTSHM) &&
52646 + (task->acl != current->acl))) {
52647 + read_unlock(&tasklist_lock);
52648 + rcu_read_unlock();
52649 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52650 + return 0;
52651 + }
52652 + read_unlock(&tasklist_lock);
52653 + rcu_read_unlock();
52654 +
52655 + return 1;
52656 +}
52657 diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52658 --- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52659 +++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52660 @@ -0,0 +1,19 @@
52661 +#include <linux/kernel.h>
52662 +#include <linux/sched.h>
52663 +#include <linux/fs.h>
52664 +#include <linux/file.h>
52665 +#include <linux/grsecurity.h>
52666 +#include <linux/grinternal.h>
52667 +
52668 +void
52669 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52670 +{
52671 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52672 + if ((grsec_enable_chdir && grsec_enable_group &&
52673 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52674 + !grsec_enable_group)) {
52675 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52676 + }
52677 +#endif
52678 + return;
52679 +}
52680 diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52681 --- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52682 +++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52683 @@ -0,0 +1,384 @@
52684 +#include <linux/kernel.h>
52685 +#include <linux/module.h>
52686 +#include <linux/sched.h>
52687 +#include <linux/file.h>
52688 +#include <linux/fs.h>
52689 +#include <linux/mount.h>
52690 +#include <linux/types.h>
52691 +#include <linux/pid_namespace.h>
52692 +#include <linux/grsecurity.h>
52693 +#include <linux/grinternal.h>
52694 +
52695 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52696 +{
52697 +#ifdef CONFIG_GRKERNSEC
52698 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52699 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52700 + task->gr_is_chrooted = 1;
52701 + else
52702 + task->gr_is_chrooted = 0;
52703 +
52704 + task->gr_chroot_dentry = path->dentry;
52705 +#endif
52706 + return;
52707 +}
52708 +
52709 +void gr_clear_chroot_entries(struct task_struct *task)
52710 +{
52711 +#ifdef CONFIG_GRKERNSEC
52712 + task->gr_is_chrooted = 0;
52713 + task->gr_chroot_dentry = NULL;
52714 +#endif
52715 + return;
52716 +}
52717 +
52718 +int
52719 +gr_handle_chroot_unix(const pid_t pid)
52720 +{
52721 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52722 + struct task_struct *p;
52723 +
52724 + if (unlikely(!grsec_enable_chroot_unix))
52725 + return 1;
52726 +
52727 + if (likely(!proc_is_chrooted(current)))
52728 + return 1;
52729 +
52730 + rcu_read_lock();
52731 + read_lock(&tasklist_lock);
52732 +
52733 + p = find_task_by_vpid_unrestricted(pid);
52734 + if (unlikely(p && !have_same_root(current, p))) {
52735 + read_unlock(&tasklist_lock);
52736 + rcu_read_unlock();
52737 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52738 + return 0;
52739 + }
52740 + read_unlock(&tasklist_lock);
52741 + rcu_read_unlock();
52742 +#endif
52743 + return 1;
52744 +}
52745 +
52746 +int
52747 +gr_handle_chroot_nice(void)
52748 +{
52749 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52750 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52751 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52752 + return -EPERM;
52753 + }
52754 +#endif
52755 + return 0;
52756 +}
52757 +
52758 +int
52759 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52760 +{
52761 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52762 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52763 + && proc_is_chrooted(current)) {
52764 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52765 + return -EACCES;
52766 + }
52767 +#endif
52768 + return 0;
52769 +}
52770 +
52771 +int
52772 +gr_handle_chroot_rawio(const struct inode *inode)
52773 +{
52774 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52775 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52776 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52777 + return 1;
52778 +#endif
52779 + return 0;
52780 +}
52781 +
52782 +int
52783 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52784 +{
52785 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52786 + struct task_struct *p;
52787 + int ret = 0;
52788 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52789 + return ret;
52790 +
52791 + read_lock(&tasklist_lock);
52792 + do_each_pid_task(pid, type, p) {
52793 + if (!have_same_root(current, p)) {
52794 + ret = 1;
52795 + goto out;
52796 + }
52797 + } while_each_pid_task(pid, type, p);
52798 +out:
52799 + read_unlock(&tasklist_lock);
52800 + return ret;
52801 +#endif
52802 + return 0;
52803 +}
52804 +
52805 +int
52806 +gr_pid_is_chrooted(struct task_struct *p)
52807 +{
52808 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52809 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52810 + return 0;
52811 +
52812 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52813 + !have_same_root(current, p)) {
52814 + return 1;
52815 + }
52816 +#endif
52817 + return 0;
52818 +}
52819 +
52820 +EXPORT_SYMBOL(gr_pid_is_chrooted);
52821 +
52822 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52823 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52824 +{
52825 + struct dentry *dentry = (struct dentry *)u_dentry;
52826 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52827 + struct dentry *realroot;
52828 + struct vfsmount *realrootmnt;
52829 + struct dentry *currentroot;
52830 + struct vfsmount *currentmnt;
52831 + struct task_struct *reaper = &init_task;
52832 + int ret = 1;
52833 +
52834 + read_lock(&reaper->fs->lock);
52835 + realrootmnt = mntget(reaper->fs->root.mnt);
52836 + realroot = dget(reaper->fs->root.dentry);
52837 + read_unlock(&reaper->fs->lock);
52838 +
52839 + read_lock(&current->fs->lock);
52840 + currentmnt = mntget(current->fs->root.mnt);
52841 + currentroot = dget(current->fs->root.dentry);
52842 + read_unlock(&current->fs->lock);
52843 +
52844 + spin_lock(&dcache_lock);
52845 + for (;;) {
52846 + if (unlikely((dentry == realroot && mnt == realrootmnt)
52847 + || (dentry == currentroot && mnt == currentmnt)))
52848 + break;
52849 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52850 + if (mnt->mnt_parent == mnt)
52851 + break;
52852 + dentry = mnt->mnt_mountpoint;
52853 + mnt = mnt->mnt_parent;
52854 + continue;
52855 + }
52856 + dentry = dentry->d_parent;
52857 + }
52858 + spin_unlock(&dcache_lock);
52859 +
52860 + dput(currentroot);
52861 + mntput(currentmnt);
52862 +
52863 + /* access is outside of chroot */
52864 + if (dentry == realroot && mnt == realrootmnt)
52865 + ret = 0;
52866 +
52867 + dput(realroot);
52868 + mntput(realrootmnt);
52869 + return ret;
52870 +}
52871 +#endif
52872 +
52873 +int
52874 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52875 +{
52876 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52877 + if (!grsec_enable_chroot_fchdir)
52878 + return 1;
52879 +
52880 + if (!proc_is_chrooted(current))
52881 + return 1;
52882 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52883 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52884 + return 0;
52885 + }
52886 +#endif
52887 + return 1;
52888 +}
52889 +
52890 +int
52891 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52892 + const time_t shm_createtime)
52893 +{
52894 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52895 + struct task_struct *p;
52896 + time_t starttime;
52897 +
52898 + if (unlikely(!grsec_enable_chroot_shmat))
52899 + return 1;
52900 +
52901 + if (likely(!proc_is_chrooted(current)))
52902 + return 1;
52903 +
52904 + rcu_read_lock();
52905 + read_lock(&tasklist_lock);
52906 +
52907 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52908 + starttime = p->start_time.tv_sec;
52909 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52910 + if (have_same_root(current, p)) {
52911 + goto allow;
52912 + } else {
52913 + read_unlock(&tasklist_lock);
52914 + rcu_read_unlock();
52915 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52916 + return 0;
52917 + }
52918 + }
52919 + /* creator exited, pid reuse, fall through to next check */
52920 + }
52921 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52922 + if (unlikely(!have_same_root(current, p))) {
52923 + read_unlock(&tasklist_lock);
52924 + rcu_read_unlock();
52925 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52926 + return 0;
52927 + }
52928 + }
52929 +
52930 +allow:
52931 + read_unlock(&tasklist_lock);
52932 + rcu_read_unlock();
52933 +#endif
52934 + return 1;
52935 +}
52936 +
52937 +void
52938 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52939 +{
52940 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52941 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52942 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52943 +#endif
52944 + return;
52945 +}
52946 +
52947 +int
52948 +gr_handle_chroot_mknod(const struct dentry *dentry,
52949 + const struct vfsmount *mnt, const int mode)
52950 +{
52951 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52952 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52953 + proc_is_chrooted(current)) {
52954 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52955 + return -EPERM;
52956 + }
52957 +#endif
52958 + return 0;
52959 +}
52960 +
52961 +int
52962 +gr_handle_chroot_mount(const struct dentry *dentry,
52963 + const struct vfsmount *mnt, const char *dev_name)
52964 +{
52965 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52966 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52967 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52968 + return -EPERM;
52969 + }
52970 +#endif
52971 + return 0;
52972 +}
52973 +
52974 +int
52975 +gr_handle_chroot_pivot(void)
52976 +{
52977 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52978 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52979 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52980 + return -EPERM;
52981 + }
52982 +#endif
52983 + return 0;
52984 +}
52985 +
52986 +int
52987 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52988 +{
52989 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52990 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52991 + !gr_is_outside_chroot(dentry, mnt)) {
52992 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52993 + return -EPERM;
52994 + }
52995 +#endif
52996 + return 0;
52997 +}
52998 +
52999 +int
53000 +gr_handle_chroot_caps(struct path *path)
53001 +{
53002 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53003 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
53004 + (init_task.fs->root.dentry != path->dentry) &&
53005 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
53006 +
53007 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53008 + const struct cred *old = current_cred();
53009 + struct cred *new = prepare_creds();
53010 + if (new == NULL)
53011 + return 1;
53012 +
53013 + new->cap_permitted = cap_drop(old->cap_permitted,
53014 + chroot_caps);
53015 + new->cap_inheritable = cap_drop(old->cap_inheritable,
53016 + chroot_caps);
53017 + new->cap_effective = cap_drop(old->cap_effective,
53018 + chroot_caps);
53019 +
53020 + commit_creds(new);
53021 +
53022 + return 0;
53023 + }
53024 +#endif
53025 + return 0;
53026 +}
53027 +
53028 +int
53029 +gr_handle_chroot_sysctl(const int op)
53030 +{
53031 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53032 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
53033 + && (op & MAY_WRITE))
53034 + return -EACCES;
53035 +#endif
53036 + return 0;
53037 +}
53038 +
53039 +void
53040 +gr_handle_chroot_chdir(struct path *path)
53041 +{
53042 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53043 + if (grsec_enable_chroot_chdir)
53044 + set_fs_pwd(current->fs, path);
53045 +#endif
53046 + return;
53047 +}
53048 +
53049 +int
53050 +gr_handle_chroot_chmod(const struct dentry *dentry,
53051 + const struct vfsmount *mnt, const int mode)
53052 +{
53053 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53054 + /* allow chmod +s on directories, but not on files */
53055 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53056 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53057 + proc_is_chrooted(current)) {
53058 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53059 + return -EPERM;
53060 + }
53061 +#endif
53062 + return 0;
53063 +}
53064 +
53065 +#ifdef CONFIG_SECURITY
53066 +EXPORT_SYMBOL(gr_handle_chroot_caps);
53067 +#endif
53068 diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
53069 --- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53070 +++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
53071 @@ -0,0 +1,447 @@
53072 +#include <linux/kernel.h>
53073 +#include <linux/module.h>
53074 +#include <linux/sched.h>
53075 +#include <linux/file.h>
53076 +#include <linux/fs.h>
53077 +#include <linux/kdev_t.h>
53078 +#include <linux/net.h>
53079 +#include <linux/in.h>
53080 +#include <linux/ip.h>
53081 +#include <linux/skbuff.h>
53082 +#include <linux/sysctl.h>
53083 +
53084 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53085 +void
53086 +pax_set_initial_flags(struct linux_binprm *bprm)
53087 +{
53088 + return;
53089 +}
53090 +#endif
53091 +
53092 +#ifdef CONFIG_SYSCTL
53093 +__u32
53094 +gr_handle_sysctl(const struct ctl_table * table, const int op)
53095 +{
53096 + return 0;
53097 +}
53098 +#endif
53099 +
53100 +#ifdef CONFIG_TASKSTATS
53101 +int gr_is_taskstats_denied(int pid)
53102 +{
53103 + return 0;
53104 +}
53105 +#endif
53106 +
53107 +int
53108 +gr_acl_is_enabled(void)
53109 +{
53110 + return 0;
53111 +}
53112 +
53113 +int
53114 +gr_handle_rawio(const struct inode *inode)
53115 +{
53116 + return 0;
53117 +}
53118 +
53119 +void
53120 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53121 +{
53122 + return;
53123 +}
53124 +
53125 +int
53126 +gr_handle_ptrace(struct task_struct *task, const long request)
53127 +{
53128 + return 0;
53129 +}
53130 +
53131 +int
53132 +gr_handle_proc_ptrace(struct task_struct *task)
53133 +{
53134 + return 0;
53135 +}
53136 +
53137 +void
53138 +gr_learn_resource(const struct task_struct *task,
53139 + const int res, const unsigned long wanted, const int gt)
53140 +{
53141 + return;
53142 +}
53143 +
53144 +int
53145 +gr_set_acls(const int type)
53146 +{
53147 + return 0;
53148 +}
53149 +
53150 +int
53151 +gr_check_hidden_task(const struct task_struct *tsk)
53152 +{
53153 + return 0;
53154 +}
53155 +
53156 +int
53157 +gr_check_protected_task(const struct task_struct *task)
53158 +{
53159 + return 0;
53160 +}
53161 +
53162 +int
53163 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53164 +{
53165 + return 0;
53166 +}
53167 +
53168 +void
53169 +gr_copy_label(struct task_struct *tsk)
53170 +{
53171 + return;
53172 +}
53173 +
53174 +void
53175 +gr_set_pax_flags(struct task_struct *task)
53176 +{
53177 + return;
53178 +}
53179 +
53180 +int
53181 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53182 + const int unsafe_share)
53183 +{
53184 + return 0;
53185 +}
53186 +
53187 +void
53188 +gr_handle_delete(const ino_t ino, const dev_t dev)
53189 +{
53190 + return;
53191 +}
53192 +
53193 +void
53194 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53195 +{
53196 + return;
53197 +}
53198 +
53199 +void
53200 +gr_handle_crash(struct task_struct *task, const int sig)
53201 +{
53202 + return;
53203 +}
53204 +
53205 +int
53206 +gr_check_crash_exec(const struct file *filp)
53207 +{
53208 + return 0;
53209 +}
53210 +
53211 +int
53212 +gr_check_crash_uid(const uid_t uid)
53213 +{
53214 + return 0;
53215 +}
53216 +
53217 +void
53218 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53219 + struct dentry *old_dentry,
53220 + struct dentry *new_dentry,
53221 + struct vfsmount *mnt, const __u8 replace)
53222 +{
53223 + return;
53224 +}
53225 +
53226 +int
53227 +gr_search_socket(const int family, const int type, const int protocol)
53228 +{
53229 + return 1;
53230 +}
53231 +
53232 +int
53233 +gr_search_connectbind(const int mode, const struct socket *sock,
53234 + const struct sockaddr_in *addr)
53235 +{
53236 + return 0;
53237 +}
53238 +
53239 +int
53240 +gr_is_capable(const int cap)
53241 +{
53242 + return 1;
53243 +}
53244 +
53245 +int
53246 +gr_is_capable_nolog(const int cap)
53247 +{
53248 + return 1;
53249 +}
53250 +
53251 +void
53252 +gr_handle_alertkill(struct task_struct *task)
53253 +{
53254 + return;
53255 +}
53256 +
53257 +__u32
53258 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53259 +{
53260 + return 1;
53261 +}
53262 +
53263 +__u32
53264 +gr_acl_handle_hidden_file(const struct dentry * dentry,
53265 + const struct vfsmount * mnt)
53266 +{
53267 + return 1;
53268 +}
53269 +
53270 +__u32
53271 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53272 + const int fmode)
53273 +{
53274 + return 1;
53275 +}
53276 +
53277 +__u32
53278 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53279 +{
53280 + return 1;
53281 +}
53282 +
53283 +__u32
53284 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53285 +{
53286 + return 1;
53287 +}
53288 +
53289 +int
53290 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53291 + unsigned int *vm_flags)
53292 +{
53293 + return 1;
53294 +}
53295 +
53296 +__u32
53297 +gr_acl_handle_truncate(const struct dentry * dentry,
53298 + const struct vfsmount * mnt)
53299 +{
53300 + return 1;
53301 +}
53302 +
53303 +__u32
53304 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53305 +{
53306 + return 1;
53307 +}
53308 +
53309 +__u32
53310 +gr_acl_handle_access(const struct dentry * dentry,
53311 + const struct vfsmount * mnt, const int fmode)
53312 +{
53313 + return 1;
53314 +}
53315 +
53316 +__u32
53317 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53318 + mode_t mode)
53319 +{
53320 + return 1;
53321 +}
53322 +
53323 +__u32
53324 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53325 + mode_t mode)
53326 +{
53327 + return 1;
53328 +}
53329 +
53330 +__u32
53331 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53332 +{
53333 + return 1;
53334 +}
53335 +
53336 +__u32
53337 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53338 +{
53339 + return 1;
53340 +}
53341 +
53342 +void
53343 +grsecurity_init(void)
53344 +{
53345 + return;
53346 +}
53347 +
53348 +__u32
53349 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53350 + const struct dentry * parent_dentry,
53351 + const struct vfsmount * parent_mnt,
53352 + const int mode)
53353 +{
53354 + return 1;
53355 +}
53356 +
53357 +__u32
53358 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53359 + const struct dentry * parent_dentry,
53360 + const struct vfsmount * parent_mnt)
53361 +{
53362 + return 1;
53363 +}
53364 +
53365 +__u32
53366 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53367 + const struct dentry * parent_dentry,
53368 + const struct vfsmount * parent_mnt, const char *from)
53369 +{
53370 + return 1;
53371 +}
53372 +
53373 +__u32
53374 +gr_acl_handle_link(const struct dentry * new_dentry,
53375 + const struct dentry * parent_dentry,
53376 + const struct vfsmount * parent_mnt,
53377 + const struct dentry * old_dentry,
53378 + const struct vfsmount * old_mnt, const char *to)
53379 +{
53380 + return 1;
53381 +}
53382 +
53383 +int
53384 +gr_acl_handle_rename(const struct dentry *new_dentry,
53385 + const struct dentry *parent_dentry,
53386 + const struct vfsmount *parent_mnt,
53387 + const struct dentry *old_dentry,
53388 + const struct inode *old_parent_inode,
53389 + const struct vfsmount *old_mnt, const char *newname)
53390 +{
53391 + return 0;
53392 +}
53393 +
53394 +int
53395 +gr_acl_handle_filldir(const struct file *file, const char *name,
53396 + const int namelen, const ino_t ino)
53397 +{
53398 + return 1;
53399 +}
53400 +
53401 +int
53402 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53403 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53404 +{
53405 + return 1;
53406 +}
53407 +
53408 +int
53409 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53410 +{
53411 + return 0;
53412 +}
53413 +
53414 +int
53415 +gr_search_accept(const struct socket *sock)
53416 +{
53417 + return 0;
53418 +}
53419 +
53420 +int
53421 +gr_search_listen(const struct socket *sock)
53422 +{
53423 + return 0;
53424 +}
53425 +
53426 +int
53427 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53428 +{
53429 + return 0;
53430 +}
53431 +
53432 +__u32
53433 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53434 +{
53435 + return 1;
53436 +}
53437 +
53438 +__u32
53439 +gr_acl_handle_creat(const struct dentry * dentry,
53440 + const struct dentry * p_dentry,
53441 + const struct vfsmount * p_mnt, const int fmode,
53442 + const int imode)
53443 +{
53444 + return 1;
53445 +}
53446 +
53447 +void
53448 +gr_acl_handle_exit(void)
53449 +{
53450 + return;
53451 +}
53452 +
53453 +int
53454 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53455 +{
53456 + return 1;
53457 +}
53458 +
53459 +void
53460 +gr_set_role_label(const uid_t uid, const gid_t gid)
53461 +{
53462 + return;
53463 +}
53464 +
53465 +int
53466 +gr_acl_handle_procpidmem(const struct task_struct *task)
53467 +{
53468 + return 0;
53469 +}
53470 +
53471 +int
53472 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53473 +{
53474 + return 0;
53475 +}
53476 +
53477 +int
53478 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53479 +{
53480 + return 0;
53481 +}
53482 +
53483 +void
53484 +gr_set_kernel_label(struct task_struct *task)
53485 +{
53486 + return;
53487 +}
53488 +
53489 +int
53490 +gr_check_user_change(int real, int effective, int fs)
53491 +{
53492 + return 0;
53493 +}
53494 +
53495 +int
53496 +gr_check_group_change(int real, int effective, int fs)
53497 +{
53498 + return 0;
53499 +}
53500 +
53501 +int gr_acl_enable_at_secure(void)
53502 +{
53503 + return 0;
53504 +}
53505 +
53506 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53507 +{
53508 + return dentry->d_inode->i_sb->s_dev;
53509 +}
53510 +
53511 +EXPORT_SYMBOL(gr_is_capable);
53512 +EXPORT_SYMBOL(gr_is_capable_nolog);
53513 +EXPORT_SYMBOL(gr_learn_resource);
53514 +EXPORT_SYMBOL(gr_set_kernel_label);
53515 +#ifdef CONFIG_SECURITY
53516 +EXPORT_SYMBOL(gr_check_user_change);
53517 +EXPORT_SYMBOL(gr_check_group_change);
53518 +#endif
53519 diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53520 --- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53521 +++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53522 @@ -0,0 +1,132 @@
53523 +#include <linux/kernel.h>
53524 +#include <linux/sched.h>
53525 +#include <linux/file.h>
53526 +#include <linux/binfmts.h>
53527 +#include <linux/smp_lock.h>
53528 +#include <linux/fs.h>
53529 +#include <linux/types.h>
53530 +#include <linux/grdefs.h>
53531 +#include <linux/grinternal.h>
53532 +#include <linux/capability.h>
53533 +#include <linux/compat.h>
53534 +
53535 +#include <asm/uaccess.h>
53536 +
53537 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53538 +static char gr_exec_arg_buf[132];
53539 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53540 +#endif
53541 +
53542 +void
53543 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53544 +{
53545 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53546 + char *grarg = gr_exec_arg_buf;
53547 + unsigned int i, x, execlen = 0;
53548 + char c;
53549 +
53550 + if (!((grsec_enable_execlog && grsec_enable_group &&
53551 + in_group_p(grsec_audit_gid))
53552 + || (grsec_enable_execlog && !grsec_enable_group)))
53553 + return;
53554 +
53555 + mutex_lock(&gr_exec_arg_mutex);
53556 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53557 +
53558 + if (unlikely(argv == NULL))
53559 + goto log;
53560 +
53561 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53562 + const char __user *p;
53563 + unsigned int len;
53564 +
53565 + if (copy_from_user(&p, argv + i, sizeof(p)))
53566 + goto log;
53567 + if (!p)
53568 + goto log;
53569 + len = strnlen_user(p, 128 - execlen);
53570 + if (len > 128 - execlen)
53571 + len = 128 - execlen;
53572 + else if (len > 0)
53573 + len--;
53574 + if (copy_from_user(grarg + execlen, p, len))
53575 + goto log;
53576 +
53577 + /* rewrite unprintable characters */
53578 + for (x = 0; x < len; x++) {
53579 + c = *(grarg + execlen + x);
53580 + if (c < 32 || c > 126)
53581 + *(grarg + execlen + x) = ' ';
53582 + }
53583 +
53584 + execlen += len;
53585 + *(grarg + execlen) = ' ';
53586 + *(grarg + execlen + 1) = '\0';
53587 + execlen++;
53588 + }
53589 +
53590 + log:
53591 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53592 + bprm->file->f_path.mnt, grarg);
53593 + mutex_unlock(&gr_exec_arg_mutex);
53594 +#endif
53595 + return;
53596 +}
53597 +
53598 +#ifdef CONFIG_COMPAT
53599 +void
53600 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53601 +{
53602 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53603 + char *grarg = gr_exec_arg_buf;
53604 + unsigned int i, x, execlen = 0;
53605 + char c;
53606 +
53607 + if (!((grsec_enable_execlog && grsec_enable_group &&
53608 + in_group_p(grsec_audit_gid))
53609 + || (grsec_enable_execlog && !grsec_enable_group)))
53610 + return;
53611 +
53612 + mutex_lock(&gr_exec_arg_mutex);
53613 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53614 +
53615 + if (unlikely(argv == NULL))
53616 + goto log;
53617 +
53618 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53619 + compat_uptr_t p;
53620 + unsigned int len;
53621 +
53622 + if (get_user(p, argv + i))
53623 + goto log;
53624 + len = strnlen_user(compat_ptr(p), 128 - execlen);
53625 + if (len > 128 - execlen)
53626 + len = 128 - execlen;
53627 + else if (len > 0)
53628 + len--;
53629 + else
53630 + goto log;
53631 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53632 + goto log;
53633 +
53634 + /* rewrite unprintable characters */
53635 + for (x = 0; x < len; x++) {
53636 + c = *(grarg + execlen + x);
53637 + if (c < 32 || c > 126)
53638 + *(grarg + execlen + x) = ' ';
53639 + }
53640 +
53641 + execlen += len;
53642 + *(grarg + execlen) = ' ';
53643 + *(grarg + execlen + 1) = '\0';
53644 + execlen++;
53645 + }
53646 +
53647 + log:
53648 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53649 + bprm->file->f_path.mnt, grarg);
53650 + mutex_unlock(&gr_exec_arg_mutex);
53651 +#endif
53652 + return;
53653 +}
53654 +#endif
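
[Editorial aside, not part of the patch: the exec-logging helpers above squeeze argv into a fixed 132-byte buffer and rewrite non-printable bytes as spaces before logging. The same sanitization, pulled out into a small userspace function for illustration -- the buffer size mirrors gr_exec_arg_buf, but the function names are ours.]

/* argsan.c -- illustrative reimplementation of the argv sanitization above. */
#include <stdio.h>
#include <string.h>

#define LOG_BUF 132	/* mirrors the patch's gr_exec_arg_buf size */

/* Append argv strings to out, space-separated, truncated to fit cap bytes,
 * with bytes outside the printable ASCII range rewritten as spaces. */
static void sanitize_args(char *out, size_t cap, int argc, char **argv)
{
	size_t used = 0;
	int i;

	memset(out, 0, cap);
	for (i = 0; i < argc && used < cap - 4; i++) {
		size_t len = strnlen(argv[i], cap - 4 - used);
		size_t x;

		memcpy(out + used, argv[i], len);
		for (x = 0; x < len; x++)
			if (out[used + x] < 32 || out[used + x] > 126)
				out[used + x] = ' ';
		used += len;
		out[used++] = ' ';
	}
	out[used] = '\0';
}

int main(int argc, char **argv)
{
	char buf[LOG_BUF];

	sanitize_args(buf, sizeof(buf), argc, argv);
	printf("exec: %s\n", buf);
	return 0;
}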
53655 diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53656 --- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53657 +++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53658 @@ -0,0 +1,24 @@
53659 +#include <linux/kernel.h>
53660 +#include <linux/sched.h>
53661 +#include <linux/fs.h>
53662 +#include <linux/file.h>
53663 +#include <linux/grinternal.h>
53664 +
53665 +int
53666 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53667 + const struct dentry *dir, const int flag, const int acc_mode)
53668 +{
53669 +#ifdef CONFIG_GRKERNSEC_FIFO
53670 + const struct cred *cred = current_cred();
53671 +
53672 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53673 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53674 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53675 + (cred->fsuid != dentry->d_inode->i_uid)) {
53676 + if (!inode_permission(dentry->d_inode, acc_mode))
53677 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53678 + return -EACCES;
53679 + }
53680 +#endif
53681 + return 0;
53682 +}
53683 diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53684 --- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53685 +++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53686 @@ -0,0 +1,23 @@
53687 +#include <linux/kernel.h>
53688 +#include <linux/sched.h>
53689 +#include <linux/grsecurity.h>
53690 +#include <linux/grinternal.h>
53691 +#include <linux/errno.h>
53692 +
53693 +void
53694 +gr_log_forkfail(const int retval)
53695 +{
53696 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53697 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53698 + switch (retval) {
53699 + case -EAGAIN:
53700 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53701 + break;
53702 + case -ENOMEM:
53703 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53704 + break;
53705 + }
53706 + }
53707 +#endif
53708 + return;
53709 +}
53710 diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53711 --- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53712 +++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53713 @@ -0,0 +1,270 @@
53714 +#include <linux/kernel.h>
53715 +#include <linux/sched.h>
53716 +#include <linux/mm.h>
53717 +#include <linux/smp_lock.h>
53718 +#include <linux/gracl.h>
53719 +#include <linux/slab.h>
53720 +#include <linux/vmalloc.h>
53721 +#include <linux/percpu.h>
53722 +#include <linux/module.h>
53723 +
53724 +int grsec_enable_brute;
53725 +int grsec_enable_link;
53726 +int grsec_enable_dmesg;
53727 +int grsec_enable_harden_ptrace;
53728 +int grsec_enable_fifo;
53729 +int grsec_enable_execlog;
53730 +int grsec_enable_signal;
53731 +int grsec_enable_forkfail;
53732 +int grsec_enable_audit_ptrace;
53733 +int grsec_enable_time;
53734 +int grsec_enable_audit_textrel;
53735 +int grsec_enable_group;
53736 +int grsec_audit_gid;
53737 +int grsec_enable_chdir;
53738 +int grsec_enable_mount;
53739 +int grsec_enable_rofs;
53740 +int grsec_enable_chroot_findtask;
53741 +int grsec_enable_chroot_mount;
53742 +int grsec_enable_chroot_shmat;
53743 +int grsec_enable_chroot_fchdir;
53744 +int grsec_enable_chroot_double;
53745 +int grsec_enable_chroot_pivot;
53746 +int grsec_enable_chroot_chdir;
53747 +int grsec_enable_chroot_chmod;
53748 +int grsec_enable_chroot_mknod;
53749 +int grsec_enable_chroot_nice;
53750 +int grsec_enable_chroot_execlog;
53751 +int grsec_enable_chroot_caps;
53752 +int grsec_enable_chroot_sysctl;
53753 +int grsec_enable_chroot_unix;
53754 +int grsec_enable_tpe;
53755 +int grsec_tpe_gid;
53756 +int grsec_enable_blackhole;
53757 +#ifdef CONFIG_IPV6_MODULE
53758 +EXPORT_SYMBOL(grsec_enable_blackhole);
53759 +#endif
53760 +int grsec_lastack_retries;
53761 +int grsec_enable_tpe_all;
53762 +int grsec_enable_tpe_invert;
53763 +int grsec_enable_socket_all;
53764 +int grsec_socket_all_gid;
53765 +int grsec_enable_socket_client;
53766 +int grsec_socket_client_gid;
53767 +int grsec_enable_socket_server;
53768 +int grsec_socket_server_gid;
53769 +int grsec_resource_logging;
53770 +int grsec_disable_privio;
53771 +int grsec_enable_log_rwxmaps;
53772 +int grsec_lock;
53773 +
53774 +DEFINE_SPINLOCK(grsec_alert_lock);
53775 +unsigned long grsec_alert_wtime = 0;
53776 +unsigned long grsec_alert_fyet = 0;
53777 +
53778 +DEFINE_SPINLOCK(grsec_audit_lock);
53779 +
53780 +DEFINE_RWLOCK(grsec_exec_file_lock);
53781 +
53782 +char *gr_shared_page[4];
53783 +
53784 +char *gr_alert_log_fmt;
53785 +char *gr_audit_log_fmt;
53786 +char *gr_alert_log_buf;
53787 +char *gr_audit_log_buf;
53788 +
53789 +extern struct gr_arg *gr_usermode;
53790 +extern unsigned char *gr_system_salt;
53791 +extern unsigned char *gr_system_sum;
53792 +
53793 +void __init
53794 +grsecurity_init(void)
53795 +{
53796 + int j;
53797 + /* create the per-cpu shared pages */
53798 +
53799 +#ifdef CONFIG_X86
53800 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53801 +#endif
53802 +
53803 + for (j = 0; j < 4; j++) {
53804 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53805 + if (gr_shared_page[j] == NULL) {
53806 + panic("Unable to allocate grsecurity shared page");
53807 + return;
53808 + }
53809 + }
53810 +
53811 + /* allocate log buffers */
53812 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53813 + if (!gr_alert_log_fmt) {
53814 + panic("Unable to allocate grsecurity alert log format buffer");
53815 + return;
53816 + }
53817 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53818 + if (!gr_audit_log_fmt) {
53819 + panic("Unable to allocate grsecurity audit log format buffer");
53820 + return;
53821 + }
53822 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53823 + if (!gr_alert_log_buf) {
53824 + panic("Unable to allocate grsecurity alert log buffer");
53825 + return;
53826 + }
53827 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53828 + if (!gr_audit_log_buf) {
53829 + panic("Unable to allocate grsecurity audit log buffer");
53830 + return;
53831 + }
53832 +
53833 + /* allocate memory for authentication structure */
53834 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53835 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53836 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53837 +
53838 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53839 + panic("Unable to allocate grsecurity authentication structure");
53840 + return;
53841 + }
53842 +
53843 +
53844 +#ifdef CONFIG_GRKERNSEC_IO
53845 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53846 + grsec_disable_privio = 1;
53847 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53848 + grsec_disable_privio = 1;
53849 +#else
53850 + grsec_disable_privio = 0;
53851 +#endif
53852 +#endif
53853 +
53854 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53855 + /* for backward compatibility, tpe_invert always defaults to on if
53856 + enabled in the kernel
53857 + */
53858 + grsec_enable_tpe_invert = 1;
53859 +#endif
53860 +
53861 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53862 +#ifndef CONFIG_GRKERNSEC_SYSCTL
53863 + grsec_lock = 1;
53864 +#endif
53865 +
53866 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53867 + grsec_enable_audit_textrel = 1;
53868 +#endif
53869 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53870 + grsec_enable_log_rwxmaps = 1;
53871 +#endif
53872 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53873 + grsec_enable_group = 1;
53874 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53875 +#endif
53876 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53877 + grsec_enable_chdir = 1;
53878 +#endif
53879 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53880 + grsec_enable_harden_ptrace = 1;
53881 +#endif
53882 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53883 + grsec_enable_mount = 1;
53884 +#endif
53885 +#ifdef CONFIG_GRKERNSEC_LINK
53886 + grsec_enable_link = 1;
53887 +#endif
53888 +#ifdef CONFIG_GRKERNSEC_BRUTE
53889 + grsec_enable_brute = 1;
53890 +#endif
53891 +#ifdef CONFIG_GRKERNSEC_DMESG
53892 + grsec_enable_dmesg = 1;
53893 +#endif
53894 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53895 + grsec_enable_blackhole = 1;
53896 + grsec_lastack_retries = 4;
53897 +#endif
53898 +#ifdef CONFIG_GRKERNSEC_FIFO
53899 + grsec_enable_fifo = 1;
53900 +#endif
53901 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53902 + grsec_enable_execlog = 1;
53903 +#endif
53904 +#ifdef CONFIG_GRKERNSEC_SIGNAL
53905 + grsec_enable_signal = 1;
53906 +#endif
53907 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53908 + grsec_enable_forkfail = 1;
53909 +#endif
53910 +#ifdef CONFIG_GRKERNSEC_TIME
53911 + grsec_enable_time = 1;
53912 +#endif
53913 +#ifdef CONFIG_GRKERNSEC_RESLOG
53914 + grsec_resource_logging = 1;
53915 +#endif
53916 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53917 + grsec_enable_chroot_findtask = 1;
53918 +#endif
53919 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53920 + grsec_enable_chroot_unix = 1;
53921 +#endif
53922 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53923 + grsec_enable_chroot_mount = 1;
53924 +#endif
53925 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53926 + grsec_enable_chroot_fchdir = 1;
53927 +#endif
53928 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53929 + grsec_enable_chroot_shmat = 1;
53930 +#endif
53931 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53932 + grsec_enable_audit_ptrace = 1;
53933 +#endif
53934 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53935 + grsec_enable_chroot_double = 1;
53936 +#endif
53937 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53938 + grsec_enable_chroot_pivot = 1;
53939 +#endif
53940 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53941 + grsec_enable_chroot_chdir = 1;
53942 +#endif
53943 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53944 + grsec_enable_chroot_chmod = 1;
53945 +#endif
53946 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53947 + grsec_enable_chroot_mknod = 1;
53948 +#endif
53949 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53950 + grsec_enable_chroot_nice = 1;
53951 +#endif
53952 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53953 + grsec_enable_chroot_execlog = 1;
53954 +#endif
53955 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53956 + grsec_enable_chroot_caps = 1;
53957 +#endif
53958 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53959 + grsec_enable_chroot_sysctl = 1;
53960 +#endif
53961 +#ifdef CONFIG_GRKERNSEC_TPE
53962 + grsec_enable_tpe = 1;
53963 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53964 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53965 + grsec_enable_tpe_all = 1;
53966 +#endif
53967 +#endif
53968 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53969 + grsec_enable_socket_all = 1;
53970 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53971 +#endif
53972 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53973 + grsec_enable_socket_client = 1;
53974 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53975 +#endif
53976 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53977 + grsec_enable_socket_server = 1;
53978 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53979 +#endif
53980 +#endif
53981 +
53982 + return;
53983 +}
53984 diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53985 --- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53986 +++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53987 @@ -0,0 +1,43 @@
53988 +#include <linux/kernel.h>
53989 +#include <linux/sched.h>
53990 +#include <linux/fs.h>
53991 +#include <linux/file.h>
53992 +#include <linux/grinternal.h>
53993 +
53994 +int
53995 +gr_handle_follow_link(const struct inode *parent,
53996 + const struct inode *inode,
53997 + const struct dentry *dentry, const struct vfsmount *mnt)
53998 +{
53999 +#ifdef CONFIG_GRKERNSEC_LINK
54000 + const struct cred *cred = current_cred();
54001 +
54002 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54003 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54004 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54005 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54006 + return -EACCES;
54007 + }
54008 +#endif
54009 + return 0;
54010 +}
54011 +
54012 +int
54013 +gr_handle_hardlink(const struct dentry *dentry,
54014 + const struct vfsmount *mnt,
54015 + struct inode *inode, const int mode, const char *to)
54016 +{
54017 +#ifdef CONFIG_GRKERNSEC_LINK
54018 + const struct cred *cred = current_cred();
54019 +
54020 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54021 + (!S_ISREG(mode) || (mode & S_ISUID) ||
54022 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54023 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54024 + !capable(CAP_FOWNER) && cred->uid) {
54025 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54026 + return -EPERM;
54027 + }
54028 +#endif
54029 + return 0;
54030 +}
54031 diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
54032 --- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54033 +++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
54034 @@ -0,0 +1,310 @@
54035 +#include <linux/kernel.h>
54036 +#include <linux/sched.h>
54037 +#include <linux/file.h>
54038 +#include <linux/tty.h>
54039 +#include <linux/fs.h>
54040 +#include <linux/grinternal.h>
54041 +
54042 +#ifdef CONFIG_TREE_PREEMPT_RCU
54043 +#define DISABLE_PREEMPT() preempt_disable()
54044 +#define ENABLE_PREEMPT() preempt_enable()
54045 +#else
54046 +#define DISABLE_PREEMPT()
54047 +#define ENABLE_PREEMPT()
54048 +#endif
54049 +
54050 +#define BEGIN_LOCKS(x) \
54051 + DISABLE_PREEMPT(); \
54052 + rcu_read_lock(); \
54053 + read_lock(&tasklist_lock); \
54054 + read_lock(&grsec_exec_file_lock); \
54055 + if (x != GR_DO_AUDIT) \
54056 + spin_lock(&grsec_alert_lock); \
54057 + else \
54058 + spin_lock(&grsec_audit_lock)
54059 +
54060 +#define END_LOCKS(x) \
54061 + if (x != GR_DO_AUDIT) \
54062 + spin_unlock(&grsec_alert_lock); \
54063 + else \
54064 + spin_unlock(&grsec_audit_lock); \
54065 + read_unlock(&grsec_exec_file_lock); \
54066 + read_unlock(&tasklist_lock); \
54067 + rcu_read_unlock(); \
54068 + ENABLE_PREEMPT(); \
54069 + if (x == GR_DONT_AUDIT) \
54070 + gr_handle_alertkill(current)
54071 +
54072 +enum {
54073 + FLOODING,
54074 + NO_FLOODING
54075 +};
54076 +
54077 +extern char *gr_alert_log_fmt;
54078 +extern char *gr_audit_log_fmt;
54079 +extern char *gr_alert_log_buf;
54080 +extern char *gr_audit_log_buf;
54081 +
54082 +static int gr_log_start(int audit)
54083 +{
54084 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54085 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54086 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54087 +
54088 + if (audit == GR_DO_AUDIT)
54089 + goto set_fmt;
54090 +
54091 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54092 + grsec_alert_wtime = jiffies;
54093 + grsec_alert_fyet = 0;
54094 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54095 + grsec_alert_fyet++;
54096 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54097 + grsec_alert_wtime = jiffies;
54098 + grsec_alert_fyet++;
54099 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54100 + return FLOODING;
54101 + } else return FLOODING;
54102 +
54103 +set_fmt:
54104 + memset(buf, 0, PAGE_SIZE);
54105 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
54106 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54107 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54108 + } else if (current->signal->curr_ip) {
54109 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54110 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54111 + } else if (gr_acl_is_enabled()) {
54112 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54113 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54114 + } else {
54115 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
54116 + strcpy(buf, fmt);
54117 + }
54118 +
54119 + return NO_FLOODING;
54120 +}
54121 +
54122 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54123 + __attribute__ ((format (printf, 2, 0)));
54124 +
54125 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54126 +{
54127 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54128 + unsigned int len = strlen(buf);
54129 +
54130 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54131 +
54132 + return;
54133 +}
54134 +
54135 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54136 + __attribute__ ((format (printf, 2, 3)));
54137 +
54138 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54139 +{
54140 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54141 + unsigned int len = strlen(buf);
54142 + va_list ap;
54143 +
54144 + va_start(ap, msg);
54145 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54146 + va_end(ap);
54147 +
54148 + return;
54149 +}
54150 +
54151 +static void gr_log_end(int audit)
54152 +{
54153 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54154 + unsigned int len = strlen(buf);
54155 +
54156 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54157 + printk("%s\n", buf);
54158 +
54159 + return;
54160 +}
54161 +
54162 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54163 +{
54164 + int logtype;
54165 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54166 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54167 + void *voidptr = NULL;
54168 + int num1 = 0, num2 = 0;
54169 + unsigned long ulong1 = 0, ulong2 = 0;
54170 + struct dentry *dentry = NULL;
54171 + struct vfsmount *mnt = NULL;
54172 + struct file *file = NULL;
54173 + struct task_struct *task = NULL;
54174 + const struct cred *cred, *pcred;
54175 + va_list ap;
54176 +
54177 + BEGIN_LOCKS(audit);
54178 + logtype = gr_log_start(audit);
54179 + if (logtype == FLOODING) {
54180 + END_LOCKS(audit);
54181 + return;
54182 + }
54183 + va_start(ap, argtypes);
54184 + switch (argtypes) {
54185 + case GR_TTYSNIFF:
54186 + task = va_arg(ap, struct task_struct *);
54187 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54188 + break;
54189 + case GR_SYSCTL_HIDDEN:
54190 + str1 = va_arg(ap, char *);
54191 + gr_log_middle_varargs(audit, msg, result, str1);
54192 + break;
54193 + case GR_RBAC:
54194 + dentry = va_arg(ap, struct dentry *);
54195 + mnt = va_arg(ap, struct vfsmount *);
54196 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54197 + break;
54198 + case GR_RBAC_STR:
54199 + dentry = va_arg(ap, struct dentry *);
54200 + mnt = va_arg(ap, struct vfsmount *);
54201 + str1 = va_arg(ap, char *);
54202 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54203 + break;
54204 + case GR_STR_RBAC:
54205 + str1 = va_arg(ap, char *);
54206 + dentry = va_arg(ap, struct dentry *);
54207 + mnt = va_arg(ap, struct vfsmount *);
54208 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54209 + break;
54210 + case GR_RBAC_MODE2:
54211 + dentry = va_arg(ap, struct dentry *);
54212 + mnt = va_arg(ap, struct vfsmount *);
54213 + str1 = va_arg(ap, char *);
54214 + str2 = va_arg(ap, char *);
54215 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54216 + break;
54217 + case GR_RBAC_MODE3:
54218 + dentry = va_arg(ap, struct dentry *);
54219 + mnt = va_arg(ap, struct vfsmount *);
54220 + str1 = va_arg(ap, char *);
54221 + str2 = va_arg(ap, char *);
54222 + str3 = va_arg(ap, char *);
54223 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54224 + break;
54225 + case GR_FILENAME:
54226 + dentry = va_arg(ap, struct dentry *);
54227 + mnt = va_arg(ap, struct vfsmount *);
54228 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54229 + break;
54230 + case GR_STR_FILENAME:
54231 + str1 = va_arg(ap, char *);
54232 + dentry = va_arg(ap, struct dentry *);
54233 + mnt = va_arg(ap, struct vfsmount *);
54234 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54235 + break;
54236 + case GR_FILENAME_STR:
54237 + dentry = va_arg(ap, struct dentry *);
54238 + mnt = va_arg(ap, struct vfsmount *);
54239 + str1 = va_arg(ap, char *);
54240 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54241 + break;
54242 + case GR_FILENAME_TWO_INT:
54243 + dentry = va_arg(ap, struct dentry *);
54244 + mnt = va_arg(ap, struct vfsmount *);
54245 + num1 = va_arg(ap, int);
54246 + num2 = va_arg(ap, int);
54247 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54248 + break;
54249 + case GR_FILENAME_TWO_INT_STR:
54250 + dentry = va_arg(ap, struct dentry *);
54251 + mnt = va_arg(ap, struct vfsmount *);
54252 + num1 = va_arg(ap, int);
54253 + num2 = va_arg(ap, int);
54254 + str1 = va_arg(ap, char *);
54255 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54256 + break;
54257 + case GR_TEXTREL:
54258 + file = va_arg(ap, struct file *);
54259 + ulong1 = va_arg(ap, unsigned long);
54260 + ulong2 = va_arg(ap, unsigned long);
54261 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54262 + break;
54263 + case GR_PTRACE:
54264 + task = va_arg(ap, struct task_struct *);
54265 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54266 + break;
54267 + case GR_RESOURCE:
54268 + task = va_arg(ap, struct task_struct *);
54269 + cred = __task_cred(task);
54270 + pcred = __task_cred(task->real_parent);
54271 + ulong1 = va_arg(ap, unsigned long);
54272 + str1 = va_arg(ap, char *);
54273 + ulong2 = va_arg(ap, unsigned long);
54274 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54275 + break;
54276 + case GR_CAP:
54277 + task = va_arg(ap, struct task_struct *);
54278 + cred = __task_cred(task);
54279 + pcred = __task_cred(task->real_parent);
54280 + str1 = va_arg(ap, char *);
54281 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54282 + break;
54283 + case GR_SIG:
54284 + str1 = va_arg(ap, char *);
54285 + voidptr = va_arg(ap, void *);
54286 + gr_log_middle_varargs(audit, msg, str1, voidptr);
54287 + break;
54288 + case GR_SIG2:
54289 + task = va_arg(ap, struct task_struct *);
54290 + cred = __task_cred(task);
54291 + pcred = __task_cred(task->real_parent);
54292 + num1 = va_arg(ap, int);
54293 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54294 + break;
54295 + case GR_CRASH1:
54296 + task = va_arg(ap, struct task_struct *);
54297 + cred = __task_cred(task);
54298 + pcred = __task_cred(task->real_parent);
54299 + ulong1 = va_arg(ap, unsigned long);
54300 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54301 + break;
54302 + case GR_CRASH2:
54303 + task = va_arg(ap, struct task_struct *);
54304 + cred = __task_cred(task);
54305 + pcred = __task_cred(task->real_parent);
54306 + ulong1 = va_arg(ap, unsigned long);
54307 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54308 + break;
54309 + case GR_RWXMAP:
54310 + file = va_arg(ap, struct file *);
54311 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54312 + break;
54313 + case GR_PSACCT:
54314 + {
54315 + unsigned int wday, cday;
54316 + __u8 whr, chr;
54317 + __u8 wmin, cmin;
54318 + __u8 wsec, csec;
54319 + char cur_tty[64] = { 0 };
54320 + char parent_tty[64] = { 0 };
54321 +
54322 + task = va_arg(ap, struct task_struct *);
54323 + wday = va_arg(ap, unsigned int);
54324 + cday = va_arg(ap, unsigned int);
54325 + whr = va_arg(ap, int);
54326 + chr = va_arg(ap, int);
54327 + wmin = va_arg(ap, int);
54328 + cmin = va_arg(ap, int);
54329 + wsec = va_arg(ap, int);
54330 + csec = va_arg(ap, int);
54331 + ulong1 = va_arg(ap, unsigned long);
54332 + cred = __task_cred(task);
54333 + pcred = __task_cred(task->real_parent);
54334 +
54335 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54336 + }
54337 + break;
54338 + default:
54339 + gr_log_middle(audit, msg, ap);
54340 + }
54341 + va_end(ap);
54342 + gr_log_end(audit);
54343 + END_LOCKS(audit);
54344 +}
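
The gr_log_start() routine above rate-limits alerts: once CONFIG_GRKERNSEC_FLOODBURST messages have been emitted inside a CONFIG_GRKERNSEC_FLOODTIME-second window, further alerts are dropped until the window expires, and the suppression is announced exactly once. A minimal userspace sketch of that burst-window pattern follows; the constants, the window_start/sent_in_window names and the use of time() in place of jiffies are illustrative assumptions, not part of the patch.

    #include <stdio.h>
    #include <time.h>

    #define FLOODTIME  10  /* seconds per window (illustrative stand-in)   */
    #define FLOODBURST 4   /* alerts allowed per window (illustrative)     */

    static time_t window_start;   /* analogue of grsec_alert_wtime */
    static int    sent_in_window; /* analogue of grsec_alert_fyet  */

    /* Return 1 if an alert may be logged now, 0 if we are flooding. */
    static int alert_allowed(time_t now)
    {
        if (window_start == 0 || now - window_start > FLOODTIME) {
            window_start = now;        /* window expired: start a fresh one */
            sent_in_window = 0;
            return 1;
        }
        if (sent_in_window < FLOODBURST) {
            sent_in_window++;
            return 1;
        }
        if (sent_in_window == FLOODBURST) {
            /* first suppressed alert: announce the silence period once */
            window_start = now;
            sent_in_window++;
            printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
        }
        return 0;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 8; i++)
            printf("alert %d: %s\n", i,
                   alert_allowed(time(NULL)) ? "logged" : "dropped");
        return 0;
    }
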
54345 diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54346 --- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54347 +++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54348 @@ -0,0 +1,33 @@
54349 +#include <linux/kernel.h>
54350 +#include <linux/sched.h>
54351 +#include <linux/mm.h>
54352 +#include <linux/mman.h>
54353 +#include <linux/grinternal.h>
54354 +
54355 +void
54356 +gr_handle_ioperm(void)
54357 +{
54358 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54359 + return;
54360 +}
54361 +
54362 +void
54363 +gr_handle_iopl(void)
54364 +{
54365 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54366 + return;
54367 +}
54368 +
54369 +void
54370 +gr_handle_mem_readwrite(u64 from, u64 to)
54371 +{
54372 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54373 + return;
54374 +}
54375 +
54376 +void
54377 +gr_handle_vm86(void)
54378 +{
54379 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54380 + return;
54381 +}
54382 diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54383 --- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54384 +++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54385 @@ -0,0 +1,62 @@
54386 +#include <linux/kernel.h>
54387 +#include <linux/sched.h>
54388 +#include <linux/mount.h>
54389 +#include <linux/grsecurity.h>
54390 +#include <linux/grinternal.h>
54391 +
54392 +void
54393 +gr_log_remount(const char *devname, const int retval)
54394 +{
54395 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54396 + if (grsec_enable_mount && (retval >= 0))
54397 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54398 +#endif
54399 + return;
54400 +}
54401 +
54402 +void
54403 +gr_log_unmount(const char *devname, const int retval)
54404 +{
54405 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54406 + if (grsec_enable_mount && (retval >= 0))
54407 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54408 +#endif
54409 + return;
54410 +}
54411 +
54412 +void
54413 +gr_log_mount(const char *from, const char *to, const int retval)
54414 +{
54415 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54416 + if (grsec_enable_mount && (retval >= 0))
54417 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54418 +#endif
54419 + return;
54420 +}
54421 +
54422 +int
54423 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54424 +{
54425 +#ifdef CONFIG_GRKERNSEC_ROFS
54426 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54427 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54428 + return -EPERM;
54429 + } else
54430 + return 0;
54431 +#endif
54432 + return 0;
54433 +}
54434 +
54435 +int
54436 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54437 +{
54438 +#ifdef CONFIG_GRKERNSEC_ROFS
54439 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54440 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54441 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54442 + return -EPERM;
54443 + } else
54444 + return 0;
54445 +#endif
54446 + return 0;
54447 +}
54448 diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54449 --- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54450 +++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54451 @@ -0,0 +1,36 @@
54452 +#include <linux/kernel.h>
54453 +#include <linux/sched.h>
54454 +#include <linux/mm.h>
54455 +#include <linux/file.h>
54456 +#include <linux/grinternal.h>
54457 +#include <linux/grsecurity.h>
54458 +
54459 +void
54460 +gr_log_textrel(struct vm_area_struct * vma)
54461 +{
54462 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54463 + if (grsec_enable_audit_textrel)
54464 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54465 +#endif
54466 + return;
54467 +}
54468 +
54469 +void
54470 +gr_log_rwxmmap(struct file *file)
54471 +{
54472 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54473 + if (grsec_enable_log_rwxmaps)
54474 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54475 +#endif
54476 + return;
54477 +}
54478 +
54479 +void
54480 +gr_log_rwxmprotect(struct file *file)
54481 +{
54482 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54483 + if (grsec_enable_log_rwxmaps)
54484 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54485 +#endif
54486 + return;
54487 +}
54488 diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54489 --- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54490 +++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54491 @@ -0,0 +1,14 @@
54492 +#include <linux/kernel.h>
54493 +#include <linux/sched.h>
54494 +#include <linux/grinternal.h>
54495 +#include <linux/grsecurity.h>
54496 +
54497 +void
54498 +gr_audit_ptrace(struct task_struct *task)
54499 +{
54500 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54501 + if (grsec_enable_audit_ptrace)
54502 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54503 +#endif
54504 + return;
54505 +}
54506 diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54507 --- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54508 +++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54509 @@ -0,0 +1,205 @@
54510 +#include <linux/kernel.h>
54511 +#include <linux/sched.h>
54512 +#include <linux/delay.h>
54513 +#include <linux/grsecurity.h>
54514 +#include <linux/grinternal.h>
54515 +#include <linux/hardirq.h>
54516 +
54517 +char *signames[] = {
54518 + [SIGSEGV] = "Segmentation fault",
54519 + [SIGILL] = "Illegal instruction",
54520 + [SIGABRT] = "Abort",
54521 + [SIGBUS] = "Invalid alignment/Bus error"
54522 +};
54523 +
54524 +void
54525 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54526 +{
54527 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54528 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54529 + (sig == SIGABRT) || (sig == SIGBUS))) {
54530 + if (t->pid == current->pid) {
54531 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54532 + } else {
54533 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54534 + }
54535 + }
54536 +#endif
54537 + return;
54538 +}
54539 +
54540 +int
54541 +gr_handle_signal(const struct task_struct *p, const int sig)
54542 +{
54543 +#ifdef CONFIG_GRKERNSEC
54544 + if (current->pid > 1 && gr_check_protected_task(p)) {
54545 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54546 + return -EPERM;
54547 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54548 + return -EPERM;
54549 + }
54550 +#endif
54551 + return 0;
54552 +}
54553 +
54554 +#ifdef CONFIG_GRKERNSEC
54555 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54556 +
54557 +int gr_fake_force_sig(int sig, struct task_struct *t)
54558 +{
54559 + unsigned long int flags;
54560 + int ret, blocked, ignored;
54561 + struct k_sigaction *action;
54562 +
54563 + spin_lock_irqsave(&t->sighand->siglock, flags);
54564 + action = &t->sighand->action[sig-1];
54565 + ignored = action->sa.sa_handler == SIG_IGN;
54566 + blocked = sigismember(&t->blocked, sig);
54567 + if (blocked || ignored) {
54568 + action->sa.sa_handler = SIG_DFL;
54569 + if (blocked) {
54570 + sigdelset(&t->blocked, sig);
54571 + recalc_sigpending_and_wake(t);
54572 + }
54573 + }
54574 + if (action->sa.sa_handler == SIG_DFL)
54575 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54576 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54577 +
54578 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54579 +
54580 + return ret;
54581 +}
54582 +#endif
54583 +
54584 +#ifdef CONFIG_GRKERNSEC_BRUTE
54585 +#define GR_USER_BAN_TIME (15 * 60)
54586 +
54587 +static int __get_dumpable(unsigned long mm_flags)
54588 +{
54589 + int ret;
54590 +
54591 + ret = mm_flags & MMF_DUMPABLE_MASK;
54592 + return (ret >= 2) ? 2 : ret;
54593 +}
54594 +#endif
54595 +
54596 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54597 +{
54598 +#ifdef CONFIG_GRKERNSEC_BRUTE
54599 + uid_t uid = 0;
54600 +
54601 + if (!grsec_enable_brute)
54602 + return;
54603 +
54604 + rcu_read_lock();
54605 + read_lock(&tasklist_lock);
54606 + read_lock(&grsec_exec_file_lock);
54607 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54608 + p->real_parent->brute = 1;
54609 + else {
54610 + const struct cred *cred = __task_cred(p), *cred2;
54611 + struct task_struct *tsk, *tsk2;
54612 +
54613 + if (!__get_dumpable(mm_flags) && cred->uid) {
54614 + struct user_struct *user;
54615 +
54616 + uid = cred->uid;
54617 +
54618 + /* this is put upon execution past expiration */
54619 + user = find_user(uid);
54620 + if (user == NULL)
54621 + goto unlock;
54622 + user->banned = 1;
54623 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54624 + if (user->ban_expires == ~0UL)
54625 + user->ban_expires--;
54626 +
54627 + do_each_thread(tsk2, tsk) {
54628 + cred2 = __task_cred(tsk);
54629 + if (tsk != p && cred2->uid == uid)
54630 + gr_fake_force_sig(SIGKILL, tsk);
54631 + } while_each_thread(tsk2, tsk);
54632 + }
54633 + }
54634 +unlock:
54635 + read_unlock(&grsec_exec_file_lock);
54636 + read_unlock(&tasklist_lock);
54637 + rcu_read_unlock();
54638 +
54639 + if (uid)
54640 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54641 +#endif
54642 + return;
54643 +}
54644 +
54645 +void gr_handle_brute_check(void)
54646 +{
54647 +#ifdef CONFIG_GRKERNSEC_BRUTE
54648 + if (current->brute)
54649 + msleep(30 * 1000);
54650 +#endif
54651 + return;
54652 +}
54653 +
54654 +void gr_handle_kernel_exploit(void)
54655 +{
54656 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54657 + const struct cred *cred;
54658 + struct task_struct *tsk, *tsk2;
54659 + struct user_struct *user;
54660 + uid_t uid;
54661 +
54662 + if (in_irq() || in_serving_softirq() || in_nmi())
54663 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54664 +
54665 + uid = current_uid();
54666 +
54667 + if (uid == 0)
54668 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
54669 + else {
54670 + /* kill all the processes of this user, hold a reference
54671 + to their creds struct, and prevent them from creating
54672 + another process until system reset
54673 + */
54674 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54675 + /* we intentionally leak this ref */
54676 + user = get_uid(current->cred->user);
54677 + if (user) {
54678 + user->banned = 1;
54679 + user->ban_expires = ~0UL;
54680 + }
54681 +
54682 + read_lock(&tasklist_lock);
54683 + do_each_thread(tsk2, tsk) {
54684 + cred = __task_cred(tsk);
54685 + if (cred->uid == uid)
54686 + gr_fake_force_sig(SIGKILL, tsk);
54687 + } while_each_thread(tsk2, tsk);
54688 + read_unlock(&tasklist_lock);
54689 + }
54690 +#endif
54691 +}
54692 +
54693 +int __gr_process_user_ban(struct user_struct *user)
54694 +{
54695 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54696 + if (unlikely(user->banned)) {
54697 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54698 + user->banned = 0;
54699 + user->ban_expires = 0;
54700 + free_uid(user);
54701 + } else
54702 + return -EPERM;
54703 + }
54704 +#endif
54705 + return 0;
54706 +}
54707 +
54708 +int gr_process_user_ban(void)
54709 +{
54710 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54711 + return __gr_process_user_ban(current->cred->user);
54712 +#endif
54713 + return 0;
54714 +}
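
Both gr_handle_brute_attach() and gr_handle_kernel_exploit() above mark a user_struct as banned and set ban_expires, with ~0UL reserved as a "banned until reboot" sentinel; __gr_process_user_ban() later refuses the user until the expiry time passes. A compact userspace sketch of that expiry check, using a simplified struct and time() in place of get_seconds() (every name below is illustrative, not taken from the patch):

    #include <stdio.h>
    #include <time.h>

    #define BAN_FOREVER   (~0UL)      /* sentinel: never expires (kernel lockout case) */
    #define USER_BAN_TIME (15 * 60)   /* 15 minutes, mirroring GR_USER_BAN_TIME */

    struct banned_user {
        int           banned;
        unsigned long ban_expires;    /* absolute time in seconds, or BAN_FOREVER */
    };

    /* Ban a user for USER_BAN_TIME seconds from now. */
    static void ban_user(struct banned_user *u)
    {
        u->banned = 1;
        u->ban_expires = (unsigned long)time(NULL) + USER_BAN_TIME;
        if (u->ban_expires == BAN_FOREVER)   /* avoid colliding with the sentinel */
            u->ban_expires--;
    }

    /* Return -1 (an -EPERM analogue) while the ban is active, 0 otherwise. */
    static int process_user_ban(struct banned_user *u)
    {
        if (u->banned) {
            if (u->ban_expires != BAN_FOREVER &&
                (unsigned long)time(NULL) >= u->ban_expires) {
                u->banned = 0;               /* window elapsed: lift the ban */
                u->ban_expires = 0;
            } else {
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct banned_user u = { 0, 0 };

        ban_user(&u);
        printf("right after ban: %s\n", process_user_ban(&u) ? "denied" : "allowed");
        return 0;
    }
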
54715 diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54716 --- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54717 +++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54718 @@ -0,0 +1,275 @@
54719 +#include <linux/kernel.h>
54720 +#include <linux/module.h>
54721 +#include <linux/sched.h>
54722 +#include <linux/file.h>
54723 +#include <linux/net.h>
54724 +#include <linux/in.h>
54725 +#include <linux/ip.h>
54726 +#include <net/sock.h>
54727 +#include <net/inet_sock.h>
54728 +#include <linux/grsecurity.h>
54729 +#include <linux/grinternal.h>
54730 +#include <linux/gracl.h>
54731 +
54732 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54733 +EXPORT_SYMBOL(gr_cap_rtnetlink);
54734 +
54735 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54736 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54737 +
54738 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
54739 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
54740 +
54741 +#ifdef CONFIG_UNIX_MODULE
54742 +EXPORT_SYMBOL(gr_acl_handle_unix);
54743 +EXPORT_SYMBOL(gr_acl_handle_mknod);
54744 +EXPORT_SYMBOL(gr_handle_chroot_unix);
54745 +EXPORT_SYMBOL(gr_handle_create);
54746 +#endif
54747 +
54748 +#ifdef CONFIG_GRKERNSEC
54749 +#define gr_conn_table_size 32749
54750 +struct conn_table_entry {
54751 + struct conn_table_entry *next;
54752 + struct signal_struct *sig;
54753 +};
54754 +
54755 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54756 +DEFINE_SPINLOCK(gr_conn_table_lock);
54757 +
54758 +extern const char * gr_socktype_to_name(unsigned char type);
54759 +extern const char * gr_proto_to_name(unsigned char proto);
54760 +extern const char * gr_sockfamily_to_name(unsigned char family);
54761 +
54762 +static __inline__ int
54763 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54764 +{
54765 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54766 +}
54767 +
54768 +static __inline__ int
54769 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54770 + __u16 sport, __u16 dport)
54771 +{
54772 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54773 + sig->gr_sport == sport && sig->gr_dport == dport))
54774 + return 1;
54775 + else
54776 + return 0;
54777 +}
54778 +
54779 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54780 +{
54781 + struct conn_table_entry **match;
54782 + unsigned int index;
54783 +
54784 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54785 + sig->gr_sport, sig->gr_dport,
54786 + gr_conn_table_size);
54787 +
54788 + newent->sig = sig;
54789 +
54790 + match = &gr_conn_table[index];
54791 + newent->next = *match;
54792 + *match = newent;
54793 +
54794 + return;
54795 +}
54796 +
54797 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54798 +{
54799 + struct conn_table_entry *match, *last = NULL;
54800 + unsigned int index;
54801 +
54802 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54803 + sig->gr_sport, sig->gr_dport,
54804 + gr_conn_table_size);
54805 +
54806 + match = gr_conn_table[index];
54807 + while (match && !conn_match(match->sig,
54808 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54809 + sig->gr_dport)) {
54810 + last = match;
54811 + match = match->next;
54812 + }
54813 +
54814 + if (match) {
54815 + if (last)
54816 + last->next = match->next;
54817 + else
54818 + gr_conn_table[index] = NULL;
54819 + kfree(match);
54820 + }
54821 +
54822 + return;
54823 +}
54824 +
54825 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54826 + __u16 sport, __u16 dport)
54827 +{
54828 + struct conn_table_entry *match;
54829 + unsigned int index;
54830 +
54831 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54832 +
54833 + match = gr_conn_table[index];
54834 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54835 + match = match->next;
54836 +
54837 + if (match)
54838 + return match->sig;
54839 + else
54840 + return NULL;
54841 +}
54842 +
54843 +#endif
54844 +
54845 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54846 +{
54847 +#ifdef CONFIG_GRKERNSEC
54848 + struct signal_struct *sig = task->signal;
54849 + struct conn_table_entry *newent;
54850 +
54851 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54852 + if (newent == NULL)
54853 + return;
54854 + /* no bh lock needed since we are called with bh disabled */
54855 + spin_lock(&gr_conn_table_lock);
54856 + gr_del_task_from_ip_table_nolock(sig);
54857 + sig->gr_saddr = inet->rcv_saddr;
54858 + sig->gr_daddr = inet->daddr;
54859 + sig->gr_sport = inet->sport;
54860 + sig->gr_dport = inet->dport;
54861 + gr_add_to_task_ip_table_nolock(sig, newent);
54862 + spin_unlock(&gr_conn_table_lock);
54863 +#endif
54864 + return;
54865 +}
54866 +
54867 +void gr_del_task_from_ip_table(struct task_struct *task)
54868 +{
54869 +#ifdef CONFIG_GRKERNSEC
54870 + spin_lock_bh(&gr_conn_table_lock);
54871 + gr_del_task_from_ip_table_nolock(task->signal);
54872 + spin_unlock_bh(&gr_conn_table_lock);
54873 +#endif
54874 + return;
54875 +}
54876 +
54877 +void
54878 +gr_attach_curr_ip(const struct sock *sk)
54879 +{
54880 +#ifdef CONFIG_GRKERNSEC
54881 + struct signal_struct *p, *set;
54882 + const struct inet_sock *inet = inet_sk(sk);
54883 +
54884 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54885 + return;
54886 +
54887 + set = current->signal;
54888 +
54889 + spin_lock_bh(&gr_conn_table_lock);
54890 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54891 + inet->dport, inet->sport);
54892 + if (unlikely(p != NULL)) {
54893 + set->curr_ip = p->curr_ip;
54894 + set->used_accept = 1;
54895 + gr_del_task_from_ip_table_nolock(p);
54896 + spin_unlock_bh(&gr_conn_table_lock);
54897 + return;
54898 + }
54899 + spin_unlock_bh(&gr_conn_table_lock);
54900 +
54901 + set->curr_ip = inet->daddr;
54902 + set->used_accept = 1;
54903 +#endif
54904 + return;
54905 +}
54906 +
54907 +int
54908 +gr_handle_sock_all(const int family, const int type, const int protocol)
54909 +{
54910 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54911 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54912 + (family != AF_UNIX)) {
54913 + if (family == AF_INET)
54914 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54915 + else
54916 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54917 + return -EACCES;
54918 + }
54919 +#endif
54920 + return 0;
54921 +}
54922 +
54923 +int
54924 +gr_handle_sock_server(const struct sockaddr *sck)
54925 +{
54926 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54927 + if (grsec_enable_socket_server &&
54928 + in_group_p(grsec_socket_server_gid) &&
54929 + sck && (sck->sa_family != AF_UNIX) &&
54930 + (sck->sa_family != AF_LOCAL)) {
54931 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54932 + return -EACCES;
54933 + }
54934 +#endif
54935 + return 0;
54936 +}
54937 +
54938 +int
54939 +gr_handle_sock_server_other(const struct sock *sck)
54940 +{
54941 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54942 + if (grsec_enable_socket_server &&
54943 + in_group_p(grsec_socket_server_gid) &&
54944 + sck && (sck->sk_family != AF_UNIX) &&
54945 + (sck->sk_family != AF_LOCAL)) {
54946 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54947 + return -EACCES;
54948 + }
54949 +#endif
54950 + return 0;
54951 +}
54952 +
54953 +int
54954 +gr_handle_sock_client(const struct sockaddr *sck)
54955 +{
54956 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54957 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54958 + sck && (sck->sa_family != AF_UNIX) &&
54959 + (sck->sa_family != AF_LOCAL)) {
54960 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54961 + return -EACCES;
54962 + }
54963 +#endif
54964 + return 0;
54965 +}
54966 +
54967 +kernel_cap_t
54968 +gr_cap_rtnetlink(struct sock *sock)
54969 +{
54970 +#ifdef CONFIG_GRKERNSEC
54971 + if (!gr_acl_is_enabled())
54972 + return current_cap();
54973 + else if (sock->sk_protocol == NETLINK_ISCSI &&
54974 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54975 + gr_is_capable(CAP_SYS_ADMIN))
54976 + return current_cap();
54977 + else if (sock->sk_protocol == NETLINK_AUDIT &&
54978 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54979 + gr_is_capable(CAP_AUDIT_WRITE) &&
54980 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54981 + gr_is_capable(CAP_AUDIT_CONTROL))
54982 + return current_cap();
54983 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54984 + ((sock->sk_protocol == NETLINK_ROUTE) ?
54985 + gr_is_capable_nolog(CAP_NET_ADMIN) :
54986 + gr_is_capable(CAP_NET_ADMIN)))
54987 + return current_cap();
54988 + else
54989 + return __cap_empty_set;
54990 +#else
54991 + return current_cap();
54992 +#endif
54993 +}
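
grsec_sock.c above keeps a fixed-size, chained hash table mapping a TCP 4-tuple (saddr, daddr, sport, dport) to the signal_struct of the task that owned the connection, so gr_attach_curr_ip() can recover the peer address of an accept()ed socket. The sketch below reproduces the bucket-and-chain mechanics in plain userspace C; the struct and function names are illustrative, and only insert and lookup are shown (locking and deletion are omitted):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define TABLE_SIZE 32749   /* same prime bucket count as gr_conn_table_size */

    struct conn_key {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
    };

    struct conn_entry {
        struct conn_key    key;
        void              *owner;  /* stands in for the signal_struct pointer */
        struct conn_entry *next;
    };

    static struct conn_entry *table[TABLE_SIZE];

    /* Same mixing as conn_hash() in the patch: fold the 4-tuple into a bucket. */
    static unsigned int hash_key(const struct conn_key *k)
    {
        return (k->daddr + k->saddr + ((uint32_t)k->sport << 8) +
                ((uint32_t)k->dport << 16)) % TABLE_SIZE;
    }

    static int key_eq(const struct conn_key *a, const struct conn_key *b)
    {
        return a->saddr == b->saddr && a->daddr == b->daddr &&
               a->sport == b->sport && a->dport == b->dport;
    }

    /* Insert at the head of the bucket's chain, as the patch does. */
    static void conn_add(const struct conn_key *k, void *owner)
    {
        struct conn_entry *e = malloc(sizeof(*e));
        unsigned int idx = hash_key(k);

        if (!e)
            return;
        e->key = *k;
        e->owner = owner;
        e->next = table[idx];
        table[idx] = e;
    }

    /* Walk the chain until the 4-tuple matches; NULL if absent. */
    static void *conn_lookup(const struct conn_key *k)
    {
        struct conn_entry *e = table[hash_key(k)];

        while (e && !key_eq(&e->key, k))
            e = e->next;
        return e ? e->owner : NULL;
    }

    int main(void)
    {
        struct conn_key k = { 0x0a000001, 0x0a000002, 12345, 80 };
        int owner = 42;

        conn_add(&k, &owner);
        printf("lookup: %s\n", conn_lookup(&k) ? "found" : "missing");
        return 0;
    }
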
54994 diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54995 --- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54996 +++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54997 @@ -0,0 +1,479 @@
54998 +#include <linux/kernel.h>
54999 +#include <linux/sched.h>
55000 +#include <linux/sysctl.h>
55001 +#include <linux/grsecurity.h>
55002 +#include <linux/grinternal.h>
55003 +
55004 +int
55005 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55006 +{
55007 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55008 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55009 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55010 + return -EACCES;
55011 + }
55012 +#endif
55013 + return 0;
55014 +}
55015 +
55016 +#ifdef CONFIG_GRKERNSEC_ROFS
55017 +static int __maybe_unused one = 1;
55018 +#endif
55019 +
55020 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55021 +ctl_table grsecurity_table[] = {
55022 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55023 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55024 +#ifdef CONFIG_GRKERNSEC_IO
55025 + {
55026 + .ctl_name = CTL_UNNUMBERED,
55027 + .procname = "disable_priv_io",
55028 + .data = &grsec_disable_privio,
55029 + .maxlen = sizeof(int),
55030 + .mode = 0600,
55031 + .proc_handler = &proc_dointvec,
55032 + },
55033 +#endif
55034 +#endif
55035 +#ifdef CONFIG_GRKERNSEC_LINK
55036 + {
55037 + .ctl_name = CTL_UNNUMBERED,
55038 + .procname = "linking_restrictions",
55039 + .data = &grsec_enable_link,
55040 + .maxlen = sizeof(int),
55041 + .mode = 0600,
55042 + .proc_handler = &proc_dointvec,
55043 + },
55044 +#endif
55045 +#ifdef CONFIG_GRKERNSEC_BRUTE
55046 + {
55047 + .ctl_name = CTL_UNNUMBERED,
55048 + .procname = "deter_bruteforce",
55049 + .data = &grsec_enable_brute,
55050 + .maxlen = sizeof(int),
55051 + .mode = 0600,
55052 + .proc_handler = &proc_dointvec,
55053 + },
55054 +#endif
55055 +#ifdef CONFIG_GRKERNSEC_FIFO
55056 + {
55057 + .ctl_name = CTL_UNNUMBERED,
55058 + .procname = "fifo_restrictions",
55059 + .data = &grsec_enable_fifo,
55060 + .maxlen = sizeof(int),
55061 + .mode = 0600,
55062 + .proc_handler = &proc_dointvec,
55063 + },
55064 +#endif
55065 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55066 + {
55067 + .ctl_name = CTL_UNNUMBERED,
55068 + .procname = "ip_blackhole",
55069 + .data = &grsec_enable_blackhole,
55070 + .maxlen = sizeof(int),
55071 + .mode = 0600,
55072 + .proc_handler = &proc_dointvec,
55073 + },
55074 + {
55075 + .ctl_name = CTL_UNNUMBERED,
55076 + .procname = "lastack_retries",
55077 + .data = &grsec_lastack_retries,
55078 + .maxlen = sizeof(int),
55079 + .mode = 0600,
55080 + .proc_handler = &proc_dointvec,
55081 + },
55082 +#endif
55083 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55084 + {
55085 + .ctl_name = CTL_UNNUMBERED,
55086 + .procname = "exec_logging",
55087 + .data = &grsec_enable_execlog,
55088 + .maxlen = sizeof(int),
55089 + .mode = 0600,
55090 + .proc_handler = &proc_dointvec,
55091 + },
55092 +#endif
55093 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55094 + {
55095 + .ctl_name = CTL_UNNUMBERED,
55096 + .procname = "rwxmap_logging",
55097 + .data = &grsec_enable_log_rwxmaps,
55098 + .maxlen = sizeof(int),
55099 + .mode = 0600,
55100 + .proc_handler = &proc_dointvec,
55101 + },
55102 +#endif
55103 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55104 + {
55105 + .ctl_name = CTL_UNNUMBERED,
55106 + .procname = "signal_logging",
55107 + .data = &grsec_enable_signal,
55108 + .maxlen = sizeof(int),
55109 + .mode = 0600,
55110 + .proc_handler = &proc_dointvec,
55111 + },
55112 +#endif
55113 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55114 + {
55115 + .ctl_name = CTL_UNNUMBERED,
55116 + .procname = "forkfail_logging",
55117 + .data = &grsec_enable_forkfail,
55118 + .maxlen = sizeof(int),
55119 + .mode = 0600,
55120 + .proc_handler = &proc_dointvec,
55121 + },
55122 +#endif
55123 +#ifdef CONFIG_GRKERNSEC_TIME
55124 + {
55125 + .ctl_name = CTL_UNNUMBERED,
55126 + .procname = "timechange_logging",
55127 + .data = &grsec_enable_time,
55128 + .maxlen = sizeof(int),
55129 + .mode = 0600,
55130 + .proc_handler = &proc_dointvec,
55131 + },
55132 +#endif
55133 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55134 + {
55135 + .ctl_name = CTL_UNNUMBERED,
55136 + .procname = "chroot_deny_shmat",
55137 + .data = &grsec_enable_chroot_shmat,
55138 + .maxlen = sizeof(int),
55139 + .mode = 0600,
55140 + .proc_handler = &proc_dointvec,
55141 + },
55142 +#endif
55143 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55144 + {
55145 + .ctl_name = CTL_UNNUMBERED,
55146 + .procname = "chroot_deny_unix",
55147 + .data = &grsec_enable_chroot_unix,
55148 + .maxlen = sizeof(int),
55149 + .mode = 0600,
55150 + .proc_handler = &proc_dointvec,
55151 + },
55152 +#endif
55153 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55154 + {
55155 + .ctl_name = CTL_UNNUMBERED,
55156 + .procname = "chroot_deny_mount",
55157 + .data = &grsec_enable_chroot_mount,
55158 + .maxlen = sizeof(int),
55159 + .mode = 0600,
55160 + .proc_handler = &proc_dointvec,
55161 + },
55162 +#endif
55163 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55164 + {
55165 + .ctl_name = CTL_UNNUMBERED,
55166 + .procname = "chroot_deny_fchdir",
55167 + .data = &grsec_enable_chroot_fchdir,
55168 + .maxlen = sizeof(int),
55169 + .mode = 0600,
55170 + .proc_handler = &proc_dointvec,
55171 + },
55172 +#endif
55173 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55174 + {
55175 + .ctl_name = CTL_UNNUMBERED,
55176 + .procname = "chroot_deny_chroot",
55177 + .data = &grsec_enable_chroot_double,
55178 + .maxlen = sizeof(int),
55179 + .mode = 0600,
55180 + .proc_handler = &proc_dointvec,
55181 + },
55182 +#endif
55183 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55184 + {
55185 + .ctl_name = CTL_UNNUMBERED,
55186 + .procname = "chroot_deny_pivot",
55187 + .data = &grsec_enable_chroot_pivot,
55188 + .maxlen = sizeof(int),
55189 + .mode = 0600,
55190 + .proc_handler = &proc_dointvec,
55191 + },
55192 +#endif
55193 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55194 + {
55195 + .ctl_name = CTL_UNNUMBERED,
55196 + .procname = "chroot_enforce_chdir",
55197 + .data = &grsec_enable_chroot_chdir,
55198 + .maxlen = sizeof(int),
55199 + .mode = 0600,
55200 + .proc_handler = &proc_dointvec,
55201 + },
55202 +#endif
55203 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55204 + {
55205 + .ctl_name = CTL_UNNUMBERED,
55206 + .procname = "chroot_deny_chmod",
55207 + .data = &grsec_enable_chroot_chmod,
55208 + .maxlen = sizeof(int),
55209 + .mode = 0600,
55210 + .proc_handler = &proc_dointvec,
55211 + },
55212 +#endif
55213 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55214 + {
55215 + .ctl_name = CTL_UNNUMBERED,
55216 + .procname = "chroot_deny_mknod",
55217 + .data = &grsec_enable_chroot_mknod,
55218 + .maxlen = sizeof(int),
55219 + .mode = 0600,
55220 + .proc_handler = &proc_dointvec,
55221 + },
55222 +#endif
55223 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55224 + {
55225 + .ctl_name = CTL_UNNUMBERED,
55226 + .procname = "chroot_restrict_nice",
55227 + .data = &grsec_enable_chroot_nice,
55228 + .maxlen = sizeof(int),
55229 + .mode = 0600,
55230 + .proc_handler = &proc_dointvec,
55231 + },
55232 +#endif
55233 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55234 + {
55235 + .ctl_name = CTL_UNNUMBERED,
55236 + .procname = "chroot_execlog",
55237 + .data = &grsec_enable_chroot_execlog,
55238 + .maxlen = sizeof(int),
55239 + .mode = 0600,
55240 + .proc_handler = &proc_dointvec,
55241 + },
55242 +#endif
55243 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55244 + {
55245 + .ctl_name = CTL_UNNUMBERED,
55246 + .procname = "chroot_caps",
55247 + .data = &grsec_enable_chroot_caps,
55248 + .maxlen = sizeof(int),
55249 + .mode = 0600,
55250 + .proc_handler = &proc_dointvec,
55251 + },
55252 +#endif
55253 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55254 + {
55255 + .ctl_name = CTL_UNNUMBERED,
55256 + .procname = "chroot_deny_sysctl",
55257 + .data = &grsec_enable_chroot_sysctl,
55258 + .maxlen = sizeof(int),
55259 + .mode = 0600,
55260 + .proc_handler = &proc_dointvec,
55261 + },
55262 +#endif
55263 +#ifdef CONFIG_GRKERNSEC_TPE
55264 + {
55265 + .ctl_name = CTL_UNNUMBERED,
55266 + .procname = "tpe",
55267 + .data = &grsec_enable_tpe,
55268 + .maxlen = sizeof(int),
55269 + .mode = 0600,
55270 + .proc_handler = &proc_dointvec,
55271 + },
55272 + {
55273 + .ctl_name = CTL_UNNUMBERED,
55274 + .procname = "tpe_gid",
55275 + .data = &grsec_tpe_gid,
55276 + .maxlen = sizeof(int),
55277 + .mode = 0600,
55278 + .proc_handler = &proc_dointvec,
55279 + },
55280 +#endif
55281 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55282 + {
55283 + .ctl_name = CTL_UNNUMBERED,
55284 + .procname = "tpe_invert",
55285 + .data = &grsec_enable_tpe_invert,
55286 + .maxlen = sizeof(int),
55287 + .mode = 0600,
55288 + .proc_handler = &proc_dointvec,
55289 + },
55290 +#endif
55291 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55292 + {
55293 + .ctl_name = CTL_UNNUMBERED,
55294 + .procname = "tpe_restrict_all",
55295 + .data = &grsec_enable_tpe_all,
55296 + .maxlen = sizeof(int),
55297 + .mode = 0600,
55298 + .proc_handler = &proc_dointvec,
55299 + },
55300 +#endif
55301 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55302 + {
55303 + .ctl_name = CTL_UNNUMBERED,
55304 + .procname = "socket_all",
55305 + .data = &grsec_enable_socket_all,
55306 + .maxlen = sizeof(int),
55307 + .mode = 0600,
55308 + .proc_handler = &proc_dointvec,
55309 + },
55310 + {
55311 + .ctl_name = CTL_UNNUMBERED,
55312 + .procname = "socket_all_gid",
55313 + .data = &grsec_socket_all_gid,
55314 + .maxlen = sizeof(int),
55315 + .mode = 0600,
55316 + .proc_handler = &proc_dointvec,
55317 + },
55318 +#endif
55319 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55320 + {
55321 + .ctl_name = CTL_UNNUMBERED,
55322 + .procname = "socket_client",
55323 + .data = &grsec_enable_socket_client,
55324 + .maxlen = sizeof(int),
55325 + .mode = 0600,
55326 + .proc_handler = &proc_dointvec,
55327 + },
55328 + {
55329 + .ctl_name = CTL_UNNUMBERED,
55330 + .procname = "socket_client_gid",
55331 + .data = &grsec_socket_client_gid,
55332 + .maxlen = sizeof(int),
55333 + .mode = 0600,
55334 + .proc_handler = &proc_dointvec,
55335 + },
55336 +#endif
55337 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55338 + {
55339 + .ctl_name = CTL_UNNUMBERED,
55340 + .procname = "socket_server",
55341 + .data = &grsec_enable_socket_server,
55342 + .maxlen = sizeof(int),
55343 + .mode = 0600,
55344 + .proc_handler = &proc_dointvec,
55345 + },
55346 + {
55347 + .ctl_name = CTL_UNNUMBERED,
55348 + .procname = "socket_server_gid",
55349 + .data = &grsec_socket_server_gid,
55350 + .maxlen = sizeof(int),
55351 + .mode = 0600,
55352 + .proc_handler = &proc_dointvec,
55353 + },
55354 +#endif
55355 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55356 + {
55357 + .ctl_name = CTL_UNNUMBERED,
55358 + .procname = "audit_group",
55359 + .data = &grsec_enable_group,
55360 + .maxlen = sizeof(int),
55361 + .mode = 0600,
55362 + .proc_handler = &proc_dointvec,
55363 + },
55364 + {
55365 + .ctl_name = CTL_UNNUMBERED,
55366 + .procname = "audit_gid",
55367 + .data = &grsec_audit_gid,
55368 + .maxlen = sizeof(int),
55369 + .mode = 0600,
55370 + .proc_handler = &proc_dointvec,
55371 + },
55372 +#endif
55373 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55374 + {
55375 + .ctl_name = CTL_UNNUMBERED,
55376 + .procname = "audit_chdir",
55377 + .data = &grsec_enable_chdir,
55378 + .maxlen = sizeof(int),
55379 + .mode = 0600,
55380 + .proc_handler = &proc_dointvec,
55381 + },
55382 +#endif
55383 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55384 + {
55385 + .ctl_name = CTL_UNNUMBERED,
55386 + .procname = "audit_mount",
55387 + .data = &grsec_enable_mount,
55388 + .maxlen = sizeof(int),
55389 + .mode = 0600,
55390 + .proc_handler = &proc_dointvec,
55391 + },
55392 +#endif
55393 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55394 + {
55395 + .ctl_name = CTL_UNNUMBERED,
55396 + .procname = "audit_textrel",
55397 + .data = &grsec_enable_audit_textrel,
55398 + .maxlen = sizeof(int),
55399 + .mode = 0600,
55400 + .proc_handler = &proc_dointvec,
55401 + },
55402 +#endif
55403 +#ifdef CONFIG_GRKERNSEC_DMESG
55404 + {
55405 + .ctl_name = CTL_UNNUMBERED,
55406 + .procname = "dmesg",
55407 + .data = &grsec_enable_dmesg,
55408 + .maxlen = sizeof(int),
55409 + .mode = 0600,
55410 + .proc_handler = &proc_dointvec,
55411 + },
55412 +#endif
55413 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55414 + {
55415 + .ctl_name = CTL_UNNUMBERED,
55416 + .procname = "chroot_findtask",
55417 + .data = &grsec_enable_chroot_findtask,
55418 + .maxlen = sizeof(int),
55419 + .mode = 0600,
55420 + .proc_handler = &proc_dointvec,
55421 + },
55422 +#endif
55423 +#ifdef CONFIG_GRKERNSEC_RESLOG
55424 + {
55425 + .ctl_name = CTL_UNNUMBERED,
55426 + .procname = "resource_logging",
55427 + .data = &grsec_resource_logging,
55428 + .maxlen = sizeof(int),
55429 + .mode = 0600,
55430 + .proc_handler = &proc_dointvec,
55431 + },
55432 +#endif
55433 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55434 + {
55435 + .ctl_name = CTL_UNNUMBERED,
55436 + .procname = "audit_ptrace",
55437 + .data = &grsec_enable_audit_ptrace,
55438 + .maxlen = sizeof(int),
55439 + .mode = 0600,
55440 + .proc_handler = &proc_dointvec,
55441 + },
55442 +#endif
55443 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55444 + {
55445 + .ctl_name = CTL_UNNUMBERED,
55446 + .procname = "harden_ptrace",
55447 + .data = &grsec_enable_harden_ptrace,
55448 + .maxlen = sizeof(int),
55449 + .mode = 0600,
55450 + .proc_handler = &proc_dointvec,
55451 + },
55452 +#endif
55453 + {
55454 + .ctl_name = CTL_UNNUMBERED,
55455 + .procname = "grsec_lock",
55456 + .data = &grsec_lock,
55457 + .maxlen = sizeof(int),
55458 + .mode = 0600,
55459 + .proc_handler = &proc_dointvec,
55460 + },
55461 +#endif
55462 +#ifdef CONFIG_GRKERNSEC_ROFS
55463 + {
55464 + .ctl_name = CTL_UNNUMBERED,
55465 + .procname = "romount_protect",
55466 + .data = &grsec_enable_rofs,
55467 + .maxlen = sizeof(int),
55468 + .mode = 0600,
55469 + .proc_handler = &proc_dointvec_minmax,
55470 + .extra1 = &one,
55471 + .extra2 = &one,
55472 + },
55473 +#endif
55474 + { .ctl_name = 0 }
55475 +};
55476 +#endif
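
Each entry in grsecurity_table above is an integer sysctl with mode 0600 handled by proc_dointvec, so once the table is registered (that happens elsewhere in the patch, conventionally under kernel/grsecurity) the knobs can be flipped from userspace by root until grsec_lock is set. A small usage sketch; the /proc paths assume the usual kernel/grsecurity placement and may differ in a given tree:

    #include <stdio.h>

    /* Write a value into a sysctl file; requires root, since the table
     * entries are created with mode 0600. */
    static int write_sysctl(const char *path, const char *value)
    {
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return -1;
        }
        fputs(value, f);
        return fclose(f);
    }

    int main(void)
    {
        /* Example: enable linking restrictions, then seal the configuration
         * so further writes are rejected until reboot (grsec_lock, enforced
         * by gr_handle_sysctl_mod() above). */
        write_sysctl("/proc/sys/kernel/grsecurity/linking_restrictions", "1");
        write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
        return 0;
    }
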
55477 diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55478 --- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55479 +++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55480 @@ -0,0 +1,16 @@
55481 +#include <linux/kernel.h>
55482 +#include <linux/sched.h>
55483 +#include <linux/grinternal.h>
55484 +#include <linux/module.h>
55485 +
55486 +void
55487 +gr_log_timechange(void)
55488 +{
55489 +#ifdef CONFIG_GRKERNSEC_TIME
55490 + if (grsec_enable_time)
55491 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55492 +#endif
55493 + return;
55494 +}
55495 +
55496 +EXPORT_SYMBOL(gr_log_timechange);
55497 diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55498 --- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55499 +++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55500 @@ -0,0 +1,39 @@
55501 +#include <linux/kernel.h>
55502 +#include <linux/sched.h>
55503 +#include <linux/file.h>
55504 +#include <linux/fs.h>
55505 +#include <linux/grinternal.h>
55506 +
55507 +extern int gr_acl_tpe_check(void);
55508 +
55509 +int
55510 +gr_tpe_allow(const struct file *file)
55511 +{
55512 +#ifdef CONFIG_GRKERNSEC
55513 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55514 + const struct cred *cred = current_cred();
55515 +
55516 + if (cred->uid && ((grsec_enable_tpe &&
55517 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55518 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55519 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55520 +#else
55521 + in_group_p(grsec_tpe_gid)
55522 +#endif
55523 + ) || gr_acl_tpe_check()) &&
55524 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55525 + (inode->i_mode & S_IWOTH))))) {
55526 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55527 + return 0;
55528 + }
55529 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55530 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55531 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55532 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55533 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55534 + return 0;
55535 + }
55536 +#endif
55537 +#endif
55538 + return 1;
55539 +}
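
gr_tpe_allow() above is a trusted-path-execution check: for users subject to TPE it refuses to execute a file whose parent directory is not owned by root or is group- or world-writable. The userspace sketch below applies just that directory-trust half of the test with stat(); the gid, invert, and RBAC conditions are left out, and all names are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <libgen.h>

    /* Return 1 if 'path' lives in a directory passing the basic TPE test:
     * parent owned by root and not writable by group or other. */
    static int dir_is_trusted(const char *path)
    {
        char buf[4096];
        struct stat st;

        strncpy(buf, path, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        if (stat(dirname(buf), &st) != 0) {  /* dirname() may modify buf, hence the copy */
            perror("stat");
            return 0;
        }
        if (st.st_uid != 0)
            return 0;                        /* parent not owned by root */
        if (st.st_mode & (S_IWGRP | S_IWOTH))
            return 0;                        /* parent writable by group/other */
        return 1;
    }

    int main(int argc, char **argv)
    {
        const char *target = argc > 1 ? argv[1] : "/bin/ls";

        printf("%s: parent directory is %s\n", target,
               dir_is_trusted(target) ? "trusted" : "untrusted");
        return 0;
    }
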
55540 diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55541 --- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55542 +++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55543 @@ -0,0 +1,61 @@
55544 +#include <linux/err.h>
55545 +#include <linux/kernel.h>
55546 +#include <linux/sched.h>
55547 +#include <linux/mm.h>
55548 +#include <linux/scatterlist.h>
55549 +#include <linux/crypto.h>
55550 +#include <linux/gracl.h>
55551 +
55552 +
55553 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55554 +#error "crypto and sha256 must be built into the kernel"
55555 +#endif
55556 +
55557 +int
55558 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55559 +{
55560 + char *p;
55561 + struct crypto_hash *tfm;
55562 + struct hash_desc desc;
55563 + struct scatterlist sg;
55564 + unsigned char temp_sum[GR_SHA_LEN];
55565 + volatile int retval = 0;
55566 + volatile int dummy = 0;
55567 + unsigned int i;
55568 +
55569 + sg_init_table(&sg, 1);
55570 +
55571 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55572 + if (IS_ERR(tfm)) {
55573 + /* should never happen, since sha256 should be built in */
55574 + return 1;
55575 + }
55576 +
55577 + desc.tfm = tfm;
55578 + desc.flags = 0;
55579 +
55580 + crypto_hash_init(&desc);
55581 +
55582 + p = salt;
55583 + sg_set_buf(&sg, p, GR_SALT_LEN);
55584 + crypto_hash_update(&desc, &sg, sg.length);
55585 +
55586 + p = entry->pw;
55587 + sg_set_buf(&sg, p, strlen(p));
55588 +
55589 + crypto_hash_update(&desc, &sg, sg.length);
55590 +
55591 + crypto_hash_final(&desc, temp_sum);
55592 +
55593 + memset(entry->pw, 0, GR_PW_LEN);
55594 +
55595 + for (i = 0; i < GR_SHA_LEN; i++)
55596 + if (sum[i] != temp_sum[i])
55597 + retval = 1;
55598 + else
55599 + dummy = 1; // waste a cycle
55600 +
55601 + crypto_free_hash(tfm);
55602 +
55603 + return retval;
55604 +}
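
The chkpw() routine above hashes the salt and the supplied password with SHA-256, then compares the digest against the stored sum byte by byte without breaking out early; the dummy branch keeps the loop doing the same work whether or not a byte matches, apparently so the comparison time does not reveal how long the matching prefix is. A standalone sketch of that constant-time comparison idiom, with illustrative names:

    #include <stdio.h>
    #include <stddef.h>

    /* Compare two digests without short-circuiting: every byte is examined
     * regardless of earlier mismatches, so timing reveals nothing about
     * where the first difference occurs. */
    static int digest_mismatch(const unsigned char *a, const unsigned char *b,
                               size_t len)
    {
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
            diff |= a[i] ^ b[i];   /* accumulate differences, never break early */

        return diff != 0;          /* 1 on mismatch, 0 on match, like chkpw() */
    }

    int main(void)
    {
        unsigned char x[4] = { 1, 2, 3, 4 };
        unsigned char y[4] = { 1, 2, 9, 4 };

        printf("mismatch: %d\n", digest_mismatch(x, y, sizeof(x)));
        return 0;
    }
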
55605 diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55606 --- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55607 +++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55608 @@ -0,0 +1,1037 @@
55609 +#
55610 +# grsecurity configuration
55611 +#
55612 +
55613 +menu "Grsecurity"
55614 +
55615 +config GRKERNSEC
55616 + bool "Grsecurity"
55617 + select CRYPTO
55618 + select CRYPTO_SHA256
55619 + help
55620 + If you say Y here, you will be able to configure many features
55621 + that will enhance the security of your system. It is highly
55622 + recommended that you say Y here and read through the help
55623 + for each option so that you fully understand the features and
55624 + can evaluate their usefulness for your machine.
55625 +
55626 +choice
55627 + prompt "Security Level"
55628 + depends on GRKERNSEC
55629 + default GRKERNSEC_CUSTOM
55630 +
55631 +config GRKERNSEC_LOW
55632 + bool "Low"
55633 + select GRKERNSEC_LINK
55634 + select GRKERNSEC_FIFO
55635 + select GRKERNSEC_RANDNET
55636 + select GRKERNSEC_DMESG
55637 + select GRKERNSEC_CHROOT
55638 + select GRKERNSEC_CHROOT_CHDIR
55639 +
55640 + help
55641 + If you choose this option, several of the grsecurity options will
55642 + be enabled that will give you greater protection against a number
55643 + of attacks, while assuring that none of your software will have any
55644 + conflicts with the additional security measures. If you run a lot
55645 + of unusual software, or you are having problems with the higher
55646 + security levels, you should say Y here. With this option, the
55647 + following features are enabled:
55648 +
55649 + - Linking restrictions
55650 + - FIFO restrictions
55651 + - Restricted dmesg
55652 + - Enforced chdir("/") on chroot
55653 + - Runtime module disabling
55654 +
55655 +config GRKERNSEC_MEDIUM
55656 + bool "Medium"
55657 + select PAX
55658 + select PAX_EI_PAX
55659 + select PAX_PT_PAX_FLAGS
55660 + select PAX_HAVE_ACL_FLAGS
55661 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55662 + select GRKERNSEC_CHROOT
55663 + select GRKERNSEC_CHROOT_SYSCTL
55664 + select GRKERNSEC_LINK
55665 + select GRKERNSEC_FIFO
55666 + select GRKERNSEC_DMESG
55667 + select GRKERNSEC_RANDNET
55668 + select GRKERNSEC_FORKFAIL
55669 + select GRKERNSEC_TIME
55670 + select GRKERNSEC_SIGNAL
55671 + select GRKERNSEC_CHROOT
55672 + select GRKERNSEC_CHROOT_UNIX
55673 + select GRKERNSEC_CHROOT_MOUNT
55674 + select GRKERNSEC_CHROOT_PIVOT
55675 + select GRKERNSEC_CHROOT_DOUBLE
55676 + select GRKERNSEC_CHROOT_CHDIR
55677 + select GRKERNSEC_CHROOT_MKNOD
55678 + select GRKERNSEC_PROC
55679 + select GRKERNSEC_PROC_USERGROUP
55680 + select PAX_RANDUSTACK
55681 + select PAX_ASLR
55682 + select PAX_RANDMMAP
55683 + select PAX_REFCOUNT if (X86 || SPARC64)
55684 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55685 +
55686 + help
55687 + If you say Y here, several features in addition to those included
55688 + in the low additional security level will be enabled. These
55689 + features provide even more security to your system, though in rare
55690 + cases they may be incompatible with very old or poorly written
55691 + software. If you enable this option, make sure that your auth
55692 + service (identd) is running as gid 1001. With this option,
55693 + the following features (in addition to those provided in the
55694 + low additional security level) will be enabled:
55695 +
55696 + - Failed fork logging
55697 + - Time change logging
55698 + - Signal logging
55699 + - Deny mounts in chroot
55700 + - Deny double chrooting
55701 + - Deny sysctl writes in chroot
55702 + - Deny mknod in chroot
55703 + - Deny access to abstract AF_UNIX sockets out of chroot
55704 + - Deny pivot_root in chroot
55705 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55706 + - /proc restrictions with special GID set to 10 (usually wheel)
55707 + - Address Space Layout Randomization (ASLR)
55708 + - Prevent exploitation of most refcount overflows
55709 + - Bounds checking of copying between the kernel and userland
55710 +
55711 +config GRKERNSEC_HIGH
55712 + bool "High"
55713 + select GRKERNSEC_LINK
55714 + select GRKERNSEC_FIFO
55715 + select GRKERNSEC_DMESG
55716 + select GRKERNSEC_FORKFAIL
55717 + select GRKERNSEC_TIME
55718 + select GRKERNSEC_SIGNAL
55719 + select GRKERNSEC_CHROOT
55720 + select GRKERNSEC_CHROOT_SHMAT
55721 + select GRKERNSEC_CHROOT_UNIX
55722 + select GRKERNSEC_CHROOT_MOUNT
55723 + select GRKERNSEC_CHROOT_FCHDIR
55724 + select GRKERNSEC_CHROOT_PIVOT
55725 + select GRKERNSEC_CHROOT_DOUBLE
55726 + select GRKERNSEC_CHROOT_CHDIR
55727 + select GRKERNSEC_CHROOT_MKNOD
55728 + select GRKERNSEC_CHROOT_CAPS
55729 + select GRKERNSEC_CHROOT_SYSCTL
55730 + select GRKERNSEC_CHROOT_FINDTASK
55731 + select GRKERNSEC_SYSFS_RESTRICT
55732 + select GRKERNSEC_PROC
55733 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55734 + select GRKERNSEC_HIDESYM
55735 + select GRKERNSEC_BRUTE
55736 + select GRKERNSEC_PROC_USERGROUP
55737 + select GRKERNSEC_KMEM
55738 + select GRKERNSEC_RESLOG
55739 + select GRKERNSEC_RANDNET
55740 + select GRKERNSEC_PROC_ADD
55741 + select GRKERNSEC_CHROOT_CHMOD
55742 + select GRKERNSEC_CHROOT_NICE
55743 + select GRKERNSEC_AUDIT_MOUNT
55744 + select GRKERNSEC_MODHARDEN if (MODULES)
55745 + select GRKERNSEC_HARDEN_PTRACE
55746 + select GRKERNSEC_VM86 if (X86_32)
55747 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55748 + select PAX
55749 + select PAX_RANDUSTACK
55750 + select PAX_ASLR
55751 + select PAX_RANDMMAP
55752 + select PAX_NOEXEC
55753 + select PAX_MPROTECT
55754 + select PAX_EI_PAX
55755 + select PAX_PT_PAX_FLAGS
55756 + select PAX_HAVE_ACL_FLAGS
55757 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55758 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55759 + select PAX_RANDKSTACK if (X86_TSC && X86)
55760 + select PAX_SEGMEXEC if (X86_32)
55761 + select PAX_PAGEEXEC
55762 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55763 + select PAX_EMUTRAMP if (PARISC)
55764 + select PAX_EMUSIGRT if (PARISC)
55765 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55766 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55767 + select PAX_REFCOUNT if (X86 || SPARC64)
55768 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55769 + help
55770 + If you say Y here, many of the features of grsecurity will be
55771 + enabled, which will protect you against many kinds of attacks
55772 + against your system. The heightened security comes at a cost
55773 + of an increased chance of incompatibilities with rare software
55774 + on your machine. Since this security level enables PaX, you should
55775 + view <http://pax.grsecurity.net> and read about the PaX
55776 + project. While you are there, download chpax and run it on
55777 + binaries that cause problems with PaX. Also remember that
55778 + since the /proc restrictions are enabled, you must run your
55779 + identd as gid 1001. This security level enables the following
55780 + features in addition to those listed in the low and medium
55781 + security levels:
55782 +
55783 + - Additional /proc restrictions
55784 + - Chmod restrictions in chroot
55785 + - No signals, ptrace, or viewing of processes outside of chroot
55786 + - Capability restrictions in chroot
55787 + - Deny fchdir out of chroot
55788 + - Priority restrictions in chroot
55789 + - Segmentation-based implementation of PaX
55790 + - Mprotect restrictions
55791 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55792 + - Kernel stack randomization
55793 + - Mount/unmount/remount logging
55794 + - Kernel symbol hiding
55795 + - Prevention of memory exhaustion-based exploits
55796 + - Hardening of module auto-loading
55797 + - Ptrace restrictions
55798 + - Restricted vm86 mode
55799 + - Restricted sysfs/debugfs
55800 + - Active kernel exploit response
55801 +
55802 +config GRKERNSEC_CUSTOM
55803 + bool "Custom"
55804 + help
55805 + If you say Y here, you will be able to configure every grsecurity
55806 + option, which allows you to enable many more features that aren't
55807 + covered in the basic security levels. These additional features
55808 + include TPE, socket restrictions, and the sysctl system for
55809 + grsecurity. It is advised that you read through the help for
55810 + each option to determine its usefulness in your situation.
55811 +
55812 +endchoice
55813 +
55814 +menu "Address Space Protection"
55815 +depends on GRKERNSEC
55816 +
55817 +config GRKERNSEC_KMEM
55818 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55819 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55820 + help
55821 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55822 + be written to via mmap or otherwise to modify the running kernel.
55823 + /dev/port will also not be allowed to be opened. If you have module
55824 + support disabled, enabling this will close up four ways that are
55825 + currently used to insert malicious code into the running kernel.
55826 + Even with all these features enabled, we still highly recommend that
55827 + you use the RBAC system, as it is still possible for an attacker to
55828 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55829 + If you are not using XFree86, you may be able to stop this additional
55830 + case by enabling the 'Disable privileged I/O' option. Though nothing
55831 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55832 + but only to video memory, which is the only writing we allow in this
55833 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
55834 + mapping cannot later be given PROT_WRITE via mprotect.
55835 + It is highly recommended that you say Y here if you meet all the
55836 + conditions above.
55837 +
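As a rough userspace illustration of the mmap restriction described above, the sketch below tries to map a page of /dev/mem writable; with this option enabled the writable mapping is expected to be refused (the exact error code returned is an assumption, not specified here):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* Try to map the first page of physical memory writable. */
    int fd = open("/dev/mem", O_RDWR);
    if (fd < 0) {
        perror("open /dev/mem");
        return 1;
    }

    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        perror("mmap PROT_WRITE");   /* refused when the restriction is active */
    else
        munmap(p, 4096);

    close(fd);
    return 0;
}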
55838 +config GRKERNSEC_VM86
55839 + bool "Restrict VM86 mode"
55840 + depends on X86_32
55841 +
55842 + help
55843 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55844 + make use of a special execution mode on 32bit x86 processors called
55845 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55846 + video cards and will still work with this option enabled. The purpose
55847 + of the option is to prevent exploitation of emulation errors in
55848 + virtualization of vm86 mode, like the one discovered in VMware in 2009.
55849 + Nearly all users should be able to enable this option.
55850 +
55851 +config GRKERNSEC_IO
55852 + bool "Disable privileged I/O"
55853 + depends on X86
55854 + select RTC_CLASS
55855 + select RTC_INTF_DEV
55856 + select RTC_DRV_CMOS
55857 +
55858 + help
55859 + If you say Y here, all ioperm and iopl calls will return an error.
55860 + Ioperm and iopl can be used to modify the running kernel.
55861 + Unfortunately, some programs need this access to operate properly,
55862 + the most notable of which are XFree86 and hwclock. The hwclock case
55863 + can be remedied by having RTC support in the kernel, so real-time
55864 + clock support is enabled if this option is enabled, to ensure
55865 + that hwclock operates correctly. XFree86 still will not
55866 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55867 + IF YOU USE XFree86. If you use XFree86 and you still want to
55868 + protect your kernel against modification, use the RBAC system.
55869 +
55870 +config GRKERNSEC_PROC_MEMMAP
55871 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55872 + default y if (PAX_NOEXEC || PAX_ASLR)
55873 + depends on PAX_NOEXEC || PAX_ASLR
55874 + help
55875 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55876 + give no information about the addresses of the task's mappings if
55877 + PaX features that rely on random addresses are enabled on the task.
55878 + If you use PaX, it is strongly recommended that you say Y here, as it
55879 + closes a hole that makes full ASLR useless for suid
55880 + binaries.
55881 +
55882 +config GRKERNSEC_BRUTE
55883 + bool "Deter exploit bruteforcing"
55884 + help
55885 + If you say Y here, attempts to bruteforce exploits against forking
55886 + daemons such as apache or sshd, as well as against suid/sgid binaries,
55887 + will be deterred. When a child of a forking daemon is killed by PaX
55888 + or crashes due to an illegal instruction or other suspicious signal,
55889 + the parent process will be delayed 30 seconds upon every subsequent
55890 + fork until the administrator is able to assess the situation and
55891 + restart the daemon.
55892 + In the suid/sgid case, the attempt is logged, the user has all their
55893 + processes terminated, and they are prevented from executing any further
55894 + processes for 15 minutes.
55895 + It is recommended that you also enable signal logging in the auditing
55896 + section so that logs are generated when a process triggers a suspicious
55897 + signal.
55898 + If the sysctl option is enabled, a sysctl option with name
55899 + "deter_bruteforce" is created.
55900 +
55901 +config GRKERNSEC_MODHARDEN
55902 + bool "Harden module auto-loading"
55903 + depends on MODULES
55904 + help
55905 + If you say Y here, module auto-loading in response to use of some
55906 + feature implemented by an unloaded module will be restricted to
55907 + root users. Enabling this option helps defend against attacks
55908 + by unprivileged users who abuse the auto-loading behavior to
55909 + cause a vulnerable module to load that is then exploited.
55910 +
55911 + If this option prevents a legitimate use of auto-loading for a
55912 + non-root user, the administrator can execute modprobe manually
55913 + with the exact name of the module mentioned in the alert log.
55914 + Alternatively, the administrator can add the module to the list
55915 + of modules loaded at boot by modifying init scripts.
55916 +
55917 + Modification of init scripts will most likely be needed on
55918 + Ubuntu servers with encrypted home directory support enabled,
55919 + as the first non-root user logging in will cause the ecb(aes),
55920 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55921 +
55922 +config GRKERNSEC_HIDESYM
55923 + bool "Hide kernel symbols"
55924 + help
55925 + If you say Y here, getting information on loaded modules and
55926 + displaying all kernel symbols through a syscall will be restricted
55927 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55928 + /proc/kallsyms will be restricted to the root user. The RBAC
55929 + system can hide that entry even from root.
55930 +
55931 + This option also prevents leaking of kernel addresses through
55932 + several /proc entries.
55933 +
55934 + Note that this option is only effective provided the following
55935 + conditions are met:
55936 + 1) The kernel using grsecurity is not precompiled by some distribution
55937 + 2) You have also enabled GRKERNSEC_DMESG
55938 + 3) You are using the RBAC system and hiding other files such as your
55939 + kernel image and System.map. Alternatively, enabling this option
55940 + causes the permissions on /boot, /lib/modules, and the kernel
55941 + source directory to change at compile time to prevent
55942 + reading by non-root users.
55943 + If the above conditions are met, this option will aid in providing a
55944 + useful protection against local kernel exploitation of overflows
55945 + and arbitrary read/write vulnerabilities.
55946 +
55947 +config GRKERNSEC_KERN_LOCKOUT
55948 + bool "Active kernel exploit response"
55949 + depends on X86 || ARM || PPC || SPARC
55950 + help
55951 + If you say Y here, when a PaX alert is triggered due to suspicious
55952 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55953 + or an OOPS occurs due to bad memory accesses, instead of just
55954 + terminating the offending process (and potentially allowing
55955 + a subsequent exploit from the same user), we will take one of two
55956 + actions:
55957 + If the user was root, we will panic the system
55958 + If the user was non-root, we will log the attempt, terminate
55959 + all processes owned by the user, then prevent them from creating
55960 + any new processes until the system is restarted
55961 + This deters repeated kernel exploitation/bruteforcing attempts
55962 + and is useful for later forensics.
55963 +
55964 +endmenu
55965 +menu "Role Based Access Control Options"
55966 +depends on GRKERNSEC
55967 +
55968 +config GRKERNSEC_RBAC_DEBUG
55969 + bool
55970 +
55971 +config GRKERNSEC_NO_RBAC
55972 + bool "Disable RBAC system"
55973 + help
55974 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55975 + preventing the RBAC system from being enabled. You should only say Y
55976 + here if you have no intention of using the RBAC system, so as to prevent
55977 + an attacker with root access from misusing the RBAC system to hide files
55978 + and processes when loadable module support and /dev/[k]mem have been
55979 + locked down.
55980 +
55981 +config GRKERNSEC_ACL_HIDEKERN
55982 + bool "Hide kernel processes"
55983 + help
55984 + If you say Y here, all kernel threads will be hidden to all
55985 + processes but those whose subject has the "view hidden processes"
55986 + flag.
55987 +
55988 +config GRKERNSEC_ACL_MAXTRIES
55989 + int "Maximum tries before password lockout"
55990 + default 3
55991 + help
55992 + This option enforces the maximum number of times a user can attempt
55993 + to authorize themselves with the grsecurity RBAC system before being
55994 + denied the ability to attempt authorization again for a specified time.
55995 + The lower the number, the harder it will be to brute-force a password.
55996 +
55997 +config GRKERNSEC_ACL_TIMEOUT
55998 + int "Time to wait after max password tries, in seconds"
55999 + default 30
56000 + help
56001 + This option specifies the time the user must wait after attempting to
56002 + authorize to the RBAC system with the maximum number of invalid
56003 + passwords. The higher the number, the harder it will be to brute-force
56004 + a password.
56005 +
56006 +endmenu
56007 +menu "Filesystem Protections"
56008 +depends on GRKERNSEC
56009 +
56010 +config GRKERNSEC_PROC
56011 + bool "Proc restrictions"
56012 + help
56013 + If you say Y here, the permissions of the /proc filesystem
56014 + will be altered to enhance system security and privacy. You MUST
56015 + choose either a user only restriction or a user and group restriction.
56016 + Depending upon the option you choose, you can either restrict users to
56017 + see only the processes they themselves run, or choose a group that can
56018 + view all processes and files normally restricted to root if you choose
56019 + the "restrict to user only" option. NOTE: If you're running identd as
56020 + a non-root user, you will have to run it as the group you specify here.
56021 +
56022 +config GRKERNSEC_PROC_USER
56023 + bool "Restrict /proc to user only"
56024 + depends on GRKERNSEC_PROC
56025 + help
56026 + If you say Y here, non-root users will only be able to view their own
56027 + processes, and will be restricted from viewing network-related
56028 + information and kernel symbol and module information.
56029 +
56030 +config GRKERNSEC_PROC_USERGROUP
56031 + bool "Allow special group"
56032 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56033 + help
56034 + If you say Y here, you will be able to select a group that will be
56035 + able to view all processes and network-related information. If you've
56036 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56037 + remain hidden. This option is useful if you want to run identd as
56038 + a non-root user.
56039 +
56040 +config GRKERNSEC_PROC_GID
56041 + int "GID for special group"
56042 + depends on GRKERNSEC_PROC_USERGROUP
56043 + default 1001
56044 +
56045 +config GRKERNSEC_PROC_ADD
56046 + bool "Additional restrictions"
56047 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56048 + help
56049 + If you say Y here, additional restrictions will be placed on
56050 + /proc that keep normal users from viewing device information and
56051 + slabinfo information that could be useful for exploits.
56052 +
56053 +config GRKERNSEC_LINK
56054 + bool "Linking restrictions"
56055 + help
56056 + If you say Y here, /tmp race exploits will be prevented, since users
56057 + will no longer be able to follow symlinks owned by other users in
56058 + world-writable +t directories (e.g. /tmp), unless the owner of the
56059 + symlink is the owner of the directory. Users will also not be
56060 + able to hardlink to files they do not own. If the sysctl option is
56061 + enabled, a sysctl option with name "linking_restrictions" is created.
56062 +
56063 +config GRKERNSEC_FIFO
56064 + bool "FIFO restrictions"
56065 + help
56066 + If you say Y here, users will not be able to write to FIFOs they don't
56067 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56068 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
56069 + option is enabled, a sysctl option with name "fifo_restrictions" is
56070 + created.
56071 +
56072 +config GRKERNSEC_SYSFS_RESTRICT
56073 + bool "Sysfs/debugfs restriction"
56074 + depends on SYSFS
56075 + help
56076 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56077 + any filesystem normally mounted under it (e.g. debugfs) will only
56078 + be accessible by root. These filesystems generally provide access
56079 + to hardware and debug information that isn't appropriate for unprivileged
56080 + users of the system. Sysfs and debugfs have also become a large source
56081 + of new vulnerabilities, ranging from infoleaks to local compromise.
56082 + There has been very little oversight with an eye toward security involved
56083 + in adding new exporters of information to these filesystems, so their
56084 + use is discouraged.
56085 + This option is equivalent to a chmod 0700 of the mount paths.
56086 +
56087 +config GRKERNSEC_ROFS
56088 + bool "Runtime read-only mount protection"
56089 + help
56090 + If you say Y here, a sysctl option with name "romount_protect" will
56091 + be created. By setting this option to 1 at runtime, filesystems
56092 + will be protected in the following ways:
56093 + * No new writable mounts will be allowed
56094 + * Existing read-only mounts won't be able to be remounted read/write
56095 + * Write operations will be denied on all block devices
56096 + This option acts independently of grsec_lock: once it is set to 1,
56097 + it cannot be turned off. Therefore, please be mindful of the resulting
56098 + behavior if this option is enabled in an init script on a read-only
56099 + filesystem. This feature is mainly intended for secure embedded systems.
56100 +
56101 +config GRKERNSEC_CHROOT
56102 + bool "Chroot jail restrictions"
56103 + help
56104 + If you say Y here, you will be able to choose several options that will
56105 + make breaking out of a chrooted jail much more difficult. If you
56106 + encounter no software incompatibilities with the following options, it
56107 + is recommended that you enable each one.
56108 +
56109 +config GRKERNSEC_CHROOT_MOUNT
56110 + bool "Deny mounts"
56111 + depends on GRKERNSEC_CHROOT
56112 + help
56113 + If you say Y here, processes inside a chroot will not be able to
56114 + mount or remount filesystems. If the sysctl option is enabled, a
56115 + sysctl option with name "chroot_deny_mount" is created.
56116 +
56117 +config GRKERNSEC_CHROOT_DOUBLE
56118 + bool "Deny double-chroots"
56119 + depends on GRKERNSEC_CHROOT
56120 + help
56121 + If you say Y here, processes inside a chroot will not be able to chroot
56122 + again outside the chroot. This is a widely used method of breaking
56123 + out of a chroot jail and should not be allowed. If the sysctl
56124 + option is enabled, a sysctl option with name
56125 + "chroot_deny_chroot" is created.
56126 +
56127 +config GRKERNSEC_CHROOT_PIVOT
56128 + bool "Deny pivot_root in chroot"
56129 + depends on GRKERNSEC_CHROOT
56130 + help
56131 + If you say Y here, processes inside a chroot will not be able to use
56132 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56133 + works similarly to chroot in that it changes the root filesystem. This
56134 + function could be misused in a chrooted process to attempt to break out
56135 + of the chroot, and therefore should not be allowed. If the sysctl
56136 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56137 + created.
56138 +
56139 +config GRKERNSEC_CHROOT_CHDIR
56140 + bool "Enforce chdir(\"/\") on all chroots"
56141 + depends on GRKERNSEC_CHROOT
56142 + help
56143 + If you say Y here, the current working directory of all newly-chrooted
56144 + applications will be set to the root directory of the chroot.
56145 + The man page on chroot(2) states:
56146 + Note that this call does not change the current working
56147 + directory, so that `.' can be outside the tree rooted at
56148 + `/'. In particular, the super-user can escape from a
56149 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56150 +
56151 + It is recommended that you say Y here, since it's not known to break
56152 + any software. If the sysctl option is enabled, a sysctl option with
56153 + name "chroot_enforce_chdir" is created.
56154 +
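For reference, a minimal userspace sketch of the chroot-then-chdir pattern that this option enforces automatically (the jail path is only a placeholder):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *jail = argc > 1 ? argv[1] : "/var/empty";   /* placeholder path */

    if (chroot(jail) != 0) {        /* requires root / CAP_SYS_CHROOT */
        perror("chroot");
        return 1;
    }

    /* Without this chdir(), "." can still refer to a directory outside
     * the new root; this option performs the equivalent step for you. */
    if (chdir("/") != 0) {
        perror("chdir");
        return 1;
    }
    return 0;
}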
56155 +config GRKERNSEC_CHROOT_CHMOD
56156 + bool "Deny (f)chmod +s"
56157 + depends on GRKERNSEC_CHROOT
56158 + help
56159 + If you say Y here, processes inside a chroot will not be able to chmod
56160 + or fchmod files to make them have suid or sgid bits. This protects
56161 + against another published method of breaking a chroot. If the sysctl
56162 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56163 + created.
56164 +
56165 +config GRKERNSEC_CHROOT_FCHDIR
56166 + bool "Deny fchdir out of chroot"
56167 + depends on GRKERNSEC_CHROOT
56168 + help
56169 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56170 + to a file descriptor of the chrooting process that points to a directory
56171 + outside the chroot will be stopped. If the sysctl option
56172 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56173 +
56174 +config GRKERNSEC_CHROOT_MKNOD
56175 + bool "Deny mknod"
56176 + depends on GRKERNSEC_CHROOT
56177 + help
56178 + If you say Y here, processes inside a chroot will not be allowed to
56179 + mknod. The problem with using mknod inside a chroot is that it
56180 + would allow an attacker to create a device entry that is the same
56181 + as one on the physical root of your system, which could be
56182 + anything from the console device to a device for your hard drive (which
56183 + they could then use to wipe the drive or steal data). It is recommended
56184 + that you say Y here, unless you run into software incompatibilities.
56185 + If the sysctl option is enabled, a sysctl option with name
56186 + "chroot_deny_mknod" is created.
56187 +
56188 +config GRKERNSEC_CHROOT_SHMAT
56189 + bool "Deny shmat() out of chroot"
56190 + depends on GRKERNSEC_CHROOT
56191 + help
56192 + If you say Y here, processes inside a chroot will not be able to attach
56193 + to shared memory segments that were created outside of the chroot jail.
56194 + It is recommended that you say Y here. If the sysctl option is enabled,
56195 + a sysctl option with name "chroot_deny_shmat" is created.
56196 +
56197 +config GRKERNSEC_CHROOT_UNIX
56198 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56199 + depends on GRKERNSEC_CHROOT
56200 + help
56201 + If you say Y here, processes inside a chroot will not be able to
56202 + connect to abstract (meaning not belonging to a filesystem) Unix
56203 + domain sockets that were bound outside of a chroot. It is recommended
56204 + that you say Y here. If the sysctl option is enabled, a sysctl option
56205 + with name "chroot_deny_unix" is created.
56206 +
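To make the above concrete, an abstract AF_UNIX address is one whose sun_path begins with a NUL byte, so the name lives in a kernel namespace with no presence in the chroot's filesystem. A small sketch of such a connect() follows; the socket name is a placeholder, and without a listener the call fails anyway, the point being that with this option it is denied from inside a chroot when the socket was bound outside:

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
    /* Abstract address: sun_path begins with a NUL byte, so the name
     * lives in a kernel namespace rather than on the filesystem. */
    struct sockaddr_un sa;
    memset(&sa, 0, sizeof(sa));
    sa.sun_family = AF_UNIX;
    memcpy(sa.sun_path, "\0example", 8);    /* placeholder name */

    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (connect(fd, (struct sockaddr *)&sa,
                offsetof(struct sockaddr_un, sun_path) + 8) == -1)
        perror("connect");
    close(fd);
    return 0;
}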
56207 +config GRKERNSEC_CHROOT_FINDTASK
56208 + bool "Protect outside processes"
56209 + depends on GRKERNSEC_CHROOT
56210 + help
56211 + If you say Y here, processes inside a chroot will not be able to
56212 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56213 + getsid, or view any process outside of the chroot. If the sysctl
56214 + option is enabled, a sysctl option with name "chroot_findtask" is
56215 + created.
56216 +
56217 +config GRKERNSEC_CHROOT_NICE
56218 + bool "Restrict priority changes"
56219 + depends on GRKERNSEC_CHROOT
56220 + help
56221 + If you say Y here, processes inside a chroot will not be able to raise
56222 + the priority of processes in the chroot, or alter the priority of
56223 + processes outside the chroot. This provides more security than simply
56224 + removing CAP_SYS_NICE from the process' capability set. If the
56225 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56226 + is created.
56227 +
56228 +config GRKERNSEC_CHROOT_SYSCTL
56229 + bool "Deny sysctl writes"
56230 + depends on GRKERNSEC_CHROOT
56231 + help
56232 + If you say Y here, an attacker in a chroot will not be able to
56233 + write to sysctl entries, either by sysctl(2) or through a /proc
56234 + interface. It is strongly recommended that you say Y here. If the
56235 + sysctl option is enabled, a sysctl option with name
56236 + "chroot_deny_sysctl" is created.
56237 +
56238 +config GRKERNSEC_CHROOT_CAPS
56239 + bool "Capability restrictions"
56240 + depends on GRKERNSEC_CHROOT
56241 + help
56242 + If you say Y here, the capabilities on all root processes within a
56243 + chroot jail will be lowered to stop module insertion, raw i/o,
56244 + system and net admin tasks, rebooting the system, modifying immutable
56245 + files, modifying IPC owned by another, and changing the system time.
56246 + This is left as an option because it can break some apps. Disable this
56247 + if your chrooted apps are having problems performing those kinds of
56248 + tasks. If the sysctl option is enabled, a sysctl option with
56249 + name "chroot_caps" is created.
56250 +
56251 +endmenu
56252 +menu "Kernel Auditing"
56253 +depends on GRKERNSEC
56254 +
56255 +config GRKERNSEC_AUDIT_GROUP
56256 + bool "Single group for auditing"
56257 + help
56258 + If you say Y here, the exec, chdir, and (un)mount logging features
56259 + will only operate on a group you specify. This option is recommended
56260 + if you only want to watch certain users instead of having a large
56261 + amount of logs from the entire system. If the sysctl option is enabled,
56262 + a sysctl option with name "audit_group" is created.
56263 +
56264 +config GRKERNSEC_AUDIT_GID
56265 + int "GID for auditing"
56266 + depends on GRKERNSEC_AUDIT_GROUP
56267 + default 1007
56268 +
56269 +config GRKERNSEC_EXECLOG
56270 + bool "Exec logging"
56271 + help
56272 + If you say Y here, all execve() calls will be logged (since the
56273 + other exec*() calls are frontends to execve(), all execution
56274 + will be logged). Useful for shell-servers that like to keep track
56275 + of their users. If the sysctl option is enabled, a sysctl option with
56276 + name "exec_logging" is created.
56277 + WARNING: This option when enabled will produce a LOT of logs, especially
56278 + on an active system.
56279 +
56280 +config GRKERNSEC_RESLOG
56281 + bool "Resource logging"
56282 + help
56283 + If you say Y here, all attempts to overstep resource limits will
56284 + be logged with the resource name, the requested size, and the current
56285 + limit. It is highly recommended that you say Y here. If the sysctl
56286 + option is enabled, a sysctl option with name "resource_logging" is
56287 + created. If the RBAC system is enabled, the sysctl value is ignored.
56288 +
56289 +config GRKERNSEC_CHROOT_EXECLOG
56290 + bool "Log execs within chroot"
56291 + help
56292 + If you say Y here, all executions inside a chroot jail will be logged
56293 + to syslog. This can cause a large amount of logs if certain
56294 + applications (e.g. djb's daemontools) are installed on the system, and
56295 + is therefore left as an option. If the sysctl option is enabled, a
56296 + sysctl option with name "chroot_execlog" is created.
56297 +
56298 +config GRKERNSEC_AUDIT_PTRACE
56299 + bool "Ptrace logging"
56300 + help
56301 + If you say Y here, all attempts to attach to a process via ptrace
56302 + will be logged. If the sysctl option is enabled, a sysctl option
56303 + with name "audit_ptrace" is created.
56304 +
56305 +config GRKERNSEC_AUDIT_CHDIR
56306 + bool "Chdir logging"
56307 + help
56308 + If you say Y here, all chdir() calls will be logged. If the sysctl
56309 + option is enabled, a sysctl option with name "audit_chdir" is created.
56310 +
56311 +config GRKERNSEC_AUDIT_MOUNT
56312 + bool "(Un)Mount logging"
56313 + help
56314 + If you say Y here, all mounts and unmounts will be logged. If the
56315 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56316 + created.
56317 +
56318 +config GRKERNSEC_SIGNAL
56319 + bool "Signal logging"
56320 + help
56321 + If you say Y here, certain important signals will be logged, such as
56322 + SIGSEGV, which will as a result inform you when an error in a program
56323 + occurred, which in some cases could indicate a possible exploit attempt.
56324 + If the sysctl option is enabled, a sysctl option with name
56325 + "signal_logging" is created.
56326 +
56327 +config GRKERNSEC_FORKFAIL
56328 + bool "Fork failure logging"
56329 + help
56330 + If you say Y here, all failed fork() attempts will be logged.
56331 + This could suggest a fork bomb, or someone attempting to overstep
56332 + their process limit. If the sysctl option is enabled, a sysctl option
56333 + with name "forkfail_logging" is created.
56334 +
56335 +config GRKERNSEC_TIME
56336 + bool "Time change logging"
56337 + help
56338 + If you say Y here, any changes of the system clock will be logged.
56339 + If the sysctl option is enabled, a sysctl option with name
56340 + "timechange_logging" is created.
56341 +
56342 +config GRKERNSEC_PROC_IPADDR
56343 + bool "/proc/<pid>/ipaddr support"
56344 + help
56345 + If you say Y here, a new entry will be added to each /proc/<pid>
56346 + directory that contains the IP address of the person using the task.
56347 + The IP is carried across local TCP and AF_UNIX stream sockets.
56348 + This information can be useful for IDS/IPSes to perform remote response
56349 + to a local attack. The entry is readable by only the owner of the
56350 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56351 + the RBAC system), and thus does not create privacy concerns.
56352 +
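As a sketch of how a local IDS/IPS helper might consume the entry (the exact output format of the ipaddr file is not specified here and is assumed to be a single line):

#include <stdio.h>

int main(int argc, char **argv)
{
    char path[64], buf[64];

    if (argc < 2) {
        fprintf(stderr, "usage: %s <pid>\n", argv[0]);
        return 1;
    }
    snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);

    FILE *f = fopen(path, "r");
    if (!f) {
        perror(path);               /* unreadable unless owner or root */
        return 1;
    }
    if (fgets(buf, sizeof(buf), f)) /* the recorded peer IP address */
        fputs(buf, stdout);
    fclose(f);
    return 0;
}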
56353 +config GRKERNSEC_RWXMAP_LOG
56354 + bool 'Denied RWX mmap/mprotect logging'
56355 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56356 + help
56357 + If you say Y here, calls to mmap() and mprotect() with explicit
56358 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56359 + denied by the PAX_MPROTECT feature. If the sysctl option is
56360 + enabled, a sysctl option with name "rwxmap_logging" is created.
56361 +
56362 +config GRKERNSEC_AUDIT_TEXTREL
56363 + bool 'ELF text relocations logging (READ HELP)'
56364 + depends on PAX_MPROTECT
56365 + help
56366 + If you say Y here, text relocations will be logged with the filename
56367 + of the offending library or binary. The purpose of the feature is
56368 + to help Linux distribution developers get rid of libraries and
56369 + binaries that need text relocations which hinder the future progress
56370 + of PaX. Only Linux distribution developers should say Y here, and
56371 + never on a production machine, as this option creates an information
56372 + leak that could aid an attacker in defeating the randomization of
56373 + a single memory region. If the sysctl option is enabled, a sysctl
56374 + option with name "audit_textrel" is created.
56375 +
56376 +endmenu
56377 +
56378 +menu "Executable Protections"
56379 +depends on GRKERNSEC
56380 +
56381 +config GRKERNSEC_DMESG
56382 + bool "Dmesg(8) restriction"
56383 + help
56384 + If you say Y here, non-root users will not be able to use dmesg(8)
56385 + to view up to the last 4kb of messages in the kernel's log buffer.
56386 + The kernel's log buffer often contains kernel addresses and other
56387 + identifying information useful to an attacker in fingerprinting a
56388 + system for a targeted exploit.
56389 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56390 + created.
56391 +
56392 +config GRKERNSEC_HARDEN_PTRACE
56393 + bool "Deter ptrace-based process snooping"
56394 + help
56395 + If you say Y here, TTY sniffers and other malicious monitoring
56396 + programs implemented through ptrace will be defeated. If you
56397 + have been using the RBAC system, this option has already been
56398 + enabled for several years for all users, with the ability to make
56399 + fine-grained exceptions.
56400 +
56401 + This option only affects the ability of non-root users to ptrace
56402 + processes that are not a descendant of the ptracing process.
56403 + This means that strace ./binary and gdb ./binary will still work,
56404 + but attaching to arbitrary processes will not. If the sysctl
56405 + option is enabled, a sysctl option with name "harden_ptrace" is
56406 + created.
56407 +
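A rough illustration of what changes: the sketch below attempts a PTRACE_ATTACH to an arbitrary pid. With this option, a non-root user is expected to have the attach refused unless the target is a descendant (the exact error returned is an assumption); tracing one's own child, as strace ./binary and gdb ./binary do, is unaffected.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <pid>\n", argv[0]);
        return 1;
    }
    pid_t pid = (pid_t)atoi(argv[1]);

    /* Attaching to a process that is not our descendant is exactly
     * what harden_ptrace blocks for non-root users. */
    if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
        perror("ptrace(PTRACE_ATTACH)");
        return 1;
    }
    waitpid(pid, NULL, 0);                  /* wait for the stop before detaching */
    ptrace(PTRACE_DETACH, pid, NULL, NULL);
    puts("attach succeeded");
    return 0;
}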
56408 +config GRKERNSEC_TPE
56409 + bool "Trusted Path Execution (TPE)"
56410 + help
56411 + If you say Y here, you will be able to choose a GID to add to the
56412 + supplementary groups of users you want to mark as "untrusted."
56413 + These users will not be able to execute any files that are not in
56414 + root-owned directories writable only by root. If the sysctl option
56415 + is enabled, a sysctl option with name "tpe" is created.
56416 +
56417 +config GRKERNSEC_TPE_ALL
56418 + bool "Partially restrict all non-root users"
56419 + depends on GRKERNSEC_TPE
56420 + help
56421 + If you say Y here, all non-root users will be covered under
56422 + a weaker TPE restriction. This is separate from, and in addition to,
56423 + the main TPE options that you have selected elsewhere. Thus, if a
56424 + "trusted" GID is chosen, this restriction applies to even that GID.
56425 + Under this restriction, all non-root users will only be allowed to
56426 + execute files in directories they own that are not group or
56427 + world-writable, or in directories owned by root and writable only by
56428 + root. If the sysctl option is enabled, a sysctl option with name
56429 + "tpe_restrict_all" is created.
56430 +
56431 +config GRKERNSEC_TPE_INVERT
56432 + bool "Invert GID option"
56433 + depends on GRKERNSEC_TPE
56434 + help
56435 + If you say Y here, the group you specify in the TPE configuration will
56436 + decide what group TPE restrictions will be *disabled* for. This
56437 + option is useful if you want TPE restrictions to be applied to most
56438 + users on the system. If the sysctl option is enabled, a sysctl option
56439 + with name "tpe_invert" is created. Unlike other sysctl options, this
56440 + entry will default to on for backward-compatibility.
56441 +
56442 +config GRKERNSEC_TPE_GID
56443 + int "GID for untrusted users"
56444 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56445 + default 1005
56446 + help
56447 + Setting this GID determines what group TPE restrictions will be
56448 + *enabled* for. If the sysctl option is enabled, a sysctl option
56449 + with name "tpe_gid" is created.
56450 +
56451 +config GRKERNSEC_TPE_GID
56452 + int "GID for trusted users"
56453 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56454 + default 1005
56455 + help
56456 + Setting this GID determines what group TPE restrictions will be
56457 + *disabled* for. If the sysctl option is enabled, a sysctl option
56458 + with name "tpe_gid" is created.
56459 +
56460 +endmenu
56461 +menu "Network Protections"
56462 +depends on GRKERNSEC
56463 +
56464 +config GRKERNSEC_RANDNET
56465 + bool "Larger entropy pools"
56466 + help
56467 + If you say Y here, the entropy pools used for many features of Linux
56468 + and grsecurity will be doubled in size. Since several grsecurity
56469 + features use additional randomness, it is recommended that you say Y
56470 + here. Saying Y here has a similar effect as modifying
56471 + /proc/sys/kernel/random/poolsize.
56472 +
56473 +config GRKERNSEC_BLACKHOLE
56474 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56475 + depends on NET
56476 + help
56477 + If you say Y here, neither TCP resets nor ICMP
56478 + destination-unreachable packets will be sent in response to packets
56479 + sent to ports for which no associated listening process exists.
56480 + This feature supports both IPv4 and IPv6 and exempts the
56481 + loopback interface from blackholing. Enabling this feature
56482 + makes a host more resilient to DoS attacks and reduces network
56483 + visibility against scanners.
56484 +
56485 + The blackhole feature as-implemented is equivalent to the FreeBSD
56486 + blackhole feature, as it prevents RST responses to all packets, not
56487 + just SYNs. Under most application behavior this causes no
56488 + problems, but applications (like haproxy) may not close certain
56489 + connections in a way that cleanly terminates them on the remote
56490 + end, leaving the remote host in LAST_ACK state. Because of this
56491 + side-effect and to prevent intentional LAST_ACK DoSes, this
56492 + feature also adds automatic mitigation against such attacks.
56493 + The mitigation drastically reduces the amount of time a socket
56494 + can spend in LAST_ACK state. If you're using haproxy and not
56495 + all servers it connects to have this option enabled, consider
56496 + disabling this feature on the haproxy host.
56497 +
56498 + If the sysctl option is enabled, two sysctl options with names
56499 + "ip_blackhole" and "lastack_retries" will be created.
56500 + While "ip_blackhole" takes the standard zero/non-zero on/off
56501 + toggle, "lastack_retries" uses the same kinds of values as
56502 + "tcp_retries1" and "tcp_retries2". The default value of 4
56503 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56504 + state.
56505 +
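One way to observe the behaviour described above is to connect() to a closed port on a host with this feature enabled. In the sketch below the address and port are placeholders, and because the loopback interface is exempt the target must be a non-loopback address; the scanner sees a slow timeout rather than a quick refusal:

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_in sa;
    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_port = htons(54321);                     /* assumed closed port */
    inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);  /* placeholder address */

    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == -1) {
        /* Without blackholing: the RST gives a quick ECONNREFUSED.
         * With blackholing: no RST is sent, so the attempt eventually
         * fails with ETIMEDOUT instead. */
        printf("connect: %s\n", strerror(errno));
    }
    close(fd);
    return 0;
}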
56506 +config GRKERNSEC_SOCKET
56507 + bool "Socket restrictions"
56508 + depends on NET
56509 + help
56510 + If you say Y here, you will be able to choose from several options.
56511 + If you assign a GID on your system and add it to the supplementary
56512 + groups of users you want to restrict socket access to, this patch
56513 + will perform up to three things, based on the option(s) you choose.
56514 +
56515 +config GRKERNSEC_SOCKET_ALL
56516 + bool "Deny any sockets to group"
56517 + depends on GRKERNSEC_SOCKET
56518 + help
56519 + If you say Y here, you will be able to choose a GID whose users will
56520 + be unable to connect to other hosts from your machine or run server
56521 + applications from your machine. If the sysctl option is enabled, a
56522 + sysctl option with name "socket_all" is created.
56523 +
56524 +config GRKERNSEC_SOCKET_ALL_GID
56525 + int "GID to deny all sockets for"
56526 + depends on GRKERNSEC_SOCKET_ALL
56527 + default 1004
56528 + help
56529 + Here you can choose the GID to disable socket access for. Remember to
56530 + add the users you want socket access disabled for to the GID
56531 + specified here. If the sysctl option is enabled, a sysctl option
56532 + with name "socket_all_gid" is created.
56533 +
56534 +config GRKERNSEC_SOCKET_CLIENT
56535 + bool "Deny client sockets to group"
56536 + depends on GRKERNSEC_SOCKET
56537 + help
56538 + If you say Y here, you will be able to choose a GID whose users will
56539 + be unable to connect to other hosts from your machine, but will be
56540 + able to run servers. If this option is enabled, all users in the group
56541 + you specify will have to use passive mode when initiating ftp transfers
56542 + from the shell on your machine. If the sysctl option is enabled, a
56543 + sysctl option with name "socket_client" is created.
56544 +
56545 +config GRKERNSEC_SOCKET_CLIENT_GID
56546 + int "GID to deny client sockets for"
56547 + depends on GRKERNSEC_SOCKET_CLIENT
56548 + default 1003
56549 + help
56550 + Here you can choose the GID to disable client socket access for.
56551 + Remember to add the users you want client socket access disabled for to
56552 + the GID specified here. If the sysctl option is enabled, a sysctl
56553 + option with name "socket_client_gid" is created.
56554 +
56555 +config GRKERNSEC_SOCKET_SERVER
56556 + bool "Deny server sockets to group"
56557 + depends on GRKERNSEC_SOCKET
56558 + help
56559 + If you say Y here, you will be able to choose a GID whose users will
56560 + be unable to run server applications from your machine. If the sysctl
56561 + option is enabled, a sysctl option with name "socket_server" is created.
56562 +
56563 +config GRKERNSEC_SOCKET_SERVER_GID
56564 + int "GID to deny server sockets for"
56565 + depends on GRKERNSEC_SOCKET_SERVER
56566 + default 1002
56567 + help
56568 + Here you can choose the GID to disable server socket access for.
56569 + Remember to add the users you want server socket access disabled for to
56570 + the GID specified here. If the sysctl option is enabled, a sysctl
56571 + option with name "socket_server_gid" is created.
56572 +
56573 +endmenu
56574 +menu "Sysctl support"
56575 +depends on GRKERNSEC && SYSCTL
56576 +
56577 +config GRKERNSEC_SYSCTL
56578 + bool "Sysctl support"
56579 + help
56580 + If you say Y here, you will be able to change the options that
56581 + grsecurity runs with at bootup, without having to recompile your
56582 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56583 + to enable (1) or disable (0) various features. All the sysctl entries
56584 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56585 + All features enabled in the kernel configuration are disabled at boot
56586 + if you do not say Y to the "Turn on features by default" option.
56587 + All options should be set at startup, and the grsec_lock entry should
56588 + be set to a non-zero value after all the options are set.
56589 + *THIS IS EXTREMELY IMPORTANT*
56590 +
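Normally these entries are set with echo from an init script; a minimal C equivalent, using only the /proc/sys/kernel/grsecurity path and entry names given in the help texts here, might look like this (root is required):

#include <stdio.h>

/* Write a single value to a grsecurity sysctl entry. */
static int write_sysctl(const char *path, const char *value)
{
    FILE *f = fopen(path, "w");
    if (!f) {
        perror(path);
        return -1;
    }
    fputs(value, f);
    return fclose(f);
}

int main(void)
{
    /* Enable one feature ("chroot_deny_mount" is named above); any
     * other entry is toggled the same way. */
    write_sysctl("/proc/sys/kernel/grsecurity/chroot_deny_mount", "1");

    /* Once everything is configured, set grsec_lock so the values
     * can no longer be changed until reboot. */
    write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
    return 0;
}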
56591 +config GRKERNSEC_SYSCTL_DISTRO
56592 + bool "Extra sysctl support for distro makers (READ HELP)"
56593 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56594 + help
56595 + If you say Y here, additional sysctl options will be created
56596 + for features that affect processes running as root. Therefore,
56597 + it is critical when using this option that the grsec_lock entry be
56598 + enabled after boot. Only distros that ship prebuilt kernel packages
56599 + with this option enabled and that can ensure grsec_lock is enabled
56600 + after boot should use this option.
56601 + *Failure to set grsec_lock after boot makes all grsec features
56602 + this option covers useless*
56603 +
56604 + Currently this option creates the following sysctl entries:
56605 + "Disable Privileged I/O": "disable_priv_io"
56606 +
56607 +config GRKERNSEC_SYSCTL_ON
56608 + bool "Turn on features by default"
56609 + depends on GRKERNSEC_SYSCTL
56610 + help
56611 + If you say Y here, instead of all features enabled in the
56612 + kernel configuration being disabled at boot time, the features will be
56613 + enabled at boot time. It is recommended you say Y here unless
56614 + there is some reason you would want all sysctl-tunable features to
56615 + be disabled by default. As mentioned elsewhere, it is important
56616 + to enable the grsec_lock entry once you have finished modifying
56617 + the sysctl entries.
56618 +
56619 +endmenu
56620 +menu "Logging Options"
56621 +depends on GRKERNSEC
56622 +
56623 +config GRKERNSEC_FLOODTIME
56624 + int "Seconds in between log messages (minimum)"
56625 + default 10
56626 + help
56627 + This option allows you to enforce the minimum number of seconds between
56628 + grsecurity log messages. The default should be suitable for most
56629 + people; however, if you choose to change it, choose a value small enough
56630 + to allow informative logs to be produced, but large enough to
56631 + prevent flooding.
56632 +
56633 +config GRKERNSEC_FLOODBURST
56634 + int "Number of messages in a burst (maximum)"
56635 + default 4
56636 + help
56637 + This option allows you to choose the maximum number of messages allowed
56638 + within the flood time interval you chose in a separate option. The
56639 + default should be suitable for most people; however, if you find that
56640 + many of your logs are being interpreted as flooding, you may want to
56641 + raise this value.
56642 +
56643 +endmenu
56644 +
56645 +endmenu
56646 diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56647 --- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56648 +++ linux-2.6.32.45/grsecurity/Makefile 2011-08-17 19:02:41.000000000 -0400
56649 @@ -0,0 +1,33 @@
56650 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56651 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56652 +# into an RBAC system
56653 +#
56654 +# All code in this directory and various hooks inserted throughout the kernel
56655 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56656 +# under the GPL v2 or higher
56657 +
56658 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56659 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56660 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56661 +
56662 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56663 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56664 + gracl_learn.o grsec_log.o
56665 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56666 +
56667 +ifdef CONFIG_NET
56668 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o grsec_sock.o
56669 +endif
56670 +
56671 +ifndef CONFIG_GRKERNSEC
56672 +obj-y += grsec_disabled.o
56673 +endif
56674 +
56675 +ifdef CONFIG_GRKERNSEC_HIDESYM
56676 +extra-y := grsec_hidesym.o
56677 +$(obj)/grsec_hidesym.o:
56678 + @-chmod -f 500 /boot
56679 + @-chmod -f 500 /lib/modules
56680 + @-chmod -f 700 .
56681 + @echo ' grsec: protected kernel image paths'
56682 +endif
56683 diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56684 --- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56685 +++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56686 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56687 acpi_op_bind bind;
56688 acpi_op_unbind unbind;
56689 acpi_op_notify notify;
56690 -};
56691 +} __no_const;
56692
56693 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56694
56695 diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56696 --- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56697 +++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56698 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56699 Dock Station
56700 -------------------------------------------------------------------------- */
56701 struct acpi_dock_ops {
56702 - acpi_notify_handler handler;
56703 - acpi_notify_handler uevent;
56704 + const acpi_notify_handler handler;
56705 + const acpi_notify_handler uevent;
56706 };
56707
56708 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56709 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56710 extern int register_dock_notifier(struct notifier_block *nb);
56711 extern void unregister_dock_notifier(struct notifier_block *nb);
56712 extern int register_hotplug_dock_device(acpi_handle handle,
56713 - struct acpi_dock_ops *ops,
56714 + const struct acpi_dock_ops *ops,
56715 void *context);
56716 extern void unregister_hotplug_dock_device(acpi_handle handle);
56717 #else
56718 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56719 {
56720 }
56721 static inline int register_hotplug_dock_device(acpi_handle handle,
56722 - struct acpi_dock_ops *ops,
56723 + const struct acpi_dock_ops *ops,
56724 void *context)
56725 {
56726 return -ENODEV;
56727 diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56728 --- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56729 +++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56730 @@ -22,6 +22,12 @@
56731
56732 typedef atomic64_t atomic_long_t;
56733
56734 +#ifdef CONFIG_PAX_REFCOUNT
56735 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56736 +#else
56737 +typedef atomic64_t atomic_long_unchecked_t;
56738 +#endif
56739 +
56740 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56741
56742 static inline long atomic_long_read(atomic_long_t *l)
56743 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56744 return (long)atomic64_read(v);
56745 }
56746
56747 +#ifdef CONFIG_PAX_REFCOUNT
56748 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56749 +{
56750 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56751 +
56752 + return (long)atomic64_read_unchecked(v);
56753 +}
56754 +#endif
56755 +
56756 static inline void atomic_long_set(atomic_long_t *l, long i)
56757 {
56758 atomic64_t *v = (atomic64_t *)l;
56759 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56760 atomic64_set(v, i);
56761 }
56762
56763 +#ifdef CONFIG_PAX_REFCOUNT
56764 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56765 +{
56766 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56767 +
56768 + atomic64_set_unchecked(v, i);
56769 +}
56770 +#endif
56771 +
56772 static inline void atomic_long_inc(atomic_long_t *l)
56773 {
56774 atomic64_t *v = (atomic64_t *)l;
56775 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56776 atomic64_inc(v);
56777 }
56778
56779 +#ifdef CONFIG_PAX_REFCOUNT
56780 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56781 +{
56782 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56783 +
56784 + atomic64_inc_unchecked(v);
56785 +}
56786 +#endif
56787 +
56788 static inline void atomic_long_dec(atomic_long_t *l)
56789 {
56790 atomic64_t *v = (atomic64_t *)l;
56791 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56792 atomic64_dec(v);
56793 }
56794
56795 +#ifdef CONFIG_PAX_REFCOUNT
56796 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56797 +{
56798 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56799 +
56800 + atomic64_dec_unchecked(v);
56801 +}
56802 +#endif
56803 +
56804 static inline void atomic_long_add(long i, atomic_long_t *l)
56805 {
56806 atomic64_t *v = (atomic64_t *)l;
56807 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56808 atomic64_add(i, v);
56809 }
56810
56811 +#ifdef CONFIG_PAX_REFCOUNT
56812 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56813 +{
56814 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56815 +
56816 + atomic64_add_unchecked(i, v);
56817 +}
56818 +#endif
56819 +
56820 static inline void atomic_long_sub(long i, atomic_long_t *l)
56821 {
56822 atomic64_t *v = (atomic64_t *)l;
56823 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56824 return (long)atomic64_inc_return(v);
56825 }
56826
56827 +#ifdef CONFIG_PAX_REFCOUNT
56828 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56829 +{
56830 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56831 +
56832 + return (long)atomic64_inc_return_unchecked(v);
56833 +}
56834 +#endif
56835 +
56836 static inline long atomic_long_dec_return(atomic_long_t *l)
56837 {
56838 atomic64_t *v = (atomic64_t *)l;
56839 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56840
56841 typedef atomic_t atomic_long_t;
56842
56843 +#ifdef CONFIG_PAX_REFCOUNT
56844 +typedef atomic_unchecked_t atomic_long_unchecked_t;
56845 +#else
56846 +typedef atomic_t atomic_long_unchecked_t;
56847 +#endif
56848 +
56849 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56850 static inline long atomic_long_read(atomic_long_t *l)
56851 {
56852 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56853 return (long)atomic_read(v);
56854 }
56855
56856 +#ifdef CONFIG_PAX_REFCOUNT
56857 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56858 +{
56859 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56860 +
56861 + return (long)atomic_read_unchecked(v);
56862 +}
56863 +#endif
56864 +
56865 static inline void atomic_long_set(atomic_long_t *l, long i)
56866 {
56867 atomic_t *v = (atomic_t *)l;
56868 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56869 atomic_set(v, i);
56870 }
56871
56872 +#ifdef CONFIG_PAX_REFCOUNT
56873 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56874 +{
56875 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56876 +
56877 + atomic_set_unchecked(v, i);
56878 +}
56879 +#endif
56880 +
56881 static inline void atomic_long_inc(atomic_long_t *l)
56882 {
56883 atomic_t *v = (atomic_t *)l;
56884 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56885 atomic_inc(v);
56886 }
56887
56888 +#ifdef CONFIG_PAX_REFCOUNT
56889 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56890 +{
56891 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56892 +
56893 + atomic_inc_unchecked(v);
56894 +}
56895 +#endif
56896 +
56897 static inline void atomic_long_dec(atomic_long_t *l)
56898 {
56899 atomic_t *v = (atomic_t *)l;
56900 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56901 atomic_dec(v);
56902 }
56903
56904 +#ifdef CONFIG_PAX_REFCOUNT
56905 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56906 +{
56907 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56908 +
56909 + atomic_dec_unchecked(v);
56910 +}
56911 +#endif
56912 +
56913 static inline void atomic_long_add(long i, atomic_long_t *l)
56914 {
56915 atomic_t *v = (atomic_t *)l;
56916 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56917 atomic_add(i, v);
56918 }
56919
56920 +#ifdef CONFIG_PAX_REFCOUNT
56921 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56922 +{
56923 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56924 +
56925 + atomic_add_unchecked(i, v);
56926 +}
56927 +#endif
56928 +
56929 static inline void atomic_long_sub(long i, atomic_long_t *l)
56930 {
56931 atomic_t *v = (atomic_t *)l;
56932 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56933 return (long)atomic_inc_return(v);
56934 }
56935
56936 +#ifdef CONFIG_PAX_REFCOUNT
56937 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56938 +{
56939 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56940 +
56941 + return (long)atomic_inc_return_unchecked(v);
56942 +}
56943 +#endif
56944 +
56945 static inline long atomic_long_dec_return(atomic_long_t *l)
56946 {
56947 atomic_t *v = (atomic_t *)l;
56948 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56949
56950 #endif /* BITS_PER_LONG == 64 */
56951
56952 +#ifdef CONFIG_PAX_REFCOUNT
56953 +static inline void pax_refcount_needs_these_functions(void)
56954 +{
56955 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
56956 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56957 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56958 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56959 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56960 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56961 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56962 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56963 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56964 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56965 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56966 +
56967 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56968 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56969 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56970 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56971 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56972 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56973 +}
56974 +#else
56975 +#define atomic_read_unchecked(v) atomic_read(v)
56976 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56977 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56978 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56979 +#define atomic_inc_unchecked(v) atomic_inc(v)
56980 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56981 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56982 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56983 +#define atomic_dec_unchecked(v) atomic_dec(v)
56984 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56985 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56986 +
56987 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
56988 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56989 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56990 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56991 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56992 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56993 +#endif
56994 +
56995 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
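The *_unchecked variants introduced in this hunk exist so that counters which may legitimately wrap (statistics and the like) can opt out of the overflow detection PAX_REFCOUNT adds to plain atomic_t. A purely illustrative userspace sketch of that distinction follows; it ignores atomicity and is not the kernel implementation:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_emul_t;

/* "Checked" increment: refuse to wrap, as PAX_REFCOUNT does for atomic_t. */
static void atomic_inc_checked(atomic_emul_t *v)
{
    if (v->counter == INT_MAX) {
        fprintf(stderr, "refcount overflow detected, refusing to wrap\n");
        abort();
    }
    v->counter++;
}

/* "Unchecked" increment: wrapping is acceptable, e.g. for statistics. */
static void atomic_inc_unchecked_emul(atomic_emul_t *v)
{
    v->counter = (int)((unsigned int)v->counter + 1u);  /* wraps via unsigned math */
}

int main(void)
{
    atomic_emul_t stats = { INT_MAX };
    atomic_inc_unchecked_emul(&stats);      /* wraps silently */
    printf("stats counter wrapped to %d\n", stats.counter);

    atomic_emul_t ref = { INT_MAX };
    atomic_inc_checked(&ref);               /* aborts instead of wrapping */
    return 0;
}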
56996 diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56997 --- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56998 +++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56999 @@ -6,7 +6,7 @@
57000 * cache lines need to provide their own cache.h.
57001 */
57002
57003 -#define L1_CACHE_SHIFT 5
57004 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57005 +#define L1_CACHE_SHIFT 5UL
57006 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57007
57008 #endif /* __ASM_GENERIC_CACHE_H */
57009 diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
57010 --- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
57011 +++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
57012 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
57013 enum dma_data_direction dir,
57014 struct dma_attrs *attrs)
57015 {
57016 - struct dma_map_ops *ops = get_dma_ops(dev);
57017 + const struct dma_map_ops *ops = get_dma_ops(dev);
57018 dma_addr_t addr;
57019
57020 kmemcheck_mark_initialized(ptr, size);
57021 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
57022 enum dma_data_direction dir,
57023 struct dma_attrs *attrs)
57024 {
57025 - struct dma_map_ops *ops = get_dma_ops(dev);
57026 + const struct dma_map_ops *ops = get_dma_ops(dev);
57027
57028 BUG_ON(!valid_dma_direction(dir));
57029 if (ops->unmap_page)
57030 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
57031 int nents, enum dma_data_direction dir,
57032 struct dma_attrs *attrs)
57033 {
57034 - struct dma_map_ops *ops = get_dma_ops(dev);
57035 + const struct dma_map_ops *ops = get_dma_ops(dev);
57036 int i, ents;
57037 struct scatterlist *s;
57038
57039 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
57040 int nents, enum dma_data_direction dir,
57041 struct dma_attrs *attrs)
57042 {
57043 - struct dma_map_ops *ops = get_dma_ops(dev);
57044 + const struct dma_map_ops *ops = get_dma_ops(dev);
57045
57046 BUG_ON(!valid_dma_direction(dir));
57047 debug_dma_unmap_sg(dev, sg, nents, dir);
57048 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
57049 size_t offset, size_t size,
57050 enum dma_data_direction dir)
57051 {
57052 - struct dma_map_ops *ops = get_dma_ops(dev);
57053 + const struct dma_map_ops *ops = get_dma_ops(dev);
57054 dma_addr_t addr;
57055
57056 kmemcheck_mark_initialized(page_address(page) + offset, size);
57057 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
57058 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
57059 size_t size, enum dma_data_direction dir)
57060 {
57061 - struct dma_map_ops *ops = get_dma_ops(dev);
57062 + const struct dma_map_ops *ops = get_dma_ops(dev);
57063
57064 BUG_ON(!valid_dma_direction(dir));
57065 if (ops->unmap_page)
57066 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
57067 size_t size,
57068 enum dma_data_direction dir)
57069 {
57070 - struct dma_map_ops *ops = get_dma_ops(dev);
57071 + const struct dma_map_ops *ops = get_dma_ops(dev);
57072
57073 BUG_ON(!valid_dma_direction(dir));
57074 if (ops->sync_single_for_cpu)
57075 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57076 dma_addr_t addr, size_t size,
57077 enum dma_data_direction dir)
57078 {
57079 - struct dma_map_ops *ops = get_dma_ops(dev);
57080 + const struct dma_map_ops *ops = get_dma_ops(dev);
57081
57082 BUG_ON(!valid_dma_direction(dir));
57083 if (ops->sync_single_for_device)
57084 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57085 size_t size,
57086 enum dma_data_direction dir)
57087 {
57088 - struct dma_map_ops *ops = get_dma_ops(dev);
57089 + const struct dma_map_ops *ops = get_dma_ops(dev);
57090
57091 BUG_ON(!valid_dma_direction(dir));
57092 if (ops->sync_single_range_for_cpu) {
57093 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57094 size_t size,
57095 enum dma_data_direction dir)
57096 {
57097 - struct dma_map_ops *ops = get_dma_ops(dev);
57098 + const struct dma_map_ops *ops = get_dma_ops(dev);
57099
57100 BUG_ON(!valid_dma_direction(dir));
57101 if (ops->sync_single_range_for_device) {
57102 @@ -155,7 +155,7 @@ static inline void
57103 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57104 int nelems, enum dma_data_direction dir)
57105 {
57106 - struct dma_map_ops *ops = get_dma_ops(dev);
57107 + const struct dma_map_ops *ops = get_dma_ops(dev);
57108
57109 BUG_ON(!valid_dma_direction(dir));
57110 if (ops->sync_sg_for_cpu)
57111 @@ -167,7 +167,7 @@ static inline void
57112 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57113 int nelems, enum dma_data_direction dir)
57114 {
57115 - struct dma_map_ops *ops = get_dma_ops(dev);
57116 + const struct dma_map_ops *ops = get_dma_ops(dev);
57117
57118 BUG_ON(!valid_dma_direction(dir));
57119 if (ops->sync_sg_for_device)
57120 diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
57121 --- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57122 +++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57123 @@ -6,7 +6,7 @@
57124 #include <asm/errno.h>
57125
57126 static inline int
57127 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57128 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57129 {
57130 int op = (encoded_op >> 28) & 7;
57131 int cmp = (encoded_op >> 24) & 15;
57132 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57133 }
57134
57135 static inline int
57136 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57137 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57138 {
57139 return -ENOSYS;
57140 }
57141 diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
57142 --- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57143 +++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57144 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57145 typedef signed long s64;
57146 typedef unsigned long u64;
57147
57148 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57149 +
57150 #define S8_C(x) x
57151 #define U8_C(x) x ## U
57152 #define S16_C(x) x
57153 diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
57154 --- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57155 +++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57156 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57157 typedef signed long long s64;
57158 typedef unsigned long long u64;
57159
57160 +typedef unsigned long long intoverflow_t;
57161 +
57162 #define S8_C(x) x
57163 #define U8_C(x) x ## U
57164 #define S16_C(x) x
57165 diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
57166 --- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57167 +++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57168 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57169 KMAP_D(16) KM_IRQ_PTE,
57170 KMAP_D(17) KM_NMI,
57171 KMAP_D(18) KM_NMI_PTE,
57172 -KMAP_D(19) KM_TYPE_NR
57173 +KMAP_D(19) KM_CLEARPAGE,
57174 +KMAP_D(20) KM_TYPE_NR
57175 };
57176
57177 #undef KMAP_D
57178 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
57179 --- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57180 +++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57181 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57182 unsigned long size);
57183 #endif
57184
57185 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57186 +static inline unsigned long pax_open_kernel(void) { return 0; }
57187 +#endif
57188 +
57189 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57190 +static inline unsigned long pax_close_kernel(void) { return 0; }
57191 +#endif
57192 +
57193 #endif /* !__ASSEMBLY__ */
57194
57195 #endif /* _ASM_GENERIC_PGTABLE_H */
57196 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
57197 --- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57198 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57199 @@ -1,14 +1,19 @@
57200 #ifndef _PGTABLE_NOPMD_H
57201 #define _PGTABLE_NOPMD_H
57202
57203 -#ifndef __ASSEMBLY__
57204 -
57205 #include <asm-generic/pgtable-nopud.h>
57206
57207 -struct mm_struct;
57208 -
57209 #define __PAGETABLE_PMD_FOLDED
57210
57211 +#define PMD_SHIFT PUD_SHIFT
57212 +#define PTRS_PER_PMD 1
57213 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57214 +#define PMD_MASK (~(PMD_SIZE-1))
57215 +
57216 +#ifndef __ASSEMBLY__
57217 +
57218 +struct mm_struct;
57219 +
57220 /*
57221 * Having the pmd type consist of a pud gets the size right, and allows
57222 * us to conceptually access the pud entry that this pmd is folded into
57223 @@ -16,11 +21,6 @@ struct mm_struct;
57224 */
57225 typedef struct { pud_t pud; } pmd_t;
57226
57227 -#define PMD_SHIFT PUD_SHIFT
57228 -#define PTRS_PER_PMD 1
57229 -#define PMD_SIZE (1UL << PMD_SHIFT)
57230 -#define PMD_MASK (~(PMD_SIZE-1))
57231 -
57232 /*
57233 * The "pud_xxx()" functions here are trivial for a folded two-level
57234 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57235 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
57236 --- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57237 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57238 @@ -1,10 +1,15 @@
57239 #ifndef _PGTABLE_NOPUD_H
57240 #define _PGTABLE_NOPUD_H
57241
57242 -#ifndef __ASSEMBLY__
57243 -
57244 #define __PAGETABLE_PUD_FOLDED
57245
57246 +#define PUD_SHIFT PGDIR_SHIFT
57247 +#define PTRS_PER_PUD 1
57248 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57249 +#define PUD_MASK (~(PUD_SIZE-1))
57250 +
57251 +#ifndef __ASSEMBLY__
57252 +
57253 /*
57254 * Having the pud type consist of a pgd gets the size right, and allows
57255 * us to conceptually access the pgd entry that this pud is folded into
57256 @@ -12,11 +17,6 @@
57257 */
57258 typedef struct { pgd_t pgd; } pud_t;
57259
57260 -#define PUD_SHIFT PGDIR_SHIFT
57261 -#define PTRS_PER_PUD 1
57262 -#define PUD_SIZE (1UL << PUD_SHIFT)
57263 -#define PUD_MASK (~(PUD_SIZE-1))
57264 -
57265 /*
57266 * The "pgd_xxx()" functions here are trivial for a folded two-level
57267 * setup: the pud is never bad, and a pud always exists (as it's folded
57268 diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
57269 --- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57270 +++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57271 @@ -199,6 +199,7 @@
57272 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57273 VMLINUX_SYMBOL(__start_rodata) = .; \
57274 *(.rodata) *(.rodata.*) \
57275 + *(.data.read_only) \
57276 *(__vermagic) /* Kernel version magic */ \
57277 *(__markers_strings) /* Markers: strings */ \
57278 *(__tracepoints_strings)/* Tracepoints: strings */ \
57279 @@ -656,22 +657,24 @@
57280 * section in the linker script will go there too. @phdr should have
57281 * a leading colon.
57282 *
57283 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57284 + * Note that this macros defines per_cpu_load as an absolute symbol.
57285 * If there is no need to put the percpu section at a predetermined
57286 * address, use PERCPU().
57287 */
57288 #define PERCPU_VADDR(vaddr, phdr) \
57289 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57290 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57291 + per_cpu_load = .; \
57292 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57293 - LOAD_OFFSET) { \
57294 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57295 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57296 *(.data.percpu.first) \
57297 - *(.data.percpu.page_aligned) \
57298 *(.data.percpu) \
57299 + . = ALIGN(PAGE_SIZE); \
57300 + *(.data.percpu.page_aligned) \
57301 *(.data.percpu.shared_aligned) \
57302 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57303 } phdr \
57304 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57305 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57306
57307 /**
57308 * PERCPU - define output section for percpu area, simple version
57309 diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57310 --- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57311 +++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57312 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57313
57314 /* reload the current crtc LUT */
57315 void (*load_lut)(struct drm_crtc *crtc);
57316 -};
57317 +} __no_const;
57318
57319 struct drm_encoder_helper_funcs {
57320 void (*dpms)(struct drm_encoder *encoder, int mode);
57321 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57322 struct drm_connector *connector);
57323 /* disable encoder when not in use - more explicit than dpms off */
57324 void (*disable)(struct drm_encoder *encoder);
57325 -};
57326 +} __no_const;
57327
57328 struct drm_connector_helper_funcs {
57329 int (*get_modes)(struct drm_connector *connector);
57330 diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57331 --- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57332 +++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57333 @@ -71,6 +71,7 @@
57334 #include <linux/workqueue.h>
57335 #include <linux/poll.h>
57336 #include <asm/pgalloc.h>
57337 +#include <asm/local.h>
57338 #include "drm.h"
57339
57340 #include <linux/idr.h>
57341 @@ -814,7 +815,7 @@ struct drm_driver {
57342 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57343
57344 /* Driver private ops for this object */
57345 - struct vm_operations_struct *gem_vm_ops;
57346 + const struct vm_operations_struct *gem_vm_ops;
57347
57348 int major;
57349 int minor;
57350 @@ -917,7 +918,7 @@ struct drm_device {
57351
57352 /** \name Usage Counters */
57353 /*@{ */
57354 - int open_count; /**< Outstanding files open */
57355 + local_t open_count; /**< Outstanding files open */
57356 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57357 atomic_t vma_count; /**< Outstanding vma areas open */
57358 int buf_use; /**< Buffers in use -- cannot alloc */
57359 @@ -928,7 +929,7 @@ struct drm_device {
57360 /*@{ */
57361 unsigned long counters;
57362 enum drm_stat_type types[15];
57363 - atomic_t counts[15];
57364 + atomic_unchecked_t counts[15];
57365 /*@} */
57366
57367 struct list_head filelist;
57368 @@ -1016,7 +1017,7 @@ struct drm_device {
57369 struct pci_controller *hose;
57370 #endif
57371 struct drm_sg_mem *sg; /**< Scatter gather memory */
57372 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
57373 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
57374 void *dev_private; /**< device private data */
57375 void *mm_private;
57376 struct address_space *dev_mapping;
57377 @@ -1042,11 +1043,11 @@ struct drm_device {
57378 spinlock_t object_name_lock;
57379 struct idr object_name_idr;
57380 atomic_t object_count;
57381 - atomic_t object_memory;
57382 + atomic_unchecked_t object_memory;
57383 atomic_t pin_count;
57384 - atomic_t pin_memory;
57385 + atomic_unchecked_t pin_memory;
57386 atomic_t gtt_count;
57387 - atomic_t gtt_memory;
57388 + atomic_unchecked_t gtt_memory;
57389 uint32_t gtt_total;
57390 uint32_t invalidate_domains; /* domains pending invalidation */
57391 uint32_t flush_domains; /* domains pending flush */
57392 diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57393 --- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57394 +++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57395 @@ -47,7 +47,7 @@
57396
57397 struct ttm_mem_shrink {
57398 int (*do_shrink) (struct ttm_mem_shrink *);
57399 -};
57400 +} __no_const;
57401
57402 /**
57403 * struct ttm_mem_global - Global memory accounting structure.
57404 diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57405 --- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57406 +++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57407 @@ -39,6 +39,14 @@ enum machine_type {
57408 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57409 };
57410
57411 +/* Constants for the N_FLAGS field */
57412 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57413 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57414 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57415 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57416 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57417 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57418 +
57419 #if !defined (N_MAGIC)
57420 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57421 #endif
57422 diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57423 --- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57424 +++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57425 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57426 #endif
57427
57428 struct k_atm_aal_stats {
57429 -#define __HANDLE_ITEM(i) atomic_t i
57430 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57431 __AAL_STAT_ITEMS
57432 #undef __HANDLE_ITEM
57433 };
57434 diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57435 --- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57436 +++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57437 @@ -36,18 +36,18 @@ struct backlight_device;
57438 struct fb_info;
57439
57440 struct backlight_ops {
57441 - unsigned int options;
57442 + const unsigned int options;
57443
57444 #define BL_CORE_SUSPENDRESUME (1 << 0)
57445
57446 /* Notify the backlight driver some property has changed */
57447 - int (*update_status)(struct backlight_device *);
57448 + int (* const update_status)(struct backlight_device *);
57449 /* Return the current backlight brightness (accounting for power,
57450 fb_blank etc.) */
57451 - int (*get_brightness)(struct backlight_device *);
57452 + int (* const get_brightness)(struct backlight_device *);
57453 /* Check if given framebuffer device is the one bound to this backlight;
57454 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57455 - int (*check_fb)(struct fb_info *);
57456 + int (* const check_fb)(struct fb_info *);
57457 };
57458
57459 /* This structure defines all the properties of a backlight */
57460 @@ -86,7 +86,7 @@ struct backlight_device {
57461 registered this device has been unloaded, and if class_get_devdata()
57462 points to something in the body of that driver, it is also invalid. */
57463 struct mutex ops_lock;
57464 - struct backlight_ops *ops;
57465 + const struct backlight_ops *ops;
57466
57467 /* The framebuffer notifier block */
57468 struct notifier_block fb_notif;
57469 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
57470 }
57471
57472 extern struct backlight_device *backlight_device_register(const char *name,
57473 - struct device *dev, void *devdata, struct backlight_ops *ops);
57474 + struct device *dev, void *devdata, const struct backlight_ops *ops);
57475 extern void backlight_device_unregister(struct backlight_device *bd);
57476 extern void backlight_force_update(struct backlight_device *bd,
57477 enum backlight_update_reason reason);
57478 diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57479 --- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57480 +++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57481 @@ -83,6 +83,7 @@ struct linux_binfmt {
57482 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57483 int (*load_shlib)(struct file *);
57484 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57485 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57486 unsigned long min_coredump; /* minimal dump size */
57487 int hasvdso;
57488 };
57489 diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57490 --- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57491 +++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57492 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57493 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57494
57495 struct block_device_operations {
57496 - int (*open) (struct block_device *, fmode_t);
57497 - int (*release) (struct gendisk *, fmode_t);
57498 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57499 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57500 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57501 - int (*direct_access) (struct block_device *, sector_t,
57502 + int (* const open) (struct block_device *, fmode_t);
57503 + int (* const release) (struct gendisk *, fmode_t);
57504 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57505 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57506 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57507 + int (* const direct_access) (struct block_device *, sector_t,
57508 void **, unsigned long *);
57509 - int (*media_changed) (struct gendisk *);
57510 - unsigned long long (*set_capacity) (struct gendisk *,
57511 + int (* const media_changed) (struct gendisk *);
57512 + unsigned long long (* const set_capacity) (struct gendisk *,
57513 unsigned long long);
57514 - int (*revalidate_disk) (struct gendisk *);
57515 - int (*getgeo)(struct block_device *, struct hd_geometry *);
57516 - struct module *owner;
57517 + int (* const revalidate_disk) (struct gendisk *);
57518 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
57519 + struct module * const owner;
57520 };
57521
57522 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57523 diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57524 --- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57525 +++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57526 @@ -160,7 +160,7 @@ struct blk_trace {
57527 struct dentry *dir;
57528 struct dentry *dropped_file;
57529 struct dentry *msg_file;
57530 - atomic_t dropped;
57531 + atomic_unchecked_t dropped;
57532 };
57533
57534 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57535 diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57536 --- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57537 +++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57538 @@ -42,51 +42,51 @@
57539
57540 static inline __le64 __cpu_to_le64p(const __u64 *p)
57541 {
57542 - return (__force __le64)*p;
57543 + return (__force const __le64)*p;
57544 }
57545 static inline __u64 __le64_to_cpup(const __le64 *p)
57546 {
57547 - return (__force __u64)*p;
57548 + return (__force const __u64)*p;
57549 }
57550 static inline __le32 __cpu_to_le32p(const __u32 *p)
57551 {
57552 - return (__force __le32)*p;
57553 + return (__force const __le32)*p;
57554 }
57555 static inline __u32 __le32_to_cpup(const __le32 *p)
57556 {
57557 - return (__force __u32)*p;
57558 + return (__force const __u32)*p;
57559 }
57560 static inline __le16 __cpu_to_le16p(const __u16 *p)
57561 {
57562 - return (__force __le16)*p;
57563 + return (__force const __le16)*p;
57564 }
57565 static inline __u16 __le16_to_cpup(const __le16 *p)
57566 {
57567 - return (__force __u16)*p;
57568 + return (__force const __u16)*p;
57569 }
57570 static inline __be64 __cpu_to_be64p(const __u64 *p)
57571 {
57572 - return (__force __be64)__swab64p(p);
57573 + return (__force const __be64)__swab64p(p);
57574 }
57575 static inline __u64 __be64_to_cpup(const __be64 *p)
57576 {
57577 - return __swab64p((__u64 *)p);
57578 + return __swab64p((const __u64 *)p);
57579 }
57580 static inline __be32 __cpu_to_be32p(const __u32 *p)
57581 {
57582 - return (__force __be32)__swab32p(p);
57583 + return (__force const __be32)__swab32p(p);
57584 }
57585 static inline __u32 __be32_to_cpup(const __be32 *p)
57586 {
57587 - return __swab32p((__u32 *)p);
57588 + return __swab32p((const __u32 *)p);
57589 }
57590 static inline __be16 __cpu_to_be16p(const __u16 *p)
57591 {
57592 - return (__force __be16)__swab16p(p);
57593 + return (__force const __be16)__swab16p(p);
57594 }
57595 static inline __u16 __be16_to_cpup(const __be16 *p)
57596 {
57597 - return __swab16p((__u16 *)p);
57598 + return __swab16p((const __u16 *)p);
57599 }
57600 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57601 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57602 diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57603 --- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57604 +++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57605 @@ -16,6 +16,10 @@
57606 #define __read_mostly
57607 #endif
57608
57609 +#ifndef __read_only
57610 +#define __read_only __read_mostly
57611 +#endif
57612 +
57613 #ifndef ____cacheline_aligned
57614 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57615 #endif
57616 diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57617 --- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57618 +++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57619 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57620 (security_real_capable_noaudit((t), (cap)) == 0)
57621
57622 extern int capable(int cap);
57623 +int capable_nolog(int cap);
57624
57625 /* audit system wants to get cap info from files as well */
57626 struct dentry;
57627 diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57628 --- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57629 +++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57630 @@ -36,4 +36,13 @@
57631 the kernel context */
57632 #define __cold __attribute__((__cold__))
57633
57634 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57635 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57636 +#define __bos0(ptr) __bos((ptr), 0)
57637 +#define __bos1(ptr) __bos((ptr), 1)
57638 +
57639 +#if __GNUC_MINOR__ >= 5
57640 +#define __no_const __attribute__((no_const))
57641 +#endif
57642 +
57643 #endif
57644 diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57645 --- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57646 +++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57647 @@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57648 # define __attribute_const__ /* unimplemented */
57649 #endif
57650
57651 +#ifndef __no_const
57652 +# define __no_const
57653 +#endif
57654 +
57655 /*
57656 * Tell gcc if a function is cold. The compiler will assume any path
57657 * directly leading to the call is unlikely.
57658 @@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57659 #define __cold
57660 #endif
57661
57662 +#ifndef __alloc_size
57663 +#define __alloc_size(...)
57664 +#endif
57665 +
57666 +#ifndef __bos
57667 +#define __bos(ptr, arg)
57668 +#endif
57669 +
57670 +#ifndef __bos0
57671 +#define __bos0(ptr)
57672 +#endif
57673 +
57674 +#ifndef __bos1
57675 +#define __bos1(ptr)
57676 +#endif
57677 +
57678 /* Simple shorthand for a section definition */
57679 #ifndef __section
57680 # define __section(S) __attribute__ ((__section__(#S)))
57681 @@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57682 * use is to mediate communication between process-level code and irq/NMI
57683 * handlers, all running on the same CPU.
57684 */
57685 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57686 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57687 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57688
57689 #endif /* __LINUX_COMPILER_H */
57690 diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57691 --- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57692 +++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57693 @@ -394,7 +394,7 @@ struct cipher_tfm {
57694 const u8 *key, unsigned int keylen);
57695 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57696 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57697 -};
57698 +} __no_const;
57699
57700 struct hash_tfm {
57701 int (*init)(struct hash_desc *desc);
57702 @@ -415,13 +415,13 @@ struct compress_tfm {
57703 int (*cot_decompress)(struct crypto_tfm *tfm,
57704 const u8 *src, unsigned int slen,
57705 u8 *dst, unsigned int *dlen);
57706 -};
57707 +} __no_const;
57708
57709 struct rng_tfm {
57710 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57711 unsigned int dlen);
57712 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57713 -};
57714 +} __no_const;
57715
57716 #define crt_ablkcipher crt_u.ablkcipher
57717 #define crt_aead crt_u.aead
57718 diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57719 --- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57720 +++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57721 @@ -119,6 +119,8 @@ struct dentry {
57722 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57723 };
57724
57725 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57726 +
57727 /*
57728 * dentry->d_lock spinlock nesting subclasses:
57729 *
57730 diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57731 --- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57732 +++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57733 @@ -78,7 +78,7 @@ static void free(void *where)
57734 * warnings when not needed (indeed large_malloc / large_free are not
57735 * needed by inflate */
57736
57737 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57738 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57739 #define free(a) kfree(a)
57740
57741 #define large_malloc(a) vmalloc(a)
57742 diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57743 --- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57744 +++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57745 @@ -16,50 +16,50 @@ enum dma_data_direction {
57746 };
57747
57748 struct dma_map_ops {
57749 - void* (*alloc_coherent)(struct device *dev, size_t size,
57750 + void* (* const alloc_coherent)(struct device *dev, size_t size,
57751 dma_addr_t *dma_handle, gfp_t gfp);
57752 - void (*free_coherent)(struct device *dev, size_t size,
57753 + void (* const free_coherent)(struct device *dev, size_t size,
57754 void *vaddr, dma_addr_t dma_handle);
57755 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
57756 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57757 unsigned long offset, size_t size,
57758 enum dma_data_direction dir,
57759 struct dma_attrs *attrs);
57760 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57761 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57762 size_t size, enum dma_data_direction dir,
57763 struct dma_attrs *attrs);
57764 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
57765 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57766 int nents, enum dma_data_direction dir,
57767 struct dma_attrs *attrs);
57768 - void (*unmap_sg)(struct device *dev,
57769 + void (* const unmap_sg)(struct device *dev,
57770 struct scatterlist *sg, int nents,
57771 enum dma_data_direction dir,
57772 struct dma_attrs *attrs);
57773 - void (*sync_single_for_cpu)(struct device *dev,
57774 + void (* const sync_single_for_cpu)(struct device *dev,
57775 dma_addr_t dma_handle, size_t size,
57776 enum dma_data_direction dir);
57777 - void (*sync_single_for_device)(struct device *dev,
57778 + void (* const sync_single_for_device)(struct device *dev,
57779 dma_addr_t dma_handle, size_t size,
57780 enum dma_data_direction dir);
57781 - void (*sync_single_range_for_cpu)(struct device *dev,
57782 + void (* const sync_single_range_for_cpu)(struct device *dev,
57783 dma_addr_t dma_handle,
57784 unsigned long offset,
57785 size_t size,
57786 enum dma_data_direction dir);
57787 - void (*sync_single_range_for_device)(struct device *dev,
57788 + void (* const sync_single_range_for_device)(struct device *dev,
57789 dma_addr_t dma_handle,
57790 unsigned long offset,
57791 size_t size,
57792 enum dma_data_direction dir);
57793 - void (*sync_sg_for_cpu)(struct device *dev,
57794 + void (* const sync_sg_for_cpu)(struct device *dev,
57795 struct scatterlist *sg, int nents,
57796 enum dma_data_direction dir);
57797 - void (*sync_sg_for_device)(struct device *dev,
57798 + void (* const sync_sg_for_device)(struct device *dev,
57799 struct scatterlist *sg, int nents,
57800 enum dma_data_direction dir);
57801 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57802 - int (*dma_supported)(struct device *dev, u64 mask);
57803 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57804 + int (* const dma_supported)(struct device *dev, u64 mask);
57805 int (*set_dma_mask)(struct device *dev, u64 mask);
57806 - int is_phys;
57807 + const int is_phys;
57808 };
57809
57810 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57811 diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57812 --- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57813 +++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57814 @@ -380,7 +380,7 @@ struct dst_node
57815 struct thread_pool *pool;
57816
57817 /* Transaction IDs live here */
57818 - atomic_long_t gen;
57819 + atomic_long_unchecked_t gen;
57820
57821 /*
57822 * How frequently and how many times transaction
57823 diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57824 --- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57825 +++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57826 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57827 #define PT_GNU_EH_FRAME 0x6474e550
57828
57829 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57830 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57831 +
57832 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57833 +
57834 +/* Constants for the e_flags field */
57835 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57836 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57837 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57838 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57839 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57840 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57841
57842 /* These constants define the different elf file types */
57843 #define ET_NONE 0
57844 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57845 #define DT_DEBUG 21
57846 #define DT_TEXTREL 22
57847 #define DT_JMPREL 23
57848 +#define DT_FLAGS 30
57849 + #define DF_TEXTREL 0x00000004
57850 #define DT_ENCODING 32
57851 #define OLD_DT_LOOS 0x60000000
57852 #define DT_LOOS 0x6000000d
57853 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57854 #define PF_W 0x2
57855 #define PF_X 0x1
57856
57857 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57858 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57859 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57860 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57861 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57862 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57863 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57864 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57865 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57866 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57867 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57868 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57869 +
57870 typedef struct elf32_phdr{
57871 Elf32_Word p_type;
57872 Elf32_Off p_offset;
57873 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57874 #define EI_OSABI 7
57875 #define EI_PAD 8
57876
57877 +#define EI_PAX 14
57878 +
57879 #define ELFMAG0 0x7f /* EI_MAG */
57880 #define ELFMAG1 'E'
57881 #define ELFMAG2 'L'
57882 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57883 #define elf_phdr elf32_phdr
57884 #define elf_note elf32_note
57885 #define elf_addr_t Elf32_Off
57886 +#define elf_dyn Elf32_Dyn
57887
57888 #else
57889
57890 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57891 #define elf_phdr elf64_phdr
57892 #define elf_note elf64_note
57893 #define elf_addr_t Elf64_Off
57894 +#define elf_dyn Elf64_Dyn
57895
57896 #endif
57897
57898 diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57899 --- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57900 +++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57901 @@ -116,7 +116,7 @@ struct fscache_operation {
57902 #endif
57903 };
57904
57905 -extern atomic_t fscache_op_debug_id;
57906 +extern atomic_unchecked_t fscache_op_debug_id;
57907 extern const struct slow_work_ops fscache_op_slow_work_ops;
57908
57909 extern void fscache_enqueue_operation(struct fscache_operation *);
57910 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57911 fscache_operation_release_t release)
57912 {
57913 atomic_set(&op->usage, 1);
57914 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57915 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57916 op->release = release;
57917 INIT_LIST_HEAD(&op->pend_link);
57918 fscache_set_op_state(op, "Init");
57919 diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57920 --- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57921 +++ linux-2.6.32.45/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
57922 @@ -90,6 +90,11 @@ struct inodes_stat_t {
57923 /* Expect random access pattern */
57924 #define FMODE_RANDOM ((__force fmode_t)4096)
57925
57926 +/* Hack for grsec so as not to require read permission simply to execute
57927 + * a binary
57928 + */
57929 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57930 +
57931 /*
57932 * The below are the various read and write types that we support. Some of
57933 * them include behavioral modifiers that send information down to the
57934 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57935 unsigned long, unsigned long);
57936
57937 struct address_space_operations {
57938 - int (*writepage)(struct page *page, struct writeback_control *wbc);
57939 - int (*readpage)(struct file *, struct page *);
57940 - void (*sync_page)(struct page *);
57941 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
57942 + int (* const readpage)(struct file *, struct page *);
57943 + void (* const sync_page)(struct page *);
57944
57945 /* Write back some dirty pages from this mapping. */
57946 - int (*writepages)(struct address_space *, struct writeback_control *);
57947 + int (* const writepages)(struct address_space *, struct writeback_control *);
57948
57949 /* Set a page dirty. Return true if this dirtied it */
57950 - int (*set_page_dirty)(struct page *page);
57951 + int (* const set_page_dirty)(struct page *page);
57952
57953 - int (*readpages)(struct file *filp, struct address_space *mapping,
57954 + int (* const readpages)(struct file *filp, struct address_space *mapping,
57955 struct list_head *pages, unsigned nr_pages);
57956
57957 - int (*write_begin)(struct file *, struct address_space *mapping,
57958 + int (* const write_begin)(struct file *, struct address_space *mapping,
57959 loff_t pos, unsigned len, unsigned flags,
57960 struct page **pagep, void **fsdata);
57961 - int (*write_end)(struct file *, struct address_space *mapping,
57962 + int (* const write_end)(struct file *, struct address_space *mapping,
57963 loff_t pos, unsigned len, unsigned copied,
57964 struct page *page, void *fsdata);
57965
57966 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57967 - sector_t (*bmap)(struct address_space *, sector_t);
57968 - void (*invalidatepage) (struct page *, unsigned long);
57969 - int (*releasepage) (struct page *, gfp_t);
57970 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57971 + sector_t (* const bmap)(struct address_space *, sector_t);
57972 + void (* const invalidatepage) (struct page *, unsigned long);
57973 + int (* const releasepage) (struct page *, gfp_t);
57974 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57975 loff_t offset, unsigned long nr_segs);
57976 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57977 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57978 void **, unsigned long *);
57979 /* migrate the contents of a page to the specified target */
57980 - int (*migratepage) (struct address_space *,
57981 + int (* const migratepage) (struct address_space *,
57982 struct page *, struct page *);
57983 - int (*launder_page) (struct page *);
57984 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57985 + int (* const launder_page) (struct page *);
57986 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57987 unsigned long);
57988 - int (*error_remove_page)(struct address_space *, struct page *);
57989 + int (* const error_remove_page)(struct address_space *, struct page *);
57990 };
57991
57992 /*
57993 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57994 typedef struct files_struct *fl_owner_t;
57995
57996 struct file_lock_operations {
57997 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57998 - void (*fl_release_private)(struct file_lock *);
57999 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58000 + void (* const fl_release_private)(struct file_lock *);
58001 };
58002
58003 struct lock_manager_operations {
58004 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
58005 - void (*fl_notify)(struct file_lock *); /* unblock callback */
58006 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
58007 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58008 - void (*fl_release_private)(struct file_lock *);
58009 - void (*fl_break)(struct file_lock *);
58010 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
58011 - int (*fl_change)(struct file_lock **, int);
58012 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
58013 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
58014 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
58015 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58016 + void (* const fl_release_private)(struct file_lock *);
58017 + void (* const fl_break)(struct file_lock *);
58018 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
58019 + int (* const fl_change)(struct file_lock **, int);
58020 };
58021
58022 struct lock_manager {
58023 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
58024 unsigned int fi_flags; /* Flags as passed from user */
58025 unsigned int fi_extents_mapped; /* Number of mapped extents */
58026 unsigned int fi_extents_max; /* Size of fiemap_extent array */
58027 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
58028 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
58029 * array */
58030 };
58031 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
58032 @@ -1486,7 +1491,7 @@ struct block_device_operations;
58033 * can be called without the big kernel lock held in all filesystems.
58034 */
58035 struct file_operations {
58036 - struct module *owner;
58037 + struct module * const owner;
58038 loff_t (*llseek) (struct file *, loff_t, int);
58039 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58040 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
58041 @@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
58042 unsigned long, loff_t *);
58043
58044 struct super_operations {
58045 - struct inode *(*alloc_inode)(struct super_block *sb);
58046 - void (*destroy_inode)(struct inode *);
58047 + struct inode *(* const alloc_inode)(struct super_block *sb);
58048 + void (* const destroy_inode)(struct inode *);
58049
58050 - void (*dirty_inode) (struct inode *);
58051 - int (*write_inode) (struct inode *, int);
58052 - void (*drop_inode) (struct inode *);
58053 - void (*delete_inode) (struct inode *);
58054 - void (*put_super) (struct super_block *);
58055 - void (*write_super) (struct super_block *);
58056 - int (*sync_fs)(struct super_block *sb, int wait);
58057 - int (*freeze_fs) (struct super_block *);
58058 - int (*unfreeze_fs) (struct super_block *);
58059 - int (*statfs) (struct dentry *, struct kstatfs *);
58060 - int (*remount_fs) (struct super_block *, int *, char *);
58061 - void (*clear_inode) (struct inode *);
58062 - void (*umount_begin) (struct super_block *);
58063 + void (* const dirty_inode) (struct inode *);
58064 + int (* const write_inode) (struct inode *, int);
58065 + void (* const drop_inode) (struct inode *);
58066 + void (* const delete_inode) (struct inode *);
58067 + void (* const put_super) (struct super_block *);
58068 + void (* const write_super) (struct super_block *);
58069 + int (* const sync_fs)(struct super_block *sb, int wait);
58070 + int (* const freeze_fs) (struct super_block *);
58071 + int (* const unfreeze_fs) (struct super_block *);
58072 + int (* const statfs) (struct dentry *, struct kstatfs *);
58073 + int (* const remount_fs) (struct super_block *, int *, char *);
58074 + void (* const clear_inode) (struct inode *);
58075 + void (* const umount_begin) (struct super_block *);
58076
58077 - int (*show_options)(struct seq_file *, struct vfsmount *);
58078 - int (*show_stats)(struct seq_file *, struct vfsmount *);
58079 + int (* const show_options)(struct seq_file *, struct vfsmount *);
58080 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
58081 #ifdef CONFIG_QUOTA
58082 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58083 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58084 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58085 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58086 #endif
58087 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58088 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58089 };
58090
58091 /*
58092 diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
58093 --- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58094 +++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58095 @@ -4,7 +4,7 @@
58096 #include <linux/path.h>
58097
58098 struct fs_struct {
58099 - int users;
58100 + atomic_t users;
58101 rwlock_t lock;
58102 int umask;
58103 int in_exec;
58104 diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
58105 --- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58106 +++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58107 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58108 int filter_type);
58109 extern int trace_define_common_fields(struct ftrace_event_call *call);
58110
58111 -#define is_signed_type(type) (((type)(-1)) < 0)
58112 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58113
58114 int trace_set_clr_event(const char *system, const char *event, int set);
58115
58116 diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
58117 --- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58118 +++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58119 @@ -161,7 +161,7 @@ struct gendisk {
58120
58121 struct timer_rand_state *random;
58122
58123 - atomic_t sync_io; /* RAID */
58124 + atomic_unchecked_t sync_io; /* RAID */
58125 struct work_struct async_notify;
58126 #ifdef CONFIG_BLK_DEV_INTEGRITY
58127 struct blk_integrity *integrity;
58128 diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
58129 --- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58130 +++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58131 @@ -0,0 +1,317 @@
58132 +#ifndef GR_ACL_H
58133 +#define GR_ACL_H
58134 +
58135 +#include <linux/grdefs.h>
58136 +#include <linux/resource.h>
58137 +#include <linux/capability.h>
58138 +#include <linux/dcache.h>
58139 +#include <asm/resource.h>
58140 +
58141 +/* Major status information */
58142 +
58143 +#define GR_VERSION "grsecurity 2.2.2"
58144 +#define GRSECURITY_VERSION 0x2202
58145 +
58146 +enum {
58147 + GR_SHUTDOWN = 0,
58148 + GR_ENABLE = 1,
58149 + GR_SPROLE = 2,
58150 + GR_RELOAD = 3,
58151 + GR_SEGVMOD = 4,
58152 + GR_STATUS = 5,
58153 + GR_UNSPROLE = 6,
58154 + GR_PASSSET = 7,
58155 + GR_SPROLEPAM = 8,
58156 +};
58157 +
58158 +/* Password setup definitions
58159 + * kernel/grhash.c */
58160 +enum {
58161 + GR_PW_LEN = 128,
58162 + GR_SALT_LEN = 16,
58163 + GR_SHA_LEN = 32,
58164 +};
58165 +
58166 +enum {
58167 + GR_SPROLE_LEN = 64,
58168 +};
58169 +
58170 +enum {
58171 + GR_NO_GLOB = 0,
58172 + GR_REG_GLOB,
58173 + GR_CREATE_GLOB
58174 +};
58175 +
58176 +#define GR_NLIMITS 32
58177 +
58178 +/* Begin Data Structures */
58179 +
58180 +struct sprole_pw {
58181 + unsigned char *rolename;
58182 + unsigned char salt[GR_SALT_LEN];
58183 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58184 +};
58185 +
58186 +struct name_entry {
58187 + __u32 key;
58188 + ino_t inode;
58189 + dev_t device;
58190 + char *name;
58191 + __u16 len;
58192 + __u8 deleted;
58193 + struct name_entry *prev;
58194 + struct name_entry *next;
58195 +};
58196 +
58197 +struct inodev_entry {
58198 + struct name_entry *nentry;
58199 + struct inodev_entry *prev;
58200 + struct inodev_entry *next;
58201 +};
58202 +
58203 +struct acl_role_db {
58204 + struct acl_role_label **r_hash;
58205 + __u32 r_size;
58206 +};
58207 +
58208 +struct inodev_db {
58209 + struct inodev_entry **i_hash;
58210 + __u32 i_size;
58211 +};
58212 +
58213 +struct name_db {
58214 + struct name_entry **n_hash;
58215 + __u32 n_size;
58216 +};
58217 +
58218 +struct crash_uid {
58219 + uid_t uid;
58220 + unsigned long expires;
58221 +};
58222 +
58223 +struct gr_hash_struct {
58224 + void **table;
58225 + void **nametable;
58226 + void *first;
58227 + __u32 table_size;
58228 + __u32 used_size;
58229 + int type;
58230 +};
58231 +
58232 +/* Userspace Grsecurity ACL data structures */
58233 +
58234 +struct acl_subject_label {
58235 + char *filename;
58236 + ino_t inode;
58237 + dev_t device;
58238 + __u32 mode;
58239 + kernel_cap_t cap_mask;
58240 + kernel_cap_t cap_lower;
58241 + kernel_cap_t cap_invert_audit;
58242 +
58243 + struct rlimit res[GR_NLIMITS];
58244 + __u32 resmask;
58245 +
58246 + __u8 user_trans_type;
58247 + __u8 group_trans_type;
58248 + uid_t *user_transitions;
58249 + gid_t *group_transitions;
58250 + __u16 user_trans_num;
58251 + __u16 group_trans_num;
58252 +
58253 + __u32 sock_families[2];
58254 + __u32 ip_proto[8];
58255 + __u32 ip_type;
58256 + struct acl_ip_label **ips;
58257 + __u32 ip_num;
58258 + __u32 inaddr_any_override;
58259 +
58260 + __u32 crashes;
58261 + unsigned long expires;
58262 +
58263 + struct acl_subject_label *parent_subject;
58264 + struct gr_hash_struct *hash;
58265 + struct acl_subject_label *prev;
58266 + struct acl_subject_label *next;
58267 +
58268 + struct acl_object_label **obj_hash;
58269 + __u32 obj_hash_size;
58270 + __u16 pax_flags;
58271 +};
58272 +
58273 +struct role_allowed_ip {
58274 + __u32 addr;
58275 + __u32 netmask;
58276 +
58277 + struct role_allowed_ip *prev;
58278 + struct role_allowed_ip *next;
58279 +};
58280 +
58281 +struct role_transition {
58282 + char *rolename;
58283 +
58284 + struct role_transition *prev;
58285 + struct role_transition *next;
58286 +};
58287 +
58288 +struct acl_role_label {
58289 + char *rolename;
58290 + uid_t uidgid;
58291 + __u16 roletype;
58292 +
58293 + __u16 auth_attempts;
58294 + unsigned long expires;
58295 +
58296 + struct acl_subject_label *root_label;
58297 + struct gr_hash_struct *hash;
58298 +
58299 + struct acl_role_label *prev;
58300 + struct acl_role_label *next;
58301 +
58302 + struct role_transition *transitions;
58303 + struct role_allowed_ip *allowed_ips;
58304 + uid_t *domain_children;
58305 + __u16 domain_child_num;
58306 +
58307 + struct acl_subject_label **subj_hash;
58308 + __u32 subj_hash_size;
58309 +};
58310 +
58311 +struct user_acl_role_db {
58312 + struct acl_role_label **r_table;
58313 + __u32 num_pointers; /* Number of allocations to track */
58314 + __u32 num_roles; /* Number of roles */
58315 + __u32 num_domain_children; /* Number of domain children */
58316 + __u32 num_subjects; /* Number of subjects */
58317 + __u32 num_objects; /* Number of objects */
58318 +};
58319 +
58320 +struct acl_object_label {
58321 + char *filename;
58322 + ino_t inode;
58323 + dev_t device;
58324 + __u32 mode;
58325 +
58326 + struct acl_subject_label *nested;
58327 + struct acl_object_label *globbed;
58328 +
58329 + /* next two structures not used */
58330 +
58331 + struct acl_object_label *prev;
58332 + struct acl_object_label *next;
58333 +};
58334 +
58335 +struct acl_ip_label {
58336 + char *iface;
58337 + __u32 addr;
58338 + __u32 netmask;
58339 + __u16 low, high;
58340 + __u8 mode;
58341 + __u32 type;
58342 + __u32 proto[8];
58343 +
58344 + /* next two structures not used */
58345 +
58346 + struct acl_ip_label *prev;
58347 + struct acl_ip_label *next;
58348 +};
58349 +
58350 +struct gr_arg {
58351 + struct user_acl_role_db role_db;
58352 + unsigned char pw[GR_PW_LEN];
58353 + unsigned char salt[GR_SALT_LEN];
58354 + unsigned char sum[GR_SHA_LEN];
58355 + unsigned char sp_role[GR_SPROLE_LEN];
58356 + struct sprole_pw *sprole_pws;
58357 + dev_t segv_device;
58358 + ino_t segv_inode;
58359 + uid_t segv_uid;
58360 + __u16 num_sprole_pws;
58361 + __u16 mode;
58362 +};
58363 +
58364 +struct gr_arg_wrapper {
58365 + struct gr_arg *arg;
58366 + __u32 version;
58367 + __u32 size;
58368 +};
58369 +
58370 +struct subject_map {
58371 + struct acl_subject_label *user;
58372 + struct acl_subject_label *kernel;
58373 + struct subject_map *prev;
58374 + struct subject_map *next;
58375 +};
58376 +
58377 +struct acl_subj_map_db {
58378 + struct subject_map **s_hash;
58379 + __u32 s_size;
58380 +};
58381 +
58382 +/* End Data Structures Section */
58383 +
58384 +/* Hash functions generated by empirical testing by Brad Spengler
58385 + Makes good use of the low bits of the inode. Generally 0-1 times
58386 + in loop for successful match. 0-3 for unsuccessful match.
58387 + Shift/add algorithm with modulus of table size and an XOR*/
58388 +
58389 +static __inline__ unsigned int
58390 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58391 +{
58392 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58393 +}
58394 +
58395 + static __inline__ unsigned int
58396 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58397 +{
58398 + return ((const unsigned long)userp % sz);
58399 +}
58400 +
58401 +static __inline__ unsigned int
58402 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58403 +{
58404 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58405 +}
58406 +
58407 +static __inline__ unsigned int
58408 +nhash(const char *name, const __u16 len, const unsigned int sz)
58409 +{
58410 + return full_name_hash((const unsigned char *)name, len) % sz;
58411 +}
58412 +
58413 +#define FOR_EACH_ROLE_START(role) \
58414 + role = role_list; \
58415 + while (role) {
58416 +
58417 +#define FOR_EACH_ROLE_END(role) \
58418 + role = role->prev; \
58419 + }
58420 +
58421 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58422 + subj = NULL; \
58423 + iter = 0; \
58424 + while (iter < role->subj_hash_size) { \
58425 + if (subj == NULL) \
58426 + subj = role->subj_hash[iter]; \
58427 + if (subj == NULL) { \
58428 + iter++; \
58429 + continue; \
58430 + }
58431 +
58432 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58433 + subj = subj->next; \
58434 + if (subj == NULL) \
58435 + iter++; \
58436 + }
58437 +
58438 +
58439 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58440 + subj = role->hash->first; \
58441 + while (subj != NULL) {
58442 +
58443 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58444 + subj = subj->next; \
58445 + }
58446 +
58447 +#endif
58448 +
58449 diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58450 --- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58451 +++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58452 @@ -0,0 +1,9 @@
58453 +#ifndef __GRALLOC_H
58454 +#define __GRALLOC_H
58455 +
58456 +void acl_free_all(void);
58457 +int acl_alloc_stack_init(unsigned long size);
58458 +void *acl_alloc(unsigned long len);
58459 +void *acl_alloc_num(unsigned long num, unsigned long len);
58460 +
58461 +#endif
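Illustrative aside (not part of the patch): gralloc.h only declares an arena-style allocator -- everything handed out by acl_alloc()/acl_alloc_num() is released in a single acl_free_all() call. A sketch of the implied call pattern, assuming acl_alloc_stack_init() returns nonzero on success (the function name and object counts below are hypothetical):

static int demo_load_policy(unsigned long num_objects)
{
	struct acl_ip_label *label;
	void **table;

	if (!acl_alloc_stack_init(num_objects))		/* size the arena once */
		return -ENOMEM;

	label = acl_alloc(sizeof(*label));			/* one object */
	table = acl_alloc_num(num_objects, sizeof(*table));	/* an array */
	if (label == NULL || table == NULL) {
		acl_free_all();		/* frees every prior acl_alloc() */
		return -ENOMEM;
	}

	/* ... populate the objects from the userland policy ... */

	acl_free_all();		/* in real use, deferred until policy unload */
	return 0;
}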
58462 diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58463 --- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58464 +++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58465 @@ -0,0 +1,140 @@
58466 +#ifndef GRDEFS_H
58467 +#define GRDEFS_H
58468 +
58469 +/* Begin grsecurity status declarations */
58470 +
58471 +enum {
58472 + GR_READY = 0x01,
58473 +	GR_STATUS_INIT = 0x00	/* disabled state */
58474 +};
58475 +
58476 +/* Begin ACL declarations */
58477 +
58478 +/* Role flags */
58479 +
58480 +enum {
58481 + GR_ROLE_USER = 0x0001,
58482 + GR_ROLE_GROUP = 0x0002,
58483 + GR_ROLE_DEFAULT = 0x0004,
58484 + GR_ROLE_SPECIAL = 0x0008,
58485 + GR_ROLE_AUTH = 0x0010,
58486 + GR_ROLE_NOPW = 0x0020,
58487 + GR_ROLE_GOD = 0x0040,
58488 + GR_ROLE_LEARN = 0x0080,
58489 + GR_ROLE_TPE = 0x0100,
58490 + GR_ROLE_DOMAIN = 0x0200,
58491 + GR_ROLE_PAM = 0x0400,
58492 +	GR_ROLE_PERSIST = 0x0800
58493 +};
58494 +
58495 +/* ACL Subject and Object mode flags */
58496 +enum {
58497 + GR_DELETED = 0x80000000
58498 +};
58499 +
58500 +/* ACL Object-only mode flags */
58501 +enum {
58502 + GR_READ = 0x00000001,
58503 + GR_APPEND = 0x00000002,
58504 + GR_WRITE = 0x00000004,
58505 + GR_EXEC = 0x00000008,
58506 + GR_FIND = 0x00000010,
58507 + GR_INHERIT = 0x00000020,
58508 + GR_SETID = 0x00000040,
58509 + GR_CREATE = 0x00000080,
58510 + GR_DELETE = 0x00000100,
58511 + GR_LINK = 0x00000200,
58512 + GR_AUDIT_READ = 0x00000400,
58513 + GR_AUDIT_APPEND = 0x00000800,
58514 + GR_AUDIT_WRITE = 0x00001000,
58515 + GR_AUDIT_EXEC = 0x00002000,
58516 + GR_AUDIT_FIND = 0x00004000,
58517 + GR_AUDIT_INHERIT= 0x00008000,
58518 + GR_AUDIT_SETID = 0x00010000,
58519 + GR_AUDIT_CREATE = 0x00020000,
58520 + GR_AUDIT_DELETE = 0x00040000,
58521 + GR_AUDIT_LINK = 0x00080000,
58522 + GR_PTRACERD = 0x00100000,
58523 + GR_NOPTRACE = 0x00200000,
58524 + GR_SUPPRESS = 0x00400000,
58525 + GR_NOLEARN = 0x00800000,
58526 + GR_INIT_TRANSFER= 0x01000000
58527 +};
58528 +
58529 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58530 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58531 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58532 +
58533 +/* ACL subject-only mode flags */
58534 +enum {
58535 + GR_KILL = 0x00000001,
58536 + GR_VIEW = 0x00000002,
58537 + GR_PROTECTED = 0x00000004,
58538 + GR_LEARN = 0x00000008,
58539 + GR_OVERRIDE = 0x00000010,
58540 + /* just a placeholder, this mode is only used in userspace */
58541 + GR_DUMMY = 0x00000020,
58542 + GR_PROTSHM = 0x00000040,
58543 + GR_KILLPROC = 0x00000080,
58544 + GR_KILLIPPROC = 0x00000100,
58545 + /* just a placeholder, this mode is only used in userspace */
58546 + GR_NOTROJAN = 0x00000200,
58547 + GR_PROTPROCFD = 0x00000400,
58548 + GR_PROCACCT = 0x00000800,
58549 + GR_RELAXPTRACE = 0x00001000,
58550 + GR_NESTED = 0x00002000,
58551 + GR_INHERITLEARN = 0x00004000,
58552 + GR_PROCFIND = 0x00008000,
58553 + GR_POVERRIDE = 0x00010000,
58554 + GR_KERNELAUTH = 0x00020000,
58555 + GR_ATSECURE = 0x00040000,
58556 + GR_SHMEXEC = 0x00080000
58557 +};
58558 +
58559 +enum {
58560 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58561 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58562 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58563 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58564 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58565 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58566 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58567 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58568 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58569 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58570 +};
58571 +
58572 +enum {
58573 + GR_ID_USER = 0x01,
58574 + GR_ID_GROUP = 0x02,
58575 +};
58576 +
58577 +enum {
58578 + GR_ID_ALLOW = 0x01,
58579 + GR_ID_DENY = 0x02,
58580 +};
58581 +
58582 +#define GR_CRASH_RES 31
58583 +#define GR_UIDTABLE_MAX 500
58584 +
58585 +/* begin resource learning section */
58586 +enum {
58587 + GR_RLIM_CPU_BUMP = 60,
58588 + GR_RLIM_FSIZE_BUMP = 50000,
58589 + GR_RLIM_DATA_BUMP = 10000,
58590 + GR_RLIM_STACK_BUMP = 1000,
58591 + GR_RLIM_CORE_BUMP = 10000,
58592 + GR_RLIM_RSS_BUMP = 500000,
58593 + GR_RLIM_NPROC_BUMP = 1,
58594 + GR_RLIM_NOFILE_BUMP = 5,
58595 + GR_RLIM_MEMLOCK_BUMP = 50000,
58596 + GR_RLIM_AS_BUMP = 500000,
58597 + GR_RLIM_LOCKS_BUMP = 2,
58598 + GR_RLIM_SIGPENDING_BUMP = 5,
58599 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58600 + GR_RLIM_NICE_BUMP = 1,
58601 + GR_RLIM_RTPRIO_BUMP = 1,
58602 + GR_RLIM_RTTIME_BUMP = 1000000
58603 +};
58604 +
58605 +#endif
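Illustrative aside (not part of the patch): the object-mode bits and their audit counterparts above are laid out exactly ten bits apart (GR_READ is 0x1 and GR_AUDIT_READ is 0x400, through GR_LINK at 0x200 and GR_AUDIT_LINK at 0x80000), so a requested-access mask can be converted to the matching audit mask with one shift. A plausible helper -- not necessarily the one the patch implements elsewhere -- would be:

static __u32 demo_to_gr_audit(const __u32 reqmode)
{
	/* GR_READ..GR_LINK live in bits 0-9; their audit twins in bits 10-19 */
	return (reqmode & (GR_READ | GR_APPEND | GR_WRITE | GR_EXEC |
			   GR_FIND | GR_INHERIT | GR_SETID | GR_CREATE |
			   GR_DELETE | GR_LINK)) << 10;
}

Every value this returns is covered by the GR_AUDITS mask defined above.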
58606 diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58607 --- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58608 +++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58609 @@ -0,0 +1,217 @@
58610 +#ifndef __GRINTERNAL_H
58611 +#define __GRINTERNAL_H
58612 +
58613 +#ifdef CONFIG_GRKERNSEC
58614 +
58615 +#include <linux/fs.h>
58616 +#include <linux/mnt_namespace.h>
58617 +#include <linux/nsproxy.h>
58618 +#include <linux/gracl.h>
58619 +#include <linux/grdefs.h>
58620 +#include <linux/grmsg.h>
58621 +
58622 +void gr_add_learn_entry(const char *fmt, ...)
58623 + __attribute__ ((format (printf, 1, 2)));
58624 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58625 + const struct vfsmount *mnt);
58626 +__u32 gr_check_create(const struct dentry *new_dentry,
58627 + const struct dentry *parent,
58628 + const struct vfsmount *mnt, const __u32 mode);
58629 +int gr_check_protected_task(const struct task_struct *task);
58630 +__u32 to_gr_audit(const __u32 reqmode);
58631 +int gr_set_acls(const int type);
58632 +int gr_apply_subject_to_task(struct task_struct *task);
58633 +int gr_acl_is_enabled(void);
58634 +char gr_roletype_to_char(void);
58635 +
58636 +void gr_handle_alertkill(struct task_struct *task);
58637 +char *gr_to_filename(const struct dentry *dentry,
58638 + const struct vfsmount *mnt);
58639 +char *gr_to_filename1(const struct dentry *dentry,
58640 + const struct vfsmount *mnt);
58641 +char *gr_to_filename2(const struct dentry *dentry,
58642 + const struct vfsmount *mnt);
58643 +char *gr_to_filename3(const struct dentry *dentry,
58644 + const struct vfsmount *mnt);
58645 +
58646 +extern int grsec_enable_harden_ptrace;
58647 +extern int grsec_enable_link;
58648 +extern int grsec_enable_fifo;
58649 +extern int grsec_enable_shm;
58650 +extern int grsec_enable_execlog;
58651 +extern int grsec_enable_signal;
58652 +extern int grsec_enable_audit_ptrace;
58653 +extern int grsec_enable_forkfail;
58654 +extern int grsec_enable_time;
58655 +extern int grsec_enable_rofs;
58656 +extern int grsec_enable_chroot_shmat;
58657 +extern int grsec_enable_chroot_mount;
58658 +extern int grsec_enable_chroot_double;
58659 +extern int grsec_enable_chroot_pivot;
58660 +extern int grsec_enable_chroot_chdir;
58661 +extern int grsec_enable_chroot_chmod;
58662 +extern int grsec_enable_chroot_mknod;
58663 +extern int grsec_enable_chroot_fchdir;
58664 +extern int grsec_enable_chroot_nice;
58665 +extern int grsec_enable_chroot_execlog;
58666 +extern int grsec_enable_chroot_caps;
58667 +extern int grsec_enable_chroot_sysctl;
58668 +extern int grsec_enable_chroot_unix;
58669 +extern int grsec_enable_tpe;
58670 +extern int grsec_tpe_gid;
58671 +extern int grsec_enable_tpe_all;
58672 +extern int grsec_enable_tpe_invert;
58673 +extern int grsec_enable_socket_all;
58674 +extern int grsec_socket_all_gid;
58675 +extern int grsec_enable_socket_client;
58676 +extern int grsec_socket_client_gid;
58677 +extern int grsec_enable_socket_server;
58678 +extern int grsec_socket_server_gid;
58679 +extern int grsec_audit_gid;
58680 +extern int grsec_enable_group;
58681 +extern int grsec_enable_audit_textrel;
58682 +extern int grsec_enable_log_rwxmaps;
58683 +extern int grsec_enable_mount;
58684 +extern int grsec_enable_chdir;
58685 +extern int grsec_resource_logging;
58686 +extern int grsec_enable_blackhole;
58687 +extern int grsec_lastack_retries;
58688 +extern int grsec_enable_brute;
58689 +extern int grsec_lock;
58690 +
58691 +extern spinlock_t grsec_alert_lock;
58692 +extern unsigned long grsec_alert_wtime;
58693 +extern unsigned long grsec_alert_fyet;
58694 +
58695 +extern spinlock_t grsec_audit_lock;
58696 +
58697 +extern rwlock_t grsec_exec_file_lock;
58698 +
58699 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58700 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58701 + (tsk)->exec_file->f_vfsmnt) : "/")
58702 +
58703 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58704 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58705 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58706 +
58707 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58708 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58709 + (tsk)->exec_file->f_vfsmnt) : "/")
58710 +
58711 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58712 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58713 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58714 +
58715 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58716 +
58717 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58718 +
58719 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58720 + (task)->pid, (cred)->uid, \
58721 + (cred)->euid, (cred)->gid, (cred)->egid, \
58722 + gr_parent_task_fullpath(task), \
58723 + (task)->real_parent->comm, (task)->real_parent->pid, \
58724 + (pcred)->uid, (pcred)->euid, \
58725 + (pcred)->gid, (pcred)->egid
58726 +
58727 +#define GR_CHROOT_CAPS {{ \
58728 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58729 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58730 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58731 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58732 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58733 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58734 +
58735 +#define security_learn(normal_msg,args...) \
58736 +({ \
58737 + read_lock(&grsec_exec_file_lock); \
58738 + gr_add_learn_entry(normal_msg "\n", ## args); \
58739 + read_unlock(&grsec_exec_file_lock); \
58740 +})
58741 +
58742 +enum {
58743 + GR_DO_AUDIT,
58744 + GR_DONT_AUDIT,
58745 + GR_DONT_AUDIT_GOOD
58746 +};
58747 +
58748 +enum {
58749 + GR_TTYSNIFF,
58750 + GR_RBAC,
58751 + GR_RBAC_STR,
58752 + GR_STR_RBAC,
58753 + GR_RBAC_MODE2,
58754 + GR_RBAC_MODE3,
58755 + GR_FILENAME,
58756 + GR_SYSCTL_HIDDEN,
58757 + GR_NOARGS,
58758 + GR_ONE_INT,
58759 + GR_ONE_INT_TWO_STR,
58760 + GR_ONE_STR,
58761 + GR_STR_INT,
58762 + GR_TWO_STR_INT,
58763 + GR_TWO_INT,
58764 + GR_TWO_U64,
58765 + GR_THREE_INT,
58766 + GR_FIVE_INT_TWO_STR,
58767 + GR_TWO_STR,
58768 + GR_THREE_STR,
58769 + GR_FOUR_STR,
58770 + GR_STR_FILENAME,
58771 + GR_FILENAME_STR,
58772 + GR_FILENAME_TWO_INT,
58773 + GR_FILENAME_TWO_INT_STR,
58774 + GR_TEXTREL,
58775 + GR_PTRACE,
58776 + GR_RESOURCE,
58777 + GR_CAP,
58778 + GR_SIG,
58779 + GR_SIG2,
58780 + GR_CRASH1,
58781 + GR_CRASH2,
58782 + GR_PSACCT,
58783 + GR_RWXMAP
58784 +};
58785 +
58786 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58787 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58788 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58789 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58790 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58791 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58792 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58793 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58794 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58795 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58796 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58797 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58798 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58799 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58800 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58801 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58802 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58803 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58804 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58805 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58806 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58807 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58808 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58809 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58810 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58811 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58812 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58813 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58814 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58815 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58816 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58817 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58818 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58819 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58820 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58821 +
58822 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58823 +
58824 +#endif
58825 +
58826 +#endif
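Illustrative aside (not part of the patch): every gr_log_*() wrapper above funnels into gr_log_varargs() with one of the GR_* argument-layout tags, so a single va_list consumer can format all of the message shapes. A minimal user-space analogue of that dispatch (the real gr_log_varargs() elsewhere in the patch is considerably more involved):

#include <stdarg.h>
#include <stdio.h>

enum { DEMO_ONE_INT, DEMO_TWO_STR };	/* stand-ins for GR_ONE_INT, GR_TWO_STR */

static void demo_log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case DEMO_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case DEMO_TWO_STR: {
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);

		printf(msg, s1, s2);
		break;
	}
	}
	va_end(ap);
}

int main(void)
{
	demo_log_varargs("mount of %s to %s\n", DEMO_TWO_STR, "/dev/sda1", "/mnt");
	return 0;
}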
58827 diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58828 --- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58829 +++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58830 @@ -0,0 +1,108 @@
58831 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58832 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58833 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58834 +#define GR_STOPMOD_MSG "denied modification of module state by "
58835 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58836 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58837 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58838 +#define GR_IOPL_MSG "denied use of iopl() by "
58839 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58840 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58841 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58842 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58843 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58844 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58845 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58846 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58847 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58848 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58849 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58850 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58851 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58852 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58853 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58854 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58855 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58856 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58857 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58858 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58859 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58860 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58861 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58862 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58863 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58864 +#define GR_NPROC_MSG "denied overstep of process limit by "
58865 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58866 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58867 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58868 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58869 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58870 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58871 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58872 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58873 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58874 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58875 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58876 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58877 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58878 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58879 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58880 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58881 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58882 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58883 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58884 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
58885 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58886 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58887 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58888 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58889 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58890 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58891 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58892 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58893 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58894 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58895 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58896 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58897 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58898 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58899 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58900 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58901 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58902 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58903 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58904 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58905 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58906 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58907 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58908 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58909 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58910 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58911 +#define GR_TIME_MSG "time set by "
58912 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58913 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58914 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58915 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58916 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58917 +#define GR_BIND_MSG "denied bind() by "
58918 +#define GR_CONNECT_MSG "denied connect() by "
58919 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58920 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58921 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58922 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58923 +#define GR_CAP_ACL_MSG "use of %s denied for "
58924 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58925 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58926 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58927 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58928 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58929 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58930 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58931 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58932 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58933 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58934 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58935 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58936 +#define GR_VM86_MSG "denied use of vm86 by "
58937 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58938 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
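Illustrative aside (not part of the patch): messages such as GR_DUALSIGLOG_MSG above are built by pasting DEFAULTSECMSG into the middle of the literal, and the trailing "by " is completed at the call site with the values supplied by DEFAULTSECARGS() from grinternal.h. Plain C string-literal concatenation does the work; a trimmed-down stand-in (format and values hypothetical):

#include <stdio.h>

#define DEMO_SECMSG	"%.256s[%.16s:%d] uid/euid:%u/%u"
#define DEMO_SIG_MSG	"signal %d sent to " DEMO_SECMSG " by "

int main(void)
{
	/* one argument for the leading %d, then the DEMO_SECMSG arguments */
	printf(DEMO_SIG_MSG "\n", 9, "/usr/bin/demo", "demo", 1234, 1000u, 1000u);
	return 0;
}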
58939 diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58940 --- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58941 +++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58942 @@ -0,0 +1,217 @@
58943 +#ifndef GR_SECURITY_H
58944 +#define GR_SECURITY_H
58945 +#include <linux/fs.h>
58946 +#include <linux/fs_struct.h>
58947 +#include <linux/binfmts.h>
58948 +#include <linux/gracl.h>
58949 +#include <linux/compat.h>
58950 +
58951 +/* notify of brain-dead configs */
58952 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58953 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58954 +#endif
58955 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58956 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58957 +#endif
58958 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58959 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58960 +#endif
58961 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58962 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58963 +#endif
58964 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58965 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58966 +#endif
58967 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58968 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
58969 +#endif
58970 +
58971 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58972 +void gr_handle_brute_check(void);
58973 +void gr_handle_kernel_exploit(void);
58974 +int gr_process_user_ban(void);
58975 +
58976 +char gr_roletype_to_char(void);
58977 +
58978 +int gr_acl_enable_at_secure(void);
58979 +
58980 +int gr_check_user_change(int real, int effective, int fs);
58981 +int gr_check_group_change(int real, int effective, int fs);
58982 +
58983 +void gr_del_task_from_ip_table(struct task_struct *p);
58984 +
58985 +int gr_pid_is_chrooted(struct task_struct *p);
58986 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58987 +int gr_handle_chroot_nice(void);
58988 +int gr_handle_chroot_sysctl(const int op);
58989 +int gr_handle_chroot_setpriority(struct task_struct *p,
58990 + const int niceval);
58991 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58992 +int gr_handle_chroot_chroot(const struct dentry *dentry,
58993 + const struct vfsmount *mnt);
58994 +int gr_handle_chroot_caps(struct path *path);
58995 +void gr_handle_chroot_chdir(struct path *path);
58996 +int gr_handle_chroot_chmod(const struct dentry *dentry,
58997 + const struct vfsmount *mnt, const int mode);
58998 +int gr_handle_chroot_mknod(const struct dentry *dentry,
58999 + const struct vfsmount *mnt, const int mode);
59000 +int gr_handle_chroot_mount(const struct dentry *dentry,
59001 + const struct vfsmount *mnt,
59002 + const char *dev_name);
59003 +int gr_handle_chroot_pivot(void);
59004 +int gr_handle_chroot_unix(const pid_t pid);
59005 +
59006 +int gr_handle_rawio(const struct inode *inode);
59007 +
59008 +void gr_handle_ioperm(void);
59009 +void gr_handle_iopl(void);
59010 +
59011 +int gr_tpe_allow(const struct file *file);
59012 +
59013 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59014 +void gr_clear_chroot_entries(struct task_struct *task);
59015 +
59016 +void gr_log_forkfail(const int retval);
59017 +void gr_log_timechange(void);
59018 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59019 +void gr_log_chdir(const struct dentry *dentry,
59020 + const struct vfsmount *mnt);
59021 +void gr_log_chroot_exec(const struct dentry *dentry,
59022 + const struct vfsmount *mnt);
59023 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
59024 +#ifdef CONFIG_COMPAT
59025 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
59026 +#endif
59027 +void gr_log_remount(const char *devname, const int retval);
59028 +void gr_log_unmount(const char *devname, const int retval);
59029 +void gr_log_mount(const char *from, const char *to, const int retval);
59030 +void gr_log_textrel(struct vm_area_struct *vma);
59031 +void gr_log_rwxmmap(struct file *file);
59032 +void gr_log_rwxmprotect(struct file *file);
59033 +
59034 +int gr_handle_follow_link(const struct inode *parent,
59035 + const struct inode *inode,
59036 + const struct dentry *dentry,
59037 + const struct vfsmount *mnt);
59038 +int gr_handle_fifo(const struct dentry *dentry,
59039 + const struct vfsmount *mnt,
59040 + const struct dentry *dir, const int flag,
59041 + const int acc_mode);
59042 +int gr_handle_hardlink(const struct dentry *dentry,
59043 + const struct vfsmount *mnt,
59044 + struct inode *inode,
59045 + const int mode, const char *to);
59046 +
59047 +int gr_is_capable(const int cap);
59048 +int gr_is_capable_nolog(const int cap);
59049 +void gr_learn_resource(const struct task_struct *task, const int limit,
59050 + const unsigned long wanted, const int gt);
59051 +void gr_copy_label(struct task_struct *tsk);
59052 +void gr_handle_crash(struct task_struct *task, const int sig);
59053 +int gr_handle_signal(const struct task_struct *p, const int sig);
59054 +int gr_check_crash_uid(const uid_t uid);
59055 +int gr_check_protected_task(const struct task_struct *task);
59056 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59057 +int gr_acl_handle_mmap(const struct file *file,
59058 + const unsigned long prot);
59059 +int gr_acl_handle_mprotect(const struct file *file,
59060 + const unsigned long prot);
59061 +int gr_check_hidden_task(const struct task_struct *tsk);
59062 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59063 + const struct vfsmount *mnt);
59064 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59065 + const struct vfsmount *mnt);
59066 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59067 + const struct vfsmount *mnt, const int fmode);
59068 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59069 + const struct vfsmount *mnt, mode_t mode);
59070 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59071 + const struct vfsmount *mnt, mode_t mode);
59072 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59073 + const struct vfsmount *mnt);
59074 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59075 + const struct vfsmount *mnt);
59076 +int gr_handle_ptrace(struct task_struct *task, const long request);
59077 +int gr_handle_proc_ptrace(struct task_struct *task);
59078 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59079 + const struct vfsmount *mnt);
59080 +int gr_check_crash_exec(const struct file *filp);
59081 +int gr_acl_is_enabled(void);
59082 +void gr_set_kernel_label(struct task_struct *task);
59083 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59084 + const gid_t gid);
59085 +int gr_set_proc_label(const struct dentry *dentry,
59086 + const struct vfsmount *mnt,
59087 + const int unsafe_share);
59088 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59089 + const struct vfsmount *mnt);
59090 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59091 + const struct vfsmount *mnt, const int fmode);
59092 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59093 + const struct dentry *p_dentry,
59094 + const struct vfsmount *p_mnt, const int fmode,
59095 + const int imode);
59096 +void gr_handle_create(const struct dentry *dentry,
59097 + const struct vfsmount *mnt);
59098 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59099 + const struct dentry *parent_dentry,
59100 + const struct vfsmount *parent_mnt,
59101 + const int mode);
59102 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59103 + const struct dentry *parent_dentry,
59104 + const struct vfsmount *parent_mnt);
59105 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59106 + const struct vfsmount *mnt);
59107 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59108 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59109 + const struct vfsmount *mnt);
59110 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59111 + const struct dentry *parent_dentry,
59112 + const struct vfsmount *parent_mnt,
59113 + const char *from);
59114 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59115 + const struct dentry *parent_dentry,
59116 + const struct vfsmount *parent_mnt,
59117 + const struct dentry *old_dentry,
59118 + const struct vfsmount *old_mnt, const char *to);
59119 +int gr_acl_handle_rename(struct dentry *new_dentry,
59120 + struct dentry *parent_dentry,
59121 + const struct vfsmount *parent_mnt,
59122 + struct dentry *old_dentry,
59123 + struct inode *old_parent_inode,
59124 + struct vfsmount *old_mnt, const char *newname);
59125 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59126 + struct dentry *old_dentry,
59127 + struct dentry *new_dentry,
59128 + struct vfsmount *mnt, const __u8 replace);
59129 +__u32 gr_check_link(const struct dentry *new_dentry,
59130 + const struct dentry *parent_dentry,
59131 + const struct vfsmount *parent_mnt,
59132 + const struct dentry *old_dentry,
59133 + const struct vfsmount *old_mnt);
59134 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59135 + const unsigned int namelen, const ino_t ino);
59136 +
59137 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59138 + const struct vfsmount *mnt);
59139 +void gr_acl_handle_exit(void);
59140 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59141 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59142 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59143 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59144 +void gr_audit_ptrace(struct task_struct *task);
59145 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59146 +
59147 +#ifdef CONFIG_GRKERNSEC
59148 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59149 +void gr_handle_vm86(void);
59150 +void gr_handle_mem_readwrite(u64 from, u64 to);
59151 +
59152 +extern int grsec_enable_dmesg;
59153 +extern int grsec_disable_privio;
59154 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59155 +extern int grsec_enable_chroot_findtask;
59156 +#endif
59157 +#endif
59158 +
59159 +#endif
59160 diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
59161 --- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59162 +++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59163 @@ -3,7 +3,7 @@
59164 struct cpustate_t {
59165 spinlock_t lock;
59166 int excl;
59167 - int open_count;
59168 + atomic_t open_count;
59169 unsigned char cached_val;
59170 int inited;
59171 unsigned long *set_addr;
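Illustrative aside (not part of the patch): once open_count becomes an atomic_t, as in the hunk above, its users have to trade plain integer arithmetic for the atomic accessors. A hypothetical open/release pair would change roughly like this:

static int demo_open(struct cpustate_t *st)
{
	atomic_inc(&st->open_count);		/* was: st->open_count++; */
	return 0;
}

static void demo_release(struct cpustate_t *st)
{
	/* was: if (--st->open_count == 0) */
	if (atomic_dec_and_test(&st->open_count)) {
		/* last opener gone: safe to reset cached state */
	}
}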
59172 diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
59173 --- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59174 +++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59175 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59176 kunmap_atomic(kaddr, KM_USER0);
59177 }
59178
59179 +static inline void sanitize_highpage(struct page *page)
59180 +{
59181 + void *kaddr;
59182 + unsigned long flags;
59183 +
59184 + local_irq_save(flags);
59185 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59186 + clear_page(kaddr);
59187 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59188 + local_irq_restore(flags);
59189 +}
59190 +
59191 static inline void zero_user_segments(struct page *page,
59192 unsigned start1, unsigned end1,
59193 unsigned start2, unsigned end2)
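Illustrative aside (not part of the patch): sanitize_highpage() above zeroes a page through a short-lived KM_CLEARPAGE atomic mapping with interrupts disabled. A hypothetical free-path caller would use it to scrub contents before the page is recycled:

static void demo_free_page(struct page *page, int sanitize)
{
	if (sanitize)
		sanitize_highpage(page);	/* wipe stale contents */
	/* ... hand the page back to the allocator ... */
}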
59194 diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
59195 --- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59196 +++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59197 @@ -564,7 +564,7 @@ struct i2o_controller {
59198 struct i2o_device *exec; /* Executive */
59199 #if BITS_PER_LONG == 64
59200 spinlock_t context_list_lock; /* lock for context_list */
59201 - atomic_t context_list_counter; /* needed for unique contexts */
59202 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59203 struct list_head context_list; /* list of context id's
59204 and pointers */
59205 #endif
59206 diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
59207 --- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59208 +++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59209 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
59210 #define INIT_IDS
59211 #endif
59212
59213 +#ifdef CONFIG_X86
59214 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59215 +#else
59216 +#define INIT_TASK_THREAD_INFO
59217 +#endif
59218 +
59219 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59220 /*
59221 * Because of the reduced scope of CAP_SETPCAP when filesystem
59222 @@ -156,6 +162,7 @@ extern struct cred init_cred;
59223 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59224 .comm = "swapper", \
59225 .thread = INIT_THREAD, \
59226 + INIT_TASK_THREAD_INFO \
59227 .fs = &init_fs, \
59228 .files = &init_files, \
59229 .signal = &init_signals, \
59230 diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
59231 --- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59232 +++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59233 @@ -296,7 +296,7 @@ struct iommu_flush {
59234 u8 fm, u64 type);
59235 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59236 unsigned int size_order, u64 type);
59237 -};
59238 +} __no_const;
59239
59240 enum {
59241 SR_DMAR_FECTL_REG,
59242 diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
59243 --- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59244 +++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59245 @@ -363,7 +363,7 @@ enum
59246 /* map softirq index to softirq name. update 'softirq_to_name' in
59247 * kernel/softirq.c when adding a new softirq.
59248 */
59249 -extern char *softirq_to_name[NR_SOFTIRQS];
59250 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59251
59252 /* softirq mask and active fields moved to irq_cpustat_t in
59253 * asm/hardirq.h to get better cache usage. KAO
59254 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59255
59256 struct softirq_action
59257 {
59258 - void (*action)(struct softirq_action *);
59259 + void (*action)(void);
59260 };
59261
59262 asmlinkage void do_softirq(void);
59263 asmlinkage void __do_softirq(void);
59264 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59265 +extern void open_softirq(int nr, void (*action)(void));
59266 extern void softirq_init(void);
59267 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59268 extern void raise_softirq_irqoff(unsigned int nr);
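Illustrative aside (not part of the patch): with the prototype change above, softirq handlers no longer take the (generally unused) struct softirq_action pointer, so both the handler definitions and their open_softirq() registrations drop the parameter. The handler name and softirq slot below are hypothetical:

/* before: static void demo_action(struct softirq_action *unused) */
static void demo_action(void)
{
	/* drain whatever work this softirq owns */
}

static void demo_softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, demo_action);
}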
59269 diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
59270 --- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59271 +++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59272 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59273 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59274 bool boot)
59275 {
59276 +#ifdef CONFIG_CPUMASK_OFFSTACK
59277 gfp_t gfp = GFP_ATOMIC;
59278
59279 if (boot)
59280 gfp = GFP_NOWAIT;
59281
59282 -#ifdef CONFIG_CPUMASK_OFFSTACK
59283 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59284 return false;
59285
59286 diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
59287 --- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59288 +++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59289 @@ -15,7 +15,8 @@
59290
59291 struct module;
59292
59293 -#ifdef CONFIG_KALLSYMS
59294 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59295 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59296 /* Lookup the address for a symbol. Returns 0 if not found. */
59297 unsigned long kallsyms_lookup_name(const char *name);
59298
59299 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59300 /* Stupid that this does nothing, but I didn't create this mess. */
59301 #define __print_symbol(fmt, addr)
59302 #endif /*CONFIG_KALLSYMS*/
59303 +#else /* when included by kallsyms.c, vsnprintf.c, or
59304 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59305 +extern void __print_symbol(const char *fmt, unsigned long address);
59306 +extern int sprint_symbol(char *buffer, unsigned long address);
59307 +const char *kallsyms_lookup(unsigned long addr,
59308 + unsigned long *symbolsize,
59309 + unsigned long *offset,
59310 + char **modname, char *namebuf);
59311 +#endif
59312
59313 /* This macro allows us to keep printk typechecking */
59314 static void __check_printsym_format(const char *fmt, ...)
59315 diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59316 --- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59317 +++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59318 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59319
59320 extern int kgdb_connected;
59321
59322 -extern atomic_t kgdb_setting_breakpoint;
59323 -extern atomic_t kgdb_cpu_doing_single_step;
59324 +extern atomic_unchecked_t kgdb_setting_breakpoint;
59325 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59326
59327 extern struct task_struct *kgdb_usethread;
59328 extern struct task_struct *kgdb_contthread;
59329 @@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59330 * hardware debug registers.
59331 */
59332 struct kgdb_arch {
59333 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59334 - unsigned long flags;
59335 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59336 + const unsigned long flags;
59337
59338 int (*set_breakpoint)(unsigned long, char *);
59339 int (*remove_breakpoint)(unsigned long, char *);
59340 @@ -251,20 +251,20 @@ struct kgdb_arch {
59341 */
59342 struct kgdb_io {
59343 const char *name;
59344 - int (*read_char) (void);
59345 - void (*write_char) (u8);
59346 - void (*flush) (void);
59347 - int (*init) (void);
59348 - void (*pre_exception) (void);
59349 - void (*post_exception) (void);
59350 + int (* const read_char) (void);
59351 + void (* const write_char) (u8);
59352 + void (* const flush) (void);
59353 + int (* const init) (void);
59354 + void (* const pre_exception) (void);
59355 + void (* const post_exception) (void);
59356 };
59357
59358 -extern struct kgdb_arch arch_kgdb_ops;
59359 +extern const struct kgdb_arch arch_kgdb_ops;
59360
59361 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59362
59363 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59364 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59365 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59366 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59367
59368 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59369 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59370 diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59371 --- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59372 +++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59373 @@ -31,6 +31,8 @@
59374 * usually useless though. */
59375 extern int __request_module(bool wait, const char *name, ...) \
59376 __attribute__((format(printf, 2, 3)));
59377 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59378 + __attribute__((format(printf, 3, 4)));
59379 #define request_module(mod...) __request_module(true, mod)
59380 #define request_module_nowait(mod...) __request_module(false, mod)
59381 #define try_then_request_module(x, mod...) \
59382 diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59383 --- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59384 +++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59385 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59386
59387 struct kobj_type {
59388 void (*release)(struct kobject *kobj);
59389 - struct sysfs_ops *sysfs_ops;
59390 + const struct sysfs_ops *sysfs_ops;
59391 struct attribute **default_attrs;
59392 };
59393
59394 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
59395 };
59396
59397 struct kset_uevent_ops {
59398 - int (*filter)(struct kset *kset, struct kobject *kobj);
59399 - const char *(*name)(struct kset *kset, struct kobject *kobj);
59400 - int (*uevent)(struct kset *kset, struct kobject *kobj,
59401 + int (* const filter)(struct kset *kset, struct kobject *kobj);
59402 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
59403 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
59404 struct kobj_uevent_env *env);
59405 };
59406
59407 @@ -132,7 +132,7 @@ struct kobj_attribute {
59408 const char *buf, size_t count);
59409 };
59410
59411 -extern struct sysfs_ops kobj_sysfs_ops;
59412 +extern const struct sysfs_ops kobj_sysfs_ops;
59413
59414 /**
59415 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59416 @@ -155,14 +155,14 @@ struct kset {
59417 struct list_head list;
59418 spinlock_t list_lock;
59419 struct kobject kobj;
59420 - struct kset_uevent_ops *uevent_ops;
59421 + const struct kset_uevent_ops *uevent_ops;
59422 };
59423
59424 extern void kset_init(struct kset *kset);
59425 extern int __must_check kset_register(struct kset *kset);
59426 extern void kset_unregister(struct kset *kset);
59427 extern struct kset * __must_check kset_create_and_add(const char *name,
59428 - struct kset_uevent_ops *u,
59429 + const struct kset_uevent_ops *u,
59430 struct kobject *parent_kobj);
59431
59432 static inline struct kset *to_kset(struct kobject *kobj)
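Illustrative aside (not part of the patch): after the constification above, callers must declare their ops tables const so they can live in read-only data. A hypothetical ktype would now be written as:

static ssize_t demo_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return 0;	/* a real implementation formats the value into buf */
}

static const struct sysfs_ops demo_sysfs_ops = {	/* note the const */
	.show	= demo_show,
};

static struct kobj_type demo_ktype = {
	.sysfs_ops	= &demo_sysfs_ops,	/* matches the new const pointer */
};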
59433 diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59434 --- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59435 +++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59436 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59437 void vcpu_load(struct kvm_vcpu *vcpu);
59438 void vcpu_put(struct kvm_vcpu *vcpu);
59439
59440 -int kvm_init(void *opaque, unsigned int vcpu_size,
59441 +int kvm_init(const void *opaque, unsigned int vcpu_size,
59442 struct module *module);
59443 void kvm_exit(void);
59444
59445 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59446 struct kvm_guest_debug *dbg);
59447 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59448
59449 -int kvm_arch_init(void *opaque);
59450 +int kvm_arch_init(const void *opaque);
59451 void kvm_arch_exit(void);
59452
59453 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59454 diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59455 --- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59456 +++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59457 @@ -525,11 +525,11 @@ struct ata_ioports {
59458
59459 struct ata_host {
59460 spinlock_t lock;
59461 - struct device *dev;
59462 + struct device *dev;
59463 void __iomem * const *iomap;
59464 unsigned int n_ports;
59465 void *private_data;
59466 - struct ata_port_operations *ops;
59467 + const struct ata_port_operations *ops;
59468 unsigned long flags;
59469 #ifdef CONFIG_ATA_ACPI
59470 acpi_handle acpi_handle;
59471 @@ -710,7 +710,7 @@ struct ata_link {
59472
59473 struct ata_port {
59474 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59475 - struct ata_port_operations *ops;
59476 + const struct ata_port_operations *ops;
59477 spinlock_t *lock;
59478 /* Flags owned by the EH context. Only EH should touch these once the
59479 port is active */
59480 @@ -883,7 +883,7 @@ struct ata_port_operations {
59481 * ->inherits must be the last field and all the preceding
59482 * fields must be pointers.
59483 */
59484 - const struct ata_port_operations *inherits;
59485 + const struct ata_port_operations * const inherits;
59486 };
59487
59488 struct ata_port_info {
59489 @@ -892,7 +892,7 @@ struct ata_port_info {
59490 unsigned long pio_mask;
59491 unsigned long mwdma_mask;
59492 unsigned long udma_mask;
59493 - struct ata_port_operations *port_ops;
59494 + const struct ata_port_operations *port_ops;
59495 void *private_data;
59496 };
59497
59498 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59499 extern const unsigned long sata_deb_timing_hotplug[];
59500 extern const unsigned long sata_deb_timing_long[];
59501
59502 -extern struct ata_port_operations ata_dummy_port_ops;
59503 +extern const struct ata_port_operations ata_dummy_port_ops;
59504 extern const struct ata_port_info ata_dummy_port_info;
59505
59506 static inline const unsigned long *
59507 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59508 struct scsi_host_template *sht);
59509 extern void ata_host_detach(struct ata_host *host);
59510 extern void ata_host_init(struct ata_host *, struct device *,
59511 - unsigned long, struct ata_port_operations *);
59512 + unsigned long, const struct ata_port_operations *);
59513 extern int ata_scsi_detect(struct scsi_host_template *sht);
59514 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59515 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59516 diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59517 --- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59518 +++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59519 @@ -23,13 +23,13 @@ struct svc_rqst;
59520 * This is the set of functions for lockd->nfsd communication
59521 */
59522 struct nlmsvc_binding {
59523 - __be32 (*fopen)(struct svc_rqst *,
59524 + __be32 (* const fopen)(struct svc_rqst *,
59525 struct nfs_fh *,
59526 struct file **);
59527 - void (*fclose)(struct file *);
59528 + void (* const fclose)(struct file *);
59529 };
59530
59531 -extern struct nlmsvc_binding * nlmsvc_ops;
59532 +extern const struct nlmsvc_binding * nlmsvc_ops;
59533
59534 /*
59535 * Similar to nfs_client_initdata, but without the NFS-specific
59536 diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59537 --- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59538 +++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59539 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59540 int region);
59541 void * (*mca_transform_memory)(struct mca_device *,
59542 void *memory);
59543 -};
59544 +} __no_const;
59545
59546 struct mca_bus {
59547 u64 default_dma_mask;
59548 diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59549 --- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59550 +++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59551 @@ -108,7 +108,7 @@ struct memory_accessor {
59552 size_t count);
59553 ssize_t (*write)(struct memory_accessor *, const char *buf,
59554 off_t offset, size_t count);
59555 -};
59556 +} __no_const;
59557
59558 /*
59559 * Kernel text modification mutex, used for code patching. Users of this lock
59560 diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59561 --- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59562 +++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59563 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59564
59565 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59566 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59567 +
59568 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59569 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59570 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59571 +#else
59572 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59573 +#endif
59574 +
59575 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59576 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59577
59578 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59579 int set_page_dirty_lock(struct page *page);
59580 int clear_page_dirty_for_io(struct page *page);
59581
59582 -/* Is the vma a continuation of the stack vma above it? */
59583 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59584 -{
59585 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59586 -}
59587 -
59588 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59589 unsigned long old_addr, struct vm_area_struct *new_vma,
59590 unsigned long new_addr, unsigned long len);
59591 @@ -890,6 +891,8 @@ struct shrinker {
59592 extern void register_shrinker(struct shrinker *);
59593 extern void unregister_shrinker(struct shrinker *);
59594
59595 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
59596 +
59597 int vma_wants_writenotify(struct vm_area_struct *vma);
59598
59599 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59600 @@ -1162,6 +1165,7 @@ out:
59601 }
59602
59603 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59604 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59605
59606 extern unsigned long do_brk(unsigned long, unsigned long);
59607
59608 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59609 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59610 struct vm_area_struct **pprev);
59611
59612 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59613 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59614 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59615 +
59616 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59617 NULL if none. Assume start_addr < end_addr. */
59618 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59619 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59620 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59621 }
59622
59623 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59624 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59625 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59626 unsigned long pfn, unsigned long size, pgprot_t);
59627 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59628 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59629 extern int sysctl_memory_failure_early_kill;
59630 extern int sysctl_memory_failure_recovery;
59631 -extern atomic_long_t mce_bad_pages;
59632 +extern atomic_long_unchecked_t mce_bad_pages;
59633 +
59634 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59635 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59636 +#else
59637 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59638 +#endif
59639
59640 #endif /* __KERNEL__ */
59641 #endif /* _LINUX_MM_H */
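
A note on the track_exec_limit() declaration added above: it follows the usual config-gated stub pattern, a real prototype when CONFIG_ARCH_TRACK_EXEC_LIMIT is set and an empty static inline otherwise, so call sites never need their own #ifdefs. A minimal, self-contained sketch of that pattern (FEATURE_X and track_limit() are illustrative names, not taken from the patch):

#include <stdio.h>

/* #define FEATURE_X 1 */       /* when set, the real definition lives elsewhere */

#ifdef FEATURE_X
void track_limit(unsigned long start, unsigned long end);
#else
/* feature compiled out: calls collapse to nothing, no #ifdef at call sites */
static inline void track_limit(unsigned long start, unsigned long end) { }
#endif

int main(void)
{
        track_limit(0x1000, 0x2000);
        printf("call site stays #ifdef-free\n");
        return 0;
}
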
59642 diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59643 --- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59644 +++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59645 @@ -186,6 +186,8 @@ struct vm_area_struct {
59646 #ifdef CONFIG_NUMA
59647 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59648 #endif
59649 +
59650 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59651 };
59652
59653 struct core_thread {
59654 @@ -287,6 +289,24 @@ struct mm_struct {
59655 #ifdef CONFIG_MMU_NOTIFIER
59656 struct mmu_notifier_mm *mmu_notifier_mm;
59657 #endif
59658 +
59659 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59660 + unsigned long pax_flags;
59661 +#endif
59662 +
59663 +#ifdef CONFIG_PAX_DLRESOLVE
59664 + unsigned long call_dl_resolve;
59665 +#endif
59666 +
59667 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59668 + unsigned long call_syscall;
59669 +#endif
59670 +
59671 +#ifdef CONFIG_PAX_ASLR
59672 + unsigned long delta_mmap; /* randomized offset */
59673 + unsigned long delta_stack; /* randomized offset */
59674 +#endif
59675 +
59676 };
59677
59678 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
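
The delta_mmap/delta_stack fields added to mm_struct above hold per-process randomization offsets for CONFIG_PAX_ASLR. A hedged userspace sketch of how such a delta is typically derived and applied; the 16-bit delta width, the page shift of 12 and the base address are assumptions chosen for illustration, not values from the patch:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT      12      /* assumed 4 KiB pages */
#define DELTA_MMAP_BITS 16      /* assumed width of the random delta */

int main(void)
{
        unsigned long base = 0x40000000UL;      /* illustrative mmap base */
        unsigned long delta_mmap;

        srandom((unsigned)time(NULL));
        /* keep DELTA_MMAP_BITS of randomness, then scale it to whole pages */
        delta_mmap = ((unsigned long)random() & ((1UL << DELTA_MMAP_BITS) - 1)) << PAGE_SHIFT;
        printf("randomized mmap base: %#lx\n", base + delta_mmap);
        return 0;
}
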
59679 diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59680 --- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59681 +++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59682 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59683 */
59684 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59685 ({ \
59686 - pte_t __pte; \
59687 + pte_t ___pte; \
59688 struct vm_area_struct *___vma = __vma; \
59689 unsigned long ___address = __address; \
59690 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59691 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59692 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59693 - __pte; \
59694 + ___pte; \
59695 })
59696
59697 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
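
The __pte -> ___pte rename in ptep_clear_flush_notify() above avoids name capture: if the expression a caller passes in mentions a variable that shares the macro temporary's name, the temporary shadows it inside the statement expression. A small standalone example of the failure mode the extra underscore avoids (SQUARE_BAD/SQUARE_OK are illustrative names):

#include <stdio.h>

/* Risky: the temporary shares its name with a plausible caller variable.
 * SQUARE_BAD(tmp) expands to ({ int tmp = (tmp); tmp * tmp; }) and the
 * initializer then reads the freshly declared, uninitialized tmp. */
#define SQUARE_BAD(x) ({ int tmp = (x); tmp * tmp; })

/* Safer: pick a temporary name callers are unlikely to collide with. */
#define SQUARE_OK(x)  ({ int __square_tmp = (x); __square_tmp * __square_tmp; })

int main(void)
{
        int tmp = 5;

        printf("%d\n", SQUARE_OK(tmp));         /* 25, as expected */
        return 0;
}
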
59698 diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59699 --- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59700 +++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59701 @@ -350,7 +350,7 @@ struct zone {
59702 unsigned long flags; /* zone flags, see below */
59703
59704 /* Zone statistics */
59705 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59706 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59707
59708 /*
59709 * prev_priority holds the scanning priority for this zone. It is
59710 diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59711 --- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59712 +++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59713 @@ -12,7 +12,7 @@
59714 typedef unsigned long kernel_ulong_t;
59715 #endif
59716
59717 -#define PCI_ANY_ID (~0)
59718 +#define PCI_ANY_ID ((__u16)~0)
59719
59720 struct pci_device_id {
59721 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59722 @@ -131,7 +131,7 @@ struct usb_device_id {
59723 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59724 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59725
59726 -#define HID_ANY_ID (~0)
59727 +#define HID_ANY_ID (~0U)
59728
59729 struct hid_device_id {
59730 __u16 bus;
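
Rewriting PCI_ANY_ID and HID_ANY_ID as explicitly typed constants above matters because the value and comparison behaviour of a ~0 wildcard change with its type once integer promotion applies. A purely illustrative demonstration of that difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t bus = 0xFFFF;          /* a 16-bit "match anything" field */

        printf("%d\n", bus == (uint16_t)~0);         /* 1: both sides promote to 0xFFFF  */
        printf("%d\n", bus == ~0);                   /* 0: ~0 is int -1, not 0xFFFF      */
        printf("%d\n", (uint64_t)~0  == UINT64_MAX); /* 1: -1 sign-extends               */
        printf("%d\n", (uint64_t)~0U == UINT64_MAX); /* 0: 0xFFFFFFFF zero-extends       */
        return 0;
}
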
59731 diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59732 --- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59733 +++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59734 @@ -16,6 +16,7 @@
59735 #include <linux/kobject.h>
59736 #include <linux/moduleparam.h>
59737 #include <linux/tracepoint.h>
59738 +#include <linux/fs.h>
59739
59740 #include <asm/local.h>
59741 #include <asm/module.h>
59742 @@ -287,16 +288,16 @@ struct module
59743 int (*init)(void);
59744
59745 /* If this is non-NULL, vfree after init() returns */
59746 - void *module_init;
59747 + void *module_init_rx, *module_init_rw;
59748
59749 /* Here is the actual code + data, vfree'd on unload. */
59750 - void *module_core;
59751 + void *module_core_rx, *module_core_rw;
59752
59753 /* Here are the sizes of the init and core sections */
59754 - unsigned int init_size, core_size;
59755 + unsigned int init_size_rw, core_size_rw;
59756
59757 /* The size of the executable code in each section. */
59758 - unsigned int init_text_size, core_text_size;
59759 + unsigned int init_size_rx, core_size_rx;
59760
59761 /* Arch-specific module values */
59762 struct mod_arch_specific arch;
59763 @@ -345,6 +346,10 @@ struct module
59764 #ifdef CONFIG_EVENT_TRACING
59765 struct ftrace_event_call *trace_events;
59766 unsigned int num_trace_events;
59767 + struct file_operations trace_id;
59768 + struct file_operations trace_enable;
59769 + struct file_operations trace_format;
59770 + struct file_operations trace_filter;
59771 #endif
59772 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59773 unsigned long *ftrace_callsites;
59774 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59775 bool is_module_address(unsigned long addr);
59776 bool is_module_text_address(unsigned long addr);
59777
59778 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59779 +{
59780 +
59781 +#ifdef CONFIG_PAX_KERNEXEC
59782 + if (ktla_ktva(addr) >= (unsigned long)start &&
59783 + ktla_ktva(addr) < (unsigned long)start + size)
59784 + return 1;
59785 +#endif
59786 +
59787 + return ((void *)addr >= start && (void *)addr < start + size);
59788 +}
59789 +
59790 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59791 +{
59792 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59793 +}
59794 +
59795 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59796 +{
59797 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59798 +}
59799 +
59800 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59801 +{
59802 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59803 +}
59804 +
59805 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59806 +{
59807 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59808 +}
59809 +
59810 static inline int within_module_core(unsigned long addr, struct module *mod)
59811 {
59812 - return (unsigned long)mod->module_core <= addr &&
59813 - addr < (unsigned long)mod->module_core + mod->core_size;
59814 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59815 }
59816
59817 static inline int within_module_init(unsigned long addr, struct module *mod)
59818 {
59819 - return (unsigned long)mod->module_init <= addr &&
59820 - addr < (unsigned long)mod->module_init + mod->init_size;
59821 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59822 }
59823
59824 /* Search for module by name: must hold module_mutex. */
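
With module_core split into separate _rx and _rw regions above, a "does this address belong to the module" query becomes a bounds test repeated over two ranges, which within_module_range() factors out. A standalone sketch of the same two-range membership test (struct region and the sample addresses are simplified stand-ins, not the kernel's types):

#include <stdio.h>
#include <stdbool.h>

struct region { unsigned long start, size; };

static bool within(unsigned long addr, struct region r)
{
        return addr >= r.start && addr < r.start + r.size;
}

int main(void)
{
        struct region core_rx = { 0x1000, 0x800 };      /* code: read + exec  */
        struct region core_rw = { 0x2000, 0x400 };      /* data: read + write */
        unsigned long addr = 0x2100;

        /* the old single-range test becomes an OR over the two split ranges */
        printf("in module core: %d\n", within(addr, core_rx) || within(addr, core_rw));
        return 0;
}
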
59825 diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59826 --- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59827 +++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59828 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59829 sections. Returns NULL on failure. */
59830 void *module_alloc(unsigned long size);
59831
59832 +#ifdef CONFIG_PAX_KERNEXEC
59833 +void *module_alloc_exec(unsigned long size);
59834 +#else
59835 +#define module_alloc_exec(x) module_alloc(x)
59836 +#endif
59837 +
59838 /* Free memory returned from module_alloc. */
59839 void module_free(struct module *mod, void *module_region);
59840
59841 +#ifdef CONFIG_PAX_KERNEXEC
59842 +void module_free_exec(struct module *mod, void *module_region);
59843 +#else
59844 +#define module_free_exec(x, y) module_free((x), (y))
59845 +#endif
59846 +
59847 /* Apply the given relocation to the (simplified) ELF. Return -error
59848 or 0. */
59849 int apply_relocate(Elf_Shdr *sechdrs,
59850 diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59851 --- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59852 +++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59853 @@ -132,7 +132,7 @@ struct kparam_array
59854
59855 /* Actually copy string: maxlen param is usually sizeof(string). */
59856 #define module_param_string(name, string, len, perm) \
59857 - static const struct kparam_string __param_string_##name \
59858 + static const struct kparam_string __param_string_##name __used \
59859 = { len, string }; \
59860 __module_param_call(MODULE_PARAM_PREFIX, name, \
59861 param_set_copystring, param_get_string, \
59862 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59863
59864 /* Comma-separated array: *nump is set to number they actually specified. */
59865 #define module_param_array_named(name, array, type, nump, perm) \
59866 - static const struct kparam_array __param_arr_##name \
59867 + static const struct kparam_array __param_arr_##name __used \
59868 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59869 sizeof(array[0]), array }; \
59870 __module_param_call(MODULE_PARAM_PREFIX, name, \
59871 diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59872 --- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59873 +++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59874 @@ -51,7 +51,7 @@ struct mutex {
59875 spinlock_t wait_lock;
59876 struct list_head wait_list;
59877 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59878 - struct thread_info *owner;
59879 + struct task_struct *owner;
59880 #endif
59881 #ifdef CONFIG_DEBUG_MUTEXES
59882 const char *name;
59883 diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59884 --- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59885 +++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59886 @@ -22,7 +22,7 @@ struct nameidata {
59887 unsigned int flags;
59888 int last_type;
59889 unsigned depth;
59890 - char *saved_names[MAX_NESTED_LINKS + 1];
59891 + const char *saved_names[MAX_NESTED_LINKS + 1];
59892
59893 /* Intent data */
59894 union {
59895 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59896 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59897 extern void unlock_rename(struct dentry *, struct dentry *);
59898
59899 -static inline void nd_set_link(struct nameidata *nd, char *path)
59900 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59901 {
59902 nd->saved_names[nd->depth] = path;
59903 }
59904
59905 -static inline char *nd_get_link(struct nameidata *nd)
59906 +static inline const char *nd_get_link(const struct nameidata *nd)
59907 {
59908 return nd->saved_names[nd->depth];
59909 }
59910 diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59911 --- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59912 +++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59913 @@ -0,0 +1,9 @@
59914 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59915 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59916 +
59917 +struct xt_gradm_mtinfo {
59918 + __u16 flags;
59919 + __u16 invflags;
59920 +};
59921 +
59922 +#endif
59923 diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59924 --- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59925 +++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59926 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59927
59928 #define any_online_node(mask) \
59929 ({ \
59930 - int node; \
59931 - for_each_node_mask(node, (mask)) \
59932 - if (node_online(node)) \
59933 + int __node; \
59934 + for_each_node_mask(__node, (mask)) \
59935 + if (node_online(__node)) \
59936 break; \
59937 - node; \
59938 + __node; \
59939 })
59940
59941 #define num_online_nodes() num_node_state(N_ONLINE)
59942 diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59943 --- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59944 +++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59945 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59946 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59947 char const * name, ulong * val);
59948
59949 -/** Create a file for read-only access to an atomic_t. */
59950 +/** Create a file for read-only access to an atomic_unchecked_t. */
59951 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59952 - char const * name, atomic_t * val);
59953 + char const * name, atomic_unchecked_t * val);
59954
59955 /** create a directory */
59956 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59957 diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59958 --- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59959 +++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59960 @@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59961 if (((unsigned long)uaddr & PAGE_MASK) !=
59962 ((unsigned long)end & PAGE_MASK))
59963 ret = __get_user(c, end);
59964 + (void)c;
59965 }
59966 return ret;
59967 }
59968 diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59969 --- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59970 +++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59971 @@ -476,7 +476,7 @@ struct hw_perf_event {
59972 struct hrtimer hrtimer;
59973 };
59974 };
59975 - atomic64_t prev_count;
59976 + atomic64_unchecked_t prev_count;
59977 u64 sample_period;
59978 u64 last_period;
59979 atomic64_t period_left;
59980 @@ -557,7 +557,7 @@ struct perf_event {
59981 const struct pmu *pmu;
59982
59983 enum perf_event_active_state state;
59984 - atomic64_t count;
59985 + atomic64_unchecked_t count;
59986
59987 /*
59988 * These are the total time in nanoseconds that the event
59989 @@ -595,8 +595,8 @@ struct perf_event {
59990 * These accumulate total time (in nanoseconds) that children
59991 * events have been enabled and running, respectively.
59992 */
59993 - atomic64_t child_total_time_enabled;
59994 - atomic64_t child_total_time_running;
59995 + atomic64_unchecked_t child_total_time_enabled;
59996 + atomic64_unchecked_t child_total_time_running;
59997
59998 /*
59999 * Protect attach/detach and child_list:
60000 diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
60001 --- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
60002 +++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
60003 @@ -46,9 +46,9 @@ struct pipe_inode_info {
60004 wait_queue_head_t wait;
60005 unsigned int nrbufs, curbuf;
60006 struct page *tmp_page;
60007 - unsigned int readers;
60008 - unsigned int writers;
60009 - unsigned int waiting_writers;
60010 + atomic_t readers;
60011 + atomic_t writers;
60012 + atomic_t waiting_writers;
60013 unsigned int r_counter;
60014 unsigned int w_counter;
60015 struct fasync_struct *fasync_readers;
60016 diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
60017 --- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
60018 +++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
60019 @@ -19,8 +19,8 @@
60020 * under normal circumstances, used to verify that nobody uses
60021 * non-initialized list entries.
60022 */
60023 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60024 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60025 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60026 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60027
60028 /********** include/linux/timer.h **********/
60029 /*
60030 diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
60031 --- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
60032 +++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
60033 @@ -67,7 +67,7 @@ struct k_itimer {
60034 };
60035
60036 struct k_clock {
60037 - int res; /* in nanoseconds */
60038 + const int res; /* in nanoseconds */
60039 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
60040 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
60041 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
60042 diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
60043 --- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
60044 +++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
60045 @@ -110,7 +110,7 @@ struct preempt_ops {
60046 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60047 void (*sched_out)(struct preempt_notifier *notifier,
60048 struct task_struct *next);
60049 -};
60050 +} __no_const;
60051
60052 /**
60053 * preempt_notifier - key for installing preemption notifiers
60054 diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
60055 --- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
60056 +++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
60057 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60058 return proc_create_data(name, mode, parent, proc_fops, NULL);
60059 }
60060
60061 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60062 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60063 +{
60064 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60065 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60066 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60067 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60068 +#else
60069 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60070 +#endif
60071 +}
60072 +
60073 +
60074 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60075 mode_t mode, struct proc_dir_entry *base,
60076 read_proc_t *read_proc, void * data)
60077 @@ -256,7 +269,7 @@ union proc_op {
60078 int (*proc_show)(struct seq_file *m,
60079 struct pid_namespace *ns, struct pid *pid,
60080 struct task_struct *task);
60081 -};
60082 +} __no_const;
60083
60084 struct ctl_table_header;
60085 struct ctl_table;
60086 diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
60087 --- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60088 +++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60089 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60090 extern void exit_ptrace(struct task_struct *tracer);
60091 #define PTRACE_MODE_READ 1
60092 #define PTRACE_MODE_ATTACH 2
60093 -/* Returns 0 on success, -errno on denial. */
60094 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60095 /* Returns true on success, false on denial. */
60096 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60097 +/* Returns true on success, false on denial. */
60098 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60099
60100 static inline int ptrace_reparented(struct task_struct *child)
60101 {
60102 diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
60103 --- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
60104 +++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60105 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
60106 u32 random32(void);
60107 void srandom32(u32 seed);
60108
60109 +static inline unsigned long pax_get_random_long(void)
60110 +{
60111 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60112 +}
60113 +
60114 #endif /* __KERNEL___ */
60115
60116 #endif /* _LINUX_RANDOM_H */
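
pax_get_random_long() above widens a 32-bit random source to the native word size by combining two draws when long is 64-bit, with the sizeof(long) > 4 test keeping the same expression valid on 32-bit builds. A userspace analog written for an LP64 build; rand32() is a stand-in for the kernel's random32():

#include <stdio.h>
#include <stdlib.h>

static unsigned int rand32(void)
{
        /* stand-in 32-bit source; the kernel uses random32() */
        return ((unsigned int)rand() << 16) ^ (unsigned int)rand();
}

static unsigned long get_random_long(void)
{
        /* same shape as the patch: add a second draw shifted into the
         * upper half only when long is wider than 32 bits */
        return rand32() + (sizeof(long) > 4 ? (unsigned long)rand32() << 32 : 0);
}

int main(void)
{
        srand(1);
        printf("%#lx\n", get_random_long());
        return 0;
}
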
60117 diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
60118 --- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60119 +++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60120 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60121 * Architecture-specific implementations of sys_reboot commands.
60122 */
60123
60124 -extern void machine_restart(char *cmd);
60125 -extern void machine_halt(void);
60126 -extern void machine_power_off(void);
60127 +extern void machine_restart(char *cmd) __noreturn;
60128 +extern void machine_halt(void) __noreturn;
60129 +extern void machine_power_off(void) __noreturn;
60130
60131 extern void machine_shutdown(void);
60132 struct pt_regs;
60133 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60134 */
60135
60136 extern void kernel_restart_prepare(char *cmd);
60137 -extern void kernel_restart(char *cmd);
60138 -extern void kernel_halt(void);
60139 -extern void kernel_power_off(void);
60140 +extern void kernel_restart(char *cmd) __noreturn;
60141 +extern void kernel_halt(void) __noreturn;
60142 +extern void kernel_power_off(void) __noreturn;
60143
60144 void ctrl_alt_del(void);
60145
60146 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60147 * Emergency restart, callable from an interrupt handler.
60148 */
60149
60150 -extern void emergency_restart(void);
60151 +extern void emergency_restart(void) __noreturn;
60152 #include <asm/emergency-restart.h>
60153
60154 #endif
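
Annotating machine_halt(), kernel_power_off() and friends with __noreturn above tells the compiler control never returns from those calls, which removes bogus fall-through warnings at call sites and lets dead code be discarded. A compact illustration using the underlying GCC attribute directly (die() and must_be_positive() are made-up names):

#include <stdio.h>
#include <stdlib.h>

static void die(const char *msg) __attribute__((noreturn));

static void die(const char *msg)
{
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);
}

static int must_be_positive(int x)
{
        if (x > 0)
                return x;
        die("non-positive value");
        /* without the noreturn attribute the compiler would warn that
         * control can reach the end of a non-void function here */
}

int main(void)
{
        printf("%d\n", must_be_positive(3));
        return 0;
}
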
60155 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
60156 --- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60157 +++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60158 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60159 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60160
60161 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60162 -#define get_generation(s) atomic_read (&fs_generation(s))
60163 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60164 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60165 #define __fs_changed(gen,s) (gen != get_generation (s))
60166 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60167 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60168 */
60169
60170 struct item_operations {
60171 - int (*bytes_number) (struct item_head * ih, int block_size);
60172 - void (*decrement_key) (struct cpu_key *);
60173 - int (*is_left_mergeable) (struct reiserfs_key * ih,
60174 + int (* const bytes_number) (struct item_head * ih, int block_size);
60175 + void (* const decrement_key) (struct cpu_key *);
60176 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
60177 unsigned long bsize);
60178 - void (*print_item) (struct item_head *, char *item);
60179 - void (*check_item) (struct item_head *, char *item);
60180 + void (* const print_item) (struct item_head *, char *item);
60181 + void (* const check_item) (struct item_head *, char *item);
60182
60183 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60184 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60185 int is_affected, int insert_size);
60186 - int (*check_left) (struct virtual_item * vi, int free,
60187 + int (* const check_left) (struct virtual_item * vi, int free,
60188 int start_skip, int end_skip);
60189 - int (*check_right) (struct virtual_item * vi, int free);
60190 - int (*part_size) (struct virtual_item * vi, int from, int to);
60191 - int (*unit_num) (struct virtual_item * vi);
60192 - void (*print_vi) (struct virtual_item * vi);
60193 + int (* const check_right) (struct virtual_item * vi, int free);
60194 + int (* const part_size) (struct virtual_item * vi, int from, int to);
60195 + int (* const unit_num) (struct virtual_item * vi);
60196 + void (* const print_vi) (struct virtual_item * vi);
60197 };
60198
60199 -extern struct item_operations *item_ops[TYPE_ANY + 1];
60200 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60201
60202 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60203 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60204 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
60205 --- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60206 +++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60207 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60208 /* Comment? -Hans */
60209 wait_queue_head_t s_wait;
60210 /* To be obsoleted soon by per buffer seals.. -Hans */
60211 - atomic_t s_generation_counter; // increased by one every time the
60212 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60213 // tree gets re-balanced
60214 unsigned long s_properties; /* File system properties. Currently holds
60215 on-disk FS format */
60216 diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
60217 --- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60218 +++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60219 @@ -159,7 +159,7 @@ struct rchan_callbacks
60220 * The callback should return 0 if successful, negative if not.
60221 */
60222 int (*remove_buf_file)(struct dentry *dentry);
60223 -};
60224 +} __no_const;
60225
60226 /*
60227 * CONFIG_RELAY kernel API, kernel/relay.c
60228 diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
60229 --- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60230 +++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
60231 @@ -101,6 +101,7 @@ struct bio;
60232 struct fs_struct;
60233 struct bts_context;
60234 struct perf_event_context;
60235 +struct linux_binprm;
60236
60237 /*
60238 * List of flags we want to share for kernel threads,
60239 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60240 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60241 asmlinkage void __schedule(void);
60242 asmlinkage void schedule(void);
60243 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60244 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60245
60246 struct nsproxy;
60247 struct user_namespace;
60248 @@ -371,9 +372,12 @@ struct user_namespace;
60249 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60250
60251 extern int sysctl_max_map_count;
60252 +extern unsigned long sysctl_heap_stack_gap;
60253
60254 #include <linux/aio.h>
60255
60256 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60257 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60258 extern unsigned long
60259 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60260 unsigned long, unsigned long);
60261 @@ -666,6 +670,16 @@ struct signal_struct {
60262 struct tty_audit_buf *tty_audit_buf;
60263 #endif
60264
60265 +#ifdef CONFIG_GRKERNSEC
60266 + u32 curr_ip;
60267 + u32 saved_ip;
60268 + u32 gr_saddr;
60269 + u32 gr_daddr;
60270 + u16 gr_sport;
60271 + u16 gr_dport;
60272 + u8 used_accept:1;
60273 +#endif
60274 +
60275 int oom_adj; /* OOM kill score adjustment (bit shift) */
60276 };
60277
60278 @@ -723,6 +737,11 @@ struct user_struct {
60279 struct key *session_keyring; /* UID's default session keyring */
60280 #endif
60281
60282 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60283 + unsigned int banned;
60284 + unsigned long ban_expires;
60285 +#endif
60286 +
60287 /* Hash table maintenance information */
60288 struct hlist_node uidhash_node;
60289 uid_t uid;
60290 @@ -1328,8 +1347,8 @@ struct task_struct {
60291 struct list_head thread_group;
60292
60293 struct completion *vfork_done; /* for vfork() */
60294 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60295 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60296 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60297 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60298
60299 cputime_t utime, stime, utimescaled, stimescaled;
60300 cputime_t gtime;
60301 @@ -1343,16 +1362,6 @@ struct task_struct {
60302 struct task_cputime cputime_expires;
60303 struct list_head cpu_timers[3];
60304
60305 -/* process credentials */
60306 - const struct cred *real_cred; /* objective and real subjective task
60307 - * credentials (COW) */
60308 - const struct cred *cred; /* effective (overridable) subjective task
60309 - * credentials (COW) */
60310 - struct mutex cred_guard_mutex; /* guard against foreign influences on
60311 - * credential calculations
60312 - * (notably. ptrace) */
60313 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60314 -
60315 char comm[TASK_COMM_LEN]; /* executable name excluding path
60316 - access with [gs]et_task_comm (which lock
60317 it with task_lock())
60318 @@ -1369,6 +1378,10 @@ struct task_struct {
60319 #endif
60320 /* CPU-specific state of this task */
60321 struct thread_struct thread;
60322 +/* thread_info moved to task_struct */
60323 +#ifdef CONFIG_X86
60324 + struct thread_info tinfo;
60325 +#endif
60326 /* filesystem information */
60327 struct fs_struct *fs;
60328 /* open file information */
60329 @@ -1436,6 +1449,15 @@ struct task_struct {
60330 int hardirq_context;
60331 int softirq_context;
60332 #endif
60333 +
60334 +/* process credentials */
60335 + const struct cred *real_cred; /* objective and real subjective task
60336 + * credentials (COW) */
60337 + struct mutex cred_guard_mutex; /* guard against foreign influences on
60338 + * credential calculations
60339 + * (notably. ptrace) */
60340 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60341 +
60342 #ifdef CONFIG_LOCKDEP
60343 # define MAX_LOCK_DEPTH 48UL
60344 u64 curr_chain_key;
60345 @@ -1456,6 +1478,9 @@ struct task_struct {
60346
60347 struct backing_dev_info *backing_dev_info;
60348
60349 + const struct cred *cred; /* effective (overridable) subjective task
60350 + * credentials (COW) */
60351 +
60352 struct io_context *io_context;
60353
60354 unsigned long ptrace_message;
60355 @@ -1519,6 +1544,21 @@ struct task_struct {
60356 unsigned long default_timer_slack_ns;
60357
60358 struct list_head *scm_work_list;
60359 +
60360 +#ifdef CONFIG_GRKERNSEC
60361 + /* grsecurity */
60362 + struct dentry *gr_chroot_dentry;
60363 + struct acl_subject_label *acl;
60364 + struct acl_role_label *role;
60365 + struct file *exec_file;
60366 + u16 acl_role_id;
60367 + /* is this the task that authenticated to the special role */
60368 + u8 acl_sp_role;
60369 + u8 is_writable;
60370 + u8 brute;
60371 + u8 gr_is_chrooted;
60372 +#endif
60373 +
60374 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60375 /* Index of current stored adress in ret_stack */
60376 int curr_ret_stack;
60377 @@ -1542,6 +1582,57 @@ struct task_struct {
60378 #endif /* CONFIG_TRACING */
60379 };
60380
60381 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60382 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60383 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60384 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60385 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60386 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60387 +
60388 +#ifdef CONFIG_PAX_SOFTMODE
60389 +extern int pax_softmode;
60390 +#endif
60391 +
60392 +extern int pax_check_flags(unsigned long *);
60393 +
60394 +/* if tsk != current then task_lock must be held on it */
60395 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60396 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60397 +{
60398 + if (likely(tsk->mm))
60399 + return tsk->mm->pax_flags;
60400 + else
60401 + return 0UL;
60402 +}
60403 +
60404 +/* if tsk != current then task_lock must be held on it */
60405 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60406 +{
60407 + if (likely(tsk->mm)) {
60408 + tsk->mm->pax_flags = flags;
60409 + return 0;
60410 + }
60411 + return -EINVAL;
60412 +}
60413 +#endif
60414 +
60415 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60416 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60417 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60418 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60419 +#endif
60420 +
60421 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60422 +extern void pax_report_insns(void *pc, void *sp);
60423 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60424 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60425 +
60426 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60427 +extern void pax_track_stack(void);
60428 +#else
60429 +static inline void pax_track_stack(void) {}
60430 +#endif
60431 +
60432 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60433 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60434
60435 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60436 #define PF_DUMPCORE 0x00000200 /* dumped core */
60437 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60438 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60439 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60440 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60441 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60442 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60443 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60444 @@ -1978,7 +2069,9 @@ void yield(void);
60445 extern struct exec_domain default_exec_domain;
60446
60447 union thread_union {
60448 +#ifndef CONFIG_X86
60449 struct thread_info thread_info;
60450 +#endif
60451 unsigned long stack[THREAD_SIZE/sizeof(long)];
60452 };
60453
60454 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60455 */
60456
60457 extern struct task_struct *find_task_by_vpid(pid_t nr);
60458 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60459 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60460 struct pid_namespace *ns);
60461
60462 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60463 extern void exit_itimers(struct signal_struct *);
60464 extern void flush_itimer_signals(void);
60465
60466 -extern NORET_TYPE void do_group_exit(int);
60467 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60468
60469 extern void daemonize(const char *, ...);
60470 extern int allow_signal(int);
60471 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60472
60473 #endif
60474
60475 -static inline int object_is_on_stack(void *obj)
60476 +static inline int object_starts_on_stack(void *obj)
60477 {
60478 - void *stack = task_stack_page(current);
60479 + const void *stack = task_stack_page(current);
60480
60481 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60482 }
60483
60484 +#ifdef CONFIG_PAX_USERCOPY
60485 +extern int object_is_on_stack(const void *obj, unsigned long len);
60486 +#endif
60487 +
60488 extern void thread_info_cache_init(void);
60489
60490 #ifdef CONFIG_DEBUG_STACK_USAGE
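
check_heap_stack_gap() and sysctl_heap_stack_gap, declared above, test whether a candidate [addr, addr+len) range leaves enough room before the following mapping, with extra slack when that mapping is a growing-down stack. The sketch below shows the general shape such a check takes; it is an illustration under assumed values (64 KiB gap, simplified vma struct), not the patch's actual mm/ implementation:

#include <stdio.h>
#include <stdbool.h>

#define VM_GROWSDOWN 0x0100UL

struct vma { unsigned long vm_start, vm_end, vm_flags; };

static unsigned long heap_stack_gap = 64UL << 10;       /* assumed 64 KiB gap */

static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
        if (!next)                              /* nothing above the range */
                return true;
        if (addr + len <= next->vm_start) {
                /* demand extra room when the next mapping is a growing-down stack */
                if (!(next->vm_flags & VM_GROWSDOWN))
                        return true;
                return addr + len + heap_stack_gap <= next->vm_start;
        }
        return false;
}

int main(void)
{
        struct vma stack = { 0xbf000000UL, 0xbf100000UL, VM_GROWSDOWN };

        /* ends flush against the stack vma: rejected (prints 0) */
        printf("%d\n", gap_ok(&stack, 0xbeff0000UL, 0x10000));
        return 0;
}
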
60491 diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60492 --- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60493 +++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60494 @@ -42,7 +42,8 @@ struct screen_info {
60495 __u16 pages; /* 0x32 */
60496 __u16 vesa_attributes; /* 0x34 */
60497 __u32 capabilities; /* 0x36 */
60498 - __u8 _reserved[6]; /* 0x3a */
60499 + __u16 vesapm_size; /* 0x3a */
60500 + __u8 _reserved[4]; /* 0x3c */
60501 } __attribute__((packed));
60502
60503 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60504 diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60505 --- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60506 +++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60507 @@ -34,6 +34,7 @@
60508 #include <linux/key.h>
60509 #include <linux/xfrm.h>
60510 #include <linux/gfp.h>
60511 +#include <linux/grsecurity.h>
60512 #include <net/flow.h>
60513
60514 /* Maximum number of letters for an LSM name string */
60515 diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60516 --- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60517 +++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60518 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60519 pid_t shm_cprid;
60520 pid_t shm_lprid;
60521 struct user_struct *mlock_user;
60522 +#ifdef CONFIG_GRKERNSEC
60523 + time_t shm_createtime;
60524 + pid_t shm_lapid;
60525 +#endif
60526 };
60527
60528 /* shm_mode upper byte flags */
60529 diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60530 --- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60531 +++ linux-2.6.32.45/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
60532 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
60533 */
60534 static inline int skb_queue_empty(const struct sk_buff_head *list)
60535 {
60536 - return list->next == (struct sk_buff *)list;
60537 + return list->next == (const struct sk_buff *)list;
60538 }
60539
60540 /**
60541 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
60542 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60543 const struct sk_buff *skb)
60544 {
60545 - return (skb->next == (struct sk_buff *) list);
60546 + return (skb->next == (const struct sk_buff *) list);
60547 }
60548
60549 /**
60550 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
60551 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60552 const struct sk_buff *skb)
60553 {
60554 - return (skb->prev == (struct sk_buff *) list);
60555 + return (skb->prev == (const struct sk_buff *) list);
60556 }
60557
60558 /**
60559 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
60560 * headroom, you should not reduce this.
60561 */
60562 #ifndef NET_SKB_PAD
60563 -#define NET_SKB_PAD 32
60564 +#define NET_SKB_PAD (_AC(32,UL))
60565 #endif
60566
60567 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60568 diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60569 --- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60570 +++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60571 @@ -69,10 +69,10 @@ struct kmem_cache {
60572 unsigned long node_allocs;
60573 unsigned long node_frees;
60574 unsigned long node_overflow;
60575 - atomic_t allochit;
60576 - atomic_t allocmiss;
60577 - atomic_t freehit;
60578 - atomic_t freemiss;
60579 + atomic_unchecked_t allochit;
60580 + atomic_unchecked_t allocmiss;
60581 + atomic_unchecked_t freehit;
60582 + atomic_unchecked_t freemiss;
60583
60584 /*
60585 * If debugging is enabled, then the allocator can add additional
60586 diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60587 --- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60588 +++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60589 @@ -11,12 +11,20 @@
60590
60591 #include <linux/gfp.h>
60592 #include <linux/types.h>
60593 +#include <linux/err.h>
60594
60595 /*
60596 * Flags to pass to kmem_cache_create().
60597 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60598 */
60599 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60600 +
60601 +#ifdef CONFIG_PAX_USERCOPY
60602 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60603 +#else
60604 +#define SLAB_USERCOPY 0x00000000UL
60605 +#endif
60606 +
60607 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60608 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60609 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60610 @@ -82,10 +90,13 @@
60611 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60612 * Both make kfree a no-op.
60613 */
60614 -#define ZERO_SIZE_PTR ((void *)16)
60615 +#define ZERO_SIZE_PTR \
60616 +({ \
60617 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60618 + (void *)(-MAX_ERRNO-1L); \
60619 +})
60620
60621 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60622 - (unsigned long)ZERO_SIZE_PTR)
60623 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60624
60625 /*
60626 * struct kmem_cache related prototypes
60627 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60628 void kfree(const void *);
60629 void kzfree(const void *);
60630 size_t ksize(const void *);
60631 +void check_object_size(const void *ptr, unsigned long n, bool to);
60632
60633 /*
60634 * Allocator specific definitions. These are mainly used to establish optimized
60635 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60636
60637 void __init kmem_cache_init_late(void);
60638
60639 +#define kmalloc(x, y) \
60640 +({ \
60641 + void *___retval; \
60642 + intoverflow_t ___x = (intoverflow_t)x; \
60643 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60644 + ___retval = NULL; \
60645 + else \
60646 + ___retval = kmalloc((size_t)___x, (y)); \
60647 + ___retval; \
60648 +})
60649 +
60650 +#define kmalloc_node(x, y, z) \
60651 +({ \
60652 + void *___retval; \
60653 + intoverflow_t ___x = (intoverflow_t)x; \
60654 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60655 + ___retval = NULL; \
60656 + else \
60657 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60658 + ___retval; \
60659 +})
60660 +
60661 +#define kzalloc(x, y) \
60662 +({ \
60663 + void *___retval; \
60664 + intoverflow_t ___x = (intoverflow_t)x; \
60665 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60666 + ___retval = NULL; \
60667 + else \
60668 + ___retval = kzalloc((size_t)___x, (y)); \
60669 + ___retval; \
60670 +})
60671 +
60672 #endif /* _LINUX_SLAB_H */
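
The kmalloc()/kzalloc() wrapper macros above widen the requested size to intoverflow_t and refuse anything above ULONG_MAX, so a size computation that overflowed in the caller yields NULL instead of a short allocation. A userspace sketch of the same guard; checked_malloc() and the unsigned long long widening are illustrative choices, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* widen the size to the largest unsigned type before the range check */
#define checked_malloc(n)                                               \
({                                                                      \
        void *__ret;                                                    \
        unsigned long long __n = (unsigned long long)(n);               \
        if (__n > (size_t)-1) {                                         \
                fprintf(stderr, "allocation size overflow\n");          \
                __ret = NULL;                                           \
        } else {                                                        \
                __ret = malloc((size_t)__n);                            \
        }                                                               \
        __ret;                                                          \
})

int main(void)
{
        void *p = checked_malloc(16);

        printf("%s\n", p ? "ok" : "refused");
        free(p);
        return 0;
}
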
60673 diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60674 --- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60675 +++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60676 @@ -86,7 +86,7 @@ struct kmem_cache {
60677 struct kmem_cache_order_objects max;
60678 struct kmem_cache_order_objects min;
60679 gfp_t allocflags; /* gfp flags to use on each alloc */
60680 - int refcount; /* Refcount for slab cache destroy */
60681 + atomic_t refcount; /* Refcount for slab cache destroy */
60682 void (*ctor)(void *);
60683 int inuse; /* Offset to metadata */
60684 int align; /* Alignment */
60685 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60686 #endif
60687
60688 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60689 -void *__kmalloc(size_t size, gfp_t flags);
60690 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60691
60692 #ifdef CONFIG_KMEMTRACE
60693 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60694 diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60695 --- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60696 +++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60697 @@ -61,7 +61,7 @@ struct sonet_stats {
60698 #include <asm/atomic.h>
60699
60700 struct k_sonet_stats {
60701 -#define __HANDLE_ITEM(i) atomic_t i
60702 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60703 __SONET_ITEMS
60704 #undef __HANDLE_ITEM
60705 };
60706 diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60707 --- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60708 +++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60709 @@ -125,7 +125,7 @@ struct cache_detail {
60710 */
60711 struct cache_req {
60712 struct cache_deferred_req *(*defer)(struct cache_req *req);
60713 -};
60714 +} __no_const;
60715 /* this must be embedded in a deferred_request that is being
60716 * delayed awaiting cache-fill
60717 */
60718 diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60719 --- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60720 +++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60721 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60722 {
60723 switch (sap->sa_family) {
60724 case AF_INET:
60725 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60726 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60727 case AF_INET6:
60728 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60729 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60730 }
60731 return 0;
60732 }
60733 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60734 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60735 const struct sockaddr *src)
60736 {
60737 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60738 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60739 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60740
60741 dsin->sin_family = ssin->sin_family;
60742 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60743 if (sa->sa_family != AF_INET6)
60744 return 0;
60745
60746 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60747 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60748 }
60749
60750 #endif /* __KERNEL__ */
60751 diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60752 --- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60753 +++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60754 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60755 extern unsigned int svcrdma_max_requests;
60756 extern unsigned int svcrdma_max_req_size;
60757
60758 -extern atomic_t rdma_stat_recv;
60759 -extern atomic_t rdma_stat_read;
60760 -extern atomic_t rdma_stat_write;
60761 -extern atomic_t rdma_stat_sq_starve;
60762 -extern atomic_t rdma_stat_rq_starve;
60763 -extern atomic_t rdma_stat_rq_poll;
60764 -extern atomic_t rdma_stat_rq_prod;
60765 -extern atomic_t rdma_stat_sq_poll;
60766 -extern atomic_t rdma_stat_sq_prod;
60767 +extern atomic_unchecked_t rdma_stat_recv;
60768 +extern atomic_unchecked_t rdma_stat_read;
60769 +extern atomic_unchecked_t rdma_stat_write;
60770 +extern atomic_unchecked_t rdma_stat_sq_starve;
60771 +extern atomic_unchecked_t rdma_stat_rq_starve;
60772 +extern atomic_unchecked_t rdma_stat_rq_poll;
60773 +extern atomic_unchecked_t rdma_stat_rq_prod;
60774 +extern atomic_unchecked_t rdma_stat_sq_poll;
60775 +extern atomic_unchecked_t rdma_stat_sq_prod;
60776
60777 #define RPCRDMA_VERSION 1
60778
60779 diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60780 --- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60781 +++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60782 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60783 * which require special recovery actions in that situation.
60784 */
60785 struct platform_suspend_ops {
60786 - int (*valid)(suspend_state_t state);
60787 - int (*begin)(suspend_state_t state);
60788 - int (*prepare)(void);
60789 - int (*prepare_late)(void);
60790 - int (*enter)(suspend_state_t state);
60791 - void (*wake)(void);
60792 - void (*finish)(void);
60793 - void (*end)(void);
60794 - void (*recover)(void);
60795 + int (* const valid)(suspend_state_t state);
60796 + int (* const begin)(suspend_state_t state);
60797 + int (* const prepare)(void);
60798 + int (* const prepare_late)(void);
60799 + int (* const enter)(suspend_state_t state);
60800 + void (* const wake)(void);
60801 + void (* const finish)(void);
60802 + void (* const end)(void);
60803 + void (* const recover)(void);
60804 };
60805
60806 #ifdef CONFIG_SUSPEND
60807 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
60808 * suspend_set_ops - set platform dependent suspend operations
60809 * @ops: The new suspend operations to set.
60810 */
60811 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
60812 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60813 extern int suspend_valid_only_mem(suspend_state_t state);
60814
60815 /**
60816 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60817 #else /* !CONFIG_SUSPEND */
60818 #define suspend_valid_only_mem NULL
60819
60820 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60821 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60822 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60823 #endif /* !CONFIG_SUSPEND */
60824
60825 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60826 * platforms which require special recovery actions in that situation.
60827 */
60828 struct platform_hibernation_ops {
60829 - int (*begin)(void);
60830 - void (*end)(void);
60831 - int (*pre_snapshot)(void);
60832 - void (*finish)(void);
60833 - int (*prepare)(void);
60834 - int (*enter)(void);
60835 - void (*leave)(void);
60836 - int (*pre_restore)(void);
60837 - void (*restore_cleanup)(void);
60838 - void (*recover)(void);
60839 + int (* const begin)(void);
60840 + void (* const end)(void);
60841 + int (* const pre_snapshot)(void);
60842 + void (* const finish)(void);
60843 + int (* const prepare)(void);
60844 + int (* const enter)(void);
60845 + void (* const leave)(void);
60846 + int (* const pre_restore)(void);
60847 + void (* const restore_cleanup)(void);
60848 + void (* const recover)(void);
60849 };
60850
60851 #ifdef CONFIG_HIBERNATION
60852 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60853 extern void swsusp_unset_page_free(struct page *);
60854 extern unsigned long get_safe_page(gfp_t gfp_mask);
60855
60856 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60857 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60858 extern int hibernate(void);
60859 extern bool system_entering_hibernation(void);
60860 #else /* CONFIG_HIBERNATION */
60861 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60862 static inline void swsusp_set_page_free(struct page *p) {}
60863 static inline void swsusp_unset_page_free(struct page *p) {}
60864
60865 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60866 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60867 static inline int hibernate(void) { return -ENOSYS; }
60868 static inline bool system_entering_hibernation(void) { return false; }
60869 #endif /* CONFIG_HIBERNATION */
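
Making the platform_suspend_ops and platform_hibernation_ops members const above (and passing const pointers to the set_ops helpers) lets whole function-pointer tables be placed in read-only data, which is typically why such ops tables get constified. A userspace miniature of the pattern (struct ops and the callbacks are invented for the example):

#include <stdio.h>

struct ops {
        int  (* const prepare)(void);
        void (* const finish)(void);
};

static int  my_prepare(void) { puts("prepare"); return 0; }
static void my_finish(void)  { puts("finish"); }

/* const object with const members: the table can live in .rodata and the
 * pointers cannot be rewritten after initialization */
static const struct ops platform_ops = {
        .prepare = my_prepare,
        .finish  = my_finish,
};

static void run(const struct ops *ops)
{
        if (ops->prepare() == 0)
                ops->finish();
}

int main(void)
{
        run(&platform_ops);
        return 0;
}
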
60870 diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60871 --- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60872 +++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60873 @@ -164,7 +164,11 @@ enum
60874 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60875 };
60876
60877 -
60878 +#ifdef CONFIG_PAX_SOFTMODE
60879 +enum {
60880 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60881 +};
60882 +#endif
60883
60884 /* CTL_VM names: */
60885 enum
60886 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60887
60888 extern int proc_dostring(struct ctl_table *, int,
60889 void __user *, size_t *, loff_t *);
60890 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60891 + void __user *, size_t *, loff_t *);
60892 extern int proc_dointvec(struct ctl_table *, int,
60893 void __user *, size_t *, loff_t *);
60894 extern int proc_dointvec_minmax(struct ctl_table *, int,
60895 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60896
60897 extern ctl_handler sysctl_data;
60898 extern ctl_handler sysctl_string;
60899 +extern ctl_handler sysctl_string_modpriv;
60900 extern ctl_handler sysctl_intvec;
60901 extern ctl_handler sysctl_jiffies;
60902 extern ctl_handler sysctl_ms_jiffies;
60903 diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60904 --- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60905 +++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60906 @@ -75,8 +75,8 @@ struct bin_attribute {
60907 };
60908
60909 struct sysfs_ops {
60910 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
60911 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60912 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60913 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60914 };
60915
60916 struct sysfs_dirent;
60917 diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60918 --- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60919 +++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60920 @@ -23,7 +23,7 @@ struct restart_block {
60921 };
60922 /* For futex_wait and futex_wait_requeue_pi */
60923 struct {
60924 - u32 *uaddr;
60925 + u32 __user *uaddr;
60926 u32 val;
60927 u32 flags;
60928 u32 bitset;
60929 diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60930 --- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60931 +++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60932 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60933 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60934 extern void tty_ldisc_enable(struct tty_struct *tty);
60935
60936 -
60937 /* n_tty.c */
60938 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60939
60940 diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60941 --- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60942 +++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60943 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60944
60945 struct module *owner;
60946
60947 - int refcount;
60948 + atomic_t refcount;
60949 };
60950
60951 struct tty_ldisc {
60952 diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60953 --- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60954 +++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60955 @@ -191,10 +191,26 @@ typedef struct {
60956 volatile int counter;
60957 } atomic_t;
60958
60959 +#ifdef CONFIG_PAX_REFCOUNT
60960 +typedef struct {
60961 + volatile int counter;
60962 +} atomic_unchecked_t;
60963 +#else
60964 +typedef atomic_t atomic_unchecked_t;
60965 +#endif
60966 +
60967 #ifdef CONFIG_64BIT
60968 typedef struct {
60969 volatile long counter;
60970 } atomic64_t;
60971 +
60972 +#ifdef CONFIG_PAX_REFCOUNT
60973 +typedef struct {
60974 + volatile long counter;
60975 +} atomic64_unchecked_t;
60976 +#else
60977 +typedef atomic64_t atomic64_unchecked_t;
60978 +#endif
60979 #endif
60980
60981 struct ustat {
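
The atomic_unchecked_t/atomic64_unchecked_t pair introduced in this types.h hunk is the opt-out side of PAX_REFCOUNT: ordinary atomic_t operations gain overflow detection, while counters that are allowed to wrap (statistics, generation numbers, IDs) are switched to the *_unchecked types throughout the rest of the patch and keep the old semantics. The following is a minimal, single-threaded userspace model of that split, not the PaX implementation itself, which performs the check inside the arch-specific atomic primitives.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;            /* checked   */
typedef struct { volatile int counter; } atomic_unchecked_t;  /* may wrap  */

static void atomic_inc(atomic_t *v)
{
        int sum;

        /* model of the REFCOUNT check: wrapping past INT_MAX is an error */
        if (__builtin_add_overflow(v->counter, 1, &sum)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        v->counter = sum;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* two's-complement wrap, mirroring what the hardware atomic does */
        v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
        atomic_unchecked_t stat = { INT_MAX };
        atomic_inc_unchecked(&stat);    /* wraps silently: fine for stats  */
        printf("unchecked counter wrapped to %d\n", stat.counter);

        atomic_t ref = { INT_MAX };
        atomic_inc(&ref);               /* aborts here: overflow is fatal  */
        return 0;
}
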
60982 diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60983 --- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60984 +++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60985 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60986 long ret; \
60987 mm_segment_t old_fs = get_fs(); \
60988 \
60989 - set_fs(KERNEL_DS); \
60990 pagefault_disable(); \
60991 + set_fs(KERNEL_DS); \
60992 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60993 - pagefault_enable(); \
60994 set_fs(old_fs); \
60995 + pagefault_enable(); \
60996 ret; \
60997 })
60998
60999 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
61000 * Safely read from address @src to the buffer at @dst. If a kernel fault
61001 * happens, handle that and return -EFAULT.
61002 */
61003 -extern long probe_kernel_read(void *dst, void *src, size_t size);
61004 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
61005
61006 /*
61007 * probe_kernel_write(): safely attempt to write to a location
61008 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
61009 * Safely write to address @dst from the buffer at @src. If a kernel fault
61010 * happens, handle that and return -EFAULT.
61011 */
61012 -extern long probe_kernel_write(void *dst, void *src, size_t size);
61013 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
61014
61015 #endif /* __LINUX_UACCESS_H__ */
61016 diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
61017 --- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
61018 +++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
61019 @@ -6,32 +6,32 @@
61020
61021 static inline u16 get_unaligned_le16(const void *p)
61022 {
61023 - return le16_to_cpup((__le16 *)p);
61024 + return le16_to_cpup((const __le16 *)p);
61025 }
61026
61027 static inline u32 get_unaligned_le32(const void *p)
61028 {
61029 - return le32_to_cpup((__le32 *)p);
61030 + return le32_to_cpup((const __le32 *)p);
61031 }
61032
61033 static inline u64 get_unaligned_le64(const void *p)
61034 {
61035 - return le64_to_cpup((__le64 *)p);
61036 + return le64_to_cpup((const __le64 *)p);
61037 }
61038
61039 static inline u16 get_unaligned_be16(const void *p)
61040 {
61041 - return be16_to_cpup((__be16 *)p);
61042 + return be16_to_cpup((const __be16 *)p);
61043 }
61044
61045 static inline u32 get_unaligned_be32(const void *p)
61046 {
61047 - return be32_to_cpup((__be32 *)p);
61048 + return be32_to_cpup((const __be32 *)p);
61049 }
61050
61051 static inline u64 get_unaligned_be64(const void *p)
61052 {
61053 - return be64_to_cpup((__be64 *)p);
61054 + return be64_to_cpup((const __be64 *)p);
61055 }
61056
61057 static inline void put_unaligned_le16(u16 val, void *p)
61058 diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
61059 --- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
61060 +++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
61061 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61062 #define VM_MAP 0x00000004 /* vmap()ed pages */
61063 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61064 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61065 +
61066 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61067 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61068 +#endif
61069 +
61070 /* bits [20..32] reserved for arch specific ioremap internals */
61071
61072 /*
61073 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
61074
61075 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
61076
61077 +#define vmalloc(x) \
61078 +({ \
61079 + void *___retval; \
61080 + intoverflow_t ___x = (intoverflow_t)x; \
61081 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61082 + ___retval = NULL; \
61083 + else \
61084 + ___retval = vmalloc((unsigned long)___x); \
61085 + ___retval; \
61086 +})
61087 +
61088 +#define __vmalloc(x, y, z) \
61089 +({ \
61090 + void *___retval; \
61091 + intoverflow_t ___x = (intoverflow_t)x; \
61092 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61093 + ___retval = NULL; \
61094 + else \
61095 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61096 + ___retval; \
61097 +})
61098 +
61099 +#define vmalloc_user(x) \
61100 +({ \
61101 + void *___retval; \
61102 + intoverflow_t ___x = (intoverflow_t)x; \
61103 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61104 + ___retval = NULL; \
61105 + else \
61106 + ___retval = vmalloc_user((unsigned long)___x); \
61107 + ___retval; \
61108 +})
61109 +
61110 +#define vmalloc_exec(x) \
61111 +({ \
61112 + void *___retval; \
61113 + intoverflow_t ___x = (intoverflow_t)x; \
61114 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61115 + ___retval = NULL; \
61116 + else \
61117 + ___retval = vmalloc_exec((unsigned long)___x); \
61118 + ___retval; \
61119 +})
61120 +
61121 +#define vmalloc_node(x, y) \
61122 +({ \
61123 + void *___retval; \
61124 + intoverflow_t ___x = (intoverflow_t)x; \
61125 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61126 + ___retval = NULL; \
61127 + else \
61128 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61129 + ___retval; \
61130 +})
61131 +
61132 +#define vmalloc_32(x) \
61133 +({ \
61134 + void *___retval; \
61135 + intoverflow_t ___x = (intoverflow_t)x; \
61136 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61137 + ___retval = NULL; \
61138 + else \
61139 + ___retval = vmalloc_32((unsigned long)___x); \
61140 + ___retval; \
61141 +})
61142 +
61143 +#define vmalloc_32_user(x) \
61144 +({ \
61145 + void *___retval; \
61146 + intoverflow_t ___x = (intoverflow_t)x; \
61147 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61148 + ___retval = NULL; \
61149 + else \
61150 + ___retval = vmalloc_32_user((unsigned long)___x);\
61151 + ___retval; \
61152 +})
61153 +
61154 #endif /* _LINUX_VMALLOC_H */
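
Each wrapper above evaluates the requested size in the wider intoverflow_t type and refuses the allocation, with a WARN, if the value no longer fits in unsigned long, so a size expression that overflowed during multiplication fails loudly instead of silently allocating a short buffer. Below is a compilable userspace sketch of the same pattern; the types are deliberately narrowed so the rejection path triggers on any host, and xmalloc with its UINT_MAX limit is an invented stand-in rather than a kernel interface.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long intoverflow_t;       /* wider than the size type below */

static void *xmalloc(unsigned int size)          /* stand-in allocator */
{
        return malloc(size);
}

/* GNU C statement expression, same shape as the vmalloc() wrapper above; the
 * macro may mention xmalloc without recursing because self-references are not
 * re-expanded by the preprocessor. */
#define xmalloc(x)                                                      \
({                                                                      \
        void *___retval;                                                \
        intoverflow_t ___x = (intoverflow_t)(x);                        \
        if (___x > UINT_MAX) {                                          \
                fprintf(stderr, "xmalloc size overflow\n");             \
                ___retval = NULL;                                       \
        } else                                                          \
                ___retval = xmalloc((unsigned int)___x);                \
        ___retval;                                                      \
})

int main(void)
{
        void *ok  = xmalloc(64);                              /* normal path */
        void *bad = xmalloc((intoverflow_t)UINT_MAX + 16);    /* rejected    */
        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}
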
61155 diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
61156 --- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61157 +++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61158 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61159 /*
61160 * Zone based page accounting with per cpu differentials.
61161 */
61162 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61163 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61164
61165 static inline void zone_page_state_add(long x, struct zone *zone,
61166 enum zone_stat_item item)
61167 {
61168 - atomic_long_add(x, &zone->vm_stat[item]);
61169 - atomic_long_add(x, &vm_stat[item]);
61170 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61171 + atomic_long_add_unchecked(x, &vm_stat[item]);
61172 }
61173
61174 static inline unsigned long global_page_state(enum zone_stat_item item)
61175 {
61176 - long x = atomic_long_read(&vm_stat[item]);
61177 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61178 #ifdef CONFIG_SMP
61179 if (x < 0)
61180 x = 0;
61181 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
61182 static inline unsigned long zone_page_state(struct zone *zone,
61183 enum zone_stat_item item)
61184 {
61185 - long x = atomic_long_read(&zone->vm_stat[item]);
61186 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61187 #ifdef CONFIG_SMP
61188 if (x < 0)
61189 x = 0;
61190 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61191 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61192 enum zone_stat_item item)
61193 {
61194 - long x = atomic_long_read(&zone->vm_stat[item]);
61195 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61196
61197 #ifdef CONFIG_SMP
61198 int cpu;
61199 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61200
61201 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61202 {
61203 - atomic_long_inc(&zone->vm_stat[item]);
61204 - atomic_long_inc(&vm_stat[item]);
61205 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61206 + atomic_long_inc_unchecked(&vm_stat[item]);
61207 }
61208
61209 static inline void __inc_zone_page_state(struct page *page,
61210 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61211
61212 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61213 {
61214 - atomic_long_dec(&zone->vm_stat[item]);
61215 - atomic_long_dec(&vm_stat[item]);
61216 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61217 + atomic_long_dec_unchecked(&vm_stat[item]);
61218 }
61219
61220 static inline void __dec_zone_page_state(struct page *page,
61221 diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
61222 --- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61223 +++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61224 @@ -34,7 +34,7 @@ struct v4l2_device;
61225 #define V4L2_FL_UNREGISTERED (0)
61226
61227 struct v4l2_file_operations {
61228 - struct module *owner;
61229 + struct module * const owner;
61230 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61231 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61232 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61233 diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
61234 --- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61235 +++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61236 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61237 this function returns 0. If the name ends with a digit (e.g. cx18),
61238 then the name will be set to cx18-0 since cx180 looks really odd. */
61239 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61240 - atomic_t *instance);
61241 + atomic_unchecked_t *instance);
61242
61243 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61244 Since the parent disappears this ensures that v4l2_dev doesn't have an
61245 diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
61246 --- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61247 +++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61248 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61249 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61250 u8 dir, flow_resolve_t resolver);
61251 extern void flow_cache_flush(void);
61252 -extern atomic_t flow_cache_genid;
61253 +extern atomic_unchecked_t flow_cache_genid;
61254
61255 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61256 {
61257 diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61258 --- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61259 +++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61260 @@ -24,7 +24,7 @@ struct inet_peer
61261 __u32 dtime; /* the time of last use of not
61262 * referenced entries */
61263 atomic_t refcnt;
61264 - atomic_t rid; /* Frag reception counter */
61265 + atomic_unchecked_t rid; /* Frag reception counter */
61266 __u32 tcp_ts;
61267 unsigned long tcp_ts_stamp;
61268 };
61269 diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61270 --- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61271 +++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61272 @@ -365,7 +365,7 @@ struct ip_vs_conn {
61273 struct ip_vs_conn *control; /* Master control connection */
61274 atomic_t n_control; /* Number of controlled ones */
61275 struct ip_vs_dest *dest; /* real server */
61276 - atomic_t in_pkts; /* incoming packet counter */
61277 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61278
61279 /* packet transmitter for different forwarding methods. If it
61280 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61281 @@ -466,7 +466,7 @@ struct ip_vs_dest {
61282 union nf_inet_addr addr; /* IP address of the server */
61283 __be16 port; /* port number of the server */
61284 volatile unsigned flags; /* dest status flags */
61285 - atomic_t conn_flags; /* flags to copy to conn */
61286 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61287 atomic_t weight; /* server weight */
61288
61289 atomic_t refcnt; /* reference counter */
61290 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61291 --- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61292 +++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61293 @@ -51,7 +51,7 @@ typedef struct {
61294 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61295 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61296 struct ircomm_info *);
61297 -} call_t;
61298 +} __no_const call_t;
61299
61300 struct ircomm_cb {
61301 irda_queue_t queue;
61302 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61303 --- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61304 +++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61305 @@ -35,6 +35,7 @@
61306 #include <linux/termios.h>
61307 #include <linux/timer.h>
61308 #include <linux/tty.h> /* struct tty_struct */
61309 +#include <asm/local.h>
61310
61311 #include <net/irda/irias_object.h>
61312 #include <net/irda/ircomm_core.h>
61313 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61314 unsigned short close_delay;
61315 unsigned short closing_wait; /* time to wait before closing */
61316
61317 - int open_count;
61318 - int blocked_open; /* # of blocked opens */
61319 + local_t open_count;
61320 + local_t blocked_open; /* # of blocked opens */
61321
61322 /* Protect concurent access to :
61323 * o self->open_count
61324 diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61325 --- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61326 +++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61327 @@ -87,7 +87,7 @@ struct iucv_sock {
61328 struct iucv_sock_list {
61329 struct hlist_head head;
61330 rwlock_t lock;
61331 - atomic_t autobind_name;
61332 + atomic_unchecked_t autobind_name;
61333 };
61334
61335 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61336 diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61337 --- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61338 +++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61339 @@ -95,7 +95,7 @@ struct lapb_cb {
61340 struct sk_buff_head write_queue;
61341 struct sk_buff_head ack_queue;
61342 unsigned char window;
61343 - struct lapb_register_struct callbacks;
61344 + struct lapb_register_struct *callbacks;
61345
61346 /* FRMR control information */
61347 struct lapb_frame frmr_data;
61348 diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61349 --- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61350 +++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61351 @@ -125,12 +125,12 @@ struct neighbour
61352 struct neigh_ops
61353 {
61354 int family;
61355 - void (*solicit)(struct neighbour *, struct sk_buff*);
61356 - void (*error_report)(struct neighbour *, struct sk_buff*);
61357 - int (*output)(struct sk_buff*);
61358 - int (*connected_output)(struct sk_buff*);
61359 - int (*hh_output)(struct sk_buff*);
61360 - int (*queue_xmit)(struct sk_buff*);
61361 + void (* const solicit)(struct neighbour *, struct sk_buff*);
61362 + void (* const error_report)(struct neighbour *, struct sk_buff*);
61363 + int (* const output)(struct sk_buff*);
61364 + int (* const connected_output)(struct sk_buff*);
61365 + int (* const hh_output)(struct sk_buff*);
61366 + int (* const queue_xmit)(struct sk_buff*);
61367 };
61368
61369 struct pneigh_entry
61370 diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61371 --- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61372 +++ linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
61373 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61374 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61375 {
61376 if (mark)
61377 - skb_trim(skb, (unsigned char *) mark - skb->data);
61378 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61379 }
61380
61381 /**
61382 diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61383 --- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61384 +++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61385 @@ -54,7 +54,7 @@ struct netns_ipv4 {
61386 int current_rt_cache_rebuild_count;
61387
61388 struct timer_list rt_secret_timer;
61389 - atomic_t rt_genid;
61390 + atomic_unchecked_t rt_genid;
61391
61392 #ifdef CONFIG_IP_MROUTE
61393 struct sock *mroute_sk;
61394 diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61395 --- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61396 +++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61397 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61398
61399 #else /* SCTP_DEBUG */
61400
61401 -#define SCTP_DEBUG_PRINTK(whatever...)
61402 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61403 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61404 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61405 #define SCTP_ENABLE_DEBUG
61406 #define SCTP_DISABLE_DEBUG
61407 #define SCTP_ASSERT(expr, str, func)
61408 diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61409 --- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61410 +++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61411 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61412 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61413 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61414 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61415 - __be16 dport);
61416 + __be16 dport);
61417 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61418 __be16 sport, __be16 dport);
61419 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61420 - __be16 sport, __be16 dport);
61421 + __be16 sport, __be16 dport);
61422 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61423 - __be16 sport, __be16 dport);
61424 + __be16 sport, __be16 dport);
61425 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61426 - __be16 sport, __be16 dport);
61427 + __be16 sport, __be16 dport);
61428
61429 #endif /* _NET_SECURE_SEQ */
61430 diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61431 --- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61432 +++ linux-2.6.32.45/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
61433 @@ -272,7 +272,7 @@ struct sock {
61434 rwlock_t sk_callback_lock;
61435 int sk_err,
61436 sk_err_soft;
61437 - atomic_t sk_drops;
61438 + atomic_unchecked_t sk_drops;
61439 unsigned short sk_ack_backlog;
61440 unsigned short sk_max_ack_backlog;
61441 __u32 sk_priority;
61442 diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61443 --- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61444 +++ linux-2.6.32.45/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61445 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61446 struct tcp_seq_afinfo {
61447 char *name;
61448 sa_family_t family;
61449 + /* cannot be const */
61450 struct file_operations seq_fops;
61451 struct seq_operations seq_ops;
61452 };
61453 diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61454 --- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61455 +++ linux-2.6.32.45/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61456 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61457 char *name;
61458 sa_family_t family;
61459 struct udp_table *udp_table;
61460 + /* cannot be const */
61461 struct file_operations seq_fops;
61462 struct seq_operations seq_ops;
61463 };
61464 diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61465 --- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61466 +++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61467 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
61468 int backlog);
61469
61470 int (*destroy_listen)(struct iw_cm_id *cm_id);
61471 -};
61472 +} __no_const;
61473
61474 /**
61475 * iw_create_cm_id - Create an IW CM identifier.
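
Many hunks in this region either add const to function-pointer members (sysfs_ops, neigh_ops, snd_ac97_build_ops) or tag whole ops structures with __no_const (snd_ak4xxx_ops, iw_cm_verbs, snd_hwdep_ops): a constified table can be placed in read-only memory so its pointers cannot be hijacked at runtime, while __no_const exempts the few structures that legitimately have callbacks filled in after initialisation. The standalone sketch below illustrates the trade-off in plain C; it is not the grsecurity constify plugin, and demo_ops is an invented name.

#include <stdio.h>

struct demo_ops {
        int (*show)(void);
        int (*store)(int);
};

static int demo_show(void)   { return 42; }
static int demo_store(int v) { return v;  }

/* Constified table: lives in .rodata, its pointers cannot be rewritten. */
static const struct demo_ops good_ops = {
        .show  = demo_show,
        .store = demo_store,
};

/* Table that needs a callback assigned at runtime, the case the patch marks
 * with __no_const so the constification pass leaves it writable. */
static struct demo_ops late_ops;

int main(void)
{
        late_ops.show  = demo_show;       /* legal: writable object          */
        late_ops.store = demo_store;
        /* good_ops.show = demo_store; */ /* would not compile: const object */
        printf("%d %d\n", good_ops.show(), late_ops.store(7));
        return 0;
}
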
61476 diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61477 --- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61478 +++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61479 @@ -156,9 +156,9 @@ struct scsi_device {
61480 unsigned int max_device_blocked; /* what device_blocked counts down from */
61481 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61482
61483 - atomic_t iorequest_cnt;
61484 - atomic_t iodone_cnt;
61485 - atomic_t ioerr_cnt;
61486 + atomic_unchecked_t iorequest_cnt;
61487 + atomic_unchecked_t iodone_cnt;
61488 + atomic_unchecked_t ioerr_cnt;
61489
61490 struct device sdev_gendev,
61491 sdev_dev;
61492 diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61493 --- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61494 +++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61495 @@ -663,9 +663,9 @@ struct fc_function_template {
61496 int (*bsg_timeout)(struct fc_bsg_job *);
61497
61498 /* allocation lengths for host-specific data */
61499 - u32 dd_fcrport_size;
61500 - u32 dd_fcvport_size;
61501 - u32 dd_bsg_size;
61502 + const u32 dd_fcrport_size;
61503 + const u32 dd_fcvport_size;
61504 + const u32 dd_bsg_size;
61505
61506 /*
61507 * The driver sets these to tell the transport class it
61508 @@ -675,39 +675,39 @@ struct fc_function_template {
61509 */
61510
61511 /* remote port fixed attributes */
61512 - unsigned long show_rport_maxframe_size:1;
61513 - unsigned long show_rport_supported_classes:1;
61514 - unsigned long show_rport_dev_loss_tmo:1;
61515 + const unsigned long show_rport_maxframe_size:1;
61516 + const unsigned long show_rport_supported_classes:1;
61517 + const unsigned long show_rport_dev_loss_tmo:1;
61518
61519 /*
61520 * target dynamic attributes
61521 * These should all be "1" if the driver uses the remote port
61522 * add/delete functions (so attributes reflect rport values).
61523 */
61524 - unsigned long show_starget_node_name:1;
61525 - unsigned long show_starget_port_name:1;
61526 - unsigned long show_starget_port_id:1;
61527 + const unsigned long show_starget_node_name:1;
61528 + const unsigned long show_starget_port_name:1;
61529 + const unsigned long show_starget_port_id:1;
61530
61531 /* host fixed attributes */
61532 - unsigned long show_host_node_name:1;
61533 - unsigned long show_host_port_name:1;
61534 - unsigned long show_host_permanent_port_name:1;
61535 - unsigned long show_host_supported_classes:1;
61536 - unsigned long show_host_supported_fc4s:1;
61537 - unsigned long show_host_supported_speeds:1;
61538 - unsigned long show_host_maxframe_size:1;
61539 - unsigned long show_host_serial_number:1;
61540 + const unsigned long show_host_node_name:1;
61541 + const unsigned long show_host_port_name:1;
61542 + const unsigned long show_host_permanent_port_name:1;
61543 + const unsigned long show_host_supported_classes:1;
61544 + const unsigned long show_host_supported_fc4s:1;
61545 + const unsigned long show_host_supported_speeds:1;
61546 + const unsigned long show_host_maxframe_size:1;
61547 + const unsigned long show_host_serial_number:1;
61548 /* host dynamic attributes */
61549 - unsigned long show_host_port_id:1;
61550 - unsigned long show_host_port_type:1;
61551 - unsigned long show_host_port_state:1;
61552 - unsigned long show_host_active_fc4s:1;
61553 - unsigned long show_host_speed:1;
61554 - unsigned long show_host_fabric_name:1;
61555 - unsigned long show_host_symbolic_name:1;
61556 - unsigned long show_host_system_hostname:1;
61557 + const unsigned long show_host_port_id:1;
61558 + const unsigned long show_host_port_type:1;
61559 + const unsigned long show_host_port_state:1;
61560 + const unsigned long show_host_active_fc4s:1;
61561 + const unsigned long show_host_speed:1;
61562 + const unsigned long show_host_fabric_name:1;
61563 + const unsigned long show_host_symbolic_name:1;
61564 + const unsigned long show_host_system_hostname:1;
61565
61566 - unsigned long disable_target_scan:1;
61567 + const unsigned long disable_target_scan:1;
61568 };
61569
61570
61571 diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61572 --- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61573 +++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61574 @@ -419,15 +419,15 @@
61575 struct snd_ac97;
61576
61577 struct snd_ac97_build_ops {
61578 - int (*build_3d) (struct snd_ac97 *ac97);
61579 - int (*build_specific) (struct snd_ac97 *ac97);
61580 - int (*build_spdif) (struct snd_ac97 *ac97);
61581 - int (*build_post_spdif) (struct snd_ac97 *ac97);
61582 + int (* const build_3d) (struct snd_ac97 *ac97);
61583 + int (* const build_specific) (struct snd_ac97 *ac97);
61584 + int (* const build_spdif) (struct snd_ac97 *ac97);
61585 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
61586 #ifdef CONFIG_PM
61587 - void (*suspend) (struct snd_ac97 *ac97);
61588 - void (*resume) (struct snd_ac97 *ac97);
61589 + void (* const suspend) (struct snd_ac97 *ac97);
61590 + void (* const resume) (struct snd_ac97 *ac97);
61591 #endif
61592 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61593 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61594 };
61595
61596 struct snd_ac97_bus_ops {
61597 @@ -477,7 +477,7 @@ struct snd_ac97_template {
61598
61599 struct snd_ac97 {
61600 /* -- lowlevel (hardware) driver specific -- */
61601 - struct snd_ac97_build_ops * build_ops;
61602 + const struct snd_ac97_build_ops * build_ops;
61603 void *private_data;
61604 void (*private_free) (struct snd_ac97 *ac97);
61605 /* --- */
61606 diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61607 --- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61608 +++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61609 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61610 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61611 unsigned char val);
61612 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61613 -};
61614 +} __no_const;
61615
61616 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61617
61618 diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61619 --- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61620 +++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61621 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61622 struct snd_hwdep_dsp_status *status);
61623 int (*dsp_load)(struct snd_hwdep *hw,
61624 struct snd_hwdep_dsp_image *image);
61625 -};
61626 +} __no_const;
61627
61628 struct snd_hwdep {
61629 struct snd_card *card;
61630 diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61631 --- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61632 +++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61633 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61634 struct snd_info_buffer *buffer);
61635 void (*write)(struct snd_info_entry *entry,
61636 struct snd_info_buffer *buffer);
61637 -};
61638 +} __no_const;
61639
61640 struct snd_info_entry_ops {
61641 int (*open)(struct snd_info_entry *entry,
61642 diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61643 --- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61644 +++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61645 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61646 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61647 int (*csp_stop) (struct snd_sb_csp * p);
61648 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61649 -};
61650 +} __no_const;
61651
61652 /*
61653 * CSP private data
61654 diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61655 --- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61656 +++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61657 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61658 spinlock_t reg_lock;
61659 spinlock_t voice_lock;
61660 wait_queue_head_t interrupt_sleep;
61661 - atomic_t interrupt_sleep_count;
61662 + atomic_unchecked_t interrupt_sleep_count;
61663 struct snd_info_entry *proc_entry;
61664 const struct firmware *dsp_microcode;
61665 const struct firmware *controller_microcode;
61666 diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61667 --- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61668 +++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61669 @@ -34,7 +34,7 @@
61670 */
61671 TRACE_EVENT(irq_handler_entry,
61672
61673 - TP_PROTO(int irq, struct irqaction *action),
61674 + TP_PROTO(int irq, const struct irqaction *action),
61675
61676 TP_ARGS(irq, action),
61677
61678 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61679 */
61680 TRACE_EVENT(irq_handler_exit,
61681
61682 - TP_PROTO(int irq, struct irqaction *action, int ret),
61683 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61684
61685 TP_ARGS(irq, action, ret),
61686
61687 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61688 */
61689 TRACE_EVENT(softirq_entry,
61690
61691 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61692 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61693
61694 TP_ARGS(h, vec),
61695
61696 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61697 */
61698 TRACE_EVENT(softirq_exit,
61699
61700 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61701 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61702
61703 TP_ARGS(h, vec),
61704
61705 diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61706 --- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61707 +++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61708 @@ -177,6 +177,7 @@ struct uvesafb_par {
61709 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61710 u8 pmi_setpal; /* PMI for palette changes */
61711 u16 *pmi_base; /* protected mode interface location */
61712 + u8 *pmi_code; /* protected mode code location */
61713 void *pmi_start;
61714 void *pmi_pal;
61715 u8 *vbe_state_orig; /*
61716 diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61717 --- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61718 +++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61719 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61720
61721 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61722 {
61723 - int err = sys_mount(name, "/root", fs, flags, data);
61724 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61725 if (err)
61726 return err;
61727
61728 - sys_chdir("/root");
61729 + sys_chdir((__force const char __user *)"/root");
61730 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61731 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61732 current->fs->pwd.mnt->mnt_sb->s_type->name,
61733 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61734 va_start(args, fmt);
61735 vsprintf(buf, fmt, args);
61736 va_end(args);
61737 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61738 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61739 if (fd >= 0) {
61740 sys_ioctl(fd, FDEJECT, 0);
61741 sys_close(fd);
61742 }
61743 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61744 - fd = sys_open("/dev/console", O_RDWR, 0);
61745 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61746 if (fd >= 0) {
61747 sys_ioctl(fd, TCGETS, (long)&termios);
61748 termios.c_lflag &= ~ICANON;
61749 sys_ioctl(fd, TCSETSF, (long)&termios);
61750 - sys_read(fd, &c, 1);
61751 + sys_read(fd, (char __user *)&c, 1);
61752 termios.c_lflag |= ICANON;
61753 sys_ioctl(fd, TCSETSF, (long)&termios);
61754 sys_close(fd);
61755 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61756 mount_root();
61757 out:
61758 devtmpfs_mount("dev");
61759 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61760 - sys_chroot(".");
61761 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61762 + sys_chroot((__force char __user *)".");
61763 }
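
The casts added throughout init/do_mounts*.c only matter to sparse: this early-boot code passes kernel string literals to syscall helpers whose parameters are declared __user, and (__force ... __user *) documents that the address-space mismatch is intentional rather than a bug. Below is a rough standalone model of the annotations, simplified from the kernel's compiler.h definitions; sys_mkdir_model is an invented stand-in, not a real interface.

#include <stdio.h>

/* Under sparse the annotations are real attributes; under a normal compiler
 * they vanish, so this file builds and runs either way. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Stand-in for a syscall helper declared to take a userspace pointer. */
static long sys_mkdir_model(const char __user *pathname, int mode)
{
        printf("mkdir %s mode %o\n", (__force const char *)pathname, mode);
        return 0;
}

int main(void)
{
        /* "/dev" is a kernel-side literal; the __force cast tells sparse the
         * kernel-to-user address-space conversion is deliberate. */
        return (int)sys_mkdir_model((__force const char __user *)"/dev", 0755);
}
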
61764 diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61765 --- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61766 +++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61767 @@ -15,15 +15,15 @@ extern int root_mountflags;
61768
61769 static inline int create_dev(char *name, dev_t dev)
61770 {
61771 - sys_unlink(name);
61772 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61773 + sys_unlink((__force char __user *)name);
61774 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61775 }
61776
61777 #if BITS_PER_LONG == 32
61778 static inline u32 bstat(char *name)
61779 {
61780 struct stat64 stat;
61781 - if (sys_stat64(name, &stat) != 0)
61782 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61783 return 0;
61784 if (!S_ISBLK(stat.st_mode))
61785 return 0;
61786 diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61787 --- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61788 +++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61789 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61790 sys_close(old_fd);sys_close(root_fd);
61791 sys_close(0);sys_close(1);sys_close(2);
61792 sys_setsid();
61793 - (void) sys_open("/dev/console",O_RDWR,0);
61794 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61795 (void) sys_dup(0);
61796 (void) sys_dup(0);
61797 return kernel_execve(shell, argv, envp_init);
61798 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61799 create_dev("/dev/root.old", Root_RAM0);
61800 /* mount initrd on rootfs' /root */
61801 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61802 - sys_mkdir("/old", 0700);
61803 - root_fd = sys_open("/", 0, 0);
61804 - old_fd = sys_open("/old", 0, 0);
61805 + sys_mkdir((__force const char __user *)"/old", 0700);
61806 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
61807 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61808 /* move initrd over / and chdir/chroot in initrd root */
61809 - sys_chdir("/root");
61810 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61811 - sys_chroot(".");
61812 + sys_chdir((__force const char __user *)"/root");
61813 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61814 + sys_chroot((__force const char __user *)".");
61815
61816 /*
61817 * In case that a resume from disk is carried out by linuxrc or one of
61818 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61819
61820 /* move initrd to rootfs' /old */
61821 sys_fchdir(old_fd);
61822 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61823 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61824 /* switch root and cwd back to / of rootfs */
61825 sys_fchdir(root_fd);
61826 - sys_chroot(".");
61827 + sys_chroot((__force const char __user *)".");
61828 sys_close(old_fd);
61829 sys_close(root_fd);
61830
61831 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61832 - sys_chdir("/old");
61833 + sys_chdir((__force const char __user *)"/old");
61834 return;
61835 }
61836
61837 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61838 mount_root();
61839
61840 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61841 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61842 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61843 if (!error)
61844 printk("okay\n");
61845 else {
61846 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61847 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61848 if (error == -ENOENT)
61849 printk("/initrd does not exist. Ignored.\n");
61850 else
61851 printk("failed\n");
61852 printk(KERN_NOTICE "Unmounting old root\n");
61853 - sys_umount("/old", MNT_DETACH);
61854 + sys_umount((__force char __user *)"/old", MNT_DETACH);
61855 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61856 if (fd < 0) {
61857 error = fd;
61858 @@ -119,11 +119,11 @@ int __init initrd_load(void)
61859 * mounted in the normal path.
61860 */
61861 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61862 - sys_unlink("/initrd.image");
61863 + sys_unlink((__force const char __user *)"/initrd.image");
61864 handle_initrd();
61865 return 1;
61866 }
61867 }
61868 - sys_unlink("/initrd.image");
61869 + sys_unlink((__force const char __user *)"/initrd.image");
61870 return 0;
61871 }
61872 diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61873 --- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61874 +++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61875 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61876 partitioned ? "_d" : "", minor,
61877 md_setup_args[ent].device_names);
61878
61879 - fd = sys_open(name, 0, 0);
61880 + fd = sys_open((__force char __user *)name, 0, 0);
61881 if (fd < 0) {
61882 printk(KERN_ERR "md: open failed - cannot start "
61883 "array %s\n", name);
61884 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61885 * array without it
61886 */
61887 sys_close(fd);
61888 - fd = sys_open(name, 0, 0);
61889 + fd = sys_open((__force char __user *)name, 0, 0);
61890 sys_ioctl(fd, BLKRRPART, 0);
61891 }
61892 sys_close(fd);
61893 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61894
61895 wait_for_device_probe();
61896
61897 - fd = sys_open("/dev/md0", 0, 0);
61898 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61899 if (fd >= 0) {
61900 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61901 sys_close(fd);
61902 diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61903 --- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61904 +++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61905 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61906 }
61907 }
61908
61909 -static long __init do_utime(char __user *filename, time_t mtime)
61910 +static long __init do_utime(__force char __user *filename, time_t mtime)
61911 {
61912 struct timespec t[2];
61913
61914 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61915 struct dir_entry *de, *tmp;
61916 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61917 list_del(&de->list);
61918 - do_utime(de->name, de->mtime);
61919 + do_utime((__force char __user *)de->name, de->mtime);
61920 kfree(de->name);
61921 kfree(de);
61922 }
61923 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61924 if (nlink >= 2) {
61925 char *old = find_link(major, minor, ino, mode, collected);
61926 if (old)
61927 - return (sys_link(old, collected) < 0) ? -1 : 1;
61928 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61929 }
61930 return 0;
61931 }
61932 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61933 {
61934 struct stat st;
61935
61936 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61937 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61938 if (S_ISDIR(st.st_mode))
61939 - sys_rmdir(path);
61940 + sys_rmdir((__force char __user *)path);
61941 else
61942 - sys_unlink(path);
61943 + sys_unlink((__force char __user *)path);
61944 }
61945 }
61946
61947 @@ -305,7 +305,7 @@ static int __init do_name(void)
61948 int openflags = O_WRONLY|O_CREAT;
61949 if (ml != 1)
61950 openflags |= O_TRUNC;
61951 - wfd = sys_open(collected, openflags, mode);
61952 + wfd = sys_open((__force char __user *)collected, openflags, mode);
61953
61954 if (wfd >= 0) {
61955 sys_fchown(wfd, uid, gid);
61956 @@ -317,17 +317,17 @@ static int __init do_name(void)
61957 }
61958 }
61959 } else if (S_ISDIR(mode)) {
61960 - sys_mkdir(collected, mode);
61961 - sys_chown(collected, uid, gid);
61962 - sys_chmod(collected, mode);
61963 + sys_mkdir((__force char __user *)collected, mode);
61964 + sys_chown((__force char __user *)collected, uid, gid);
61965 + sys_chmod((__force char __user *)collected, mode);
61966 dir_add(collected, mtime);
61967 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61968 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61969 if (maybe_link() == 0) {
61970 - sys_mknod(collected, mode, rdev);
61971 - sys_chown(collected, uid, gid);
61972 - sys_chmod(collected, mode);
61973 - do_utime(collected, mtime);
61974 + sys_mknod((__force char __user *)collected, mode, rdev);
61975 + sys_chown((__force char __user *)collected, uid, gid);
61976 + sys_chmod((__force char __user *)collected, mode);
61977 + do_utime((__force char __user *)collected, mtime);
61978 }
61979 }
61980 return 0;
61981 @@ -336,15 +336,15 @@ static int __init do_name(void)
61982 static int __init do_copy(void)
61983 {
61984 if (count >= body_len) {
61985 - sys_write(wfd, victim, body_len);
61986 + sys_write(wfd, (__force char __user *)victim, body_len);
61987 sys_close(wfd);
61988 - do_utime(vcollected, mtime);
61989 + do_utime((__force char __user *)vcollected, mtime);
61990 kfree(vcollected);
61991 eat(body_len);
61992 state = SkipIt;
61993 return 0;
61994 } else {
61995 - sys_write(wfd, victim, count);
61996 + sys_write(wfd, (__force char __user *)victim, count);
61997 body_len -= count;
61998 eat(count);
61999 return 1;
62000 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62001 {
62002 collected[N_ALIGN(name_len) + body_len] = '\0';
62003 clean_path(collected, 0);
62004 - sys_symlink(collected + N_ALIGN(name_len), collected);
62005 - sys_lchown(collected, uid, gid);
62006 - do_utime(collected, mtime);
62007 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
62008 + sys_lchown((__force char __user *)collected, uid, gid);
62009 + do_utime((__force char __user *)collected, mtime);
62010 state = SkipIt;
62011 next_state = Reset;
62012 return 0;
62013 diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
62014 --- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
62015 +++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
62016 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
62017
62018 config COMPAT_BRK
62019 bool "Disable heap randomization"
62020 - default y
62021 + default n
62022 help
62023 Randomizing heap placement makes heap exploits harder, but it
62024 also breaks ancient binaries (including anything libc5 based).
62025 diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
62026 --- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
62027 +++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
62028 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
62029 #ifdef CONFIG_TC
62030 extern void tc_init(void);
62031 #endif
62032 +extern void grsecurity_init(void);
62033
62034 enum system_states system_state __read_mostly;
62035 EXPORT_SYMBOL(system_state);
62036 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
62037
62038 __setup("reset_devices", set_reset_devices);
62039
62040 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62041 +extern char pax_enter_kernel_user[];
62042 +extern char pax_exit_kernel_user[];
62043 +extern pgdval_t clone_pgd_mask;
62044 +#endif
62045 +
62046 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62047 +static int __init setup_pax_nouderef(char *str)
62048 +{
62049 +#ifdef CONFIG_X86_32
62050 + unsigned int cpu;
62051 + struct desc_struct *gdt;
62052 +
62053 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62054 + gdt = get_cpu_gdt_table(cpu);
62055 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62056 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62057 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62058 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62059 + }
62060 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62061 +#else
62062 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62063 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62064 + clone_pgd_mask = ~(pgdval_t)0UL;
62065 +#endif
62066 +
62067 + return 0;
62068 +}
62069 +early_param("pax_nouderef", setup_pax_nouderef);
62070 +#endif
62071 +
62072 +#ifdef CONFIG_PAX_SOFTMODE
62073 +int pax_softmode;
62074 +
62075 +static int __init setup_pax_softmode(char *str)
62076 +{
62077 + get_option(&str, &pax_softmode);
62078 + return 1;
62079 +}
62080 +__setup("pax_softmode=", setup_pax_softmode);
62081 +#endif
62082 +
62083 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62084 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62085 static const char *panic_later, *panic_param;
62086 @@ -705,52 +749,53 @@ int initcall_debug;
62087 core_param(initcall_debug, initcall_debug, bool, 0644);
62088
62089 static char msgbuf[64];
62090 -static struct boot_trace_call call;
62091 -static struct boot_trace_ret ret;
62092 +static struct boot_trace_call trace_call;
62093 +static struct boot_trace_ret trace_ret;
62094
62095 int do_one_initcall(initcall_t fn)
62096 {
62097 int count = preempt_count();
62098 ktime_t calltime, delta, rettime;
62099 + const char *msg1 = "", *msg2 = "";
62100
62101 if (initcall_debug) {
62102 - call.caller = task_pid_nr(current);
62103 - printk("calling %pF @ %i\n", fn, call.caller);
62104 + trace_call.caller = task_pid_nr(current);
62105 + printk("calling %pF @ %i\n", fn, trace_call.caller);
62106 calltime = ktime_get();
62107 - trace_boot_call(&call, fn);
62108 + trace_boot_call(&trace_call, fn);
62109 enable_boot_trace();
62110 }
62111
62112 - ret.result = fn();
62113 + trace_ret.result = fn();
62114
62115 if (initcall_debug) {
62116 disable_boot_trace();
62117 rettime = ktime_get();
62118 delta = ktime_sub(rettime, calltime);
62119 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62120 - trace_boot_ret(&ret, fn);
62121 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62122 + trace_boot_ret(&trace_ret, fn);
62123 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62124 - ret.result, ret.duration);
62125 + trace_ret.result, trace_ret.duration);
62126 }
62127
62128 msgbuf[0] = 0;
62129
62130 - if (ret.result && ret.result != -ENODEV && initcall_debug)
62131 - sprintf(msgbuf, "error code %d ", ret.result);
62132 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62133 + sprintf(msgbuf, "error code %d ", trace_ret.result);
62134
62135 if (preempt_count() != count) {
62136 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62137 + msg1 = " preemption imbalance";
62138 preempt_count() = count;
62139 }
62140 if (irqs_disabled()) {
62141 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62142 + msg2 = " disabled interrupts";
62143 local_irq_enable();
62144 }
62145 - if (msgbuf[0]) {
62146 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62147 + if (msgbuf[0] || *msg1 || *msg2) {
62148 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62149 }
62150
62151 - return ret.result;
62152 + return trace_ret.result;
62153 }
62154
62155
62156 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62157 if (!ramdisk_execute_command)
62158 ramdisk_execute_command = "/init";
62159
62160 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62161 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62162 ramdisk_execute_command = NULL;
62163 prepare_namespace();
62164 }
62165
62166 + grsecurity_init();
62167 +
62168 /*
62169 * Ok, we have completed the initial bootup, and
62170 * we're essentially up and running. Get rid of the
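
The new pax_softmode= handling in the init/main.c hunk above follows the standard kernel boot-parameter pattern: __setup() registers a "prefix=" string together with a parser callback, and get_option() extracts the integer that follows the prefix on the command line. The small userspace model below shows that lookup shape only; setup_table and parse_args_model are invented names standing in for the kernel's obs_kernel_param machinery.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int pax_softmode_model;

static int setup_pax_softmode_model(const char *str)
{
        pax_softmode_model = atoi(str);   /* the kernel code uses get_option() */
        return 1;
}

struct setup_entry {
        const char *prefix;
        int (*fn)(const char *val);
};

static const struct setup_entry setup_table[] = {
        { "pax_softmode=", setup_pax_softmode_model },
};

static void parse_args_model(const char *token)
{
        for (size_t i = 0; i < sizeof(setup_table) / sizeof(setup_table[0]); i++) {
                size_t n = strlen(setup_table[i].prefix);
                if (!strncmp(token, setup_table[i].prefix, n)) {
                        setup_table[i].fn(token + n);
                        return;
                }
        }
}

int main(void)
{
        parse_args_model("pax_softmode=1");
        printf("pax_softmode = %d\n", pax_softmode_model);
        return 0;
}
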
62171 diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
62172 --- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62173 +++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62174 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62175 {
62176 int err;
62177
62178 - err = sys_mkdir("/dev", 0755);
62179 + err = sys_mkdir((const char __user *)"/dev", 0755);
62180 if (err < 0)
62181 goto out;
62182
62183 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62184 if (err < 0)
62185 goto out;
62186
62187 - err = sys_mkdir("/root", 0700);
62188 + err = sys_mkdir((const char __user *)"/root", 0700);
62189 if (err < 0)
62190 goto out;
62191
62192 diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
62193 --- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62194 +++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62195 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62196 mq_bytes = (mq_msg_tblsz +
62197 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62198
62199 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62200 spin_lock(&mq_lock);
62201 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62202 u->mq_bytes + mq_bytes >
62203 diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
62204 --- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62205 +++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62206 @@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62207 return security_msg_queue_associate(msq, msgflg);
62208 }
62209
62210 +static struct ipc_ops msg_ops = {
62211 + .getnew = newque,
62212 + .associate = msg_security,
62213 + .more_checks = NULL
62214 +};
62215 +
62216 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62217 {
62218 struct ipc_namespace *ns;
62219 - struct ipc_ops msg_ops;
62220 struct ipc_params msg_params;
62221
62222 ns = current->nsproxy->ipc_ns;
62223
62224 - msg_ops.getnew = newque;
62225 - msg_ops.associate = msg_security;
62226 - msg_ops.more_checks = NULL;
62227 -
62228 msg_params.key = key;
62229 msg_params.flg = msgflg;
62230
62231 diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62232 --- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62233 +++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62234 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62235 return 0;
62236 }
62237
62238 +static struct ipc_ops sem_ops = {
62239 + .getnew = newary,
62240 + .associate = sem_security,
62241 + .more_checks = sem_more_checks
62242 +};
62243 +
62244 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62245 {
62246 struct ipc_namespace *ns;
62247 - struct ipc_ops sem_ops;
62248 struct ipc_params sem_params;
62249
62250 ns = current->nsproxy->ipc_ns;
62251 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62252 if (nsems < 0 || nsems > ns->sc_semmsl)
62253 return -EINVAL;
62254
62255 - sem_ops.getnew = newary;
62256 - sem_ops.associate = sem_security;
62257 - sem_ops.more_checks = sem_more_checks;
62258 -
62259 sem_params.key = key;
62260 sem_params.flg = semflg;
62261 sem_params.u.nsems = nsems;
62262 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62263 ushort* sem_io = fast_sem_io;
62264 int nsems;
62265
62266 + pax_track_stack();
62267 +
62268 sma = sem_lock_check(ns, semid);
62269 if (IS_ERR(sma))
62270 return PTR_ERR(sma);
62271 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62272 unsigned long jiffies_left = 0;
62273 struct ipc_namespace *ns;
62274
62275 + pax_track_stack();
62276 +
62277 ns = current->nsproxy->ipc_ns;
62278
62279 if (nsops < 1 || semid < 0)
62280 diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62281 --- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62282 +++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62283 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62284 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62285 #endif
62286
62287 +#ifdef CONFIG_GRKERNSEC
62288 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62289 + const time_t shm_createtime, const uid_t cuid,
62290 + const int shmid);
62291 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62292 + const time_t shm_createtime);
62293 +#endif
62294 +
62295 void shm_init_ns(struct ipc_namespace *ns)
62296 {
62297 ns->shm_ctlmax = SHMMAX;
62298 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62299 shp->shm_lprid = 0;
62300 shp->shm_atim = shp->shm_dtim = 0;
62301 shp->shm_ctim = get_seconds();
62302 +#ifdef CONFIG_GRKERNSEC
62303 + {
62304 + struct timespec timeval;
62305 + do_posix_clock_monotonic_gettime(&timeval);
62306 +
62307 + shp->shm_createtime = timeval.tv_sec;
62308 + }
62309 +#endif
62310 shp->shm_segsz = size;
62311 shp->shm_nattch = 0;
62312 shp->shm_file = file;
62313 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62314 return 0;
62315 }
62316
62317 +static struct ipc_ops shm_ops = {
62318 + .getnew = newseg,
62319 + .associate = shm_security,
62320 + .more_checks = shm_more_checks
62321 +};
62322 +
62323 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62324 {
62325 struct ipc_namespace *ns;
62326 - struct ipc_ops shm_ops;
62327 struct ipc_params shm_params;
62328
62329 ns = current->nsproxy->ipc_ns;
62330
62331 - shm_ops.getnew = newseg;
62332 - shm_ops.associate = shm_security;
62333 - shm_ops.more_checks = shm_more_checks;
62334 -
62335 shm_params.key = key;
62336 shm_params.flg = shmflg;
62337 shm_params.u.size = size;
62338 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62339 if (err)
62340 goto out_unlock;
62341
62342 +#ifdef CONFIG_GRKERNSEC
62343 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62344 + shp->shm_perm.cuid, shmid) ||
62345 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62346 + err = -EACCES;
62347 + goto out_unlock;
62348 + }
62349 +#endif
62350 +
62351 path.dentry = dget(shp->shm_file->f_path.dentry);
62352 path.mnt = shp->shm_file->f_path.mnt;
62353 shp->shm_nattch++;
62354 +#ifdef CONFIG_GRKERNSEC
62355 + shp->shm_lapid = current->pid;
62356 +#endif
62357 size = i_size_read(path.dentry->d_inode);
62358 shm_unlock(shp);
62359
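Note on the ipc/sem.c and ipc/shm.c hunks above: both apply the same refactor, turning the struct ipc_ops that semget() and shmget() previously filled in on the kernel stack on every call into a file-scope static object initialized once at compile time, so the callback table is no longer rebuilt per syscall. A minimal standalone C sketch of that shape follows; the names mimic the patch, but none of this is kernel code.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's ipc_ops callbacks. */
struct ipc_ops {
        int (*getnew)(int flags);
        int (*associate)(int id, int flags);
        int (*more_checks)(int id, int flags);
};

static int newary(int flags)                  { return 100 + flags; }
static int sem_security(int id, int flags)    { (void)flags; return id >= 0 ? 0 : -1; }
static int sem_more_checks(int id, int flags) { (void)id; (void)flags; return 0; }

/*
 * File-scope and initialized at compile time, mirroring the patch, which
 * hoists sem_ops/shm_ops out of the syscall body so the structure no longer
 * lives on (and gets re-filled on) the stack for every call.
 */
static struct ipc_ops sem_ops = {
        .getnew      = newary,
        .associate   = sem_security,
        .more_checks = sem_more_checks,
};

static int ipcget(const struct ipc_ops *ops, int key, int flags)
{
        int id = ops->getnew(flags);

        (void)key;
        if (ops->associate(id, flags) || ops->more_checks(id, flags))
                return -1;
        return id;
}

int main(void)
{
        printf("semget-like call returned id %d\n", ipcget(&sem_ops, 42, 3));
        return 0;
}
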
62360 diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62361 --- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62362 +++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62363 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62364 */
62365 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62366 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62367 - file->f_op->write(file, (char *)&ac,
62368 + file->f_op->write(file, (__force char __user *)&ac,
62369 sizeof(acct_t), &file->f_pos);
62370 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62371 set_fs(fs);
62372 diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62373 --- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62374 +++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62375 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62376 3) suppressed due to audit_rate_limit
62377 4) suppressed due to audit_backlog_limit
62378 */
62379 -static atomic_t audit_lost = ATOMIC_INIT(0);
62380 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62381
62382 /* The netlink socket. */
62383 static struct sock *audit_sock;
62384 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62385 unsigned long now;
62386 int print;
62387
62388 - atomic_inc(&audit_lost);
62389 + atomic_inc_unchecked(&audit_lost);
62390
62391 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62392
62393 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62394 printk(KERN_WARNING
62395 "audit: audit_lost=%d audit_rate_limit=%d "
62396 "audit_backlog_limit=%d\n",
62397 - atomic_read(&audit_lost),
62398 + atomic_read_unchecked(&audit_lost),
62399 audit_rate_limit,
62400 audit_backlog_limit);
62401 audit_panic(message);
62402 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62403 status_set.pid = audit_pid;
62404 status_set.rate_limit = audit_rate_limit;
62405 status_set.backlog_limit = audit_backlog_limit;
62406 - status_set.lost = atomic_read(&audit_lost);
62407 + status_set.lost = atomic_read_unchecked(&audit_lost);
62408 status_set.backlog = skb_queue_len(&audit_skb_queue);
62409 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62410 &status_set, sizeof(status_set));
62411 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62412 spin_unlock_irq(&tsk->sighand->siglock);
62413 }
62414 read_unlock(&tasklist_lock);
62415 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62416 - &s, sizeof(s));
62417 +
62418 + if (!err)
62419 + audit_send_reply(NETLINK_CB(skb).pid, seq,
62420 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62421 break;
62422 }
62423 case AUDIT_TTY_SET: {
62424 diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62425 --- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62426 +++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62427 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62428 }
62429
62430 /* global counter which is incremented every time something logs in */
62431 -static atomic_t session_id = ATOMIC_INIT(0);
62432 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62433
62434 /**
62435 * audit_set_loginuid - set a task's audit_context loginuid
62436 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62437 */
62438 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62439 {
62440 - unsigned int sessionid = atomic_inc_return(&session_id);
62441 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62442 struct audit_context *context = task->audit_context;
62443
62444 if (context && context->in_syscall) {
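Note on the kernel/audit.c and kernel/auditsc.c hunks above: audit_lost and session_id are plain statistics rather than reference counts, so the patch converts them from atomic_t to atomic_unchecked_t and switches their accessors to the *_unchecked variants. Under PaX's REFCOUNT hardening the ordinary atomic_t operations are instrumented to detect overflow; atomic_unchecked_t exists for counters whose wrap-around is harmless. A userspace caricature of the accessor shape, for illustration only (the real implementation is architecture-specific kernel code):

#include <stdio.h>

/* Illustrative only: a counter type that is deliberately exempt from
 * overflow checking, unlike the instrumented atomic_t it sits beside. */
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static int atomic_read_unchecked(atomic_unchecked_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static atomic_unchecked_t audit_lost = { 0 };

int main(void)
{
        atomic_inc_unchecked(&audit_lost);
        printf("audit_lost=%d\n", atomic_read_unchecked(&audit_lost));
        return 0;
}
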
62445 diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62446 --- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62447 +++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62448 @@ -305,10 +305,26 @@ int capable(int cap)
62449 BUG();
62450 }
62451
62452 - if (security_capable(cap) == 0) {
62453 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62454 current->flags |= PF_SUPERPRIV;
62455 return 1;
62456 }
62457 return 0;
62458 }
62459 +
62460 +int capable_nolog(int cap)
62461 +{
62462 + if (unlikely(!cap_valid(cap))) {
62463 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62464 + BUG();
62465 + }
62466 +
62467 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62468 + current->flags |= PF_SUPERPRIV;
62469 + return 1;
62470 + }
62471 + return 0;
62472 +}
62473 +
62474 EXPORT_SYMBOL(capable);
62475 +EXPORT_SYMBOL(capable_nolog);
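Note on the kernel/capability.c hunk above: capable() now requires both the LSM decision and the grsecurity check (gr_is_capable) to pass, and an exported capable_nolog() variant asks gr_is_capable_nolog() instead, so a capability can be probed without generating a grsecurity log entry. A compact standalone sketch of that two-gate pattern, with stub predicates standing in for the real security hooks:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real functions live in the LSM and in grsecurity. */
static bool security_capable_ok(int cap) { return cap >= 0; }
static bool gr_is_capable(int cap)       { printf("audit: cap %d checked\n", cap); return true; }
static bool gr_is_capable_nolog(int cap) { (void)cap; return true; }

/* Both the LSM and the grsecurity policy must agree before granting. */
static bool capable(int cap)
{
        return security_capable_ok(cap) && gr_is_capable(cap);
}

/* Same decision, but without emitting a grsecurity log entry. */
static bool capable_nolog(int cap)
{
        return security_capable_ok(cap) && gr_is_capable_nolog(cap);
}

int main(void)
{
        printf("capable: %d, capable_nolog: %d\n", capable(21), capable_nolog(21));
        return 0;
}
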
62476 diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62477 --- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62478 +++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62479 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62480 struct hlist_head *hhead;
62481 struct cg_cgroup_link *link;
62482
62483 + pax_track_stack();
62484 +
62485 /* First see if we already have a cgroup group that matches
62486 * the desired set */
62487 read_lock(&css_set_lock);
62488 diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62489 --- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62490 +++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62491 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62492 struct proc_dir_entry *entry;
62493
62494 /* create the current config file */
62495 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62496 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62497 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62498 + &ikconfig_file_ops);
62499 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62500 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62501 + &ikconfig_file_ops);
62502 +#endif
62503 +#else
62504 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62505 &ikconfig_file_ops);
62506 +#endif
62507 +
62508 if (!entry)
62509 return -ENOMEM;
62510
62511 diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62512 --- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62513 +++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62514 @@ -19,7 +19,7 @@
62515 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62516 static DEFINE_MUTEX(cpu_add_remove_lock);
62517
62518 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62519 +static RAW_NOTIFIER_HEAD(cpu_chain);
62520
62521 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62522 * Should always be manipulated under cpu_add_remove_lock
62523 diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62524 --- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62525 +++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62526 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62527 */
62528 void __put_cred(struct cred *cred)
62529 {
62530 + pax_track_stack();
62531 +
62532 kdebug("__put_cred(%p{%d,%d})", cred,
62533 atomic_read(&cred->usage),
62534 read_cred_subscribers(cred));
62535 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62536 {
62537 struct cred *cred;
62538
62539 + pax_track_stack();
62540 +
62541 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62542 atomic_read(&tsk->cred->usage),
62543 read_cred_subscribers(tsk->cred));
62544 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62545 {
62546 const struct cred *cred;
62547
62548 + pax_track_stack();
62549 +
62550 rcu_read_lock();
62551
62552 do {
62553 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62554 {
62555 struct cred *new;
62556
62557 + pax_track_stack();
62558 +
62559 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62560 if (!new)
62561 return NULL;
62562 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62563 const struct cred *old;
62564 struct cred *new;
62565
62566 + pax_track_stack();
62567 +
62568 validate_process_creds();
62569
62570 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62571 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62572 struct thread_group_cred *tgcred = NULL;
62573 struct cred *new;
62574
62575 + pax_track_stack();
62576 +
62577 #ifdef CONFIG_KEYS
62578 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62579 if (!tgcred)
62580 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62581 struct cred *new;
62582 int ret;
62583
62584 + pax_track_stack();
62585 +
62586 mutex_init(&p->cred_guard_mutex);
62587
62588 if (
62589 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62590 struct task_struct *task = current;
62591 const struct cred *old = task->real_cred;
62592
62593 + pax_track_stack();
62594 +
62595 kdebug("commit_creds(%p{%d,%d})", new,
62596 atomic_read(&new->usage),
62597 read_cred_subscribers(new));
62598 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62599
62600 get_cred(new); /* we will require a ref for the subj creds too */
62601
62602 + gr_set_role_label(task, new->uid, new->gid);
62603 +
62604 /* dumpability changes */
62605 if (old->euid != new->euid ||
62606 old->egid != new->egid ||
62607 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62608 key_fsgid_changed(task);
62609
62610 /* do it
62611 - * - What if a process setreuid()'s and this brings the
62612 - * new uid over his NPROC rlimit? We can check this now
62613 - * cheaply with the new uid cache, so if it matters
62614 - * we should be checking for it. -DaveM
62615 + * RLIMIT_NPROC limits on user->processes have already been checked
62616 + * in set_user().
62617 */
62618 alter_cred_subscribers(new, 2);
62619 if (new->user != old->user)
62620 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62621 */
62622 void abort_creds(struct cred *new)
62623 {
62624 + pax_track_stack();
62625 +
62626 kdebug("abort_creds(%p{%d,%d})", new,
62627 atomic_read(&new->usage),
62628 read_cred_subscribers(new));
62629 @@ -629,6 +647,8 @@ const struct cred *override_creds(const
62630 {
62631 const struct cred *old = current->cred;
62632
62633 + pax_track_stack();
62634 +
62635 kdebug("override_creds(%p{%d,%d})", new,
62636 atomic_read(&new->usage),
62637 read_cred_subscribers(new));
62638 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62639 {
62640 const struct cred *override = current->cred;
62641
62642 + pax_track_stack();
62643 +
62644 kdebug("revert_creds(%p{%d,%d})", old,
62645 atomic_read(&old->usage),
62646 read_cred_subscribers(old));
62647 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62648 const struct cred *old;
62649 struct cred *new;
62650
62651 + pax_track_stack();
62652 +
62653 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62654 if (!new)
62655 return NULL;
62656 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62657 */
62658 int set_security_override(struct cred *new, u32 secid)
62659 {
62660 + pax_track_stack();
62661 +
62662 return security_kernel_act_as(new, secid);
62663 }
62664 EXPORT_SYMBOL(set_security_override);
62665 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62666 u32 secid;
62667 int ret;
62668
62669 + pax_track_stack();
62670 +
62671 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62672 if (ret < 0)
62673 return ret;
62674 diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62675 --- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62676 +++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62677 @@ -55,6 +55,10 @@
62678 #include <asm/pgtable.h>
62679 #include <asm/mmu_context.h>
62680
62681 +#ifdef CONFIG_GRKERNSEC
62682 +extern rwlock_t grsec_exec_file_lock;
62683 +#endif
62684 +
62685 static void exit_mm(struct task_struct * tsk);
62686
62687 static void __unhash_process(struct task_struct *p)
62688 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62689 struct task_struct *leader;
62690 int zap_leader;
62691 repeat:
62692 +#ifdef CONFIG_NET
62693 + gr_del_task_from_ip_table(p);
62694 +#endif
62695 +
62696 tracehook_prepare_release_task(p);
62697 /* don't need to get the RCU readlock here - the process is dead and
62698 * can't be modifying its own credentials */
62699 @@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62700 {
62701 write_lock_irq(&tasklist_lock);
62702
62703 +#ifdef CONFIG_GRKERNSEC
62704 + write_lock(&grsec_exec_file_lock);
62705 + if (current->exec_file) {
62706 + fput(current->exec_file);
62707 + current->exec_file = NULL;
62708 + }
62709 + write_unlock(&grsec_exec_file_lock);
62710 +#endif
62711 +
62712 ptrace_unlink(current);
62713 /* Reparent to init */
62714 current->real_parent = current->parent = kthreadd_task;
62715 list_move_tail(&current->sibling, &current->real_parent->children);
62716
62717 + gr_set_kernel_label(current);
62718 +
62719 /* Set the exit signal to SIGCHLD so we signal init on exit */
62720 current->exit_signal = SIGCHLD;
62721
62722 @@ -397,7 +416,7 @@ int allow_signal(int sig)
62723 * know it'll be handled, so that they don't get converted to
62724 * SIGKILL or just silently dropped.
62725 */
62726 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62727 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62728 recalc_sigpending();
62729 spin_unlock_irq(&current->sighand->siglock);
62730 return 0;
62731 @@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62732 vsnprintf(current->comm, sizeof(current->comm), name, args);
62733 va_end(args);
62734
62735 +#ifdef CONFIG_GRKERNSEC
62736 + write_lock(&grsec_exec_file_lock);
62737 + if (current->exec_file) {
62738 + fput(current->exec_file);
62739 + current->exec_file = NULL;
62740 + }
62741 + write_unlock(&grsec_exec_file_lock);
62742 +#endif
62743 +
62744 + gr_set_kernel_label(current);
62745 +
62746 /*
62747 * If we were started as result of loading a module, close all of the
62748 * user space pages. We don't need them, and if we didn't close them
62749 @@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62750 struct task_struct *tsk = current;
62751 int group_dead;
62752
62753 - profile_task_exit(tsk);
62754 -
62755 - WARN_ON(atomic_read(&tsk->fs_excl));
62756 -
62757 + /*
62758 + * Check this first since set_fs() below depends on
62759 + * current_thread_info(), which we better not access when we're in
62760 + * interrupt context. Other than that, we want to do the set_fs()
62761 + * as early as possible.
62762 + */
62763 if (unlikely(in_interrupt()))
62764 panic("Aiee, killing interrupt handler!");
62765 - if (unlikely(!tsk->pid))
62766 - panic("Attempted to kill the idle task!");
62767
62768 /*
62769 - * If do_exit is called because this processes oopsed, it's possible
62770 + * If do_exit is called because this processes Oops'ed, it's possible
62771 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62772 * continuing. Amongst other possible reasons, this is to prevent
62773 * mm_release()->clear_child_tid() from writing to a user-controlled
62774 @@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62775 */
62776 set_fs(USER_DS);
62777
62778 + profile_task_exit(tsk);
62779 +
62780 + WARN_ON(atomic_read(&tsk->fs_excl));
62781 +
62782 + if (unlikely(!tsk->pid))
62783 + panic("Attempted to kill the idle task!");
62784 +
62785 tracehook_report_exit(&code);
62786
62787 validate_creds_for_do_exit(tsk);
62788 @@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62789 tsk->exit_code = code;
62790 taskstats_exit(tsk, group_dead);
62791
62792 + gr_acl_handle_psacct(tsk, code);
62793 + gr_acl_handle_exit();
62794 +
62795 exit_mm(tsk);
62796
62797 if (group_dead)
62798 @@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62799
62800 if (unlikely(wo->wo_flags & WNOWAIT)) {
62801 int exit_code = p->exit_code;
62802 - int why, status;
62803 + int why;
62804
62805 get_task_struct(p);
62806 read_unlock(&tasklist_lock);
62807 diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62808 --- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62809 +++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62810 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62811 *stackend = STACK_END_MAGIC; /* for overflow detection */
62812
62813 #ifdef CONFIG_CC_STACKPROTECTOR
62814 - tsk->stack_canary = get_random_int();
62815 + tsk->stack_canary = pax_get_random_long();
62816 #endif
62817
62818 /* One for us, one for whoever does the "release_task()" (usually parent) */
62819 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62820 mm->locked_vm = 0;
62821 mm->mmap = NULL;
62822 mm->mmap_cache = NULL;
62823 - mm->free_area_cache = oldmm->mmap_base;
62824 - mm->cached_hole_size = ~0UL;
62825 + mm->free_area_cache = oldmm->free_area_cache;
62826 + mm->cached_hole_size = oldmm->cached_hole_size;
62827 mm->map_count = 0;
62828 cpumask_clear(mm_cpumask(mm));
62829 mm->mm_rb = RB_ROOT;
62830 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62831 tmp->vm_flags &= ~VM_LOCKED;
62832 tmp->vm_mm = mm;
62833 tmp->vm_next = tmp->vm_prev = NULL;
62834 + tmp->vm_mirror = NULL;
62835 anon_vma_link(tmp);
62836 file = tmp->vm_file;
62837 if (file) {
62838 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62839 if (retval)
62840 goto out;
62841 }
62842 +
62843 +#ifdef CONFIG_PAX_SEGMEXEC
62844 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62845 + struct vm_area_struct *mpnt_m;
62846 +
62847 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62848 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62849 +
62850 + if (!mpnt->vm_mirror)
62851 + continue;
62852 +
62853 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62854 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62855 + mpnt->vm_mirror = mpnt_m;
62856 + } else {
62857 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62858 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62859 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62860 + mpnt->vm_mirror->vm_mirror = mpnt;
62861 + }
62862 + }
62863 + BUG_ON(mpnt_m);
62864 + }
62865 +#endif
62866 +
62867 /* a new mm has just been created */
62868 arch_dup_mmap(oldmm, mm);
62869 retval = 0;
62870 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62871 write_unlock(&fs->lock);
62872 return -EAGAIN;
62873 }
62874 - fs->users++;
62875 + atomic_inc(&fs->users);
62876 write_unlock(&fs->lock);
62877 return 0;
62878 }
62879 tsk->fs = copy_fs_struct(fs);
62880 if (!tsk->fs)
62881 return -ENOMEM;
62882 + gr_set_chroot_entries(tsk, &tsk->fs->root);
62883 return 0;
62884 }
62885
62886 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62887 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62888 #endif
62889 retval = -EAGAIN;
62890 +
62891 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62892 +
62893 if (atomic_read(&p->real_cred->user->processes) >=
62894 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62895 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62896 - p->real_cred->user != INIT_USER)
62897 + if (p->real_cred->user != INIT_USER &&
62898 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62899 goto bad_fork_free;
62900 }
62901 + current->flags &= ~PF_NPROC_EXCEEDED;
62902
62903 retval = copy_creds(p, clone_flags);
62904 if (retval < 0)
62905 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62906 goto bad_fork_free_pid;
62907 }
62908
62909 + gr_copy_label(p);
62910 +
62911 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62912 /*
62913 * Clear TID on mm_release()?
62914 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62915 bad_fork_free:
62916 free_task(p);
62917 fork_out:
62918 + gr_log_forkfail(retval);
62919 +
62920 return ERR_PTR(retval);
62921 }
62922
62923 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62924 if (clone_flags & CLONE_PARENT_SETTID)
62925 put_user(nr, parent_tidptr);
62926
62927 + gr_handle_brute_check();
62928 +
62929 if (clone_flags & CLONE_VFORK) {
62930 p->vfork_done = &vfork;
62931 init_completion(&vfork);
62932 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62933 return 0;
62934
62935 /* don't need lock here; in the worst case we'll do useless copy */
62936 - if (fs->users == 1)
62937 + if (atomic_read(&fs->users) == 1)
62938 return 0;
62939
62940 *new_fsp = copy_fs_struct(fs);
62941 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62942 fs = current->fs;
62943 write_lock(&fs->lock);
62944 current->fs = new_fs;
62945 - if (--fs->users)
62946 + gr_set_chroot_entries(current, &current->fs->root);
62947 + if (atomic_dec_return(&fs->users))
62948 new_fs = NULL;
62949 else
62950 new_fs = fs;
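Note on the kernel/fork.c hunks above: besides the PaX and grsecurity hooks, the fs_struct user count changes from a plain integer (fs->users++, --fs->users) to an atomic_t manipulated with atomic_inc(), atomic_read() and atomic_dec_return(). A small standalone sketch of that conversion using compiler atomics; the fs_struct shown here is a hypothetical simplification, not the kernel structure:

#include <stdio.h>

/* Simplified stand-in for the kernel's fs_struct; fields are illustrative. */
struct fs_struct {
        int users;   /* refcount; an atomic_t in the patched kernel */
};

static void get_fs_struct(struct fs_struct *fs)
{
        __atomic_fetch_add(&fs->users, 1, __ATOMIC_RELAXED);   /* was: fs->users++ */
}

/* Returns the new count, mirroring atomic_dec_return(): 0 means "last user". */
static int put_fs_struct(struct fs_struct *fs)
{
        return __atomic_sub_fetch(&fs->users, 1, __ATOMIC_RELAXED);  /* was: --fs->users */
}

int main(void)
{
        struct fs_struct fs = { 1 };

        get_fs_struct(&fs);
        printf("after get: %d\n", __atomic_load_n(&fs.users, __ATOMIC_RELAXED));
        printf("after put: %d\n", put_fs_struct(&fs));
        return 0;
}
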
62951 diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62952 --- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62953 +++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62954 @@ -54,6 +54,7 @@
62955 #include <linux/mount.h>
62956 #include <linux/pagemap.h>
62957 #include <linux/syscalls.h>
62958 +#include <linux/ptrace.h>
62959 #include <linux/signal.h>
62960 #include <linux/module.h>
62961 #include <linux/magic.h>
62962 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62963 struct page *page;
62964 int err;
62965
62966 +#ifdef CONFIG_PAX_SEGMEXEC
62967 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62968 + return -EFAULT;
62969 +#endif
62970 +
62971 /*
62972 * The futex address must be "naturally" aligned.
62973 */
62974 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62975 struct futex_q q;
62976 int ret;
62977
62978 + pax_track_stack();
62979 +
62980 if (!bitset)
62981 return -EINVAL;
62982
62983 @@ -1841,7 +1849,7 @@ retry:
62984
62985 restart = &current_thread_info()->restart_block;
62986 restart->fn = futex_wait_restart;
62987 - restart->futex.uaddr = (u32 *)uaddr;
62988 + restart->futex.uaddr = uaddr;
62989 restart->futex.val = val;
62990 restart->futex.time = abs_time->tv64;
62991 restart->futex.bitset = bitset;
62992 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62993 struct futex_q q;
62994 int res, ret;
62995
62996 + pax_track_stack();
62997 +
62998 if (!bitset)
62999 return -EINVAL;
63000
63001 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63002 {
63003 struct robust_list_head __user *head;
63004 unsigned long ret;
63005 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63006 const struct cred *cred = current_cred(), *pcred;
63007 +#endif
63008
63009 if (!futex_cmpxchg_enabled)
63010 return -ENOSYS;
63011 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63012 if (!p)
63013 goto err_unlock;
63014 ret = -EPERM;
63015 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63016 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63017 + goto err_unlock;
63018 +#else
63019 pcred = __task_cred(p);
63020 if (cred->euid != pcred->euid &&
63021 cred->euid != pcred->uid &&
63022 !capable(CAP_SYS_PTRACE))
63023 goto err_unlock;
63024 +#endif
63025 head = p->robust_list;
63026 rcu_read_unlock();
63027 }
63028 @@ -2459,7 +2476,7 @@ retry:
63029 */
63030 static inline int fetch_robust_entry(struct robust_list __user **entry,
63031 struct robust_list __user * __user *head,
63032 - int *pi)
63033 + unsigned int *pi)
63034 {
63035 unsigned long uentry;
63036
63037 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
63038 {
63039 u32 curval;
63040 int i;
63041 + mm_segment_t oldfs;
63042
63043 /*
63044 * This will fail and we want it. Some arch implementations do
63045 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
63046 * implementation, the non functional ones will return
63047 * -ENOSYS.
63048 */
63049 + oldfs = get_fs();
63050 + set_fs(USER_DS);
63051 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
63052 + set_fs(oldfs);
63053 if (curval == -EFAULT)
63054 futex_cmpxchg_enabled = 1;
63055
63056 diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
63057 --- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
63058 +++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
63059 @@ -10,6 +10,7 @@
63060 #include <linux/compat.h>
63061 #include <linux/nsproxy.h>
63062 #include <linux/futex.h>
63063 +#include <linux/ptrace.h>
63064
63065 #include <asm/uaccess.h>
63066
63067 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
63068 {
63069 struct compat_robust_list_head __user *head;
63070 unsigned long ret;
63071 - const struct cred *cred = current_cred(), *pcred;
63072 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63073 + const struct cred *cred = current_cred();
63074 + const struct cred *pcred;
63075 +#endif
63076
63077 if (!futex_cmpxchg_enabled)
63078 return -ENOSYS;
63079 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
63080 if (!p)
63081 goto err_unlock;
63082 ret = -EPERM;
63083 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63084 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63085 + goto err_unlock;
63086 +#else
63087 pcred = __task_cred(p);
63088 if (cred->euid != pcred->euid &&
63089 cred->euid != pcred->uid &&
63090 !capable(CAP_SYS_PTRACE))
63091 goto err_unlock;
63092 +#endif
63093 head = p->compat_robust_list;
63094 read_unlock(&tasklist_lock);
63095 }
63096 diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
63097 --- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63098 +++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63099 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63100 }
63101
63102 #ifdef CONFIG_MODULES
63103 -static inline int within(void *addr, void *start, unsigned long size)
63104 -{
63105 - return ((addr >= start) && (addr < start + size));
63106 -}
63107 -
63108 /* Update list and generate events when modules are unloaded. */
63109 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63110 void *data)
63111 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63112 prev = NULL;
63113 /* Remove entries located in module from linked list. */
63114 for (info = gcov_info_head; info; info = info->next) {
63115 - if (within(info, mod->module_core, mod->core_size)) {
63116 + if (within_module_core_rw((unsigned long)info, mod)) {
63117 if (prev)
63118 prev->next = info->next;
63119 else
63120 diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
63121 --- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63122 +++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63123 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63124 local_irq_restore(flags);
63125 }
63126
63127 -static void run_hrtimer_softirq(struct softirq_action *h)
63128 +static void run_hrtimer_softirq(void)
63129 {
63130 hrtimer_peek_ahead_timers();
63131 }
63132 diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
63133 --- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63134 +++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63135 @@ -11,6 +11,9 @@
63136 * Changed the compression method from stem compression to "table lookup"
63137 * compression (see scripts/kallsyms.c for a more complete description)
63138 */
63139 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63140 +#define __INCLUDED_BY_HIDESYM 1
63141 +#endif
63142 #include <linux/kallsyms.h>
63143 #include <linux/module.h>
63144 #include <linux/init.h>
63145 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63146
63147 static inline int is_kernel_inittext(unsigned long addr)
63148 {
63149 + if (system_state != SYSTEM_BOOTING)
63150 + return 0;
63151 +
63152 if (addr >= (unsigned long)_sinittext
63153 && addr <= (unsigned long)_einittext)
63154 return 1;
63155 return 0;
63156 }
63157
63158 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63159 +#ifdef CONFIG_MODULES
63160 +static inline int is_module_text(unsigned long addr)
63161 +{
63162 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63163 + return 1;
63164 +
63165 + addr = ktla_ktva(addr);
63166 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63167 +}
63168 +#else
63169 +static inline int is_module_text(unsigned long addr)
63170 +{
63171 + return 0;
63172 +}
63173 +#endif
63174 +#endif
63175 +
63176 static inline int is_kernel_text(unsigned long addr)
63177 {
63178 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63179 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63180
63181 static inline int is_kernel(unsigned long addr)
63182 {
63183 +
63184 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63185 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63186 + return 1;
63187 +
63188 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63189 +#else
63190 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63191 +#endif
63192 +
63193 return 1;
63194 return in_gate_area_no_task(addr);
63195 }
63196
63197 static int is_ksym_addr(unsigned long addr)
63198 {
63199 +
63200 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63201 + if (is_module_text(addr))
63202 + return 0;
63203 +#endif
63204 +
63205 if (all_var)
63206 return is_kernel(addr);
63207
63208 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63209
63210 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63211 {
63212 - iter->name[0] = '\0';
63213 iter->nameoff = get_symbol_offset(new_pos);
63214 iter->pos = new_pos;
63215 }
63216 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63217 {
63218 struct kallsym_iter *iter = m->private;
63219
63220 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63221 + if (current_uid())
63222 + return 0;
63223 +#endif
63224 +
63225 /* Some debugging symbols have no name. Ignore them. */
63226 if (!iter->name[0])
63227 return 0;
63228 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63229 struct kallsym_iter *iter;
63230 int ret;
63231
63232 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63233 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63234 if (!iter)
63235 return -ENOMEM;
63236 reset_iter(iter, 0);
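Note on the kernel/kallsyms.c hunks above: reset_iter() no longer clears iter->name[0], so kallsyms_open() switches from kmalloc() to kzalloc() to guarantee the iterator, including its name buffer, starts out zeroed; under GRKERNSEC_HIDESYM, s_show() additionally returns nothing for non-root readers. The kmalloc-versus-kzalloc point in userspace terms, purely as an illustration:

#include <stdio.h>
#include <stdlib.h>

struct kallsym_iter_like {
        char name[128];     /* must read as "" before the first symbol is formatted */
        unsigned long pos;
};

int main(void)
{
        /* malloc() leaves the buffer indeterminate, the analogue of kmalloc(). */
        struct kallsym_iter_like *a = malloc(sizeof(*a));
        /* calloc() zeroes it, the analogue of the kzalloc() the patch switches to. */
        struct kallsym_iter_like *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        printf("zeroed iter has empty name: %d\n", b->name[0] == '\0');
        free(a);
        free(b);
        return 0;
}
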
63237 diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63238 --- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63239 +++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63240 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63241 /* Guard for recursive entry */
63242 static int exception_level;
63243
63244 -static struct kgdb_io *kgdb_io_ops;
63245 +static const struct kgdb_io *kgdb_io_ops;
63246 static DEFINE_SPINLOCK(kgdb_registration_lock);
63247
63248 /* kgdb console driver is loaded */
63249 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63250 */
63251 static atomic_t passive_cpu_wait[NR_CPUS];
63252 static atomic_t cpu_in_kgdb[NR_CPUS];
63253 -atomic_t kgdb_setting_breakpoint;
63254 +atomic_unchecked_t kgdb_setting_breakpoint;
63255
63256 struct task_struct *kgdb_usethread;
63257 struct task_struct *kgdb_contthread;
63258 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63259 sizeof(unsigned long)];
63260
63261 /* to keep track of the CPU which is doing the single stepping*/
63262 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63263 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63264
63265 /*
63266 * If you are debugging a problem where roundup (the collection of
63267 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63268 return 0;
63269 if (kgdb_connected)
63270 return 1;
63271 - if (atomic_read(&kgdb_setting_breakpoint))
63272 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63273 return 1;
63274 if (print_wait)
63275 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63276 @@ -1426,8 +1426,8 @@ acquirelock:
63277 * instance of the exception handler wanted to come into the
63278 * debugger on a different CPU via a single step
63279 */
63280 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63281 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63282 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63283 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63284
63285 atomic_set(&kgdb_active, -1);
63286 touch_softlockup_watchdog();
63287 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63288 *
63289 * Register it with the KGDB core.
63290 */
63291 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63292 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63293 {
63294 int err;
63295
63296 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63297 *
63298 * Unregister it with the KGDB core.
63299 */
63300 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63301 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63302 {
63303 BUG_ON(kgdb_connected);
63304
63305 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63306 */
63307 void kgdb_breakpoint(void)
63308 {
63309 - atomic_set(&kgdb_setting_breakpoint, 1);
63310 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63311 wmb(); /* Sync point before breakpoint */
63312 arch_kgdb_breakpoint();
63313 wmb(); /* Sync point after breakpoint */
63314 - atomic_set(&kgdb_setting_breakpoint, 0);
63315 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63316 }
63317 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63318
63319 diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63320 --- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63321 +++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63322 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63323 * If module auto-loading support is disabled then this function
63324 * becomes a no-operation.
63325 */
63326 -int __request_module(bool wait, const char *fmt, ...)
63327 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63328 {
63329 - va_list args;
63330 char module_name[MODULE_NAME_LEN];
63331 unsigned int max_modprobes;
63332 int ret;
63333 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63334 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63335 static char *envp[] = { "HOME=/",
63336 "TERM=linux",
63337 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63338 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63339 if (ret)
63340 return ret;
63341
63342 - va_start(args, fmt);
63343 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63344 - va_end(args);
63345 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63346 if (ret >= MODULE_NAME_LEN)
63347 return -ENAMETOOLONG;
63348
63349 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63350 + if (!current_uid()) {
63351 + /* hack to workaround consolekit/udisks stupidity */
63352 + read_lock(&tasklist_lock);
63353 + if (!strcmp(current->comm, "mount") &&
63354 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63355 + read_unlock(&tasklist_lock);
63356 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63357 + return -EPERM;
63358 + }
63359 + read_unlock(&tasklist_lock);
63360 + }
63361 +#endif
63362 +
63363 /* If modprobe needs a service that is in a module, we get a recursive
63364 * loop. Limit the number of running kmod threads to max_threads/2 or
63365 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63366 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63367 atomic_dec(&kmod_concurrent);
63368 return ret;
63369 }
63370 +
63371 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63372 +{
63373 + va_list args;
63374 + int ret;
63375 +
63376 + va_start(args, fmt);
63377 + ret = ____request_module(wait, module_param, fmt, args);
63378 + va_end(args);
63379 +
63380 + return ret;
63381 +}
63382 +
63383 +int __request_module(bool wait, const char *fmt, ...)
63384 +{
63385 + va_list args;
63386 + int ret;
63387 +
63388 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63389 + if (current_uid()) {
63390 + char module_param[MODULE_NAME_LEN];
63391 +
63392 + memset(module_param, 0, sizeof(module_param));
63393 +
63394 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63395 +
63396 + va_start(args, fmt);
63397 + ret = ____request_module(wait, module_param, fmt, args);
63398 + va_end(args);
63399 +
63400 + return ret;
63401 + }
63402 +#endif
63403 +
63404 + va_start(args, fmt);
63405 + ret = ____request_module(wait, NULL, fmt, args);
63406 + va_end(args);
63407 +
63408 + return ret;
63409 +}
63410 +
63411 +
63412 EXPORT_SYMBOL(__request_module);
63413 #endif /* CONFIG_MODULES */
63414
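Note on the kernel/kmod.c hunk above: __request_module() is split into a va_list worker (____request_module) that also accepts an extra modprobe argument, plus thin variadic front-ends. Under GRKERNSEC_MODHARDEN, requests from non-root users get a grsec_modharden_normal<uid>_ marker appended so later stages of module loading can tell the request came from an unprivileged user, and filesystem-module auto-loading triggered by udisks' mount helper is refused outright. A standalone sketch of the variadic-to-va_list wrapper shape; the names echo the patch, but nothing below is kernel code:

#include <stdarg.h>
#include <stdio.h>

/* Worker: takes a prepared va_list plus an optional extra modprobe argument. */
static int do_request(const char *module_param, const char *fmt, va_list ap)
{
        char module_name[64];

        vsnprintf(module_name, sizeof(module_name), fmt, ap);
        printf("modprobe -q -- %s %s\n", module_name,
               module_param ? module_param : "");
        return 0;
}

/* Front-end that forwards an extra argument, like ___request_module(). */
static int request_module_param(const char *module_param, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = do_request(module_param, fmt, args);
        va_end(args);
        return ret;
}

/* Plain front-end, like the re-exported __request_module(). */
static int request_module(const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = do_request(NULL, fmt, args);
        va_end(args);
        return ret;
}

int main(void)
{
        request_module("fs-%s", "ext4");
        /* What MODHARDEN effectively appends for a non-root requester: */
        request_module_param("grsec_modharden_normal1000_", "fs-%s", "ext4");
        return 0;
}
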
63415 diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63416 --- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63417 +++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63418 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63419 * kernel image and loaded module images reside. This is required
63420 * so x86_64 can correctly handle the %rip-relative fixups.
63421 */
63422 - kip->insns = module_alloc(PAGE_SIZE);
63423 + kip->insns = module_alloc_exec(PAGE_SIZE);
63424 if (!kip->insns) {
63425 kfree(kip);
63426 return NULL;
63427 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63428 */
63429 if (!list_is_singular(&kprobe_insn_pages)) {
63430 list_del(&kip->list);
63431 - module_free(NULL, kip->insns);
63432 + module_free_exec(NULL, kip->insns);
63433 kfree(kip);
63434 }
63435 return 1;
63436 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63437 {
63438 int i, err = 0;
63439 unsigned long offset = 0, size = 0;
63440 - char *modname, namebuf[128];
63441 + char *modname, namebuf[KSYM_NAME_LEN];
63442 const char *symbol_name;
63443 void *addr;
63444 struct kprobe_blackpoint *kb;
63445 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63446 const char *sym = NULL;
63447 unsigned int i = *(loff_t *) v;
63448 unsigned long offset = 0;
63449 - char *modname, namebuf[128];
63450 + char *modname, namebuf[KSYM_NAME_LEN];
63451
63452 head = &kprobe_table[i];
63453 preempt_disable();
63454 diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63455 --- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63456 +++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63457 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63458 /*
63459 * Various lockdep statistics:
63460 */
63461 -atomic_t chain_lookup_hits;
63462 -atomic_t chain_lookup_misses;
63463 -atomic_t hardirqs_on_events;
63464 -atomic_t hardirqs_off_events;
63465 -atomic_t redundant_hardirqs_on;
63466 -atomic_t redundant_hardirqs_off;
63467 -atomic_t softirqs_on_events;
63468 -atomic_t softirqs_off_events;
63469 -atomic_t redundant_softirqs_on;
63470 -atomic_t redundant_softirqs_off;
63471 -atomic_t nr_unused_locks;
63472 -atomic_t nr_cyclic_checks;
63473 -atomic_t nr_find_usage_forwards_checks;
63474 -atomic_t nr_find_usage_backwards_checks;
63475 +atomic_unchecked_t chain_lookup_hits;
63476 +atomic_unchecked_t chain_lookup_misses;
63477 +atomic_unchecked_t hardirqs_on_events;
63478 +atomic_unchecked_t hardirqs_off_events;
63479 +atomic_unchecked_t redundant_hardirqs_on;
63480 +atomic_unchecked_t redundant_hardirqs_off;
63481 +atomic_unchecked_t softirqs_on_events;
63482 +atomic_unchecked_t softirqs_off_events;
63483 +atomic_unchecked_t redundant_softirqs_on;
63484 +atomic_unchecked_t redundant_softirqs_off;
63485 +atomic_unchecked_t nr_unused_locks;
63486 +atomic_unchecked_t nr_cyclic_checks;
63487 +atomic_unchecked_t nr_find_usage_forwards_checks;
63488 +atomic_unchecked_t nr_find_usage_backwards_checks;
63489 #endif
63490
63491 /*
63492 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
63493 int i;
63494 #endif
63495
63496 +#ifdef CONFIG_PAX_KERNEXEC
63497 + start = ktla_ktva(start);
63498 +#endif
63499 +
63500 /*
63501 * static variable?
63502 */
63503 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
63504 */
63505 for_each_possible_cpu(i) {
63506 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63507 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63508 - + per_cpu_offset(i);
63509 + end = start + PERCPU_ENOUGH_ROOM;
63510
63511 if ((addr >= start) && (addr < end))
63512 return 1;
63513 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63514 if (!static_obj(lock->key)) {
63515 debug_locks_off();
63516 printk("INFO: trying to register non-static key.\n");
63517 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63518 printk("the code is fine but needs lockdep annotation.\n");
63519 printk("turning off the locking correctness validator.\n");
63520 dump_stack();
63521 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63522 if (!class)
63523 return 0;
63524 }
63525 - debug_atomic_inc((atomic_t *)&class->ops);
63526 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63527 if (very_verbose(class)) {
63528 printk("\nacquire class [%p] %s", class->key, class->name);
63529 if (class->name_version > 1)
63530 diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63531 --- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63532 +++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63533 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63534 /*
63535 * Various lockdep statistics:
63536 */
63537 -extern atomic_t chain_lookup_hits;
63538 -extern atomic_t chain_lookup_misses;
63539 -extern atomic_t hardirqs_on_events;
63540 -extern atomic_t hardirqs_off_events;
63541 -extern atomic_t redundant_hardirqs_on;
63542 -extern atomic_t redundant_hardirqs_off;
63543 -extern atomic_t softirqs_on_events;
63544 -extern atomic_t softirqs_off_events;
63545 -extern atomic_t redundant_softirqs_on;
63546 -extern atomic_t redundant_softirqs_off;
63547 -extern atomic_t nr_unused_locks;
63548 -extern atomic_t nr_cyclic_checks;
63549 -extern atomic_t nr_cyclic_check_recursions;
63550 -extern atomic_t nr_find_usage_forwards_checks;
63551 -extern atomic_t nr_find_usage_forwards_recursions;
63552 -extern atomic_t nr_find_usage_backwards_checks;
63553 -extern atomic_t nr_find_usage_backwards_recursions;
63554 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
63555 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
63556 -# define debug_atomic_read(ptr) atomic_read(ptr)
63557 +extern atomic_unchecked_t chain_lookup_hits;
63558 +extern atomic_unchecked_t chain_lookup_misses;
63559 +extern atomic_unchecked_t hardirqs_on_events;
63560 +extern atomic_unchecked_t hardirqs_off_events;
63561 +extern atomic_unchecked_t redundant_hardirqs_on;
63562 +extern atomic_unchecked_t redundant_hardirqs_off;
63563 +extern atomic_unchecked_t softirqs_on_events;
63564 +extern atomic_unchecked_t softirqs_off_events;
63565 +extern atomic_unchecked_t redundant_softirqs_on;
63566 +extern atomic_unchecked_t redundant_softirqs_off;
63567 +extern atomic_unchecked_t nr_unused_locks;
63568 +extern atomic_unchecked_t nr_cyclic_checks;
63569 +extern atomic_unchecked_t nr_cyclic_check_recursions;
63570 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
63571 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63572 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
63573 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63574 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63575 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63576 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63577 #else
63578 # define debug_atomic_inc(ptr) do { } while (0)
63579 # define debug_atomic_dec(ptr) do { } while (0)
63580 diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63581 --- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63582 +++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63583 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63584
63585 static void print_name(struct seq_file *m, struct lock_class *class)
63586 {
63587 - char str[128];
63588 + char str[KSYM_NAME_LEN];
63589 const char *name = class->name;
63590
63591 if (!name) {
63592 diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63593 --- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63594 +++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63595 @@ -55,6 +55,7 @@
63596 #include <linux/async.h>
63597 #include <linux/percpu.h>
63598 #include <linux/kmemleak.h>
63599 +#include <linux/grsecurity.h>
63600
63601 #define CREATE_TRACE_POINTS
63602 #include <trace/events/module.h>
63603 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63604 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63605
63606 /* Bounds of module allocation, for speeding __module_address */
63607 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63608 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63609 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63610
63611 int register_module_notifier(struct notifier_block * nb)
63612 {
63613 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63614 return true;
63615
63616 list_for_each_entry_rcu(mod, &modules, list) {
63617 - struct symsearch arr[] = {
63618 + struct symsearch modarr[] = {
63619 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63620 NOT_GPL_ONLY, false },
63621 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63622 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63623 #endif
63624 };
63625
63626 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63627 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63628 return true;
63629 }
63630 return false;
63631 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63632 void *ptr;
63633 int cpu;
63634
63635 - if (align > PAGE_SIZE) {
63636 + if (align-1 >= PAGE_SIZE) {
63637 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63638 name, align, PAGE_SIZE);
63639 align = PAGE_SIZE;
63640 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63641 * /sys/module/foo/sections stuff
63642 * J. Corbet <corbet@lwn.net>
63643 */
63644 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63645 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63646
63647 static inline bool sect_empty(const Elf_Shdr *sect)
63648 {
63649 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63650 destroy_params(mod->kp, mod->num_kp);
63651
63652 /* This may be NULL, but that's OK */
63653 - module_free(mod, mod->module_init);
63654 + module_free(mod, mod->module_init_rw);
63655 + module_free_exec(mod, mod->module_init_rx);
63656 kfree(mod->args);
63657 if (mod->percpu)
63658 percpu_modfree(mod->percpu);
63659 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63660 percpu_modfree(mod->refptr);
63661 #endif
63662 /* Free lock-classes: */
63663 - lockdep_free_key_range(mod->module_core, mod->core_size);
63664 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63665 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63666
63667 /* Finally, free the core (containing the module structure) */
63668 - module_free(mod, mod->module_core);
63669 + module_free_exec(mod, mod->module_core_rx);
63670 + module_free(mod, mod->module_core_rw);
63671
63672 #ifdef CONFIG_MPU
63673 update_protections(current->mm);
63674 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63675 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63676 int ret = 0;
63677 const struct kernel_symbol *ksym;
63678 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63679 + int is_fs_load = 0;
63680 + int register_filesystem_found = 0;
63681 + char *p;
63682 +
63683 + p = strstr(mod->args, "grsec_modharden_fs");
63684 +
63685 + if (p) {
63686 + char *endptr = p + strlen("grsec_modharden_fs");
63687 + /* copy \0 as well */
63688 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63689 + is_fs_load = 1;
63690 + }
63691 +#endif
63692 +
63693
63694 for (i = 1; i < n; i++) {
63695 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63696 + const char *name = strtab + sym[i].st_name;
63697 +
63698 + /* it's a real shame this will never get ripped and copied
63699 + upstream! ;(
63700 + */
63701 + if (is_fs_load && !strcmp(name, "register_filesystem"))
63702 + register_filesystem_found = 1;
63703 +#endif
63704 switch (sym[i].st_shndx) {
63705 case SHN_COMMON:
63706 /* We compiled with -fno-common. These are not
63707 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63708 strtab + sym[i].st_name, mod);
63709 /* Ok if resolved. */
63710 if (ksym) {
63711 + pax_open_kernel();
63712 sym[i].st_value = ksym->value;
63713 + pax_close_kernel();
63714 break;
63715 }
63716
63717 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63718 secbase = (unsigned long)mod->percpu;
63719 else
63720 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63721 + pax_open_kernel();
63722 sym[i].st_value += secbase;
63723 + pax_close_kernel();
63724 break;
63725 }
63726 }
63727
63728 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63729 + if (is_fs_load && !register_filesystem_found) {
63730 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63731 + ret = -EPERM;
63732 + }
63733 +#endif
63734 +
63735 return ret;
63736 }
63737
63738 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63739 || s->sh_entsize != ~0UL
63740 || strstarts(secstrings + s->sh_name, ".init"))
63741 continue;
63742 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63743 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63744 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63745 + else
63746 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63747 DEBUGP("\t%s\n", secstrings + s->sh_name);
63748 }
63749 - if (m == 0)
63750 - mod->core_text_size = mod->core_size;
63751 }
63752
63753 DEBUGP("Init section allocation order:\n");
63754 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63755 || s->sh_entsize != ~0UL
63756 || !strstarts(secstrings + s->sh_name, ".init"))
63757 continue;
63758 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63759 - | INIT_OFFSET_MASK);
63760 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63761 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63762 + else
63763 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63764 + s->sh_entsize |= INIT_OFFSET_MASK;
63765 DEBUGP("\t%s\n", secstrings + s->sh_name);
63766 }
63767 - if (m == 0)
63768 - mod->init_text_size = mod->init_size;
63769 }
63770 }
63771
63772 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63773
63774 /* As per nm */
63775 static char elf_type(const Elf_Sym *sym,
63776 - Elf_Shdr *sechdrs,
63777 - const char *secstrings,
63778 - struct module *mod)
63779 + const Elf_Shdr *sechdrs,
63780 + const char *secstrings)
63781 {
63782 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63783 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63784 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63785
63786 /* Put symbol section at end of init part of module. */
63787 symsect->sh_flags |= SHF_ALLOC;
63788 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63789 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63790 symindex) | INIT_OFFSET_MASK;
63791 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63792
63793 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63794 }
63795
63796 /* Append room for core symbols at end of core part. */
63797 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63798 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63799 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63800 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63801
63802 /* Put string table section at end of init part of module. */
63803 strsect->sh_flags |= SHF_ALLOC;
63804 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63805 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63806 strindex) | INIT_OFFSET_MASK;
63807 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63808
63809 /* Append room for core symbols' strings at end of core part. */
63810 - *pstroffs = mod->core_size;
63811 + *pstroffs = mod->core_size_rx;
63812 __set_bit(0, strmap);
63813 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63814 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63815
63816 return symoffs;
63817 }
63818 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63819 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63820 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63821
63822 + pax_open_kernel();
63823 +
63824 /* Set types up while we still have access to sections. */
63825 for (i = 0; i < mod->num_symtab; i++)
63826 mod->symtab[i].st_info
63827 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63828 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
63829
63830 - mod->core_symtab = dst = mod->module_core + symoffs;
63831 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
63832 src = mod->symtab;
63833 *dst = *src;
63834 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63835 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63836 }
63837 mod->core_num_syms = ndst;
63838
63839 - mod->core_strtab = s = mod->module_core + stroffs;
63840 + mod->core_strtab = s = mod->module_core_rx + stroffs;
63841 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63842 if (test_bit(i, strmap))
63843 *++s = mod->strtab[i];
63844 +
63845 + pax_close_kernel();
63846 }
63847 #else
63848 static inline unsigned long layout_symtab(struct module *mod,
63849 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63850 #endif
63851 }
63852
63853 -static void *module_alloc_update_bounds(unsigned long size)
63854 +static void *module_alloc_update_bounds_rw(unsigned long size)
63855 {
63856 void *ret = module_alloc(size);
63857
63858 if (ret) {
63859 /* Update module bounds. */
63860 - if ((unsigned long)ret < module_addr_min)
63861 - module_addr_min = (unsigned long)ret;
63862 - if ((unsigned long)ret + size > module_addr_max)
63863 - module_addr_max = (unsigned long)ret + size;
63864 + if ((unsigned long)ret < module_addr_min_rw)
63865 + module_addr_min_rw = (unsigned long)ret;
63866 + if ((unsigned long)ret + size > module_addr_max_rw)
63867 + module_addr_max_rw = (unsigned long)ret + size;
63868 + }
63869 + return ret;
63870 +}
63871 +
63872 +static void *module_alloc_update_bounds_rx(unsigned long size)
63873 +{
63874 + void *ret = module_alloc_exec(size);
63875 +
63876 + if (ret) {
63877 + /* Update module bounds. */
63878 + if ((unsigned long)ret < module_addr_min_rx)
63879 + module_addr_min_rx = (unsigned long)ret;
63880 + if ((unsigned long)ret + size > module_addr_max_rx)
63881 + module_addr_max_rx = (unsigned long)ret + size;
63882 }
63883 return ret;
63884 }
63885 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63886 unsigned int i;
63887
63888 /* only scan the sections containing data */
63889 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63890 - (unsigned long)mod->module_core,
63891 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63892 + (unsigned long)mod->module_core_rw,
63893 sizeof(struct module), GFP_KERNEL);
63894
63895 for (i = 1; i < hdr->e_shnum; i++) {
63896 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63897 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63898 continue;
63899
63900 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63901 - (unsigned long)mod->module_core,
63902 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63903 + (unsigned long)mod->module_core_rw,
63904 sechdrs[i].sh_size, GFP_KERNEL);
63905 }
63906 }
63907 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63908 secstrings, &stroffs, strmap);
63909
63910 /* Do the allocs. */
63911 - ptr = module_alloc_update_bounds(mod->core_size);
63912 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63913 /*
63914 * The pointer to this block is stored in the module structure
63915 * which is inside the block. Just mark it as not being a
63916 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63917 err = -ENOMEM;
63918 goto free_percpu;
63919 }
63920 - memset(ptr, 0, mod->core_size);
63921 - mod->module_core = ptr;
63922 + memset(ptr, 0, mod->core_size_rw);
63923 + mod->module_core_rw = ptr;
63924
63925 - ptr = module_alloc_update_bounds(mod->init_size);
63926 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63927 /*
63928 * The pointer to this block is stored in the module structure
63929 * which is inside the block. This block doesn't need to be
63930 * scanned as it contains data and code that will be freed
63931 * after the module is initialized.
63932 */
63933 - kmemleak_ignore(ptr);
63934 - if (!ptr && mod->init_size) {
63935 + kmemleak_not_leak(ptr);
63936 + if (!ptr && mod->init_size_rw) {
63937 + err = -ENOMEM;
63938 + goto free_core_rw;
63939 + }
63940 + memset(ptr, 0, mod->init_size_rw);
63941 + mod->module_init_rw = ptr;
63942 +
63943 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63944 + kmemleak_not_leak(ptr);
63945 + if (!ptr) {
63946 err = -ENOMEM;
63947 - goto free_core;
63948 + goto free_init_rw;
63949 }
63950 - memset(ptr, 0, mod->init_size);
63951 - mod->module_init = ptr;
63952 +
63953 + pax_open_kernel();
63954 + memset(ptr, 0, mod->core_size_rx);
63955 + pax_close_kernel();
63956 + mod->module_core_rx = ptr;
63957 +
63958 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63959 + kmemleak_not_leak(ptr);
63960 + if (!ptr && mod->init_size_rx) {
63961 + err = -ENOMEM;
63962 + goto free_core_rx;
63963 + }
63964 +
63965 + pax_open_kernel();
63966 + memset(ptr, 0, mod->init_size_rx);
63967 + pax_close_kernel();
63968 + mod->module_init_rx = ptr;
63969
63970 /* Transfer each section which specifies SHF_ALLOC */
63971 DEBUGP("final section addresses:\n");
63972 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63973 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63974 continue;
63975
63976 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63977 - dest = mod->module_init
63978 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63979 - else
63980 - dest = mod->module_core + sechdrs[i].sh_entsize;
63981 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63982 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63983 + dest = mod->module_init_rw
63984 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63985 + else
63986 + dest = mod->module_init_rx
63987 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63988 + } else {
63989 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63990 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63991 + else
63992 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63993 + }
63994 +
63995 + if (sechdrs[i].sh_type != SHT_NOBITS) {
63996
63997 - if (sechdrs[i].sh_type != SHT_NOBITS)
63998 - memcpy(dest, (void *)sechdrs[i].sh_addr,
63999 - sechdrs[i].sh_size);
64000 +#ifdef CONFIG_PAX_KERNEXEC
64001 +#ifdef CONFIG_X86_64
64002 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
64003 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64004 +#endif
64005 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
64006 + pax_open_kernel();
64007 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64008 + pax_close_kernel();
64009 + } else
64010 +#endif
64011 +
64012 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64013 + }
64014 /* Update sh_addr to point to copy in image. */
64015 - sechdrs[i].sh_addr = (unsigned long)dest;
64016 +
64017 +#ifdef CONFIG_PAX_KERNEXEC
64018 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
64019 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
64020 + else
64021 +#endif
64022 +
64023 + sechdrs[i].sh_addr = (unsigned long)dest;
64024 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
64025 }
64026 /* Module has been moved. */
64027 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
64028 mod->name);
64029 if (!mod->refptr) {
64030 err = -ENOMEM;
64031 - goto free_init;
64032 + goto free_init_rx;
64033 }
64034 #endif
64035 /* Now we've moved module, initialize linked lists, etc. */
64036 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
64037 /* Set up MODINFO_ATTR fields */
64038 setup_modinfo(mod, sechdrs, infoindex);
64039
64040 + mod->args = args;
64041 +
64042 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64043 + {
64044 + char *p, *p2;
64045 +
64046 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64047 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
64048 + err = -EPERM;
64049 + goto cleanup;
64050 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64051 + p += strlen("grsec_modharden_normal");
64052 + p2 = strstr(p, "_");
64053 + if (p2) {
64054 + *p2 = '\0';
64055 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64056 + *p2 = '_';
64057 + }
64058 + err = -EPERM;
64059 + goto cleanup;
64060 + }
64061 + }
64062 +#endif
64063 +
64064 +
64065 /* Fix up syms, so that st_value is a pointer to location. */
64066 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
64067 mod);
64068 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
64069
64070 /* Now do relocations. */
64071 for (i = 1; i < hdr->e_shnum; i++) {
64072 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
64073 unsigned int info = sechdrs[i].sh_info;
64074 + strtab = (char *)sechdrs[strindex].sh_addr;
64075
64076 /* Not a valid relocation section? */
64077 if (info >= hdr->e_shnum)
64078 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
64079 * Do it before processing of module parameters, so the module
64080 * can provide parameter accessor functions of its own.
64081 */
64082 - if (mod->module_init)
64083 - flush_icache_range((unsigned long)mod->module_init,
64084 - (unsigned long)mod->module_init
64085 - + mod->init_size);
64086 - flush_icache_range((unsigned long)mod->module_core,
64087 - (unsigned long)mod->module_core + mod->core_size);
64088 + if (mod->module_init_rx)
64089 + flush_icache_range((unsigned long)mod->module_init_rx,
64090 + (unsigned long)mod->module_init_rx
64091 + + mod->init_size_rx);
64092 + flush_icache_range((unsigned long)mod->module_core_rx,
64093 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64094
64095 set_fs(old_fs);
64096
64097 - mod->args = args;
64098 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64099 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64100 mod->name);
64101 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64102 free_unload:
64103 module_unload_free(mod);
64104 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64105 + free_init_rx:
64106 percpu_modfree(mod->refptr);
64107 - free_init:
64108 #endif
64109 - module_free(mod, mod->module_init);
64110 - free_core:
64111 - module_free(mod, mod->module_core);
64112 + module_free_exec(mod, mod->module_init_rx);
64113 + free_core_rx:
64114 + module_free_exec(mod, mod->module_core_rx);
64115 + free_init_rw:
64116 + module_free(mod, mod->module_init_rw);
64117 + free_core_rw:
64118 + module_free(mod, mod->module_core_rw);
64119 /* mod will be freed with core. Don't access it beyond this line! */
64120 free_percpu:
64121 if (percpu)
64122 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64123 mod->symtab = mod->core_symtab;
64124 mod->strtab = mod->core_strtab;
64125 #endif
64126 - module_free(mod, mod->module_init);
64127 - mod->module_init = NULL;
64128 - mod->init_size = 0;
64129 - mod->init_text_size = 0;
64130 + module_free(mod, mod->module_init_rw);
64131 + module_free_exec(mod, mod->module_init_rx);
64132 + mod->module_init_rw = NULL;
64133 + mod->module_init_rx = NULL;
64134 + mod->init_size_rw = 0;
64135 + mod->init_size_rx = 0;
64136 mutex_unlock(&module_mutex);
64137
64138 return 0;
64139 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64140 unsigned long nextval;
64141
64142 /* At worst, next value is at end of module */
64143 - if (within_module_init(addr, mod))
64144 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64145 + if (within_module_init_rx(addr, mod))
64146 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64147 + else if (within_module_init_rw(addr, mod))
64148 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64149 + else if (within_module_core_rx(addr, mod))
64150 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64151 + else if (within_module_core_rw(addr, mod))
64152 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64153 else
64154 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64155 + return NULL;
64156
64157 /* Scan for closest preceding symbol, and next symbol. (ELF
64158 starts real symbols at 1). */
64159 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64160 char buf[8];
64161
64162 seq_printf(m, "%s %u",
64163 - mod->name, mod->init_size + mod->core_size);
64164 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64165 print_unload_info(m, mod);
64166
64167 /* Informative for users. */
64168 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64169 mod->state == MODULE_STATE_COMING ? "Loading":
64170 "Live");
64171 /* Used by oprofile and other similar tools. */
64172 - seq_printf(m, " 0x%p", mod->module_core);
64173 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64174
64175 /* Taints info */
64176 if (mod->taints)
64177 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
64178
64179 static int __init proc_modules_init(void)
64180 {
64181 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64182 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64183 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64184 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64185 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64186 +#else
64187 proc_create("modules", 0, NULL, &proc_modules_operations);
64188 +#endif
64189 +#else
64190 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64191 +#endif
64192 return 0;
64193 }
64194 module_init(proc_modules_init);
64195 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64196 {
64197 struct module *mod;
64198
64199 - if (addr < module_addr_min || addr > module_addr_max)
64200 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64201 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64202 return NULL;
64203
64204 list_for_each_entry_rcu(mod, &modules, list)
64205 - if (within_module_core(addr, mod)
64206 - || within_module_init(addr, mod))
64207 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64208 return mod;
64209 return NULL;
64210 }
64211 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64212 */
64213 struct module *__module_text_address(unsigned long addr)
64214 {
64215 - struct module *mod = __module_address(addr);
64216 + struct module *mod;
64217 +
64218 +#ifdef CONFIG_X86_32
64219 + addr = ktla_ktva(addr);
64220 +#endif
64221 +
64222 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64223 + return NULL;
64224 +
64225 + mod = __module_address(addr);
64226 +
64227 if (mod) {
64228 /* Make sure it's within the text section. */
64229 - if (!within(addr, mod->module_init, mod->init_text_size)
64230 - && !within(addr, mod->module_core, mod->core_text_size))
64231 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64232 mod = NULL;
64233 }
64234 return mod;
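
The kernel/module.c hunks above split each module into a writable (module_core_rw/module_init_rw) and an executable (module_core_rx/module_init_rx) mapping and steer every SHF_ALLOC section into one of the two by its section flags. The stand-alone C sketch below models only that routing decision; the field names mirror the patch, but the allocation, INIT_OFFSET_MASK handling, and pax_open_kernel()/pax_close_kernel() protection toggling are left out, so treat it as an illustration of the idea rather than the loader itself.

#include <stdio.h>

#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

struct mod_layout {
	char *core_rw;	/* writable, non-executable block */
	char *core_rx;	/* read-only, executable block */
};

/*
 * Pick the destination the way the patched copy loop does: writable (or
 * non-allocated) sections land in the RW block, everything else (text,
 * rodata) lands in the RX block.
 */
static char *route_section(struct mod_layout *m, unsigned long sh_flags,
			   unsigned long offset)
{
	if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
		return m->core_rw + offset;
	return m->core_rx + offset;
}

int main(void)
{
	static char rw[4096], rx[4096];
	struct mod_layout m = { .core_rw = rw, .core_rx = rx };

	printf(".text -> %s\n",
	       route_section(&m, SHF_ALLOC | SHF_EXECINSTR, 0) == rx ? "rx" : "rw");
	printf(".data -> %s\n",
	       route_section(&m, SHF_ALLOC | SHF_WRITE, 0) == rw ? "rw" : "rx");
	return 0;
}
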
64235 diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64236 --- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64237 +++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64238 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64239 */
64240
64241 for (;;) {
64242 - struct thread_info *owner;
64243 + struct task_struct *owner;
64244
64245 /*
64246 * If we own the BKL, then don't spin. The owner of
64247 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64248 spin_lock_mutex(&lock->wait_lock, flags);
64249
64250 debug_mutex_lock_common(lock, &waiter);
64251 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64252 + debug_mutex_add_waiter(lock, &waiter, task);
64253
64254 /* add waiting tasks to the end of the waitqueue (FIFO): */
64255 list_add_tail(&waiter.list, &lock->wait_list);
64256 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64257 * TASK_UNINTERRUPTIBLE case.)
64258 */
64259 if (unlikely(signal_pending_state(state, task))) {
64260 - mutex_remove_waiter(lock, &waiter,
64261 - task_thread_info(task));
64262 + mutex_remove_waiter(lock, &waiter, task);
64263 mutex_release(&lock->dep_map, 1, ip);
64264 spin_unlock_mutex(&lock->wait_lock, flags);
64265
64266 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64267 done:
64268 lock_acquired(&lock->dep_map, ip);
64269 /* got the lock - rejoice! */
64270 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64271 + mutex_remove_waiter(lock, &waiter, task);
64272 mutex_set_owner(lock);
64273
64274 /* set it to 0 if there are no waiters left: */
64275 diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64276 --- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64277 +++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64278 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64279 }
64280
64281 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64282 - struct thread_info *ti)
64283 + struct task_struct *task)
64284 {
64285 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64286
64287 /* Mark the current thread as blocked on the lock: */
64288 - ti->task->blocked_on = waiter;
64289 + task->blocked_on = waiter;
64290 }
64291
64292 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64293 - struct thread_info *ti)
64294 + struct task_struct *task)
64295 {
64296 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64297 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64298 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64299 - ti->task->blocked_on = NULL;
64300 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64301 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64302 + task->blocked_on = NULL;
64303
64304 list_del_init(&waiter->list);
64305 waiter->task = NULL;
64306 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64307 return;
64308
64309 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64310 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64311 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
64312 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64313 mutex_clear_owner(lock);
64314 }
64315 diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64316 --- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64317 +++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64318 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64319 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64320 extern void debug_mutex_add_waiter(struct mutex *lock,
64321 struct mutex_waiter *waiter,
64322 - struct thread_info *ti);
64323 + struct task_struct *task);
64324 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64325 - struct thread_info *ti);
64326 + struct task_struct *task);
64327 extern void debug_mutex_unlock(struct mutex *lock);
64328 extern void debug_mutex_init(struct mutex *lock, const char *name,
64329 struct lock_class_key *key);
64330
64331 static inline void mutex_set_owner(struct mutex *lock)
64332 {
64333 - lock->owner = current_thread_info();
64334 + lock->owner = current;
64335 }
64336
64337 static inline void mutex_clear_owner(struct mutex *lock)
64338 diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64339 --- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64340 +++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64341 @@ -19,7 +19,7 @@
64342 #ifdef CONFIG_SMP
64343 static inline void mutex_set_owner(struct mutex *lock)
64344 {
64345 - lock->owner = current_thread_info();
64346 + lock->owner = current;
64347 }
64348
64349 static inline void mutex_clear_owner(struct mutex *lock)
64350 diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64351 --- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64352 +++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64353 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64354 const char *board;
64355
64356 printk(KERN_WARNING "------------[ cut here ]------------\n");
64357 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64358 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64359 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64360 if (board)
64361 printk(KERN_WARNING "Hardware name: %s\n", board);
64362 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64363 */
64364 void __stack_chk_fail(void)
64365 {
64366 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64367 + dump_stack();
64368 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64369 __builtin_return_address(0));
64370 }
64371 EXPORT_SYMBOL(__stack_chk_fail);
64372 diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64373 --- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64374 +++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64375 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64376 return ret;
64377 }
64378
64379 -static struct sysfs_ops module_sysfs_ops = {
64380 +static const struct sysfs_ops module_sysfs_ops = {
64381 .show = module_attr_show,
64382 .store = module_attr_store,
64383 };
64384 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64385 return 0;
64386 }
64387
64388 -static struct kset_uevent_ops module_uevent_ops = {
64389 +static const struct kset_uevent_ops module_uevent_ops = {
64390 .filter = uevent_filter,
64391 };
64392
64393 diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64394 --- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64395 +++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64396 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64397 */
64398 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64399
64400 -static atomic64_t perf_event_id;
64401 +static atomic64_unchecked_t perf_event_id;
64402
64403 /*
64404 * Lock for (sysadmin-configurable) event reservations:
64405 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64406 * In order to keep per-task stats reliable we need to flip the event
64407 * values when we flip the contexts.
64408 */
64409 - value = atomic64_read(&next_event->count);
64410 - value = atomic64_xchg(&event->count, value);
64411 - atomic64_set(&next_event->count, value);
64412 + value = atomic64_read_unchecked(&next_event->count);
64413 + value = atomic64_xchg_unchecked(&event->count, value);
64414 + atomic64_set_unchecked(&next_event->count, value);
64415
64416 swap(event->total_time_enabled, next_event->total_time_enabled);
64417 swap(event->total_time_running, next_event->total_time_running);
64418 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64419 update_event_times(event);
64420 }
64421
64422 - return atomic64_read(&event->count);
64423 + return atomic64_read_unchecked(&event->count);
64424 }
64425
64426 /*
64427 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64428 values[n++] = 1 + leader->nr_siblings;
64429 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64430 values[n++] = leader->total_time_enabled +
64431 - atomic64_read(&leader->child_total_time_enabled);
64432 + atomic64_read_unchecked(&leader->child_total_time_enabled);
64433 }
64434 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64435 values[n++] = leader->total_time_running +
64436 - atomic64_read(&leader->child_total_time_running);
64437 + atomic64_read_unchecked(&leader->child_total_time_running);
64438 }
64439
64440 size = n * sizeof(u64);
64441 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64442 values[n++] = perf_event_read_value(event);
64443 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64444 values[n++] = event->total_time_enabled +
64445 - atomic64_read(&event->child_total_time_enabled);
64446 + atomic64_read_unchecked(&event->child_total_time_enabled);
64447 }
64448 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64449 values[n++] = event->total_time_running +
64450 - atomic64_read(&event->child_total_time_running);
64451 + atomic64_read_unchecked(&event->child_total_time_running);
64452 }
64453 if (read_format & PERF_FORMAT_ID)
64454 values[n++] = primary_event_id(event);
64455 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64456 static void perf_event_reset(struct perf_event *event)
64457 {
64458 (void)perf_event_read(event);
64459 - atomic64_set(&event->count, 0);
64460 + atomic64_set_unchecked(&event->count, 0);
64461 perf_event_update_userpage(event);
64462 }
64463
64464 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64465 ++userpg->lock;
64466 barrier();
64467 userpg->index = perf_event_index(event);
64468 - userpg->offset = atomic64_read(&event->count);
64469 + userpg->offset = atomic64_read_unchecked(&event->count);
64470 if (event->state == PERF_EVENT_STATE_ACTIVE)
64471 - userpg->offset -= atomic64_read(&event->hw.prev_count);
64472 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64473
64474 userpg->time_enabled = event->total_time_enabled +
64475 - atomic64_read(&event->child_total_time_enabled);
64476 + atomic64_read_unchecked(&event->child_total_time_enabled);
64477
64478 userpg->time_running = event->total_time_running +
64479 - atomic64_read(&event->child_total_time_running);
64480 + atomic64_read_unchecked(&event->child_total_time_running);
64481
64482 barrier();
64483 ++userpg->lock;
64484 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64485 u64 values[4];
64486 int n = 0;
64487
64488 - values[n++] = atomic64_read(&event->count);
64489 + values[n++] = atomic64_read_unchecked(&event->count);
64490 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64491 values[n++] = event->total_time_enabled +
64492 - atomic64_read(&event->child_total_time_enabled);
64493 + atomic64_read_unchecked(&event->child_total_time_enabled);
64494 }
64495 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64496 values[n++] = event->total_time_running +
64497 - atomic64_read(&event->child_total_time_running);
64498 + atomic64_read_unchecked(&event->child_total_time_running);
64499 }
64500 if (read_format & PERF_FORMAT_ID)
64501 values[n++] = primary_event_id(event);
64502 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64503 if (leader != event)
64504 leader->pmu->read(leader);
64505
64506 - values[n++] = atomic64_read(&leader->count);
64507 + values[n++] = atomic64_read_unchecked(&leader->count);
64508 if (read_format & PERF_FORMAT_ID)
64509 values[n++] = primary_event_id(leader);
64510
64511 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64512 if (sub != event)
64513 sub->pmu->read(sub);
64514
64515 - values[n++] = atomic64_read(&sub->count);
64516 + values[n++] = atomic64_read_unchecked(&sub->count);
64517 if (read_format & PERF_FORMAT_ID)
64518 values[n++] = primary_event_id(sub);
64519
64520 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64521 {
64522 struct hw_perf_event *hwc = &event->hw;
64523
64524 - atomic64_add(nr, &event->count);
64525 + atomic64_add_unchecked(nr, &event->count);
64526
64527 if (!hwc->sample_period)
64528 return;
64529 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64530 u64 now;
64531
64532 now = cpu_clock(cpu);
64533 - prev = atomic64_read(&event->hw.prev_count);
64534 - atomic64_set(&event->hw.prev_count, now);
64535 - atomic64_add(now - prev, &event->count);
64536 + prev = atomic64_read_unchecked(&event->hw.prev_count);
64537 + atomic64_set_unchecked(&event->hw.prev_count, now);
64538 + atomic64_add_unchecked(now - prev, &event->count);
64539 }
64540
64541 static int cpu_clock_perf_event_enable(struct perf_event *event)
64542 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64543 struct hw_perf_event *hwc = &event->hw;
64544 int cpu = raw_smp_processor_id();
64545
64546 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64547 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64548 perf_swevent_start_hrtimer(event);
64549
64550 return 0;
64551 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64552 u64 prev;
64553 s64 delta;
64554
64555 - prev = atomic64_xchg(&event->hw.prev_count, now);
64556 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64557 delta = now - prev;
64558 - atomic64_add(delta, &event->count);
64559 + atomic64_add_unchecked(delta, &event->count);
64560 }
64561
64562 static int task_clock_perf_event_enable(struct perf_event *event)
64563 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64564
64565 now = event->ctx->time;
64566
64567 - atomic64_set(&hwc->prev_count, now);
64568 + atomic64_set_unchecked(&hwc->prev_count, now);
64569
64570 perf_swevent_start_hrtimer(event);
64571
64572 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64573 event->parent = parent_event;
64574
64575 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64576 - event->id = atomic64_inc_return(&perf_event_id);
64577 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64578
64579 event->state = PERF_EVENT_STATE_INACTIVE;
64580
64581 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64582 if (child_event->attr.inherit_stat)
64583 perf_event_read_event(child_event, child);
64584
64585 - child_val = atomic64_read(&child_event->count);
64586 + child_val = atomic64_read_unchecked(&child_event->count);
64587
64588 /*
64589 * Add back the child's count to the parent's count:
64590 */
64591 - atomic64_add(child_val, &parent_event->count);
64592 - atomic64_add(child_event->total_time_enabled,
64593 + atomic64_add_unchecked(child_val, &parent_event->count);
64594 + atomic64_add_unchecked(child_event->total_time_enabled,
64595 &parent_event->child_total_time_enabled);
64596 - atomic64_add(child_event->total_time_running,
64597 + atomic64_add_unchecked(child_event->total_time_running,
64598 &parent_event->child_total_time_running);
64599
64600 /*
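
The perf_event.c conversions above move statistics counters such as event->count from atomic64_t to atomic64_unchecked_t. The point of the split is that, with PaX's REFCOUNT hardening, the plain atomic operations detect signed overflow to stop reference-count exploits, while the *_unchecked variants skip that test for counters that are allowed to grow or wrap. The snippet below is a plain-C model of that policy only; the real primitives are per-architecture assembly and handle a detected overflow differently than this saturating stand-in, so treat the names and behavior as illustrative.

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t v; } atomic64_model_t;
typedef struct { int64_t v; } atomic64_unchecked_model_t;

/* checked flavour: detect and refuse to wrap what might be a refcount */
static void atomic64_add_model(int64_t n, atomic64_model_t *a)
{
	if (n > 0 && a->v > INT64_MAX - n) {
		fprintf(stderr, "refcount overflow detected, saturating\n");
		a->v = INT64_MAX;
		return;
	}
	a->v += n;
}

/* unchecked flavour: plain arithmetic, fine for statistics counters */
static void atomic64_add_unchecked_model(int64_t n, atomic64_unchecked_model_t *a)
{
	a->v += n;
}

int main(void)
{
	atomic64_model_t refcount = { INT64_MAX - 1 };
	atomic64_unchecked_model_t stat = { 0 };

	atomic64_add_unchecked_model(12345, &stat);	/* e.g. event->count += nr */
	atomic64_add_model(5, &refcount);		/* detected and reported */

	printf("stat = %lld, refcount = %lld\n",
	       (long long)stat.v, (long long)refcount.v);
	return 0;
}
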
64601 diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64602 --- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64603 +++ linux-2.6.32.45/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
64604 @@ -33,6 +33,7 @@
64605 #include <linux/rculist.h>
64606 #include <linux/bootmem.h>
64607 #include <linux/hash.h>
64608 +#include <linux/security.h>
64609 #include <linux/pid_namespace.h>
64610 #include <linux/init_task.h>
64611 #include <linux/syscalls.h>
64612 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64613
64614 int pid_max = PID_MAX_DEFAULT;
64615
64616 -#define RESERVED_PIDS 300
64617 +#define RESERVED_PIDS 500
64618
64619 int pid_max_min = RESERVED_PIDS + 1;
64620 int pid_max_max = PID_MAX_LIMIT;
64621 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64622 */
64623 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64624 {
64625 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64626 + struct task_struct *task;
64627 +
64628 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64629 +
64630 + if (gr_pid_is_chrooted(task))
64631 + return NULL;
64632 +
64633 + return task;
64634 }
64635
64636 struct task_struct *find_task_by_vpid(pid_t vnr)
64637 @@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
64638 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64639 }
64640
64641 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64642 +{
64643 + struct task_struct *task;
64644 +
64645 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64646 +}
64647 +
64648 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64649 {
64650 struct pid *pid;
64651 diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64652 --- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64653 +++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64654 @@ -6,6 +6,7 @@
64655 #include <linux/posix-timers.h>
64656 #include <linux/errno.h>
64657 #include <linux/math64.h>
64658 +#include <linux/security.h>
64659 #include <asm/uaccess.h>
64660 #include <linux/kernel_stat.h>
64661 #include <trace/events/timer.h>
64662 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64663
64664 static __init int init_posix_cpu_timers(void)
64665 {
64666 - struct k_clock process = {
64667 + static struct k_clock process = {
64668 .clock_getres = process_cpu_clock_getres,
64669 .clock_get = process_cpu_clock_get,
64670 .clock_set = do_posix_clock_nosettime,
64671 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64672 .nsleep = process_cpu_nsleep,
64673 .nsleep_restart = process_cpu_nsleep_restart,
64674 };
64675 - struct k_clock thread = {
64676 + static struct k_clock thread = {
64677 .clock_getres = thread_cpu_clock_getres,
64678 .clock_get = thread_cpu_clock_get,
64679 .clock_set = do_posix_clock_nosettime,
64680 diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64681 --- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64682 +++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
64683 @@ -42,6 +42,7 @@
64684 #include <linux/compiler.h>
64685 #include <linux/idr.h>
64686 #include <linux/posix-timers.h>
64687 +#include <linux/grsecurity.h>
64688 #include <linux/syscalls.h>
64689 #include <linux/wait.h>
64690 #include <linux/workqueue.h>
64691 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64692 * which we beg off on and pass to do_sys_settimeofday().
64693 */
64694
64695 -static struct k_clock posix_clocks[MAX_CLOCKS];
64696 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64697
64698 /*
64699 * These ones are defined below.
64700 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64701 */
64702 #define CLOCK_DISPATCH(clock, call, arglist) \
64703 ((clock) < 0 ? posix_cpu_##call arglist : \
64704 - (posix_clocks[clock].call != NULL \
64705 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64706 + (posix_clocks[clock]->call != NULL \
64707 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64708
64709 /*
64710 * Default clock hook functions when the struct k_clock passed
64711 @@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64712 struct timespec *tp)
64713 {
64714 tp->tv_sec = 0;
64715 - tp->tv_nsec = posix_clocks[which_clock].res;
64716 + tp->tv_nsec = posix_clocks[which_clock]->res;
64717 return 0;
64718 }
64719
64720 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64721 return 0;
64722 if ((unsigned) which_clock >= MAX_CLOCKS)
64723 return 1;
64724 - if (posix_clocks[which_clock].clock_getres != NULL)
64725 + if (!posix_clocks[which_clock])
64726 return 0;
64727 - if (posix_clocks[which_clock].res != 0)
64728 + if (posix_clocks[which_clock]->clock_getres != NULL)
64729 + return 0;
64730 + if (posix_clocks[which_clock]->res != 0)
64731 return 0;
64732 return 1;
64733 }
64734 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64735 */
64736 static __init int init_posix_timers(void)
64737 {
64738 - struct k_clock clock_realtime = {
64739 + static struct k_clock clock_realtime = {
64740 .clock_getres = hrtimer_get_res,
64741 };
64742 - struct k_clock clock_monotonic = {
64743 + static struct k_clock clock_monotonic = {
64744 .clock_getres = hrtimer_get_res,
64745 .clock_get = posix_ktime_get_ts,
64746 .clock_set = do_posix_clock_nosettime,
64747 };
64748 - struct k_clock clock_monotonic_raw = {
64749 + static struct k_clock clock_monotonic_raw = {
64750 .clock_getres = hrtimer_get_res,
64751 .clock_get = posix_get_monotonic_raw,
64752 .clock_set = do_posix_clock_nosettime,
64753 .timer_create = no_timer_create,
64754 .nsleep = no_nsleep,
64755 };
64756 - struct k_clock clock_realtime_coarse = {
64757 + static struct k_clock clock_realtime_coarse = {
64758 .clock_getres = posix_get_coarse_res,
64759 .clock_get = posix_get_realtime_coarse,
64760 .clock_set = do_posix_clock_nosettime,
64761 .timer_create = no_timer_create,
64762 .nsleep = no_nsleep,
64763 };
64764 - struct k_clock clock_monotonic_coarse = {
64765 + static struct k_clock clock_monotonic_coarse = {
64766 .clock_getres = posix_get_coarse_res,
64767 .clock_get = posix_get_monotonic_coarse,
64768 .clock_set = do_posix_clock_nosettime,
64769 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64770 .nsleep = no_nsleep,
64771 };
64772
64773 + pax_track_stack();
64774 +
64775 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64776 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64777 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64778 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64779 return;
64780 }
64781
64782 - posix_clocks[clock_id] = *new_clock;
64783 + posix_clocks[clock_id] = new_clock;
64784 }
64785 EXPORT_SYMBOL_GPL(register_posix_clock);
64786
64787 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64788 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64789 return -EFAULT;
64790
64791 + /* Only the CLOCK_REALTIME clock can be set; all other clocks
64792 + have their clock_set fptr set to a nosettime dummy function.
64793 + CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
64794 + call common_clock_set, which calls do_sys_settimeofday, which
64795 + we hook.
64796 + */
64797 +
64798 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64799 }
64800
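
The posix-timers.c change above turns posix_clocks[] from an array of struct k_clock copies into an array of pointers, which is why every k_clock initializer in init_posix_timers() and init_posix_cpu_timers() gains a static qualifier: register_posix_clock() now stores the caller's pointer, and a pointer into an init function's stack frame would dangle once that function returns. A minimal userspace sketch of that lifetime rule, with illustrative names rather than the kernel's types:

#include <stdio.h>

struct k_clock_model {
	int res;	/* clock resolution, the only field we model */
};

static struct k_clock_model *registry[2];	/* array of pointers, as in the patch */

static void register_clock(int id, struct k_clock_model *clk)
{
	registry[id] = clk;	/* stores the pointer, not a copy of the struct */
}

static void init_clocks(void)
{
	/*
	 * 'static' gives the object program lifetime; without it, registry[0]
	 * would point into this function's dead stack frame after return.
	 */
	static struct k_clock_model realtime = { .res = 1 };

	register_clock(0, &realtime);
}

int main(void)
{
	init_clocks();
	printf("clock 0 resolution = %d\n", registry[0]->res);
	return 0;
}
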
64801 diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64802 --- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64803 +++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64804 @@ -48,14 +48,14 @@ enum {
64805
64806 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64807
64808 -static struct platform_hibernation_ops *hibernation_ops;
64809 +static const struct platform_hibernation_ops *hibernation_ops;
64810
64811 /**
64812 * hibernation_set_ops - set the global hibernate operations
64813 * @ops: the hibernation operations to use in subsequent hibernation transitions
64814 */
64815
64816 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
64817 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64818 {
64819 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64820 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64821 diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64822 --- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64823 +++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64824 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64825 .enable_mask = SYSRQ_ENABLE_BOOT,
64826 };
64827
64828 -static int pm_sysrq_init(void)
64829 +static int __init pm_sysrq_init(void)
64830 {
64831 register_sysrq_key('o', &sysrq_poweroff_op);
64832 return 0;
64833 diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64834 --- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64835 +++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64836 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64837 struct timeval start, end;
64838 u64 elapsed_csecs64;
64839 unsigned int elapsed_csecs;
64840 + bool timedout = false;
64841
64842 do_gettimeofday(&start);
64843
64844 end_time = jiffies + TIMEOUT;
64845 do {
64846 todo = 0;
64847 + if (time_after(jiffies, end_time))
64848 + timedout = true;
64849 read_lock(&tasklist_lock);
64850 do_each_thread(g, p) {
64851 if (frozen(p) || !freezeable(p))
64852 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64853 * It is "frozen enough". If the task does wake
64854 * up, it will immediately call try_to_freeze.
64855 */
64856 - if (!task_is_stopped_or_traced(p) &&
64857 - !freezer_should_skip(p))
64858 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64859 todo++;
64860 + if (timedout) {
64861 + printk(KERN_ERR "Task refusing to freeze:\n");
64862 + sched_show_task(p);
64863 + }
64864 + }
64865 } while_each_thread(g, p);
64866 read_unlock(&tasklist_lock);
64867 yield(); /* Yield is okay here */
64868 - if (time_after(jiffies, end_time))
64869 - break;
64870 - } while (todo);
64871 + } while (todo && !timedout);
64872
64873 do_gettimeofday(&end);
64874 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
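
The kernel/power/process.c hunk above reworks the freezer loop in try_to_freeze_tasks() so that, instead of silently breaking out when the deadline expires, a final pass runs with timedout set and names every task that still refuses to freeze before the loop exits via while (todo && !timedout). The following sketch reproduces just that control flow, with a fake task list and a pass counter standing in for the jiffies deadline:

#include <stdbool.h>
#include <stdio.h>

static bool freezes(int id) { return id != 3; }	/* pretend task 3 is stuck */

int main(void)
{
	bool timedout = false;
	int todo, passes = 0;

	do {
		todo = 0;
		if (++passes > 3)	/* stand-in for time_after(jiffies, end_time) */
			timedout = true;
		for (int id = 0; id < 5; id++) {
			if (!freezes(id)) {
				todo++;
				if (timedout)
					printf("Task refusing to freeze: %d\n", id);
			}
		}
	} while (todo && !timedout);

	return todo ? 1 : 0;
}
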
64875 diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64876 --- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64877 +++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64878 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64879 [PM_SUSPEND_MEM] = "mem",
64880 };
64881
64882 -static struct platform_suspend_ops *suspend_ops;
64883 +static const struct platform_suspend_ops *suspend_ops;
64884
64885 /**
64886 * suspend_set_ops - Set the global suspend method table.
64887 * @ops: Pointer to ops structure.
64888 */
64889 -void suspend_set_ops(struct platform_suspend_ops *ops)
64890 +void suspend_set_ops(const struct platform_suspend_ops *ops)
64891 {
64892 mutex_lock(&pm_mutex);
64893 suspend_ops = ops;
64894 diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64895 --- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64896 +++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64897 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64898 char c;
64899 int error = 0;
64900
64901 +#ifdef CONFIG_GRKERNSEC_DMESG
64902 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64903 + return -EPERM;
64904 +#endif
64905 +
64906 error = security_syslog(type);
64907 if (error)
64908 return error;
64909 diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64910 --- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64911 +++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64912 @@ -39,7 +39,7 @@ struct profile_hit {
64913 /* Oprofile timer tick hook */
64914 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64915
64916 -static atomic_t *prof_buffer;
64917 +static atomic_unchecked_t *prof_buffer;
64918 static unsigned long prof_len, prof_shift;
64919
64920 int prof_on __read_mostly;
64921 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64922 hits[i].pc = 0;
64923 continue;
64924 }
64925 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64926 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64927 hits[i].hits = hits[i].pc = 0;
64928 }
64929 }
64930 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64931 * Add the current hit(s) and flush the write-queue out
64932 * to the global buffer:
64933 */
64934 - atomic_add(nr_hits, &prof_buffer[pc]);
64935 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64936 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64937 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64938 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64939 hits[i].pc = hits[i].hits = 0;
64940 }
64941 out:
64942 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64943 if (prof_on != type || !prof_buffer)
64944 return;
64945 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64946 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64947 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64948 }
64949 #endif /* !CONFIG_SMP */
64950 EXPORT_SYMBOL_GPL(profile_hits);
64951 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64952 return -EFAULT;
64953 buf++; p++; count--; read++;
64954 }
64955 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64956 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64957 if (copy_to_user(buf, (void *)pnt, count))
64958 return -EFAULT;
64959 read += count;
64960 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64961 }
64962 #endif
64963 profile_discard_flip_buffers();
64964 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64965 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64966 return count;
64967 }
64968
64969 diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64970 --- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64971 +++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64972 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64973 return ret;
64974 }
64975
64976 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64977 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64978 + unsigned int log)
64979 {
64980 const struct cred *cred = current_cred(), *tcred;
64981
64982 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64983 cred->gid != tcred->egid ||
64984 cred->gid != tcred->sgid ||
64985 cred->gid != tcred->gid) &&
64986 - !capable(CAP_SYS_PTRACE)) {
64987 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64988 + (log && !capable(CAP_SYS_PTRACE)))
64989 + ) {
64990 rcu_read_unlock();
64991 return -EPERM;
64992 }
64993 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64994 smp_rmb();
64995 if (task->mm)
64996 dumpable = get_dumpable(task->mm);
64997 - if (!dumpable && !capable(CAP_SYS_PTRACE))
64998 + if (!dumpable &&
64999 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65000 + (log && !capable(CAP_SYS_PTRACE))))
65001 return -EPERM;
65002
65003 return security_ptrace_access_check(task, mode);
65004 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
65005 {
65006 int err;
65007 task_lock(task);
65008 - err = __ptrace_may_access(task, mode);
65009 + err = __ptrace_may_access(task, mode, 0);
65010 + task_unlock(task);
65011 + return !err;
65012 +}
65013 +
65014 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65015 +{
65016 + int err;
65017 + task_lock(task);
65018 + err = __ptrace_may_access(task, mode, 1);
65019 task_unlock(task);
65020 return !err;
65021 }
65022 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
65023 goto out;
65024
65025 task_lock(task);
65026 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65027 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65028 task_unlock(task);
65029 if (retval)
65030 goto unlock_creds;
65031 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
65032 goto unlock_tasklist;
65033
65034 task->ptrace = PT_PTRACED;
65035 - if (capable(CAP_SYS_PTRACE))
65036 + if (capable_nolog(CAP_SYS_PTRACE))
65037 task->ptrace |= PT_PTRACE_CAP;
65038
65039 __ptrace_link(task, current);
65040 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
65041 {
65042 int copied = 0;
65043
65044 + pax_track_stack();
65045 +
65046 while (len > 0) {
65047 char buf[128];
65048 int this_len, retval;
65049 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
65050 {
65051 int copied = 0;
65052
65053 + pax_track_stack();
65054 +
65055 while (len > 0) {
65056 char buf[128];
65057 int this_len, retval;
65058 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
65059 int ret = -EIO;
65060 siginfo_t siginfo;
65061
65062 + pax_track_stack();
65063 +
65064 switch (request) {
65065 case PTRACE_PEEKTEXT:
65066 case PTRACE_PEEKDATA:
65067 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
65068 ret = ptrace_setoptions(child, data);
65069 break;
65070 case PTRACE_GETEVENTMSG:
65071 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
65072 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
65073 break;
65074
65075 case PTRACE_GETSIGINFO:
65076 ret = ptrace_getsiginfo(child, &siginfo);
65077 if (!ret)
65078 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
65079 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
65080 &siginfo);
65081 break;
65082
65083 case PTRACE_SETSIGINFO:
65084 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
65085 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
65086 sizeof siginfo))
65087 ret = -EFAULT;
65088 else
65089 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65090 goto out;
65091 }
65092
65093 + if (gr_handle_ptrace(child, request)) {
65094 + ret = -EPERM;
65095 + goto out_put_task_struct;
65096 + }
65097 +
65098 if (request == PTRACE_ATTACH) {
65099 ret = ptrace_attach(child);
65100 /*
65101 * Some architectures need to do book-keeping after
65102 * a ptrace attach.
65103 */
65104 - if (!ret)
65105 + if (!ret) {
65106 arch_ptrace_attach(child);
65107 + gr_audit_ptrace(child);
65108 + }
65109 goto out_put_task_struct;
65110 }
65111
65112 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65113 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65114 if (copied != sizeof(tmp))
65115 return -EIO;
65116 - return put_user(tmp, (unsigned long __user *)data);
65117 + return put_user(tmp, (__force unsigned long __user *)data);
65118 }
65119
65120 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65121 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65122 siginfo_t siginfo;
65123 int ret;
65124
65125 + pax_track_stack();
65126 +
65127 switch (request) {
65128 case PTRACE_PEEKTEXT:
65129 case PTRACE_PEEKDATA:
65130 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65131 goto out;
65132 }
65133
65134 + if (gr_handle_ptrace(child, request)) {
65135 + ret = -EPERM;
65136 + goto out_put_task_struct;
65137 + }
65138 +
65139 if (request == PTRACE_ATTACH) {
65140 ret = ptrace_attach(child);
65141 /*
65142 * Some architectures need to do book-keeping after
65143 * a ptrace attach.
65144 */
65145 - if (!ret)
65146 + if (!ret) {
65147 arch_ptrace_attach(child);
65148 + gr_audit_ptrace(child);
65149 + }
65150 goto out_put_task_struct;
65151 }
65152
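
The ptrace.c hunks above thread a log flag through __ptrace_may_access() so that user-triggered attach paths fail through the auditing capable() check while internal queries use capable_nolog() and stay silent. The sketch below models only that selection between a logging and a silent permission check; capable_logged() and capable_silent() are stand-ins invented for the example, not the kernel's helpers.

#include <stdbool.h>
#include <stdio.h>

static bool has_cap(void) { return false; }	/* pretend CAP_SYS_PTRACE is absent */

static bool capable_logged(const char *what)
{
	bool ok = has_cap();

	if (!ok)
		fprintf(stderr, "audit: denied %s\n", what);	/* kernel log in reality */
	return ok;
}

static bool capable_silent(void)
{
	return has_cap();
}

static int may_access(bool privileged_needed, bool log)
{
	if (!privileged_needed)
		return 0;
	if (log ? capable_logged("CAP_SYS_PTRACE") : capable_silent())
		return 0;
	return -1;	/* -EPERM in the kernel */
}

int main(void)
{
	printf("attach path:   %d\n", may_access(true, true));	/* audited denial */
	printf("internal path: %d\n", may_access(true, false));	/* silent denial */
	return 0;
}
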
65153 diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
65154 --- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65155 +++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65156 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65157 { 0 };
65158 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65159 { 0 };
65160 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65161 -static atomic_t n_rcu_torture_alloc;
65162 -static atomic_t n_rcu_torture_alloc_fail;
65163 -static atomic_t n_rcu_torture_free;
65164 -static atomic_t n_rcu_torture_mberror;
65165 -static atomic_t n_rcu_torture_error;
65166 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65167 +static atomic_unchecked_t n_rcu_torture_alloc;
65168 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65169 +static atomic_unchecked_t n_rcu_torture_free;
65170 +static atomic_unchecked_t n_rcu_torture_mberror;
65171 +static atomic_unchecked_t n_rcu_torture_error;
65172 static long n_rcu_torture_timers;
65173 static struct list_head rcu_torture_removed;
65174 static cpumask_var_t shuffle_tmp_mask;
65175 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65176
65177 spin_lock_bh(&rcu_torture_lock);
65178 if (list_empty(&rcu_torture_freelist)) {
65179 - atomic_inc(&n_rcu_torture_alloc_fail);
65180 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65181 spin_unlock_bh(&rcu_torture_lock);
65182 return NULL;
65183 }
65184 - atomic_inc(&n_rcu_torture_alloc);
65185 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65186 p = rcu_torture_freelist.next;
65187 list_del_init(p);
65188 spin_unlock_bh(&rcu_torture_lock);
65189 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65190 static void
65191 rcu_torture_free(struct rcu_torture *p)
65192 {
65193 - atomic_inc(&n_rcu_torture_free);
65194 + atomic_inc_unchecked(&n_rcu_torture_free);
65195 spin_lock_bh(&rcu_torture_lock);
65196 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65197 spin_unlock_bh(&rcu_torture_lock);
65198 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65199 i = rp->rtort_pipe_count;
65200 if (i > RCU_TORTURE_PIPE_LEN)
65201 i = RCU_TORTURE_PIPE_LEN;
65202 - atomic_inc(&rcu_torture_wcount[i]);
65203 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65204 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65205 rp->rtort_mbtest = 0;
65206 rcu_torture_free(rp);
65207 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65208 i = rp->rtort_pipe_count;
65209 if (i > RCU_TORTURE_PIPE_LEN)
65210 i = RCU_TORTURE_PIPE_LEN;
65211 - atomic_inc(&rcu_torture_wcount[i]);
65212 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65213 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65214 rp->rtort_mbtest = 0;
65215 list_del(&rp->rtort_free);
65216 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65217 i = old_rp->rtort_pipe_count;
65218 if (i > RCU_TORTURE_PIPE_LEN)
65219 i = RCU_TORTURE_PIPE_LEN;
65220 - atomic_inc(&rcu_torture_wcount[i]);
65221 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65222 old_rp->rtort_pipe_count++;
65223 cur_ops->deferred_free(old_rp);
65224 }
65225 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65226 return;
65227 }
65228 if (p->rtort_mbtest == 0)
65229 - atomic_inc(&n_rcu_torture_mberror);
65230 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65231 spin_lock(&rand_lock);
65232 cur_ops->read_delay(&rand);
65233 n_rcu_torture_timers++;
65234 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65235 continue;
65236 }
65237 if (p->rtort_mbtest == 0)
65238 - atomic_inc(&n_rcu_torture_mberror);
65239 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65240 cur_ops->read_delay(&rand);
65241 preempt_disable();
65242 pipe_count = p->rtort_pipe_count;
65243 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65244 rcu_torture_current,
65245 rcu_torture_current_version,
65246 list_empty(&rcu_torture_freelist),
65247 - atomic_read(&n_rcu_torture_alloc),
65248 - atomic_read(&n_rcu_torture_alloc_fail),
65249 - atomic_read(&n_rcu_torture_free),
65250 - atomic_read(&n_rcu_torture_mberror),
65251 + atomic_read_unchecked(&n_rcu_torture_alloc),
65252 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65253 + atomic_read_unchecked(&n_rcu_torture_free),
65254 + atomic_read_unchecked(&n_rcu_torture_mberror),
65255 n_rcu_torture_timers);
65256 - if (atomic_read(&n_rcu_torture_mberror) != 0)
65257 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65258 cnt += sprintf(&page[cnt], " !!!");
65259 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65260 if (i > 1) {
65261 cnt += sprintf(&page[cnt], "!!! ");
65262 - atomic_inc(&n_rcu_torture_error);
65263 + atomic_inc_unchecked(&n_rcu_torture_error);
65264 WARN_ON_ONCE(1);
65265 }
65266 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65267 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65268 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65269 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65270 cnt += sprintf(&page[cnt], " %d",
65271 - atomic_read(&rcu_torture_wcount[i]));
65272 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65273 }
65274 cnt += sprintf(&page[cnt], "\n");
65275 if (cur_ops->stats)
65276 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65277
65278 if (cur_ops->cleanup)
65279 cur_ops->cleanup();
65280 - if (atomic_read(&n_rcu_torture_error))
65281 + if (atomic_read_unchecked(&n_rcu_torture_error))
65282 rcu_torture_print_module_parms("End of test: FAILURE");
65283 else
65284 rcu_torture_print_module_parms("End of test: SUCCESS");
65285 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65286
65287 rcu_torture_current = NULL;
65288 rcu_torture_current_version = 0;
65289 - atomic_set(&n_rcu_torture_alloc, 0);
65290 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65291 - atomic_set(&n_rcu_torture_free, 0);
65292 - atomic_set(&n_rcu_torture_mberror, 0);
65293 - atomic_set(&n_rcu_torture_error, 0);
65294 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65295 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65296 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65297 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65298 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65299 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65300 - atomic_set(&rcu_torture_wcount[i], 0);
65301 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65302 for_each_possible_cpu(cpu) {
65303 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65304 per_cpu(rcu_torture_count, cpu)[i] = 0;
65305 diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65306 --- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65307 +++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65308 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65309 /*
65310 * Do softirq processing for the current CPU.
65311 */
65312 -static void rcu_process_callbacks(struct softirq_action *unused)
65313 +static void rcu_process_callbacks(void)
65314 {
65315 /*
65316 * Memory references from any prior RCU read-side critical sections
65317 diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65318 --- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65319 +++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65320 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65321 */
65322 void __rcu_read_lock(void)
65323 {
65324 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65325 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65326 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65327 }
65328 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65329 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65330 struct task_struct *t = current;
65331
65332 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65333 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65334 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65335 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65336 rcu_read_unlock_special(t);
65337 }
65338 diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65339 --- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65340 +++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65341 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65342 unsigned int flags,
65343 int *nonpad_ret)
65344 {
65345 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65346 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65347 struct rchan_buf *rbuf = in->private_data;
65348 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65349 uint64_t pos = (uint64_t) *ppos;
65350 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65351 .ops = &relay_pipe_buf_ops,
65352 .spd_release = relay_page_release,
65353 };
65354 + ssize_t ret;
65355 +
65356 + pax_track_stack();
65357
65358 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65359 return 0;
65360 diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65361 --- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65362 +++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65363 @@ -132,8 +132,18 @@ static const struct file_operations proc
65364
65365 static int __init ioresources_init(void)
65366 {
65367 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65368 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65369 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65370 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65371 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65372 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65373 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65374 +#endif
65375 +#else
65376 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65377 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65378 +#endif
65379 return 0;
65380 }
65381 __initcall(ioresources_init);
65382 diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65383 --- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65384 +++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65385 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65386 */
65387 spin_lock_irqsave(&pendowner->pi_lock, flags);
65388
65389 - WARN_ON(!pendowner->pi_blocked_on);
65390 + BUG_ON(!pendowner->pi_blocked_on);
65391 WARN_ON(pendowner->pi_blocked_on != waiter);
65392 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65393
65394 diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65395 --- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65396 +++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65397 @@ -21,7 +21,7 @@
65398 #define MAX_RT_TEST_MUTEXES 8
65399
65400 static spinlock_t rttest_lock;
65401 -static atomic_t rttest_event;
65402 +static atomic_unchecked_t rttest_event;
65403
65404 struct test_thread_data {
65405 int opcode;
65406 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65407
65408 case RTTEST_LOCKCONT:
65409 td->mutexes[td->opdata] = 1;
65410 - td->event = atomic_add_return(1, &rttest_event);
65411 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65412 return 0;
65413
65414 case RTTEST_RESET:
65415 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65416 return 0;
65417
65418 case RTTEST_RESETEVENT:
65419 - atomic_set(&rttest_event, 0);
65420 + atomic_set_unchecked(&rttest_event, 0);
65421 return 0;
65422
65423 default:
65424 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65425 return ret;
65426
65427 td->mutexes[id] = 1;
65428 - td->event = atomic_add_return(1, &rttest_event);
65429 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65430 rt_mutex_lock(&mutexes[id]);
65431 - td->event = atomic_add_return(1, &rttest_event);
65432 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65433 td->mutexes[id] = 4;
65434 return 0;
65435
65436 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65437 return ret;
65438
65439 td->mutexes[id] = 1;
65440 - td->event = atomic_add_return(1, &rttest_event);
65441 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65442 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65443 - td->event = atomic_add_return(1, &rttest_event);
65444 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65445 td->mutexes[id] = ret ? 0 : 4;
65446 return ret ? -EINTR : 0;
65447
65448 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65449 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65450 return ret;
65451
65452 - td->event = atomic_add_return(1, &rttest_event);
65453 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65454 rt_mutex_unlock(&mutexes[id]);
65455 - td->event = atomic_add_return(1, &rttest_event);
65456 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65457 td->mutexes[id] = 0;
65458 return 0;
65459
65460 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65461 break;
65462
65463 td->mutexes[dat] = 2;
65464 - td->event = atomic_add_return(1, &rttest_event);
65465 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65466 break;
65467
65468 case RTTEST_LOCKBKL:
65469 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65470 return;
65471
65472 td->mutexes[dat] = 3;
65473 - td->event = atomic_add_return(1, &rttest_event);
65474 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65475 break;
65476
65477 case RTTEST_LOCKNOWAIT:
65478 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65479 return;
65480
65481 td->mutexes[dat] = 1;
65482 - td->event = atomic_add_return(1, &rttest_event);
65483 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65484 return;
65485
65486 case RTTEST_LOCKBKL:
65487 diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65488 --- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65489 +++ linux-2.6.32.45/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
65490 @@ -5043,7 +5043,7 @@ out:
65491 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65492 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65493 */
65494 -static void run_rebalance_domains(struct softirq_action *h)
65495 +static void run_rebalance_domains(void)
65496 {
65497 int this_cpu = smp_processor_id();
65498 struct rq *this_rq = cpu_rq(this_cpu);
65499 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
65500 struct rq *rq;
65501 int cpu;
65502
65503 + pax_track_stack();
65504 +
65505 need_resched:
65506 preempt_disable();
65507 cpu = smp_processor_id();
65508 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
65509 * Look out! "owner" is an entirely speculative pointer
65510 * access and not reliable.
65511 */
65512 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65513 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65514 {
65515 unsigned int cpu;
65516 struct rq *rq;
65517 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
65518 * DEBUG_PAGEALLOC could have unmapped it if
65519 * the mutex owner just released it and exited.
65520 */
65521 - if (probe_kernel_address(&owner->cpu, cpu))
65522 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65523 return 0;
65524 #else
65525 - cpu = owner->cpu;
65526 + cpu = task_thread_info(owner)->cpu;
65527 #endif
65528
65529 /*
65530 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
65531 /*
65532 * Is that owner really running on that cpu?
65533 */
65534 - if (task_thread_info(rq->curr) != owner || need_resched())
65535 + if (rq->curr != owner || need_resched())
65536 return 0;
65537
65538 cpu_relax();
65539 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
65540 /* convert nice value [19,-20] to rlimit style value [1,40] */
65541 int nice_rlim = 20 - nice;
65542
65543 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65544 +
65545 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65546 capable(CAP_SYS_NICE));
65547 }
65548 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65549 if (nice > 19)
65550 nice = 19;
65551
65552 - if (increment < 0 && !can_nice(current, nice))
65553 + if (increment < 0 && (!can_nice(current, nice) ||
65554 + gr_handle_chroot_nice()))
65555 return -EPERM;
65556
65557 retval = security_task_setnice(current, nice);
65558 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
65559 long power;
65560 int weight;
65561
65562 - WARN_ON(!sd || !sd->groups);
65563 + BUG_ON(!sd || !sd->groups);
65564
65565 if (cpu != group_first_cpu(sd->groups))
65566 return;
65567 diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65568 --- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65569 +++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65570 @@ -41,12 +41,12 @@
65571
65572 static struct kmem_cache *sigqueue_cachep;
65573
65574 -static void __user *sig_handler(struct task_struct *t, int sig)
65575 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65576 {
65577 return t->sighand->action[sig - 1].sa.sa_handler;
65578 }
65579
65580 -static int sig_handler_ignored(void __user *handler, int sig)
65581 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65582 {
65583 /* Is it explicitly or implicitly ignored? */
65584 return handler == SIG_IGN ||
65585 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65586 static int sig_task_ignored(struct task_struct *t, int sig,
65587 int from_ancestor_ns)
65588 {
65589 - void __user *handler;
65590 + __sighandler_t handler;
65591
65592 handler = sig_handler(t, sig);
65593
65594 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65595 */
65596 user = get_uid(__task_cred(t)->user);
65597 atomic_inc(&user->sigpending);
65598 +
65599 + if (!override_rlimit)
65600 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65601 if (override_rlimit ||
65602 atomic_read(&user->sigpending) <=
65603 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65604 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65605
65606 int unhandled_signal(struct task_struct *tsk, int sig)
65607 {
65608 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65609 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65610 if (is_global_init(tsk))
65611 return 1;
65612 if (handler != SIG_IGN && handler != SIG_DFL)
65613 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65614 }
65615 }
65616
65617 + /* allow glibc communication via tgkill to other threads in our
65618 + thread group */
65619 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65620 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65621 + && gr_handle_signal(t, sig))
65622 + return -EPERM;
65623 +
65624 return security_task_kill(t, info, sig, 0);
65625 }
65626
65627 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65628 return send_signal(sig, info, p, 1);
65629 }
65630
65631 -static int
65632 +int
65633 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65634 {
65635 return send_signal(sig, info, t, 0);
65636 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65637 unsigned long int flags;
65638 int ret, blocked, ignored;
65639 struct k_sigaction *action;
65640 + int is_unhandled = 0;
65641
65642 spin_lock_irqsave(&t->sighand->siglock, flags);
65643 action = &t->sighand->action[sig-1];
65644 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65645 }
65646 if (action->sa.sa_handler == SIG_DFL)
65647 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65648 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65649 + is_unhandled = 1;
65650 ret = specific_send_sig_info(sig, info, t);
65651 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65652
65653 + /* only deal with unhandled signals; java etc. trigger SIGSEGV during
65654 + normal operation */
65655 + if (is_unhandled) {
65656 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65657 + gr_handle_crash(t, sig);
65658 + }
65659 +
65660 return ret;
65661 }
65662
65663 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65664 {
65665 int ret = check_kill_permission(sig, info, p);
65666
65667 - if (!ret && sig)
65668 + if (!ret && sig) {
65669 ret = do_send_sig_info(sig, info, p, true);
65670 + if (!ret)
65671 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65672 + }
65673
65674 return ret;
65675 }
65676 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65677 {
65678 siginfo_t info;
65679
65680 + pax_track_stack();
65681 +
65682 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65683
65684 memset(&info, 0, sizeof info);
65685 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65686 int error = -ESRCH;
65687
65688 rcu_read_lock();
65689 - p = find_task_by_vpid(pid);
65690 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65691 + /* allow glibc communication via tgkill to other threads in our
65692 + thread group */
65693 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65694 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65695 + p = find_task_by_vpid_unrestricted(pid);
65696 + else
65697 +#endif
65698 + p = find_task_by_vpid(pid);
65699 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65700 error = check_kill_permission(sig, info, p);
65701 /*
65702 diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65703 --- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65704 +++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65705 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65706 }
65707 EXPORT_SYMBOL(smp_call_function);
65708
65709 -void ipi_call_lock(void)
65710 +void ipi_call_lock(void) __acquires(call_function.lock)
65711 {
65712 spin_lock(&call_function.lock);
65713 }
65714
65715 -void ipi_call_unlock(void)
65716 +void ipi_call_unlock(void) __releases(call_function.lock)
65717 {
65718 spin_unlock(&call_function.lock);
65719 }
65720
65721 -void ipi_call_lock_irq(void)
65722 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65723 {
65724 spin_lock_irq(&call_function.lock);
65725 }
65726
65727 -void ipi_call_unlock_irq(void)
65728 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65729 {
65730 spin_unlock_irq(&call_function.lock);
65731 }
65732 diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65733 --- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65734 +++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65735 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65736
65737 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65738
65739 -char *softirq_to_name[NR_SOFTIRQS] = {
65740 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65741 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65742 "TASKLET", "SCHED", "HRTIMER", "RCU"
65743 };
65744 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65745
65746 asmlinkage void __do_softirq(void)
65747 {
65748 - struct softirq_action *h;
65749 + const struct softirq_action *h;
65750 __u32 pending;
65751 int max_restart = MAX_SOFTIRQ_RESTART;
65752 int cpu;
65753 @@ -233,7 +233,7 @@ restart:
65754 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65755
65756 trace_softirq_entry(h, softirq_vec);
65757 - h->action(h);
65758 + h->action();
65759 trace_softirq_exit(h, softirq_vec);
65760 if (unlikely(prev_count != preempt_count())) {
65761 printk(KERN_ERR "huh, entered softirq %td %s %p"
65762 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65763 local_irq_restore(flags);
65764 }
65765
65766 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65767 +void open_softirq(int nr, void (*action)(void))
65768 {
65769 - softirq_vec[nr].action = action;
65770 + pax_open_kernel();
65771 + *(void **)&softirq_vec[nr].action = action;
65772 + pax_close_kernel();
65773 }
65774
65775 /*
65776 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65777
65778 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65779
65780 -static void tasklet_action(struct softirq_action *a)
65781 +static void tasklet_action(void)
65782 {
65783 struct tasklet_struct *list;
65784
65785 @@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65786 }
65787 }
65788
65789 -static void tasklet_hi_action(struct softirq_action *a)
65790 +static void tasklet_hi_action(void)
65791 {
65792 struct tasklet_struct *list;
65793
65794 diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65795 --- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65796 +++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65797 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65798 error = -EACCES;
65799 goto out;
65800 }
65801 +
65802 + if (gr_handle_chroot_setpriority(p, niceval)) {
65803 + error = -EACCES;
65804 + goto out;
65805 + }
65806 +
65807 no_nice = security_task_setnice(p, niceval);
65808 if (no_nice) {
65809 error = no_nice;
65810 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65811 !(user = find_user(who)))
65812 goto out_unlock; /* No processes for this user */
65813
65814 - do_each_thread(g, p)
65815 + do_each_thread(g, p) {
65816 if (__task_cred(p)->uid == who)
65817 error = set_one_prio(p, niceval, error);
65818 - while_each_thread(g, p);
65819 + } while_each_thread(g, p);
65820 if (who != cred->uid)
65821 free_uid(user); /* For find_user() */
65822 break;
65823 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65824 !(user = find_user(who)))
65825 goto out_unlock; /* No processes for this user */
65826
65827 - do_each_thread(g, p)
65828 + do_each_thread(g, p) {
65829 if (__task_cred(p)->uid == who) {
65830 niceval = 20 - task_nice(p);
65831 if (niceval > retval)
65832 retval = niceval;
65833 }
65834 - while_each_thread(g, p);
65835 + } while_each_thread(g, p);
65836 if (who != cred->uid)
65837 free_uid(user); /* for find_user() */
65838 break;
65839 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65840 goto error;
65841 }
65842
65843 + if (gr_check_group_change(new->gid, new->egid, -1))
65844 + goto error;
65845 +
65846 if (rgid != (gid_t) -1 ||
65847 (egid != (gid_t) -1 && egid != old->gid))
65848 new->sgid = new->egid;
65849 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65850 goto error;
65851
65852 retval = -EPERM;
65853 +
65854 + if (gr_check_group_change(gid, gid, gid))
65855 + goto error;
65856 +
65857 if (capable(CAP_SETGID))
65858 new->gid = new->egid = new->sgid = new->fsgid = gid;
65859 else if (gid == old->gid || gid == old->sgid)
65860 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65861 if (!new_user)
65862 return -EAGAIN;
65863
65864 + /*
65865 + * We don't fail in case of NPROC limit excess here because too many
65866 + * poorly written programs don't check set*uid() return code, assuming
65867 + * it never fails if called by root. We may still enforce NPROC limit
65868 + * for programs doing set*uid()+execve() by harmlessly deferring the
65869 + * failure to the execve() stage.
65870 + */
65871 if (atomic_read(&new_user->processes) >=
65872 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65873 - new_user != INIT_USER) {
65874 - free_uid(new_user);
65875 - return -EAGAIN;
65876 - }
65877 + new_user != INIT_USER)
65878 + current->flags |= PF_NPROC_EXCEEDED;
65879 + else
65880 + current->flags &= ~PF_NPROC_EXCEEDED;
65881
65882 free_uid(new->user);
65883 new->user = new_user;
65884 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65885 goto error;
65886 }
65887
65888 + if (gr_check_user_change(new->uid, new->euid, -1))
65889 + goto error;
65890 +
65891 if (new->uid != old->uid) {
65892 retval = set_user(new);
65893 if (retval < 0)
65894 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65895 goto error;
65896
65897 retval = -EPERM;
65898 +
65899 + if (gr_check_crash_uid(uid))
65900 + goto error;
65901 + if (gr_check_user_change(uid, uid, uid))
65902 + goto error;
65903 +
65904 if (capable(CAP_SETUID)) {
65905 new->suid = new->uid = uid;
65906 if (uid != old->uid) {
65907 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65908 goto error;
65909 }
65910
65911 + if (gr_check_user_change(ruid, euid, -1))
65912 + goto error;
65913 +
65914 if (ruid != (uid_t) -1) {
65915 new->uid = ruid;
65916 if (ruid != old->uid) {
65917 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65918 goto error;
65919 }
65920
65921 + if (gr_check_group_change(rgid, egid, -1))
65922 + goto error;
65923 +
65924 if (rgid != (gid_t) -1)
65925 new->gid = rgid;
65926 if (egid != (gid_t) -1)
65927 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65928 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65929 goto error;
65930
65931 + if (gr_check_user_change(-1, -1, uid))
65932 + goto error;
65933 +
65934 if (uid == old->uid || uid == old->euid ||
65935 uid == old->suid || uid == old->fsuid ||
65936 capable(CAP_SETUID)) {
65937 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65938 if (gid == old->gid || gid == old->egid ||
65939 gid == old->sgid || gid == old->fsgid ||
65940 capable(CAP_SETGID)) {
65941 + if (gr_check_group_change(-1, -1, gid))
65942 + goto error;
65943 +
65944 if (gid != old_fsgid) {
65945 new->fsgid = gid;
65946 goto change_okay;
65947 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65948 error = get_dumpable(me->mm);
65949 break;
65950 case PR_SET_DUMPABLE:
65951 - if (arg2 < 0 || arg2 > 1) {
65952 + if (arg2 > 1) {
65953 error = -EINVAL;
65954 break;
65955 }
65956 diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65957 --- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65958 +++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65959 @@ -63,6 +63,13 @@
65960 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65961
65962 #if defined(CONFIG_SYSCTL)
65963 +#include <linux/grsecurity.h>
65964 +#include <linux/grinternal.h>
65965 +
65966 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65967 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65968 + const int op);
65969 +extern int gr_handle_chroot_sysctl(const int op);
65970
65971 /* External variables not in a header file. */
65972 extern int C_A_D;
65973 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65974 static int proc_taint(struct ctl_table *table, int write,
65975 void __user *buffer, size_t *lenp, loff_t *ppos);
65976 #endif
65977 +extern ctl_table grsecurity_table[];
65978
65979 static struct ctl_table root_table[];
65980 static struct ctl_table_root sysctl_table_root;
65981 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65982 int sysctl_legacy_va_layout;
65983 #endif
65984
65985 +#ifdef CONFIG_PAX_SOFTMODE
65986 +static ctl_table pax_table[] = {
65987 + {
65988 + .ctl_name = CTL_UNNUMBERED,
65989 + .procname = "softmode",
65990 + .data = &pax_softmode,
65991 + .maxlen = sizeof(unsigned int),
65992 + .mode = 0600,
65993 + .proc_handler = &proc_dointvec,
65994 + },
65995 +
65996 + { .ctl_name = 0 }
65997 +};
65998 +#endif
65999 +
66000 extern int prove_locking;
66001 extern int lock_stat;
66002
66003 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
66004 #endif
66005
66006 static struct ctl_table kern_table[] = {
66007 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66008 + {
66009 + .ctl_name = CTL_UNNUMBERED,
66010 + .procname = "grsecurity",
66011 + .mode = 0500,
66012 + .child = grsecurity_table,
66013 + },
66014 +#endif
66015 +
66016 +#ifdef CONFIG_PAX_SOFTMODE
66017 + {
66018 + .ctl_name = CTL_UNNUMBERED,
66019 + .procname = "pax",
66020 + .mode = 0500,
66021 + .child = pax_table,
66022 + },
66023 +#endif
66024 +
66025 {
66026 .ctl_name = CTL_UNNUMBERED,
66027 .procname = "sched_child_runs_first",
66028 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
66029 .data = &modprobe_path,
66030 .maxlen = KMOD_PATH_LEN,
66031 .mode = 0644,
66032 - .proc_handler = &proc_dostring,
66033 - .strategy = &sysctl_string,
66034 + .proc_handler = &proc_dostring_modpriv,
66035 + .strategy = &sysctl_string_modpriv,
66036 },
66037 {
66038 .ctl_name = CTL_UNNUMBERED,
66039 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
66040 .mode = 0644,
66041 .proc_handler = &proc_dointvec
66042 },
66043 + {
66044 + .procname = "heap_stack_gap",
66045 + .data = &sysctl_heap_stack_gap,
66046 + .maxlen = sizeof(sysctl_heap_stack_gap),
66047 + .mode = 0644,
66048 + .proc_handler = proc_doulongvec_minmax,
66049 + },
66050 #else
66051 {
66052 .ctl_name = CTL_UNNUMBERED,
66053 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
66054 return 0;
66055 }
66056
66057 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
66058 +
66059 static int parse_table(int __user *name, int nlen,
66060 void __user *oldval, size_t __user *oldlenp,
66061 void __user *newval, size_t newlen,
66062 @@ -1821,7 +1871,7 @@ repeat:
66063 if (n == table->ctl_name) {
66064 int error;
66065 if (table->child) {
66066 - if (sysctl_perm(root, table, MAY_EXEC))
66067 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
66068 return -EPERM;
66069 name++;
66070 nlen--;
66071 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
66072 int error;
66073 int mode;
66074
66075 + if (table->parent != NULL && table->parent->procname != NULL &&
66076 + table->procname != NULL &&
66077 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66078 + return -EACCES;
66079 + if (gr_handle_chroot_sysctl(op))
66080 + return -EACCES;
66081 + error = gr_handle_sysctl(table, op);
66082 + if (error)
66083 + return error;
66084 +
66085 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66086 + if (error)
66087 + return error;
66088 +
66089 + if (root->permissions)
66090 + mode = root->permissions(root, current->nsproxy, table);
66091 + else
66092 + mode = table->mode;
66093 +
66094 + return test_perm(mode, op);
66095 +}
66096 +
66097 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66098 +{
66099 + int error;
66100 + int mode;
66101 +
66102 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66103 if (error)
66104 return error;
66105 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66106 buffer, lenp, ppos);
66107 }
66108
66109 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66110 + void __user *buffer, size_t *lenp, loff_t *ppos)
66111 +{
66112 + if (write && !capable(CAP_SYS_MODULE))
66113 + return -EPERM;
66114 +
66115 + return _proc_do_string(table->data, table->maxlen, write,
66116 + buffer, lenp, ppos);
66117 +}
66118 +
66119
66120 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66121 int *valp,
66122 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66123 vleft = table->maxlen / sizeof(unsigned long);
66124 left = *lenp;
66125
66126 - for (; left && vleft--; i++, min++, max++, first=0) {
66127 + for (; left && vleft--; i++, first=0) {
66128 if (write) {
66129 while (left) {
66130 char c;
66131 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66132 return -ENOSYS;
66133 }
66134
66135 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66136 + void __user *buffer, size_t *lenp, loff_t *ppos)
66137 +{
66138 + return -ENOSYS;
66139 +}
66140 +
66141 int proc_dointvec(struct ctl_table *table, int write,
66142 void __user *buffer, size_t *lenp, loff_t *ppos)
66143 {
66144 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66145 return 1;
66146 }
66147
66148 +int sysctl_string_modpriv(struct ctl_table *table,
66149 + void __user *oldval, size_t __user *oldlenp,
66150 + void __user *newval, size_t newlen)
66151 +{
66152 + if (newval && newlen && !capable(CAP_SYS_MODULE))
66153 + return -EPERM;
66154 +
66155 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
66156 +}
66157 +
66158 /*
66159 * This function makes sure that all of the integers in the vector
66160 * are between the minimum and maximum values given in the arrays
66161 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66162 return -ENOSYS;
66163 }
66164
66165 +int sysctl_string_modpriv(struct ctl_table *table,
66166 + void __user *oldval, size_t __user *oldlenp,
66167 + void __user *newval, size_t newlen)
66168 +{
66169 + return -ENOSYS;
66170 +}
66171 +
66172 int sysctl_intvec(struct ctl_table *table,
66173 void __user *oldval, size_t __user *oldlenp,
66174 void __user *newval, size_t newlen)
66175 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66176 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66177 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66178 EXPORT_SYMBOL(proc_dostring);
66179 +EXPORT_SYMBOL(proc_dostring_modpriv);
66180 EXPORT_SYMBOL(proc_doulongvec_minmax);
66181 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66182 EXPORT_SYMBOL(register_sysctl_table);
66183 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66184 EXPORT_SYMBOL(sysctl_jiffies);
66185 EXPORT_SYMBOL(sysctl_ms_jiffies);
66186 EXPORT_SYMBOL(sysctl_string);
66187 +EXPORT_SYMBOL(sysctl_string_modpriv);
66188 EXPORT_SYMBOL(sysctl_data);
66189 EXPORT_SYMBOL(unregister_sysctl_table);
66190 diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
66191 --- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66192 +++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66193 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66194 } else {
66195 if ((table->strategy == sysctl_data) ||
66196 (table->strategy == sysctl_string) ||
66197 + (table->strategy == sysctl_string_modpriv) ||
66198 (table->strategy == sysctl_intvec) ||
66199 (table->strategy == sysctl_jiffies) ||
66200 (table->strategy == sysctl_ms_jiffies) ||
66201 (table->proc_handler == proc_dostring) ||
66202 + (table->proc_handler == proc_dostring_modpriv) ||
66203 (table->proc_handler == proc_dointvec) ||
66204 (table->proc_handler == proc_dointvec_minmax) ||
66205 (table->proc_handler == proc_dointvec_jiffies) ||
66206 diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
66207 --- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66208 +++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66209 @@ -26,9 +26,12 @@
66210 #include <linux/cgroup.h>
66211 #include <linux/fs.h>
66212 #include <linux/file.h>
66213 +#include <linux/grsecurity.h>
66214 #include <net/genetlink.h>
66215 #include <asm/atomic.h>
66216
66217 +extern int gr_is_taskstats_denied(int pid);
66218 +
66219 /*
66220 * Maximum length of a cpumask that can be specified in
66221 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66222 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66223 size_t size;
66224 cpumask_var_t mask;
66225
66226 + if (gr_is_taskstats_denied(current->pid))
66227 + return -EACCES;
66228 +
66229 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66230 return -ENOMEM;
66231
66232 diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66233 --- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66234 +++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66235 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66236 * then clear the broadcast bit.
66237 */
66238 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66239 - int cpu = smp_processor_id();
66240 + cpu = smp_processor_id();
66241
66242 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66243 tick_broadcast_clear_oneshot(cpu);
66244 diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66245 --- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66246 +++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66247 @@ -14,6 +14,7 @@
66248 #include <linux/init.h>
66249 #include <linux/mm.h>
66250 #include <linux/sched.h>
66251 +#include <linux/grsecurity.h>
66252 #include <linux/sysdev.h>
66253 #include <linux/clocksource.h>
66254 #include <linux/jiffies.h>
66255 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66256 */
66257 struct timespec ts = xtime;
66258 timespec_add_ns(&ts, nsec);
66259 - ACCESS_ONCE(xtime_cache) = ts;
66260 + ACCESS_ONCE_RW(xtime_cache) = ts;
66261 }
66262
66263 /* must hold xtime_lock */
66264 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66265 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66266 return -EINVAL;
66267
66268 + gr_log_timechange();
66269 +
66270 write_seqlock_irqsave(&xtime_lock, flags);
66271
66272 timekeeping_forward_now();
66273 diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66274 --- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66275 +++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66276 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66277
66278 static void print_name_offset(struct seq_file *m, void *sym)
66279 {
66280 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66281 + SEQ_printf(m, "<%p>", NULL);
66282 +#else
66283 char symname[KSYM_NAME_LEN];
66284
66285 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66286 SEQ_printf(m, "<%p>", sym);
66287 else
66288 SEQ_printf(m, "%s", symname);
66289 +#endif
66290 }
66291
66292 static void
66293 @@ -112,7 +116,11 @@ next_one:
66294 static void
66295 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66296 {
66297 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66298 + SEQ_printf(m, " .base: %p\n", NULL);
66299 +#else
66300 SEQ_printf(m, " .base: %p\n", base);
66301 +#endif
66302 SEQ_printf(m, " .index: %d\n",
66303 base->index);
66304 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66305 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66306 {
66307 struct proc_dir_entry *pe;
66308
66309 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66310 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66311 +#else
66312 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66313 +#endif
66314 if (!pe)
66315 return -ENOMEM;
66316 return 0;
66317 diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66318 --- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66319 +++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66320 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66321 static unsigned long nr_entries;
66322 static struct entry entries[MAX_ENTRIES];
66323
66324 -static atomic_t overflow_count;
66325 +static atomic_unchecked_t overflow_count;
66326
66327 /*
66328 * The entries are in a hash-table, for fast lookup:
66329 @@ -140,7 +140,7 @@ static void reset_entries(void)
66330 nr_entries = 0;
66331 memset(entries, 0, sizeof(entries));
66332 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66333 - atomic_set(&overflow_count, 0);
66334 + atomic_set_unchecked(&overflow_count, 0);
66335 }
66336
66337 static struct entry *alloc_entry(void)
66338 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66339 if (likely(entry))
66340 entry->count++;
66341 else
66342 - atomic_inc(&overflow_count);
66343 + atomic_inc_unchecked(&overflow_count);
66344
66345 out_unlock:
66346 spin_unlock_irqrestore(lock, flags);
66347 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66348
66349 static void print_name_offset(struct seq_file *m, unsigned long addr)
66350 {
66351 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66352 + seq_printf(m, "<%p>", NULL);
66353 +#else
66354 char symname[KSYM_NAME_LEN];
66355
66356 if (lookup_symbol_name(addr, symname) < 0)
66357 seq_printf(m, "<%p>", (void *)addr);
66358 else
66359 seq_printf(m, "%s", symname);
66360 +#endif
66361 }
66362
66363 static int tstats_show(struct seq_file *m, void *v)
66364 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66365
66366 seq_puts(m, "Timer Stats Version: v0.2\n");
66367 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66368 - if (atomic_read(&overflow_count))
66369 + if (atomic_read_unchecked(&overflow_count))
66370 seq_printf(m, "Overflow: %d entries\n",
66371 - atomic_read(&overflow_count));
66372 + atomic_read_unchecked(&overflow_count));
66373
66374 for (i = 0; i < nr_entries; i++) {
66375 entry = entries + i;
66376 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66377 {
66378 struct proc_dir_entry *pe;
66379
66380 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66381 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66382 +#else
66383 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66384 +#endif
66385 if (!pe)
66386 return -ENOMEM;
66387 return 0;
66388 diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66389 --- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66390 +++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66391 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66392 return error;
66393
66394 if (tz) {
66395 + /* we log in do_settimeofday(), called below, so don't log twice
66396 + */
66397 + if (!tv)
66398 + gr_log_timechange();
66399 +
66400 /* SMP safe, global irq locking makes it work. */
66401 sys_tz = *tz;
66402 update_vsyscall_tz();
66403 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66404 * Avoid unnecessary multiplications/divisions in the
66405 * two most common HZ cases:
66406 */
66407 -unsigned int inline jiffies_to_msecs(const unsigned long j)
66408 +inline unsigned int jiffies_to_msecs(const unsigned long j)
66409 {
66410 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66411 return (MSEC_PER_SEC / HZ) * j;
66412 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66413 }
66414 EXPORT_SYMBOL(jiffies_to_msecs);
66415
66416 -unsigned int inline jiffies_to_usecs(const unsigned long j)
66417 +inline unsigned int jiffies_to_usecs(const unsigned long j)
66418 {
66419 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66420 return (USEC_PER_SEC / HZ) * j;
66421 diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66422 --- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66423 +++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66424 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66425 /*
66426 * This function runs timers and the timer-tq in bottom half context.
66427 */
66428 -static void run_timer_softirq(struct softirq_action *h)
66429 +static void run_timer_softirq(void)
66430 {
66431 struct tvec_base *base = __get_cpu_var(tvec_bases);
66432
66433 diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66434 --- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66435 +++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66436 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66437 struct blk_trace *bt = filp->private_data;
66438 char buf[16];
66439
66440 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66441 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66442
66443 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66444 }
66445 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66446 return 1;
66447
66448 bt = buf->chan->private_data;
66449 - atomic_inc(&bt->dropped);
66450 + atomic_inc_unchecked(&bt->dropped);
66451 return 0;
66452 }
66453
66454 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66455
66456 bt->dir = dir;
66457 bt->dev = dev;
66458 - atomic_set(&bt->dropped, 0);
66459 + atomic_set_unchecked(&bt->dropped, 0);
66460
66461 ret = -EIO;
66462 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66463 diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66464 --- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66465 +++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66466 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66467
66468 ip = rec->ip;
66469
66470 + ret = ftrace_arch_code_modify_prepare();
66471 + FTRACE_WARN_ON(ret);
66472 + if (ret)
66473 + return 0;
66474 +
66475 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66476 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66477 if (ret) {
66478 ftrace_bug(ret, ip);
66479 rec->flags |= FTRACE_FL_FAILED;
66480 - return 0;
66481 }
66482 - return 1;
66483 + return ret ? 0 : 1;
66484 }
66485
66486 /*
66487 diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66488 --- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66489 +++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66490 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66491 * the reader page). But if the next page is a header page,
66492 * its flags will be non zero.
66493 */
66494 -static int inline
66495 +static inline int
66496 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66497 struct buffer_page *page, struct list_head *list)
66498 {
66499 diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66500 --- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66501 +++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66502 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66503 size_t rem;
66504 unsigned int i;
66505
66506 + pax_track_stack();
66507 +
66508 /* copy the tracer to avoid using a global lock all around */
66509 mutex_lock(&trace_types_lock);
66510 if (unlikely(old_tracer != current_trace && current_trace)) {
66511 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66512 int entries, size, i;
66513 size_t ret;
66514
66515 + pax_track_stack();
66516 +
66517 if (*ppos & (PAGE_SIZE - 1)) {
66518 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66519 return -EINVAL;
66520 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
66521 };
66522 #endif
66523
66524 -static struct dentry *d_tracer;
66525 -
66526 struct dentry *tracing_init_dentry(void)
66527 {
66528 + static struct dentry *d_tracer;
66529 static int once;
66530
66531 if (d_tracer)
66532 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66533 return d_tracer;
66534 }
66535
66536 -static struct dentry *d_percpu;
66537 -
66538 struct dentry *tracing_dentry_percpu(void)
66539 {
66540 + static struct dentry *d_percpu;
66541 static int once;
66542 struct dentry *d_tracer;
66543
66544 diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66545 --- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66546 +++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66547 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66548 * Modules must own their file_operations to keep up with
66549 * reference counting.
66550 */
66551 +
66552 struct ftrace_module_file_ops {
66553 struct list_head list;
66554 struct module *mod;
66555 - struct file_operations id;
66556 - struct file_operations enable;
66557 - struct file_operations format;
66558 - struct file_operations filter;
66559 };
66560
66561 static void remove_subsystem_dir(const char *name)
66562 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66563
66564 file_ops->mod = mod;
66565
66566 - file_ops->id = ftrace_event_id_fops;
66567 - file_ops->id.owner = mod;
66568 -
66569 - file_ops->enable = ftrace_enable_fops;
66570 - file_ops->enable.owner = mod;
66571 -
66572 - file_ops->filter = ftrace_event_filter_fops;
66573 - file_ops->filter.owner = mod;
66574 -
66575 - file_ops->format = ftrace_event_format_fops;
66576 - file_ops->format.owner = mod;
66577 + pax_open_kernel();
66578 + *(void **)&mod->trace_id.owner = mod;
66579 + *(void **)&mod->trace_enable.owner = mod;
66580 + *(void **)&mod->trace_filter.owner = mod;
66581 + *(void **)&mod->trace_format.owner = mod;
66582 + pax_close_kernel();
66583
66584 list_add(&file_ops->list, &ftrace_module_file_list);
66585
66586 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66587 call->mod = mod;
66588 list_add(&call->list, &ftrace_events);
66589 event_create_dir(call, d_events,
66590 - &file_ops->id, &file_ops->enable,
66591 - &file_ops->filter, &file_ops->format);
66592 + &mod->trace_id, &mod->trace_enable,
66593 + &mod->trace_filter, &mod->trace_format);
66594 }
66595 }
66596
66597 diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66598 --- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66599 +++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66600 @@ -23,7 +23,7 @@ struct header_iter {
66601 static struct trace_array *mmio_trace_array;
66602 static bool overrun_detected;
66603 static unsigned long prev_overruns;
66604 -static atomic_t dropped_count;
66605 +static atomic_unchecked_t dropped_count;
66606
66607 static void mmio_reset_data(struct trace_array *tr)
66608 {
66609 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66610
66611 static unsigned long count_overruns(struct trace_iterator *iter)
66612 {
66613 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66614 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66615 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66616
66617 if (over > prev_overruns)
66618 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66619 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66620 sizeof(*entry), 0, pc);
66621 if (!event) {
66622 - atomic_inc(&dropped_count);
66623 + atomic_inc_unchecked(&dropped_count);
66624 return;
66625 }
66626 entry = ring_buffer_event_data(event);
66627 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66628 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66629 sizeof(*entry), 0, pc);
66630 if (!event) {
66631 - atomic_inc(&dropped_count);
66632 + atomic_inc_unchecked(&dropped_count);
66633 return;
66634 }
66635 entry = ring_buffer_event_data(event);
66636 diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66637 --- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66638 +++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66639 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66640 return 0;
66641 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66642 if (!IS_ERR(p)) {
66643 - p = mangle_path(s->buffer + s->len, p, "\n");
66644 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66645 if (p) {
66646 s->len = p - s->buffer;
66647 return 1;
66648 diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66649 --- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66650 +++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66651 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66652 return;
66653
66654 /* we do not handle interrupt stacks yet */
66655 - if (!object_is_on_stack(&this_size))
66656 + if (!object_starts_on_stack(&this_size))
66657 return;
66658
66659 local_irq_save(flags);
66660 diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66661 --- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66662 +++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66663 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66664 int cpu;
66665 pid_t pid;
66666 /* Can be inserted from interrupt or user context, need to be atomic */
66667 - atomic_t inserted;
66668 + atomic_unchecked_t inserted;
66669 /*
66670 * Don't need to be atomic, works are serialized in a single workqueue thread
66671 * on a single CPU.
66672 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66673 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66674 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66675 if (node->pid == wq_thread->pid) {
66676 - atomic_inc(&node->inserted);
66677 + atomic_inc_unchecked(&node->inserted);
66678 goto found;
66679 }
66680 }
66681 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66682 tsk = get_pid_task(pid, PIDTYPE_PID);
66683 if (tsk) {
66684 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66685 - atomic_read(&cws->inserted), cws->executed,
66686 + atomic_read_unchecked(&cws->inserted), cws->executed,
66687 tsk->comm);
66688 put_task_struct(tsk);
66689 }
66690 diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66691 --- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66692 +++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66693 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66694 spin_lock_irq(&uidhash_lock);
66695 up = uid_hash_find(uid, hashent);
66696 if (up) {
66697 + put_user_ns(ns);
66698 key_put(new->uid_keyring);
66699 key_put(new->session_keyring);
66700 kmem_cache_free(uid_cachep, new);
66701 diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66702 --- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66703 +++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66704 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66705 return BUG_TRAP_TYPE_NONE;
66706
66707 bug = find_bug(bugaddr);
66708 + if (!bug)
66709 + return BUG_TRAP_TYPE_NONE;
66710
66711 printk(KERN_EMERG "------------[ cut here ]------------\n");
66712
66713 diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66714 --- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66715 +++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66716 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66717 if (limit > 4)
66718 return;
66719
66720 - is_on_stack = object_is_on_stack(addr);
66721 + is_on_stack = object_starts_on_stack(addr);
66722 if (is_on_stack == onstack)
66723 return;
66724
66725 diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66726 --- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66727 +++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66728 @@ -861,7 +861,7 @@ out:
66729
66730 static void check_for_stack(struct device *dev, void *addr)
66731 {
66732 - if (object_is_on_stack(addr))
66733 + if (object_starts_on_stack(addr))
66734 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66735 "stack [addr=%p]\n", addr);
66736 }
66737 diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66738 --- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66739 +++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66740 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66741 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66742
66743 /* if already at the top layer, we need to grow */
66744 - if (id >= 1 << (idp->layers * IDR_BITS)) {
66745 + if (id >= (1 << (idp->layers * IDR_BITS))) {
66746 *starting_id = id;
66747 return IDR_NEED_TO_GROW;
66748 }
66749 diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66750 --- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66751 +++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66752 @@ -266,7 +266,7 @@ static void free(void *where)
66753 malloc_ptr = free_mem_ptr;
66754 }
66755 #else
66756 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66757 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66758 #define free(a) kfree(a)
66759 #endif
66760
66761 diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66762 --- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66763 +++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66764 @@ -905,7 +905,7 @@ config LATENCYTOP
66765 select STACKTRACE
66766 select SCHEDSTATS
66767 select SCHED_DEBUG
66768 - depends on HAVE_LATENCYTOP_SUPPORT
66769 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66770 help
66771 Enable this option if you want to use the LatencyTOP tool
66772 to find out which userspace is blocking on what kernel operations.
66773 diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66774 --- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66775 +++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66776 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66777 return ret;
66778 }
66779
66780 -struct sysfs_ops kobj_sysfs_ops = {
66781 +const struct sysfs_ops kobj_sysfs_ops = {
66782 .show = kobj_attr_show,
66783 .store = kobj_attr_store,
66784 };
66785 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66786 * If the kset was not able to be created, NULL will be returned.
66787 */
66788 static struct kset *kset_create(const char *name,
66789 - struct kset_uevent_ops *uevent_ops,
66790 + const struct kset_uevent_ops *uevent_ops,
66791 struct kobject *parent_kobj)
66792 {
66793 struct kset *kset;
66794 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66795 * If the kset was not able to be created, NULL will be returned.
66796 */
66797 struct kset *kset_create_and_add(const char *name,
66798 - struct kset_uevent_ops *uevent_ops,
66799 + const struct kset_uevent_ops *uevent_ops,
66800 struct kobject *parent_kobj)
66801 {
66802 struct kset *kset;
66803 diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66804 --- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66805 +++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66806 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66807 const char *subsystem;
66808 struct kobject *top_kobj;
66809 struct kset *kset;
66810 - struct kset_uevent_ops *uevent_ops;
66811 + const struct kset_uevent_ops *uevent_ops;
66812 u64 seq;
66813 int i = 0;
66814 int retval = 0;
66815 diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66816 --- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66817 +++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66818 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66819 */
66820 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66821 {
66822 - WARN_ON(release == NULL);
66823 + BUG_ON(release == NULL);
66824 WARN_ON(release == (void (*)(struct kref *))kfree);
66825
66826 if (atomic_dec_and_test(&kref->refcount)) {
66827 diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66828 --- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66829 +++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66830 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66831 char *buf;
66832 int ret;
66833
66834 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66835 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66836 if (!buf)
66837 return -ENOMEM;
66838 memcpy(buf, s->from, s->to - s->from);
66839 diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66840 --- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66841 +++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66842 @@ -81,7 +81,7 @@ struct radix_tree_preload {
66843 int nr;
66844 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66845 };
66846 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66847 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66848
66849 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66850 {
66851 diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66852 --- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66853 +++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66854 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66855 */
66856 static inline u32 __seed(u32 x, u32 m)
66857 {
66858 - return (x < m) ? x + m : x;
66859 + return (x <= m) ? x + m + 1 : x;
66860 }
66861
66862 /**
66863 diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66864 --- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66865 +++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66866 @@ -16,6 +16,9 @@
66867 * - scnprintf and vscnprintf
66868 */
66869
66870 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66871 +#define __INCLUDED_BY_HIDESYM 1
66872 +#endif
66873 #include <stdarg.h>
66874 #include <linux/module.h>
66875 #include <linux/types.h>
66876 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66877 return buf;
66878 }
66879
66880 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66881 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66882 {
66883 int len, i;
66884
66885 if ((unsigned long)s < PAGE_SIZE)
66886 - s = "<NULL>";
66887 + s = "(null)";
66888
66889 len = strnlen(s, spec.precision);
66890
66891 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66892 unsigned long value = (unsigned long) ptr;
66893 #ifdef CONFIG_KALLSYMS
66894 char sym[KSYM_SYMBOL_LEN];
66895 - if (ext != 'f' && ext != 's')
66896 + if (ext != 'f' && ext != 's' && ext != 'a')
66897 sprint_symbol(sym, value);
66898 else
66899 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66900 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66901 * - 'f' For simple symbolic function names without offset
66902 * - 'S' For symbolic direct pointers with offset
66903 * - 's' For symbolic direct pointers without offset
66904 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66905 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66906 * - 'R' For a struct resource pointer, it prints the range of
66907 * addresses (not the name nor the flags)
66908 * - 'M' For a 6-byte MAC address, it prints the address in the
66909 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66910 struct printf_spec spec)
66911 {
66912 if (!ptr)
66913 - return string(buf, end, "(null)", spec);
66914 + return string(buf, end, "(nil)", spec);
66915
66916 switch (*fmt) {
66917 case 'F':
66918 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66919 case 's':
66920 /* Fallthrough */
66921 case 'S':
66922 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66923 + break;
66924 +#else
66925 + return symbol_string(buf, end, ptr, spec, *fmt);
66926 +#endif
66927 + case 'a':
66928 + /* Fallthrough */
66929 + case 'A':
66930 return symbol_string(buf, end, ptr, spec, *fmt);
66931 case 'R':
66932 return resource_string(buf, end, ptr, spec);
66933 @@ -1445,7 +1458,7 @@ do { \
66934 size_t len;
66935 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66936 || (unsigned long)save_str < PAGE_SIZE)
66937 - save_str = "<NULL>";
66938 + save_str = "(null)";
66939 len = strlen(save_str);
66940 if (str + len + 1 < end)
66941 memcpy(str, save_str, len + 1);
66942 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66943 typeof(type) value; \
66944 if (sizeof(type) == 8) { \
66945 args = PTR_ALIGN(args, sizeof(u32)); \
66946 - *(u32 *)&value = *(u32 *)args; \
66947 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66948 + *(u32 *)&value = *(const u32 *)args; \
66949 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66950 } else { \
66951 args = PTR_ALIGN(args, sizeof(type)); \
66952 - value = *(typeof(type) *)args; \
66953 + value = *(const typeof(type) *)args; \
66954 } \
66955 args += sizeof(type); \
66956 value; \
66957 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66958 const char *str_arg = args;
66959 size_t len = strlen(str_arg);
66960 args += len + 1;
66961 - str = string(str, end, (char *)str_arg, spec);
66962 + str = string(str, end, str_arg, spec);
66963 break;
66964 }
66965
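The vsprintf.c hunks above extend the %p format with 'A'/'a', documented alongside 'S'/'s': they print symbolic pointers like their existing counterparts but remain usable when CONFIG_GRKERNSEC_HIDESYM suppresses ordinary symbol output (the kmemleak.c hunk later in this patch switches %pS to %pA for exactly that reason). Below is a userspace sketch of the dispatch idea only; emit_pointer() and print_symbol() are made-up stand-ins for pointer() and symbol_string(), and the "(hidden)" placeholder merely marks the censored branch, whose real replacement output is handled by hunks not shown here.

#include <stdio.h>

/* Stand-in for the kernel's symbol_string()/sprint_symbol(): a real
 * implementation would resolve the address against a symbol table. */
static void print_symbol(char ext, const void *ptr)
{
        printf("<symbol for %p, format '%c'>\n", ptr, ext);
}

static void emit_pointer(char ext, const void *ptr, int hidesym)
{
        switch (ext) {
        case 'S':
        case 's':
                if (hidesym) {
                        /* censored: the hunk just breaks out here */
                        printf("(hidden)\n");
                        return;
                }
                /* fall through */
        case 'A':
        case 'a':
                /* 'A'/'a' are resolved regardless of hidesym */
                print_symbol(ext, ptr);
                return;
        default:
                printf("%p\n", ptr);
                return;
        }
}

int main(void)
{
        int x = 0;

        emit_pointer('S', &x, 1);   /* suppressed under hidesym */
        emit_pointer('A', &x, 1);   /* still resolved */
        emit_pointer('S', &x, 0);   /* resolved when hidesym is off */
        return 0;
}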
66966 diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66967 --- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66968 +++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66969 @@ -0,0 +1 @@
66970 +-grsec
66971 diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66972 --- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66973 +++ linux-2.6.32.45/Makefile 2011-08-16 20:42:28.000000000 -0400
66974 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66975
66976 HOSTCC = gcc
66977 HOSTCXX = g++
66978 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66979 -HOSTCXXFLAGS = -O2
66980 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66981 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66982 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66983
66984 # Decide whether to build built-in, modular, or both.
66985 # Normally, just do built-in.
66986 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66987 KBUILD_CPPFLAGS := -D__KERNEL__
66988
66989 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66990 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
66991 -fno-strict-aliasing -fno-common \
66992 -Werror-implicit-function-declaration \
66993 -Wno-format-security \
66994 -fno-delete-null-pointer-checks
66995 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66996 KBUILD_AFLAGS := -D__ASSEMBLY__
66997
66998 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66999 @@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67000 # Rules shared between *config targets and build targets
67001
67002 # Basic helpers built in scripts/
67003 -PHONY += scripts_basic
67004 -scripts_basic:
67005 +PHONY += scripts_basic gcc-plugins
67006 +scripts_basic: gcc-plugins
67007 $(Q)$(MAKE) $(build)=scripts/basic
67008
67009 # To avoid any implicit rule to kick in, define an empty command.
67010 @@ -403,7 +406,7 @@ endif
67011 # of make so .config is not included in this case either (for *config).
67012
67013 no-dot-config-targets := clean mrproper distclean \
67014 - cscope TAGS tags help %docs check% \
67015 + cscope gtags TAGS tags help %docs check% \
67016 include/linux/version.h headers_% \
67017 kernelrelease kernelversion
67018
67019 @@ -526,6 +529,25 @@ else
67020 KBUILD_CFLAGS += -O2
67021 endif
67022
67023 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
67024 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
67025 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
67026 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67027 +endif
67028 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
67029 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
67030 +gcc-plugins:
67031 + $(Q)$(MAKE) $(build)=tools/gcc
67032 +else
67033 +gcc-plugins:
67034 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67035 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
67036 +else
67037 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67038 +endif
67039 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67040 +endif
67041 +
67042 include $(srctree)/arch/$(SRCARCH)/Makefile
67043
67044 ifneq ($(CONFIG_FRAME_WARN),0)
67045 @@ -644,7 +666,7 @@ export mod_strip_cmd
67046
67047
67048 ifeq ($(KBUILD_EXTMOD),)
67049 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67050 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67051
67052 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67053 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67054 @@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
67055 endif
67056
67057 # prepare2 creates a makefile if using a separate output directory
67058 -prepare2: prepare3 outputmakefile
67059 +prepare2: prepare3 outputmakefile gcc-plugins
67060
67061 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
67062 include/asm include/config/auto.conf
67063 @@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
67064 include/linux/autoconf.h include/linux/version.h \
67065 include/linux/utsrelease.h \
67066 include/linux/bounds.h include/asm*/asm-offsets.h \
67067 - Module.symvers Module.markers tags TAGS cscope*
67068 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
67069
67070 # clean - Delete most, but leave enough to build external modules
67071 #
67072 @@ -1289,6 +1311,7 @@ help:
67073 @echo ' modules_prepare - Set up for building external modules'
67074 @echo ' tags/TAGS - Generate tags file for editors'
67075 @echo ' cscope - Generate cscope index'
67076 + @echo ' gtags - Generate GNU GLOBAL index'
67077 @echo ' kernelrelease - Output the release version string'
67078 @echo ' kernelversion - Output the version stored in Makefile'
67079 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
67080 @@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
67081 $(call cmd,rmdirs)
67082 $(call cmd,rmfiles)
67083 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
67084 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
67085 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
67086 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67087 -o -name '*.gcno' \) -type f -print | xargs rm -f
67088
67089 @@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67090 quiet_cmd_tags = GEN $@
67091 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67092
67093 -tags TAGS cscope: FORCE
67094 +tags TAGS cscope gtags: FORCE
67095 $(call cmd,tags)
67096
67097 # Scripts to check various things for consistency
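The Makefile hunk probes the host compiler with scripts/gcc-plugin.sh; when the probe fails on a gcc that is new enough for plugins (4.5 or later, per the cc-ifversion test) the build stops with the error above, and on older compilers it merely warns that constification and PAX_MEMORY_STACKLEAK will be weaker. The same version gate can be expressed in the C preprocessor; the sketch below is only an illustration of that check and is not something the patch contains.

#include <stdio.h>

/* Compile-time check mirroring the Makefile's cc-ifversion test:
 * gcc plugins need gcc 4.5 or newer. */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#define PLUGIN_CAPABLE_GCC 1
#else
#define PLUGIN_CAPABLE_GCC 0
#endif

int main(void)
{
#if PLUGIN_CAPABLE_GCC
        printf("gcc %d.%d: plugin-capable (plugin headers still required)\n",
               __GNUC__, __GNUC_MINOR__);
#else
        printf("compiler too old (or not gcc): plugins unavailable\n");
#endif
        return 0;
}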
67098 diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
67099 --- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67100 +++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
67101 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
67102 list_add_tail_rcu(&wb->list, &bdi->wb_list);
67103 spin_unlock(&bdi->wb_lock);
67104
67105 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
67106 + tsk->flags |= PF_SWAPWRITE;
67107 set_freezable();
67108
67109 /*
67110 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67111 * Add the default flusher task that gets created for any bdi
67112 * that has dirty data pending writeout
67113 */
67114 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67115 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67116 {
67117 if (!bdi_cap_writeback_dirty(bdi))
67118 return;
67119 diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
67120 --- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67121 +++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67122 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67123 struct address_space *mapping = file->f_mapping;
67124
67125 if (!mapping->a_ops->readpage)
67126 - return -ENOEXEC;
67127 + return -ENODEV;
67128 file_accessed(file);
67129 vma->vm_ops = &generic_file_vm_ops;
67130 vma->vm_flags |= VM_CAN_NONLINEAR;
67131 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67132 *pos = i_size_read(inode);
67133
67134 if (limit != RLIM_INFINITY) {
67135 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67136 if (*pos >= limit) {
67137 send_sig(SIGXFSZ, current, 0);
67138 return -EFBIG;
67139 diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
67140 --- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67141 +++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67142 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67143 retry:
67144 vma = find_vma(mm, start);
67145
67146 +#ifdef CONFIG_PAX_SEGMEXEC
67147 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67148 + goto out;
67149 +#endif
67150 +
67151 /*
67152 * Make sure the vma is shared, that it supports prefaulting,
67153 * and that the remapped range is valid and fully within
67154 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67155 /*
67156 * drop PG_Mlocked flag for over-mapped range
67157 */
67158 - unsigned int saved_flags = vma->vm_flags;
67159 + unsigned long saved_flags = vma->vm_flags;
67160 munlock_vma_pages_range(vma, start, start + size);
67161 vma->vm_flags = saved_flags;
67162 }
67163 diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
67164 --- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67165 +++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67166 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67167 * So no dangers, even with speculative execution.
67168 */
67169 page = pte_page(pkmap_page_table[i]);
67170 + pax_open_kernel();
67171 pte_clear(&init_mm, (unsigned long)page_address(page),
67172 &pkmap_page_table[i]);
67173 -
67174 + pax_close_kernel();
67175 set_page_address(page, NULL);
67176 need_flush = 1;
67177 }
67178 @@ -177,9 +178,11 @@ start:
67179 }
67180 }
67181 vaddr = PKMAP_ADDR(last_pkmap_nr);
67182 +
67183 + pax_open_kernel();
67184 set_pte_at(&init_mm, vaddr,
67185 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67186 -
67187 + pax_close_kernel();
67188 pkmap_count[last_pkmap_nr] = 1;
67189 set_page_address(page, (void *)vaddr);
67190
67191 diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67192 --- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67193 +++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67194 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67195 return 1;
67196 }
67197
67198 +#ifdef CONFIG_PAX_SEGMEXEC
67199 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67200 +{
67201 + struct mm_struct *mm = vma->vm_mm;
67202 + struct vm_area_struct *vma_m;
67203 + unsigned long address_m;
67204 + pte_t *ptep_m;
67205 +
67206 + vma_m = pax_find_mirror_vma(vma);
67207 + if (!vma_m)
67208 + return;
67209 +
67210 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67211 + address_m = address + SEGMEXEC_TASK_SIZE;
67212 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67213 + get_page(page_m);
67214 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67215 +}
67216 +#endif
67217 +
67218 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67219 unsigned long address, pte_t *ptep, pte_t pte,
67220 struct page *pagecache_page)
67221 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
67222 huge_ptep_clear_flush(vma, address, ptep);
67223 set_huge_pte_at(mm, address, ptep,
67224 make_huge_pte(vma, new_page, 1));
67225 +
67226 +#ifdef CONFIG_PAX_SEGMEXEC
67227 + pax_mirror_huge_pte(vma, address, new_page);
67228 +#endif
67229 +
67230 /* Make the old page be freed below */
67231 new_page = old_page;
67232 }
67233 @@ -2135,6 +2160,10 @@ retry:
67234 && (vma->vm_flags & VM_SHARED)));
67235 set_huge_pte_at(mm, address, ptep, new_pte);
67236
67237 +#ifdef CONFIG_PAX_SEGMEXEC
67238 + pax_mirror_huge_pte(vma, address, page);
67239 +#endif
67240 +
67241 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67242 /* Optimization, do the COW without a second fault */
67243 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67244 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67245 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67246 struct hstate *h = hstate_vma(vma);
67247
67248 +#ifdef CONFIG_PAX_SEGMEXEC
67249 + struct vm_area_struct *vma_m;
67250 +
67251 + vma_m = pax_find_mirror_vma(vma);
67252 + if (vma_m) {
67253 + unsigned long address_m;
67254 +
67255 + if (vma->vm_start > vma_m->vm_start) {
67256 + address_m = address;
67257 + address -= SEGMEXEC_TASK_SIZE;
67258 + vma = vma_m;
67259 + h = hstate_vma(vma);
67260 + } else
67261 + address_m = address + SEGMEXEC_TASK_SIZE;
67262 +
67263 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67264 + return VM_FAULT_OOM;
67265 + address_m &= HPAGE_MASK;
67266 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67267 + }
67268 +#endif
67269 +
67270 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67271 if (!ptep)
67272 return VM_FAULT_OOM;
67273 diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67274 --- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67275 +++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67276 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67277 * in mm/page_alloc.c
67278 */
67279 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67280 +extern void free_compound_page(struct page *page);
67281 extern void prep_compound_page(struct page *page, unsigned long order);
67282
67283
67284 diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67285 --- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67286 +++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67287 @@ -228,7 +228,7 @@ config KSM
67288 config DEFAULT_MMAP_MIN_ADDR
67289 int "Low address space to protect from user allocation"
67290 depends on MMU
67291 - default 4096
67292 + default 65536
67293 help
67294 This is the portion of low virtual memory which should be protected
67295 from userspace allocation. Keeping a user from writing to low pages
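The Kconfig change raises DEFAULT_MMAP_MIN_ADDR from 4096 to 65536, so the lowest 64 KiB of virtual address space are normally kept out of reach of unprivileged mappings, which blunts exploitation of kernel NULL-pointer dereferences. The effective value is the standard vm.mmap_min_addr sysctl (not something this patch introduces); a short sketch that reads it back:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");
        unsigned long min_addr;

        if (!f) {
                perror("open /proc/sys/vm/mmap_min_addr");
                return 1;
        }
        if (fscanf(f, "%lu", &min_addr) == 1)
                printf("unprivileged mappings below 0x%lx are normally refused\n",
                       min_addr);
        fclose(f);
        return 0;
}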
67296 diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67297 --- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67298 +++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67299 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67300
67301 for (i = 0; i < object->trace_len; i++) {
67302 void *ptr = (void *)object->trace[i];
67303 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67304 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67305 }
67306 }
67307
67308 diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67309 --- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67310 +++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67311 @@ -14,7 +14,7 @@
67312 * Safely read from address @src to the buffer at @dst. If a kernel fault
67313 * happens, handle that and return -EFAULT.
67314 */
67315 -long probe_kernel_read(void *dst, void *src, size_t size)
67316 +long probe_kernel_read(void *dst, const void *src, size_t size)
67317 {
67318 long ret;
67319 mm_segment_t old_fs = get_fs();
67320 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67321 * Safely write to address @dst from the buffer at @src. If a kernel fault
67322 * happens, handle that and return -EFAULT.
67323 */
67324 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67325 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67326 {
67327 long ret;
67328 mm_segment_t old_fs = get_fs();
67329 diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67330 --- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67331 +++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67332 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67333 pgoff_t pgoff;
67334 unsigned long new_flags = vma->vm_flags;
67335
67336 +#ifdef CONFIG_PAX_SEGMEXEC
67337 + struct vm_area_struct *vma_m;
67338 +#endif
67339 +
67340 switch (behavior) {
67341 case MADV_NORMAL:
67342 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67343 @@ -103,6 +107,13 @@ success:
67344 /*
67345 * vm_flags is protected by the mmap_sem held in write mode.
67346 */
67347 +
67348 +#ifdef CONFIG_PAX_SEGMEXEC
67349 + vma_m = pax_find_mirror_vma(vma);
67350 + if (vma_m)
67351 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67352 +#endif
67353 +
67354 vma->vm_flags = new_flags;
67355
67356 out:
67357 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67358 struct vm_area_struct ** prev,
67359 unsigned long start, unsigned long end)
67360 {
67361 +
67362 +#ifdef CONFIG_PAX_SEGMEXEC
67363 + struct vm_area_struct *vma_m;
67364 +#endif
67365 +
67366 *prev = vma;
67367 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67368 return -EINVAL;
67369 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67370 zap_page_range(vma, start, end - start, &details);
67371 } else
67372 zap_page_range(vma, start, end - start, NULL);
67373 +
67374 +#ifdef CONFIG_PAX_SEGMEXEC
67375 + vma_m = pax_find_mirror_vma(vma);
67376 + if (vma_m) {
67377 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67378 + struct zap_details details = {
67379 + .nonlinear_vma = vma_m,
67380 + .last_index = ULONG_MAX,
67381 + };
67382 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67383 + } else
67384 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67385 + }
67386 +#endif
67387 +
67388 return 0;
67389 }
67390
67391 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67392 if (end < start)
67393 goto out;
67394
67395 +#ifdef CONFIG_PAX_SEGMEXEC
67396 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67397 + if (end > SEGMEXEC_TASK_SIZE)
67398 + goto out;
67399 + } else
67400 +#endif
67401 +
67402 + if (end > TASK_SIZE)
67403 + goto out;
67404 +
67405 error = 0;
67406 if (end == start)
67407 goto out;
67408 diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67409 --- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67410 +++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67411 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67412 return;
67413
67414 pmd = pmd_offset(pud, start);
67415 +
67416 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67417 pud_clear(pud);
67418 pmd_free_tlb(tlb, pmd, start);
67419 +#endif
67420 +
67421 }
67422
67423 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67424 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67425 if (end - 1 > ceiling - 1)
67426 return;
67427
67428 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67429 pud = pud_offset(pgd, start);
67430 pgd_clear(pgd);
67431 pud_free_tlb(tlb, pud, start);
67432 +#endif
67433 +
67434 }
67435
67436 /*
67437 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67438 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67439 i = 0;
67440
67441 - do {
67442 + while (nr_pages) {
67443 struct vm_area_struct *vma;
67444
67445 - vma = find_extend_vma(mm, start);
67446 + vma = find_vma(mm, start);
67447 if (!vma && in_gate_area(tsk, start)) {
67448 unsigned long pg = start & PAGE_MASK;
67449 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67450 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67451 continue;
67452 }
67453
67454 - if (!vma ||
67455 + if (!vma || start < vma->vm_start ||
67456 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67457 !(vm_flags & vma->vm_flags))
67458 return i ? : -EFAULT;
67459 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67460 start += PAGE_SIZE;
67461 nr_pages--;
67462 } while (nr_pages && start < vma->vm_end);
67463 - } while (nr_pages);
67464 + }
67465 return i;
67466 }
67467
67468 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67469 page_add_file_rmap(page);
67470 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67471
67472 +#ifdef CONFIG_PAX_SEGMEXEC
67473 + pax_mirror_file_pte(vma, addr, page, ptl);
67474 +#endif
67475 +
67476 retval = 0;
67477 pte_unmap_unlock(pte, ptl);
67478 return retval;
67479 @@ -1560,10 +1571,22 @@ out:
67480 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67481 struct page *page)
67482 {
67483 +
67484 +#ifdef CONFIG_PAX_SEGMEXEC
67485 + struct vm_area_struct *vma_m;
67486 +#endif
67487 +
67488 if (addr < vma->vm_start || addr >= vma->vm_end)
67489 return -EFAULT;
67490 if (!page_count(page))
67491 return -EINVAL;
67492 +
67493 +#ifdef CONFIG_PAX_SEGMEXEC
67494 + vma_m = pax_find_mirror_vma(vma);
67495 + if (vma_m)
67496 + vma_m->vm_flags |= VM_INSERTPAGE;
67497 +#endif
67498 +
67499 vma->vm_flags |= VM_INSERTPAGE;
67500 return insert_page(vma, addr, page, vma->vm_page_prot);
67501 }
67502 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67503 unsigned long pfn)
67504 {
67505 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67506 + BUG_ON(vma->vm_mirror);
67507
67508 if (addr < vma->vm_start || addr >= vma->vm_end)
67509 return -EFAULT;
67510 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67511 copy_user_highpage(dst, src, va, vma);
67512 }
67513
67514 +#ifdef CONFIG_PAX_SEGMEXEC
67515 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67516 +{
67517 + struct mm_struct *mm = vma->vm_mm;
67518 + spinlock_t *ptl;
67519 + pte_t *pte, entry;
67520 +
67521 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67522 + entry = *pte;
67523 + if (!pte_present(entry)) {
67524 + if (!pte_none(entry)) {
67525 + BUG_ON(pte_file(entry));
67526 + free_swap_and_cache(pte_to_swp_entry(entry));
67527 + pte_clear_not_present_full(mm, address, pte, 0);
67528 + }
67529 + } else {
67530 + struct page *page;
67531 +
67532 + flush_cache_page(vma, address, pte_pfn(entry));
67533 + entry = ptep_clear_flush(vma, address, pte);
67534 + BUG_ON(pte_dirty(entry));
67535 + page = vm_normal_page(vma, address, entry);
67536 + if (page) {
67537 + update_hiwater_rss(mm);
67538 + if (PageAnon(page))
67539 + dec_mm_counter(mm, anon_rss);
67540 + else
67541 + dec_mm_counter(mm, file_rss);
67542 + page_remove_rmap(page);
67543 + page_cache_release(page);
67544 + }
67545 + }
67546 + pte_unmap_unlock(pte, ptl);
67547 +}
67548 +
67549 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67550 + *
67551 + * the ptl of the lower mapped page is held on entry and is not released on exit
67552 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67553 + */
67554 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67555 +{
67556 + struct mm_struct *mm = vma->vm_mm;
67557 + unsigned long address_m;
67558 + spinlock_t *ptl_m;
67559 + struct vm_area_struct *vma_m;
67560 + pmd_t *pmd_m;
67561 + pte_t *pte_m, entry_m;
67562 +
67563 + BUG_ON(!page_m || !PageAnon(page_m));
67564 +
67565 + vma_m = pax_find_mirror_vma(vma);
67566 + if (!vma_m)
67567 + return;
67568 +
67569 + BUG_ON(!PageLocked(page_m));
67570 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67571 + address_m = address + SEGMEXEC_TASK_SIZE;
67572 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67573 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67574 + ptl_m = pte_lockptr(mm, pmd_m);
67575 + if (ptl != ptl_m) {
67576 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67577 + if (!pte_none(*pte_m))
67578 + goto out;
67579 + }
67580 +
67581 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67582 + page_cache_get(page_m);
67583 + page_add_anon_rmap(page_m, vma_m, address_m);
67584 + inc_mm_counter(mm, anon_rss);
67585 + set_pte_at(mm, address_m, pte_m, entry_m);
67586 + update_mmu_cache(vma_m, address_m, entry_m);
67587 +out:
67588 + if (ptl != ptl_m)
67589 + spin_unlock(ptl_m);
67590 + pte_unmap_nested(pte_m);
67591 + unlock_page(page_m);
67592 +}
67593 +
67594 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67595 +{
67596 + struct mm_struct *mm = vma->vm_mm;
67597 + unsigned long address_m;
67598 + spinlock_t *ptl_m;
67599 + struct vm_area_struct *vma_m;
67600 + pmd_t *pmd_m;
67601 + pte_t *pte_m, entry_m;
67602 +
67603 + BUG_ON(!page_m || PageAnon(page_m));
67604 +
67605 + vma_m = pax_find_mirror_vma(vma);
67606 + if (!vma_m)
67607 + return;
67608 +
67609 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67610 + address_m = address + SEGMEXEC_TASK_SIZE;
67611 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67612 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67613 + ptl_m = pte_lockptr(mm, pmd_m);
67614 + if (ptl != ptl_m) {
67615 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67616 + if (!pte_none(*pte_m))
67617 + goto out;
67618 + }
67619 +
67620 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67621 + page_cache_get(page_m);
67622 + page_add_file_rmap(page_m);
67623 + inc_mm_counter(mm, file_rss);
67624 + set_pte_at(mm, address_m, pte_m, entry_m);
67625 + update_mmu_cache(vma_m, address_m, entry_m);
67626 +out:
67627 + if (ptl != ptl_m)
67628 + spin_unlock(ptl_m);
67629 + pte_unmap_nested(pte_m);
67630 +}
67631 +
67632 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67633 +{
67634 + struct mm_struct *mm = vma->vm_mm;
67635 + unsigned long address_m;
67636 + spinlock_t *ptl_m;
67637 + struct vm_area_struct *vma_m;
67638 + pmd_t *pmd_m;
67639 + pte_t *pte_m, entry_m;
67640 +
67641 + vma_m = pax_find_mirror_vma(vma);
67642 + if (!vma_m)
67643 + return;
67644 +
67645 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67646 + address_m = address + SEGMEXEC_TASK_SIZE;
67647 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67648 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67649 + ptl_m = pte_lockptr(mm, pmd_m);
67650 + if (ptl != ptl_m) {
67651 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67652 + if (!pte_none(*pte_m))
67653 + goto out;
67654 + }
67655 +
67656 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67657 + set_pte_at(mm, address_m, pte_m, entry_m);
67658 +out:
67659 + if (ptl != ptl_m)
67660 + spin_unlock(ptl_m);
67661 + pte_unmap_nested(pte_m);
67662 +}
67663 +
67664 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67665 +{
67666 + struct page *page_m;
67667 + pte_t entry;
67668 +
67669 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67670 + goto out;
67671 +
67672 + entry = *pte;
67673 + page_m = vm_normal_page(vma, address, entry);
67674 + if (!page_m)
67675 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67676 + else if (PageAnon(page_m)) {
67677 + if (pax_find_mirror_vma(vma)) {
67678 + pte_unmap_unlock(pte, ptl);
67679 + lock_page(page_m);
67680 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67681 + if (pte_same(entry, *pte))
67682 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67683 + else
67684 + unlock_page(page_m);
67685 + }
67686 + } else
67687 + pax_mirror_file_pte(vma, address, page_m, ptl);
67688 +
67689 +out:
67690 + pte_unmap_unlock(pte, ptl);
67691 +}
67692 +#endif
67693 +
67694 /*
67695 * This routine handles present pages, when users try to write
67696 * to a shared page. It is done by copying the page to a new address
67697 @@ -2156,6 +2360,12 @@ gotten:
67698 */
67699 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67700 if (likely(pte_same(*page_table, orig_pte))) {
67701 +
67702 +#ifdef CONFIG_PAX_SEGMEXEC
67703 + if (pax_find_mirror_vma(vma))
67704 + BUG_ON(!trylock_page(new_page));
67705 +#endif
67706 +
67707 if (old_page) {
67708 if (!PageAnon(old_page)) {
67709 dec_mm_counter(mm, file_rss);
67710 @@ -2207,6 +2417,10 @@ gotten:
67711 page_remove_rmap(old_page);
67712 }
67713
67714 +#ifdef CONFIG_PAX_SEGMEXEC
67715 + pax_mirror_anon_pte(vma, address, new_page, ptl);
67716 +#endif
67717 +
67718 /* Free the old page.. */
67719 new_page = old_page;
67720 ret |= VM_FAULT_WRITE;
67721 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67722 swap_free(entry);
67723 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67724 try_to_free_swap(page);
67725 +
67726 +#ifdef CONFIG_PAX_SEGMEXEC
67727 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67728 +#endif
67729 +
67730 unlock_page(page);
67731
67732 if (flags & FAULT_FLAG_WRITE) {
67733 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67734
67735 /* No need to invalidate - it was non-present before */
67736 update_mmu_cache(vma, address, pte);
67737 +
67738 +#ifdef CONFIG_PAX_SEGMEXEC
67739 + pax_mirror_anon_pte(vma, address, page, ptl);
67740 +#endif
67741 +
67742 unlock:
67743 pte_unmap_unlock(page_table, ptl);
67744 out:
67745 @@ -2632,40 +2856,6 @@ out_release:
67746 }
67747
67748 /*
67749 - * This is like a special single-page "expand_{down|up}wards()",
67750 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
67751 - * doesn't hit another vma.
67752 - */
67753 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67754 -{
67755 - address &= PAGE_MASK;
67756 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67757 - struct vm_area_struct *prev = vma->vm_prev;
67758 -
67759 - /*
67760 - * Is there a mapping abutting this one below?
67761 - *
67762 - * That's only ok if it's the same stack mapping
67763 - * that has gotten split..
67764 - */
67765 - if (prev && prev->vm_end == address)
67766 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67767 -
67768 - expand_stack(vma, address - PAGE_SIZE);
67769 - }
67770 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67771 - struct vm_area_struct *next = vma->vm_next;
67772 -
67773 - /* As VM_GROWSDOWN but s/below/above/ */
67774 - if (next && next->vm_start == address + PAGE_SIZE)
67775 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67776 -
67777 - expand_upwards(vma, address + PAGE_SIZE);
67778 - }
67779 - return 0;
67780 -}
67781 -
67782 -/*
67783 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67784 * but allow concurrent faults), and pte mapped but not yet locked.
67785 * We return with mmap_sem still held, but pte unmapped and unlocked.
67786 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67787 unsigned long address, pte_t *page_table, pmd_t *pmd,
67788 unsigned int flags)
67789 {
67790 - struct page *page;
67791 + struct page *page = NULL;
67792 spinlock_t *ptl;
67793 pte_t entry;
67794
67795 - pte_unmap(page_table);
67796 -
67797 - /* Check if we need to add a guard page to the stack */
67798 - if (check_stack_guard_page(vma, address) < 0)
67799 - return VM_FAULT_SIGBUS;
67800 -
67801 - /* Use the zero-page for reads */
67802 if (!(flags & FAULT_FLAG_WRITE)) {
67803 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67804 vma->vm_page_prot));
67805 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67806 + ptl = pte_lockptr(mm, pmd);
67807 + spin_lock(ptl);
67808 if (!pte_none(*page_table))
67809 goto unlock;
67810 goto setpte;
67811 }
67812
67813 /* Allocate our own private page. */
67814 + pte_unmap(page_table);
67815 +
67816 if (unlikely(anon_vma_prepare(vma)))
67817 goto oom;
67818 page = alloc_zeroed_user_highpage_movable(vma, address);
67819 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67820 if (!pte_none(*page_table))
67821 goto release;
67822
67823 +#ifdef CONFIG_PAX_SEGMEXEC
67824 + if (pax_find_mirror_vma(vma))
67825 + BUG_ON(!trylock_page(page));
67826 +#endif
67827 +
67828 inc_mm_counter(mm, anon_rss);
67829 page_add_new_anon_rmap(page, vma, address);
67830 setpte:
67831 @@ -2720,6 +2911,12 @@ setpte:
67832
67833 /* No need to invalidate - it was non-present before */
67834 update_mmu_cache(vma, address, entry);
67835 +
67836 +#ifdef CONFIG_PAX_SEGMEXEC
67837 + if (page)
67838 + pax_mirror_anon_pte(vma, address, page, ptl);
67839 +#endif
67840 +
67841 unlock:
67842 pte_unmap_unlock(page_table, ptl);
67843 return 0;
67844 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67845 */
67846 /* Only go through if we didn't race with anybody else... */
67847 if (likely(pte_same(*page_table, orig_pte))) {
67848 +
67849 +#ifdef CONFIG_PAX_SEGMEXEC
67850 + if (anon && pax_find_mirror_vma(vma))
67851 + BUG_ON(!trylock_page(page));
67852 +#endif
67853 +
67854 flush_icache_page(vma, page);
67855 entry = mk_pte(page, vma->vm_page_prot);
67856 if (flags & FAULT_FLAG_WRITE)
67857 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67858
67859 /* no need to invalidate: a not-present page won't be cached */
67860 update_mmu_cache(vma, address, entry);
67861 +
67862 +#ifdef CONFIG_PAX_SEGMEXEC
67863 + if (anon)
67864 + pax_mirror_anon_pte(vma, address, page, ptl);
67865 + else
67866 + pax_mirror_file_pte(vma, address, page, ptl);
67867 +#endif
67868 +
67869 } else {
67870 if (charged)
67871 mem_cgroup_uncharge_page(page);
67872 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67873 if (flags & FAULT_FLAG_WRITE)
67874 flush_tlb_page(vma, address);
67875 }
67876 +
67877 +#ifdef CONFIG_PAX_SEGMEXEC
67878 + pax_mirror_pte(vma, address, pte, pmd, ptl);
67879 + return 0;
67880 +#endif
67881 +
67882 unlock:
67883 pte_unmap_unlock(pte, ptl);
67884 return 0;
67885 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67886 pmd_t *pmd;
67887 pte_t *pte;
67888
67889 +#ifdef CONFIG_PAX_SEGMEXEC
67890 + struct vm_area_struct *vma_m;
67891 +#endif
67892 +
67893 __set_current_state(TASK_RUNNING);
67894
67895 count_vm_event(PGFAULT);
67896 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67897 if (unlikely(is_vm_hugetlb_page(vma)))
67898 return hugetlb_fault(mm, vma, address, flags);
67899
67900 +#ifdef CONFIG_PAX_SEGMEXEC
67901 + vma_m = pax_find_mirror_vma(vma);
67902 + if (vma_m) {
67903 + unsigned long address_m;
67904 + pgd_t *pgd_m;
67905 + pud_t *pud_m;
67906 + pmd_t *pmd_m;
67907 +
67908 + if (vma->vm_start > vma_m->vm_start) {
67909 + address_m = address;
67910 + address -= SEGMEXEC_TASK_SIZE;
67911 + vma = vma_m;
67912 + } else
67913 + address_m = address + SEGMEXEC_TASK_SIZE;
67914 +
67915 + pgd_m = pgd_offset(mm, address_m);
67916 + pud_m = pud_alloc(mm, pgd_m, address_m);
67917 + if (!pud_m)
67918 + return VM_FAULT_OOM;
67919 + pmd_m = pmd_alloc(mm, pud_m, address_m);
67920 + if (!pmd_m)
67921 + return VM_FAULT_OOM;
67922 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67923 + return VM_FAULT_OOM;
67924 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67925 + }
67926 +#endif
67927 +
67928 pgd = pgd_offset(mm, address);
67929 pud = pud_alloc(mm, pgd, address);
67930 if (!pud)
67931 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67932 gate_vma.vm_start = FIXADDR_USER_START;
67933 gate_vma.vm_end = FIXADDR_USER_END;
67934 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67935 - gate_vma.vm_page_prot = __P101;
67936 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67937 /*
67938 * Make sure the vDSO gets into every core dump.
67939 * Dumping its contents makes post-mortem fully interpretable later
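The mm/memory.c additions above implement SEGMEXEC mirroring: whenever a PTE is instantiated at address, the helpers install a matching PTE for the mirror VMA at address + SEGMEXEC_TASK_SIZE, so both halves of the split address space reference the same pages (the comment before pax_mirror_anon_pte() spells out the locking rule). Those helpers are kernel-internal, but the underlying notion of one set of pages visible through two mappings can be sketched from userspace with an ordinary shared mapping; memfd_create() (glibc 2.27 or later) is used here purely for convenience and has nothing to do with the patch itself.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("mirror-demo", 0);

        if (fd < 0 || ftruncate(fd, page) < 0) {
                perror("memfd/ftruncate");
                return 1;
        }

        /* Two views of the same physical page, loosely analogous to the
         * main VMA and its SEGMEXEC mirror at a fixed offset from it. */
        char *lower = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        char *upper = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);

        if (lower == MAP_FAILED || upper == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        strcpy(lower, "written through the lower view");
        printf("upper view reads: %s\n", upper);

        munmap(lower, page);
        munmap(upper, page);
        close(fd);
        return 0;
}

In SEGMEXEC proper the two halves additionally differ in execute permission, with the mirror carrying the executable view; that part is not reproduced by this plain demo.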
67940 diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67941 --- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67942 +++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67943 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67944
67945 int sysctl_memory_failure_recovery __read_mostly = 1;
67946
67947 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67948 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67949
67950 /*
67951 * Send all the processes who have the page mapped an ``action optional''
67952 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67953 return 0;
67954 }
67955
67956 - atomic_long_add(1, &mce_bad_pages);
67957 + atomic_long_add_unchecked(1, &mce_bad_pages);
67958
67959 /*
67960 * We need/can do nothing about count=0 pages.
67961 diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67962 --- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67963 +++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67964 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67965 struct vm_area_struct *next;
67966 int err;
67967
67968 +#ifdef CONFIG_PAX_SEGMEXEC
67969 + struct vm_area_struct *vma_m;
67970 +#endif
67971 +
67972 err = 0;
67973 for (; vma && vma->vm_start < end; vma = next) {
67974 next = vma->vm_next;
67975 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67976 err = policy_vma(vma, new);
67977 if (err)
67978 break;
67979 +
67980 +#ifdef CONFIG_PAX_SEGMEXEC
67981 + vma_m = pax_find_mirror_vma(vma);
67982 + if (vma_m) {
67983 + err = policy_vma(vma_m, new);
67984 + if (err)
67985 + break;
67986 + }
67987 +#endif
67988 +
67989 }
67990 return err;
67991 }
67992 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67993
67994 if (end < start)
67995 return -EINVAL;
67996 +
67997 +#ifdef CONFIG_PAX_SEGMEXEC
67998 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67999 + if (end > SEGMEXEC_TASK_SIZE)
68000 + return -EINVAL;
68001 + } else
68002 +#endif
68003 +
68004 + if (end > TASK_SIZE)
68005 + return -EINVAL;
68006 +
68007 if (end == start)
68008 return 0;
68009
68010 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68011 if (!mm)
68012 return -EINVAL;
68013
68014 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68015 + if (mm != current->mm &&
68016 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68017 + err = -EPERM;
68018 + goto out;
68019 + }
68020 +#endif
68021 +
68022 /*
68023 * Check if this process has the right to modify the specified
68024 * process. The right exists if the process has administrative
68025 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68026 rcu_read_lock();
68027 tcred = __task_cred(task);
68028 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68029 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68030 - !capable(CAP_SYS_NICE)) {
68031 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68032 rcu_read_unlock();
68033 err = -EPERM;
68034 goto out;
68035 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
68036
68037 if (file) {
68038 seq_printf(m, " file=");
68039 - seq_path(m, &file->f_path, "\n\t= ");
68040 + seq_path(m, &file->f_path, "\n\t\\= ");
68041 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68042 seq_printf(m, " heap");
68043 } else if (vma->vm_start <= mm->start_stack &&
68044 diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
68045 --- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
68046 +++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
68047 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
68048 unsigned long chunk_start;
68049 int err;
68050
68051 + pax_track_stack();
68052 +
68053 task_nodes = cpuset_mems_allowed(task);
68054
68055 err = -ENOMEM;
68056 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68057 if (!mm)
68058 return -EINVAL;
68059
68060 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68061 + if (mm != current->mm &&
68062 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68063 + err = -EPERM;
68064 + goto out;
68065 + }
68066 +#endif
68067 +
68068 /*
68069 * Check if this process has the right to modify the specified
68070 * process. The right exists if the process has administrative
68071 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68072 rcu_read_lock();
68073 tcred = __task_cred(task);
68074 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68075 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68076 - !capable(CAP_SYS_NICE)) {
68077 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68078 rcu_read_unlock();
68079 err = -EPERM;
68080 goto out;
68081 diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
68082 --- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
68083 +++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
68084 @@ -13,6 +13,7 @@
68085 #include <linux/pagemap.h>
68086 #include <linux/mempolicy.h>
68087 #include <linux/syscalls.h>
68088 +#include <linux/security.h>
68089 #include <linux/sched.h>
68090 #include <linux/module.h>
68091 #include <linux/rmap.h>
68092 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68093 }
68094 }
68095
68096 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68097 -{
68098 - return (vma->vm_flags & VM_GROWSDOWN) &&
68099 - (vma->vm_start == addr) &&
68100 - !vma_stack_continue(vma->vm_prev, addr);
68101 -}
68102 -
68103 /**
68104 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68105 * @vma: target vma
68106 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68107 if (vma->vm_flags & VM_WRITE)
68108 gup_flags |= FOLL_WRITE;
68109
68110 - /* We don't try to access the guard page of a stack vma */
68111 - if (stack_guard_page(vma, start)) {
68112 - addr += PAGE_SIZE;
68113 - nr_pages--;
68114 - }
68115 -
68116 while (nr_pages > 0) {
68117 int i;
68118
68119 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68120 {
68121 unsigned long nstart, end, tmp;
68122 struct vm_area_struct * vma, * prev;
68123 - int error;
68124 + int error = -EINVAL;
68125
68126 len = PAGE_ALIGN(len);
68127 end = start + len;
68128 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68129 return -EINVAL;
68130 if (end == start)
68131 return 0;
68132 + if (end > TASK_SIZE)
68133 + return -EINVAL;
68134 +
68135 vma = find_vma_prev(current->mm, start, &prev);
68136 if (!vma || vma->vm_start > start)
68137 return -ENOMEM;
68138 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68139 for (nstart = start ; ; ) {
68140 unsigned int newflags;
68141
68142 +#ifdef CONFIG_PAX_SEGMEXEC
68143 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68144 + break;
68145 +#endif
68146 +
68147 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68148
68149 newflags = vma->vm_flags | VM_LOCKED;
68150 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68151 lock_limit >>= PAGE_SHIFT;
68152
68153 /* check against resource limits */
68154 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68155 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68156 error = do_mlock(start, len, 1);
68157 up_write(&current->mm->mmap_sem);
68158 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68159 static int do_mlockall(int flags)
68160 {
68161 struct vm_area_struct * vma, * prev = NULL;
68162 - unsigned int def_flags = 0;
68163
68164 if (flags & MCL_FUTURE)
68165 - def_flags = VM_LOCKED;
68166 - current->mm->def_flags = def_flags;
68167 + current->mm->def_flags |= VM_LOCKED;
68168 + else
68169 + current->mm->def_flags &= ~VM_LOCKED;
68170 if (flags == MCL_FUTURE)
68171 goto out;
68172
68173 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68174 - unsigned int newflags;
68175 + unsigned long newflags;
68176 +
68177 +#ifdef CONFIG_PAX_SEGMEXEC
68178 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68179 + break;
68180 +#endif
68181
68182 + BUG_ON(vma->vm_end > TASK_SIZE);
68183 newflags = vma->vm_flags | VM_LOCKED;
68184 if (!(flags & MCL_CURRENT))
68185 newflags &= ~VM_LOCKED;
68186 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68187 lock_limit >>= PAGE_SHIFT;
68188
68189 ret = -ENOMEM;
68190 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68191 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68192 capable(CAP_IPC_LOCK))
68193 ret = do_mlockall(flags);
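The mlock.c hunks above report the attempted lock size to the grsecurity learning code via gr_learn_resource(), bound the locked ranges to TASK_SIZE (or SEGMEXEC_TASK_SIZE), and make MCL_FUTURE set or clear VM_LOCKED in mm->def_flags instead of overwriting all default flags. The limit being consulted is still the ordinary RLIMIT_MEMLOCK, so from userspace the interaction looks the same as on a stock kernel; a short sketch using only standard POSIX calls:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
                       (unsigned long long)rl.rlim_cur);

        /* Lock current and future mappings; this fails (ENOMEM or EPERM)
         * when the limit above would be exceeded and CAP_IPC_LOCK is absent. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                perror("mlockall");
                return 1;
        }

        puts("address space locked");
        munlockall();
        return 0;
}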
68194 diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68195 --- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68196 +++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68197 @@ -45,6 +45,16 @@
68198 #define arch_rebalance_pgtables(addr, len) (addr)
68199 #endif
68200
68201 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68202 +{
68203 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68204 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68205 + up_read(&mm->mmap_sem);
68206 + BUG();
68207 + }
68208 +#endif
68209 +}
68210 +
68211 static void unmap_region(struct mm_struct *mm,
68212 struct vm_area_struct *vma, struct vm_area_struct *prev,
68213 unsigned long start, unsigned long end);
68214 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68215 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68216 *
68217 */
68218 -pgprot_t protection_map[16] = {
68219 +pgprot_t protection_map[16] __read_only = {
68220 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68221 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68222 };
68223
68224 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68225 {
68226 - return __pgprot(pgprot_val(protection_map[vm_flags &
68227 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68228 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68229 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68230 +
68231 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68232 + if (!nx_enabled &&
68233 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68234 + (vm_flags & (VM_READ | VM_WRITE)))
68235 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68236 +#endif
68237 +
68238 + return prot;
68239 }
68240 EXPORT_SYMBOL(vm_get_page_prot);
68241
68242 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68243 int sysctl_overcommit_ratio = 50; /* default is 50% */
68244 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68245 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68246 struct percpu_counter vm_committed_as;
68247
68248 /*
68249 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68250 struct vm_area_struct *next = vma->vm_next;
68251
68252 might_sleep();
68253 + BUG_ON(vma->vm_mirror);
68254 if (vma->vm_ops && vma->vm_ops->close)
68255 vma->vm_ops->close(vma);
68256 if (vma->vm_file) {
68257 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68258 * not page aligned -Ram Gupta
68259 */
68260 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68261 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68262 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68263 (mm->end_data - mm->start_data) > rlim)
68264 goto out;
68265 @@ -704,6 +726,12 @@ static int
68266 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68267 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68268 {
68269 +
68270 +#ifdef CONFIG_PAX_SEGMEXEC
68271 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68272 + return 0;
68273 +#endif
68274 +
68275 if (is_mergeable_vma(vma, file, vm_flags) &&
68276 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68277 if (vma->vm_pgoff == vm_pgoff)
68278 @@ -723,6 +751,12 @@ static int
68279 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68280 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68281 {
68282 +
68283 +#ifdef CONFIG_PAX_SEGMEXEC
68284 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68285 + return 0;
68286 +#endif
68287 +
68288 if (is_mergeable_vma(vma, file, vm_flags) &&
68289 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68290 pgoff_t vm_pglen;
68291 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68292 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68293 struct vm_area_struct *prev, unsigned long addr,
68294 unsigned long end, unsigned long vm_flags,
68295 - struct anon_vma *anon_vma, struct file *file,
68296 + struct anon_vma *anon_vma, struct file *file,
68297 pgoff_t pgoff, struct mempolicy *policy)
68298 {
68299 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68300 struct vm_area_struct *area, *next;
68301
68302 +#ifdef CONFIG_PAX_SEGMEXEC
68303 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68304 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68305 +
68306 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68307 +#endif
68308 +
68309 /*
68310 * We later require that vma->vm_flags == vm_flags,
68311 * so this tests vma->vm_flags & VM_SPECIAL, too.
68312 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68313 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68314 next = next->vm_next;
68315
68316 +#ifdef CONFIG_PAX_SEGMEXEC
68317 + if (prev)
68318 + prev_m = pax_find_mirror_vma(prev);
68319 + if (area)
68320 + area_m = pax_find_mirror_vma(area);
68321 + if (next)
68322 + next_m = pax_find_mirror_vma(next);
68323 +#endif
68324 +
68325 /*
68326 * Can it merge with the predecessor?
68327 */
68328 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68329 /* cases 1, 6 */
68330 vma_adjust(prev, prev->vm_start,
68331 next->vm_end, prev->vm_pgoff, NULL);
68332 - } else /* cases 2, 5, 7 */
68333 +
68334 +#ifdef CONFIG_PAX_SEGMEXEC
68335 + if (prev_m)
68336 + vma_adjust(prev_m, prev_m->vm_start,
68337 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68338 +#endif
68339 +
68340 + } else { /* cases 2, 5, 7 */
68341 vma_adjust(prev, prev->vm_start,
68342 end, prev->vm_pgoff, NULL);
68343 +
68344 +#ifdef CONFIG_PAX_SEGMEXEC
68345 + if (prev_m)
68346 + vma_adjust(prev_m, prev_m->vm_start,
68347 + end_m, prev_m->vm_pgoff, NULL);
68348 +#endif
68349 +
68350 + }
68351 return prev;
68352 }
68353
68354 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68355 mpol_equal(policy, vma_policy(next)) &&
68356 can_vma_merge_before(next, vm_flags,
68357 anon_vma, file, pgoff+pglen)) {
68358 - if (prev && addr < prev->vm_end) /* case 4 */
68359 + if (prev && addr < prev->vm_end) { /* case 4 */
68360 vma_adjust(prev, prev->vm_start,
68361 addr, prev->vm_pgoff, NULL);
68362 - else /* cases 3, 8 */
68363 +
68364 +#ifdef CONFIG_PAX_SEGMEXEC
68365 + if (prev_m)
68366 + vma_adjust(prev_m, prev_m->vm_start,
68367 + addr_m, prev_m->vm_pgoff, NULL);
68368 +#endif
68369 +
68370 + } else { /* cases 3, 8 */
68371 vma_adjust(area, addr, next->vm_end,
68372 next->vm_pgoff - pglen, NULL);
68373 +
68374 +#ifdef CONFIG_PAX_SEGMEXEC
68375 + if (area_m)
68376 + vma_adjust(area_m, addr_m, next_m->vm_end,
68377 + next_m->vm_pgoff - pglen, NULL);
68378 +#endif
68379 +
68380 + }
68381 return area;
68382 }
68383
68384 @@ -898,14 +978,11 @@ none:
68385 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68386 struct file *file, long pages)
68387 {
68388 - const unsigned long stack_flags
68389 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68390 -
68391 if (file) {
68392 mm->shared_vm += pages;
68393 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68394 mm->exec_vm += pages;
68395 - } else if (flags & stack_flags)
68396 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68397 mm->stack_vm += pages;
68398 if (flags & (VM_RESERVED|VM_IO))
68399 mm->reserved_vm += pages;
68400 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68401 * (the exception is when the underlying filesystem is noexec
68402 * mounted, in which case we dont add PROT_EXEC.)
68403 */
68404 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68405 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68406 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68407 prot |= PROT_EXEC;
68408
68409 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68410 /* Obtain the address to map to. we verify (or select) it and ensure
68411 * that it represents a valid section of the address space.
68412 */
68413 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68414 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68415 if (addr & ~PAGE_MASK)
68416 return addr;
68417
68418 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68419 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68420 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68421
68422 +#ifdef CONFIG_PAX_MPROTECT
68423 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68424 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68425 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68426 + gr_log_rwxmmap(file);
68427 +
68428 +#ifdef CONFIG_PAX_EMUPLT
68429 + vm_flags &= ~VM_EXEC;
68430 +#else
68431 + return -EPERM;
68432 +#endif
68433 +
68434 + }
68435 +
68436 + if (!(vm_flags & VM_EXEC))
68437 + vm_flags &= ~VM_MAYEXEC;
68438 +#else
68439 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68440 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68441 +#endif
68442 + else
68443 + vm_flags &= ~VM_MAYWRITE;
68444 + }
68445 +#endif
68446 +
68447 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68448 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68449 + vm_flags &= ~VM_PAGEEXEC;
68450 +#endif
68451 +
68452 if (flags & MAP_LOCKED)
68453 if (!can_do_mlock())
68454 return -EPERM;
68455 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68456 locked += mm->locked_vm;
68457 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68458 lock_limit >>= PAGE_SHIFT;
68459 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68460 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68461 return -EAGAIN;
68462 }
68463 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68464 if (error)
68465 return error;
68466
68467 + if (!gr_acl_handle_mmap(file, prot))
68468 + return -EACCES;
68469 +
68470 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68471 }
68472 EXPORT_SYMBOL(do_mmap_pgoff);
68473 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68474 */
68475 int vma_wants_writenotify(struct vm_area_struct *vma)
68476 {
68477 - unsigned int vm_flags = vma->vm_flags;
68478 + unsigned long vm_flags = vma->vm_flags;
68479
68480 /* If it was private or non-writable, the write bit is already clear */
68481 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68482 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68483 return 0;
68484
68485 /* The backer wishes to know when pages are first written to? */
68486 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68487 unsigned long charged = 0;
68488 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68489
68490 +#ifdef CONFIG_PAX_SEGMEXEC
68491 + struct vm_area_struct *vma_m = NULL;
68492 +#endif
68493 +
68494 + /*
68495 + * mm->mmap_sem is required to protect against another thread
68496 + * changing the mappings in case we sleep.
68497 + */
68498 + verify_mm_writelocked(mm);
68499 +
68500 /* Clear old maps */
68501 error = -ENOMEM;
68502 -munmap_back:
68503 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68504 if (vma && vma->vm_start < addr + len) {
68505 if (do_munmap(mm, addr, len))
68506 return -ENOMEM;
68507 - goto munmap_back;
68508 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68509 + BUG_ON(vma && vma->vm_start < addr + len);
68510 }
68511
68512 /* Check against address space limit. */
68513 @@ -1173,6 +1294,16 @@ munmap_back:
68514 goto unacct_error;
68515 }
68516
68517 +#ifdef CONFIG_PAX_SEGMEXEC
68518 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68519 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68520 + if (!vma_m) {
68521 + error = -ENOMEM;
68522 + goto free_vma;
68523 + }
68524 + }
68525 +#endif
68526 +
68527 vma->vm_mm = mm;
68528 vma->vm_start = addr;
68529 vma->vm_end = addr + len;
68530 @@ -1195,6 +1326,19 @@ munmap_back:
68531 error = file->f_op->mmap(file, vma);
68532 if (error)
68533 goto unmap_and_free_vma;
68534 +
68535 +#ifdef CONFIG_PAX_SEGMEXEC
68536 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68537 + added_exe_file_vma(mm);
68538 +#endif
68539 +
68540 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68541 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68542 + vma->vm_flags |= VM_PAGEEXEC;
68543 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68544 + }
68545 +#endif
68546 +
68547 if (vm_flags & VM_EXECUTABLE)
68548 added_exe_file_vma(mm);
68549
68550 @@ -1218,6 +1362,11 @@ munmap_back:
68551 vma_link(mm, vma, prev, rb_link, rb_parent);
68552 file = vma->vm_file;
68553
68554 +#ifdef CONFIG_PAX_SEGMEXEC
68555 + if (vma_m)
68556 + pax_mirror_vma(vma_m, vma);
68557 +#endif
68558 +
68559 /* Once vma denies write, undo our temporary denial count */
68560 if (correct_wcount)
68561 atomic_inc(&inode->i_writecount);
68562 @@ -1226,6 +1375,7 @@ out:
68563
68564 mm->total_vm += len >> PAGE_SHIFT;
68565 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68566 + track_exec_limit(mm, addr, addr + len, vm_flags);
68567 if (vm_flags & VM_LOCKED) {
68568 /*
68569 * makes pages present; downgrades, drops, reacquires mmap_sem
68570 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68571 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68572 charged = 0;
68573 free_vma:
68574 +
68575 +#ifdef CONFIG_PAX_SEGMEXEC
68576 + if (vma_m)
68577 + kmem_cache_free(vm_area_cachep, vma_m);
68578 +#endif
68579 +
68580 kmem_cache_free(vm_area_cachep, vma);
68581 unacct_error:
68582 if (charged)
68583 @@ -1255,6 +1411,44 @@ unacct_error:
68584 return error;
68585 }
68586
68587 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68588 +{
68589 + if (!vma) {
68590 +#ifdef CONFIG_STACK_GROWSUP
68591 + if (addr > sysctl_heap_stack_gap)
68592 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68593 + else
68594 + vma = find_vma(current->mm, 0);
68595 + if (vma && (vma->vm_flags & VM_GROWSUP))
68596 + return false;
68597 +#endif
68598 + return true;
68599 + }
68600 +
68601 + if (addr + len > vma->vm_start)
68602 + return false;
68603 +
68604 + if (vma->vm_flags & VM_GROWSDOWN)
68605 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68606 +#ifdef CONFIG_STACK_GROWSUP
68607 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68608 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68609 +#endif
68610 +
68611 + return true;
68612 +}
68613 +
68614 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68615 +{
68616 + if (vma->vm_start < len)
68617 + return -ENOMEM;
68618 + if (!(vma->vm_flags & VM_GROWSDOWN))
68619 + return vma->vm_start - len;
68620 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68621 + return vma->vm_start - len - sysctl_heap_stack_gap;
68622 + return -ENOMEM;
68623 +}
68624 +
68625 /* Get an address range which is currently unmapped.
68626 * For shmat() with addr=0.
68627 *
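
The hunk above adds check_heap_stack_gap() and skip_heap_stack_gap(), which the get_unmapped_area variants further down are switched to: a candidate range is accepted only if it does not overlap the next vma and, when that vma is a stack (VM_GROWSDOWN), stays at least sysctl_heap_stack_gap bytes below it. A minimal user-space sketch of the same interval arithmetic, assuming a toy vma struct and gap value; the real helpers also handle CONFIG_STACK_GROWSUP and report failure as -ENOMEM rather than 0.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins; the kernel code works on struct vm_area_struct. */
    struct toy_vma {
        unsigned long vm_start, vm_end;
        bool grows_down;                     /* models VM_GROWSDOWN */
    };

    static unsigned long heap_stack_gap = 64UL * 4096;   /* assumed gap, bytes */

    /* Does [addr, addr+len) avoid the next vma and keep the stack gap? */
    static bool gap_ok(const struct toy_vma *next, unsigned long addr, unsigned long len)
    {
        if (!next)
            return true;                     /* nothing above the candidate */
        if (addr + len > next->vm_start)
            return false;                    /* overlaps the vma itself */
        if (next->grows_down)
            return heap_stack_gap <= next->vm_start - addr - len;
        return true;
    }

    /* Highest start below vma that keeps the gap, or 0 if nothing fits. */
    static unsigned long skip_gap(const struct toy_vma *vma, unsigned long len)
    {
        if (vma->vm_start < len)
            return 0;
        if (!vma->grows_down)
            return vma->vm_start - len;
        if (heap_stack_gap <= vma->vm_start - len)
            return vma->vm_start - len - heap_stack_gap;
        return 0;
    }

    int main(void)
    {
        struct toy_vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, true };

        /* Right under the stack: rejected. Leaving the gap: accepted. */
        printf("%d\n", gap_ok(&stack, stack.vm_start - 0x1000, 0x1000));
        printf("%d\n", gap_ok(&stack, stack.vm_start - heap_stack_gap - 0x1000, 0x1000));
        /* Where a top-down search would retry from: */
        printf("%#lx\n", skip_gap(&stack, 0x1000));
        return 0;
    }

The design point is that the gap is enforced at allocation time, so a later stack growth cannot silently run into a heap or mmap region placed just beneath it.
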
68628 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68629 if (flags & MAP_FIXED)
68630 return addr;
68631
68632 +#ifdef CONFIG_PAX_RANDMMAP
68633 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68634 +#endif
68635 +
68636 if (addr) {
68637 addr = PAGE_ALIGN(addr);
68638 - vma = find_vma(mm, addr);
68639 - if (TASK_SIZE - len >= addr &&
68640 - (!vma || addr + len <= vma->vm_start))
68641 - return addr;
68642 + if (TASK_SIZE - len >= addr) {
68643 + vma = find_vma(mm, addr);
68644 + if (check_heap_stack_gap(vma, addr, len))
68645 + return addr;
68646 + }
68647 }
68648 if (len > mm->cached_hole_size) {
68649 - start_addr = addr = mm->free_area_cache;
68650 + start_addr = addr = mm->free_area_cache;
68651 } else {
68652 - start_addr = addr = TASK_UNMAPPED_BASE;
68653 - mm->cached_hole_size = 0;
68654 + start_addr = addr = mm->mmap_base;
68655 + mm->cached_hole_size = 0;
68656 }
68657
68658 full_search:
68659 @@ -1303,34 +1502,40 @@ full_search:
68660 * Start a new search - just in case we missed
68661 * some holes.
68662 */
68663 - if (start_addr != TASK_UNMAPPED_BASE) {
68664 - addr = TASK_UNMAPPED_BASE;
68665 - start_addr = addr;
68666 + if (start_addr != mm->mmap_base) {
68667 + start_addr = addr = mm->mmap_base;
68668 mm->cached_hole_size = 0;
68669 goto full_search;
68670 }
68671 return -ENOMEM;
68672 }
68673 - if (!vma || addr + len <= vma->vm_start) {
68674 - /*
68675 - * Remember the place where we stopped the search:
68676 - */
68677 - mm->free_area_cache = addr + len;
68678 - return addr;
68679 - }
68680 + if (check_heap_stack_gap(vma, addr, len))
68681 + break;
68682 if (addr + mm->cached_hole_size < vma->vm_start)
68683 mm->cached_hole_size = vma->vm_start - addr;
68684 addr = vma->vm_end;
68685 }
68686 +
68687 + /*
68688 + * Remember the place where we stopped the search:
68689 + */
68690 + mm->free_area_cache = addr + len;
68691 + return addr;
68692 }
68693 #endif
68694
68695 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68696 {
68697 +
68698 +#ifdef CONFIG_PAX_SEGMEXEC
68699 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68700 + return;
68701 +#endif
68702 +
68703 /*
68704 * Is this a new hole at the lowest possible address?
68705 */
68706 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68707 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68708 mm->free_area_cache = addr;
68709 mm->cached_hole_size = ~0UL;
68710 }
68711 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68712 {
68713 struct vm_area_struct *vma;
68714 struct mm_struct *mm = current->mm;
68715 - unsigned long addr = addr0;
68716 + unsigned long base = mm->mmap_base, addr = addr0;
68717
68718 /* requested length too big for entire address space */
68719 if (len > TASK_SIZE)
68720 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68721 if (flags & MAP_FIXED)
68722 return addr;
68723
68724 +#ifdef CONFIG_PAX_RANDMMAP
68725 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68726 +#endif
68727 +
68728 /* requesting a specific address */
68729 if (addr) {
68730 addr = PAGE_ALIGN(addr);
68731 - vma = find_vma(mm, addr);
68732 - if (TASK_SIZE - len >= addr &&
68733 - (!vma || addr + len <= vma->vm_start))
68734 - return addr;
68735 + if (TASK_SIZE - len >= addr) {
68736 + vma = find_vma(mm, addr);
68737 + if (check_heap_stack_gap(vma, addr, len))
68738 + return addr;
68739 + }
68740 }
68741
68742 /* check if free_area_cache is useful for us */
68743 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68744 /* make sure it can fit in the remaining address space */
68745 if (addr > len) {
68746 vma = find_vma(mm, addr-len);
68747 - if (!vma || addr <= vma->vm_start)
68748 + if (check_heap_stack_gap(vma, addr - len, len))
68749 /* remember the address as a hint for next time */
68750 return (mm->free_area_cache = addr-len);
68751 }
68752 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68753 * return with success:
68754 */
68755 vma = find_vma(mm, addr);
68756 - if (!vma || addr+len <= vma->vm_start)
68757 + if (check_heap_stack_gap(vma, addr, len))
68758 /* remember the address as a hint for next time */
68759 return (mm->free_area_cache = addr);
68760
68761 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68762 mm->cached_hole_size = vma->vm_start - addr;
68763
68764 /* try just below the current vma->vm_start */
68765 - addr = vma->vm_start-len;
68766 - } while (len < vma->vm_start);
68767 + addr = skip_heap_stack_gap(vma, len);
68768 + } while (!IS_ERR_VALUE(addr));
68769
68770 bottomup:
68771 /*
68772 @@ -1414,13 +1624,21 @@ bottomup:
68773 * can happen with large stack limits and large mmap()
68774 * allocations.
68775 */
68776 + mm->mmap_base = TASK_UNMAPPED_BASE;
68777 +
68778 +#ifdef CONFIG_PAX_RANDMMAP
68779 + if (mm->pax_flags & MF_PAX_RANDMMAP)
68780 + mm->mmap_base += mm->delta_mmap;
68781 +#endif
68782 +
68783 + mm->free_area_cache = mm->mmap_base;
68784 mm->cached_hole_size = ~0UL;
68785 - mm->free_area_cache = TASK_UNMAPPED_BASE;
68786 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68787 /*
68788 * Restore the topdown base:
68789 */
68790 - mm->free_area_cache = mm->mmap_base;
68791 + mm->mmap_base = base;
68792 + mm->free_area_cache = base;
68793 mm->cached_hole_size = ~0UL;
68794
68795 return addr;
68796 @@ -1429,6 +1647,12 @@ bottomup:
68797
68798 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68799 {
68800 +
68801 +#ifdef CONFIG_PAX_SEGMEXEC
68802 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68803 + return;
68804 +#endif
68805 +
68806 /*
68807 * Is this a new hole at the highest possible address?
68808 */
68809 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68810 mm->free_area_cache = addr;
68811
68812 /* dont allow allocations above current base */
68813 - if (mm->free_area_cache > mm->mmap_base)
68814 + if (mm->free_area_cache > mm->mmap_base) {
68815 mm->free_area_cache = mm->mmap_base;
68816 + mm->cached_hole_size = ~0UL;
68817 + }
68818 }
68819
68820 unsigned long
68821 @@ -1545,6 +1771,27 @@ out:
68822 return prev ? prev->vm_next : vma;
68823 }
68824
68825 +#ifdef CONFIG_PAX_SEGMEXEC
68826 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68827 +{
68828 + struct vm_area_struct *vma_m;
68829 +
68830 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68831 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68832 + BUG_ON(vma->vm_mirror);
68833 + return NULL;
68834 + }
68835 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68836 + vma_m = vma->vm_mirror;
68837 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68838 + BUG_ON(vma->vm_file != vma_m->vm_file);
68839 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68840 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68841 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68842 + return vma_m;
68843 +}
68844 +#endif
68845 +
68846 /*
68847 * Verify that the stack growth is acceptable and
68848 * update accounting. This is shared with both the
68849 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68850 return -ENOMEM;
68851
68852 /* Stack limit test */
68853 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
68854 if (size > rlim[RLIMIT_STACK].rlim_cur)
68855 return -ENOMEM;
68856
68857 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68858 unsigned long limit;
68859 locked = mm->locked_vm + grow;
68860 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68861 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68862 if (locked > limit && !capable(CAP_IPC_LOCK))
68863 return -ENOMEM;
68864 }
68865 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68866 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68867 * vma is the last one with address > vma->vm_end. Have to extend vma.
68868 */
68869 +#ifndef CONFIG_IA64
68870 +static
68871 +#endif
68872 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68873 {
68874 int error;
68875 + bool locknext;
68876
68877 if (!(vma->vm_flags & VM_GROWSUP))
68878 return -EFAULT;
68879
68880 + /* Also guard against wrapping around to address 0. */
68881 + if (address < PAGE_ALIGN(address+1))
68882 + address = PAGE_ALIGN(address+1);
68883 + else
68884 + return -ENOMEM;
68885 +
68886 /*
68887 * We must make sure the anon_vma is allocated
68888 * so that the anon_vma locking is not a noop.
68889 */
68890 if (unlikely(anon_vma_prepare(vma)))
68891 return -ENOMEM;
68892 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68893 + if (locknext && anon_vma_prepare(vma->vm_next))
68894 + return -ENOMEM;
68895 anon_vma_lock(vma);
68896 + if (locknext)
68897 + anon_vma_lock(vma->vm_next);
68898
68899 /*
68900 * vma->vm_start/vm_end cannot change under us because the caller
68901 * is required to hold the mmap_sem in read mode. We need the
68902 - * anon_vma lock to serialize against concurrent expand_stacks.
68903 - * Also guard against wrapping around to address 0.
68904 + * anon_vma locks to serialize against concurrent expand_stacks
68905 + * and expand_upwards.
68906 */
68907 - if (address < PAGE_ALIGN(address+4))
68908 - address = PAGE_ALIGN(address+4);
68909 - else {
68910 - anon_vma_unlock(vma);
68911 - return -ENOMEM;
68912 - }
68913 error = 0;
68914
68915 /* Somebody else might have raced and expanded it already */
68916 - if (address > vma->vm_end) {
68917 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68918 + error = -ENOMEM;
68919 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68920 unsigned long size, grow;
68921
68922 size = address - vma->vm_start;
68923 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68924 if (!error)
68925 vma->vm_end = address;
68926 }
68927 + if (locknext)
68928 + anon_vma_unlock(vma->vm_next);
68929 anon_vma_unlock(vma);
68930 return error;
68931 }
68932 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68933 unsigned long address)
68934 {
68935 int error;
68936 + bool lockprev = false;
68937 + struct vm_area_struct *prev;
68938
68939 /*
68940 * We must make sure the anon_vma is allocated
68941 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68942 if (error)
68943 return error;
68944
68945 + prev = vma->vm_prev;
68946 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68947 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68948 +#endif
68949 + if (lockprev && anon_vma_prepare(prev))
68950 + return -ENOMEM;
68951 + if (lockprev)
68952 + anon_vma_lock(prev);
68953 +
68954 anon_vma_lock(vma);
68955
68956 /*
68957 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68958 */
68959
68960 /* Somebody else might have raced and expanded it already */
68961 - if (address < vma->vm_start) {
68962 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68963 + error = -ENOMEM;
68964 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68965 unsigned long size, grow;
68966
68967 +#ifdef CONFIG_PAX_SEGMEXEC
68968 + struct vm_area_struct *vma_m;
68969 +
68970 + vma_m = pax_find_mirror_vma(vma);
68971 +#endif
68972 +
68973 size = vma->vm_end - address;
68974 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68975
68976 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68977 if (!error) {
68978 vma->vm_start = address;
68979 vma->vm_pgoff -= grow;
68980 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68981 +
68982 +#ifdef CONFIG_PAX_SEGMEXEC
68983 + if (vma_m) {
68984 + vma_m->vm_start -= grow << PAGE_SHIFT;
68985 + vma_m->vm_pgoff -= grow;
68986 + }
68987 +#endif
68988 +
68989 }
68990 }
68991 anon_vma_unlock(vma);
68992 + if (lockprev)
68993 + anon_vma_unlock(prev);
68994 return error;
68995 }
68996
68997 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68998 do {
68999 long nrpages = vma_pages(vma);
69000
69001 +#ifdef CONFIG_PAX_SEGMEXEC
69002 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69003 + vma = remove_vma(vma);
69004 + continue;
69005 + }
69006 +#endif
69007 +
69008 mm->total_vm -= nrpages;
69009 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69010 vma = remove_vma(vma);
69011 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69012 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69013 vma->vm_prev = NULL;
69014 do {
69015 +
69016 +#ifdef CONFIG_PAX_SEGMEXEC
69017 + if (vma->vm_mirror) {
69018 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69019 + vma->vm_mirror->vm_mirror = NULL;
69020 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69021 + vma->vm_mirror = NULL;
69022 + }
69023 +#endif
69024 +
69025 rb_erase(&vma->vm_rb, &mm->mm_rb);
69026 mm->map_count--;
69027 tail_vma = vma;
69028 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
69029 struct mempolicy *pol;
69030 struct vm_area_struct *new;
69031
69032 +#ifdef CONFIG_PAX_SEGMEXEC
69033 + struct vm_area_struct *vma_m, *new_m = NULL;
69034 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69035 +#endif
69036 +
69037 if (is_vm_hugetlb_page(vma) && (addr &
69038 ~(huge_page_mask(hstate_vma(vma)))))
69039 return -EINVAL;
69040
69041 +#ifdef CONFIG_PAX_SEGMEXEC
69042 + vma_m = pax_find_mirror_vma(vma);
69043 +
69044 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69045 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69046 + if (mm->map_count >= sysctl_max_map_count-1)
69047 + return -ENOMEM;
69048 + } else
69049 +#endif
69050 +
69051 if (mm->map_count >= sysctl_max_map_count)
69052 return -ENOMEM;
69053
69054 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
69055 if (!new)
69056 return -ENOMEM;
69057
69058 +#ifdef CONFIG_PAX_SEGMEXEC
69059 + if (vma_m) {
69060 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69061 + if (!new_m) {
69062 + kmem_cache_free(vm_area_cachep, new);
69063 + return -ENOMEM;
69064 + }
69065 + }
69066 +#endif
69067 +
69068 /* most fields are the same, copy all, and then fixup */
69069 *new = *vma;
69070
69071 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
69072 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69073 }
69074
69075 +#ifdef CONFIG_PAX_SEGMEXEC
69076 + if (vma_m) {
69077 + *new_m = *vma_m;
69078 + new_m->vm_mirror = new;
69079 + new->vm_mirror = new_m;
69080 +
69081 + if (new_below)
69082 + new_m->vm_end = addr_m;
69083 + else {
69084 + new_m->vm_start = addr_m;
69085 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69086 + }
69087 + }
69088 +#endif
69089 +
69090 pol = mpol_dup(vma_policy(vma));
69091 if (IS_ERR(pol)) {
69092 +
69093 +#ifdef CONFIG_PAX_SEGMEXEC
69094 + if (new_m)
69095 + kmem_cache_free(vm_area_cachep, new_m);
69096 +#endif
69097 +
69098 kmem_cache_free(vm_area_cachep, new);
69099 return PTR_ERR(pol);
69100 }
69101 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69102 else
69103 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69104
69105 +#ifdef CONFIG_PAX_SEGMEXEC
69106 + if (vma_m) {
69107 + mpol_get(pol);
69108 + vma_set_policy(new_m, pol);
69109 +
69110 + if (new_m->vm_file) {
69111 + get_file(new_m->vm_file);
69112 + if (vma_m->vm_flags & VM_EXECUTABLE)
69113 + added_exe_file_vma(mm);
69114 + }
69115 +
69116 + if (new_m->vm_ops && new_m->vm_ops->open)
69117 + new_m->vm_ops->open(new_m);
69118 +
69119 + if (new_below)
69120 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69121 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69122 + else
69123 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69124 + }
69125 +#endif
69126 +
69127 return 0;
69128 }
69129
69130 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69131 * work. This now handles partial unmappings.
69132 * Jeremy Fitzhardinge <jeremy@goop.org>
69133 */
69134 +#ifdef CONFIG_PAX_SEGMEXEC
69135 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69136 +{
69137 + int ret = __do_munmap(mm, start, len);
69138 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69139 + return ret;
69140 +
69141 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69142 +}
69143 +
69144 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69145 +#else
69146 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69147 +#endif
69148 {
69149 unsigned long end;
69150 struct vm_area_struct *vma, *prev, *last;
69151
69152 + /*
69153 + * mm->mmap_sem is required to protect against another thread
69154 + * changing the mappings in case we sleep.
69155 + */
69156 + verify_mm_writelocked(mm);
69157 +
69158 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69159 return -EINVAL;
69160
69161 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69162 /* Fix up all other VM information */
69163 remove_vma_list(mm, vma);
69164
69165 + track_exec_limit(mm, start, end, 0UL);
69166 +
69167 return 0;
69168 }
69169
69170 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69171
69172 profile_munmap(addr);
69173
69174 +#ifdef CONFIG_PAX_SEGMEXEC
69175 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69176 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69177 + return -EINVAL;
69178 +#endif
69179 +
69180 down_write(&mm->mmap_sem);
69181 ret = do_munmap(mm, addr, len);
69182 up_write(&mm->mmap_sem);
69183 return ret;
69184 }
69185
69186 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69187 -{
69188 -#ifdef CONFIG_DEBUG_VM
69189 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69190 - WARN_ON(1);
69191 - up_read(&mm->mmap_sem);
69192 - }
69193 -#endif
69194 -}
69195 -
69196 /*
69197 * this is really a simplified "do_mmap". it only handles
69198 * anonymous maps. eventually we may be able to do some
69199 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69200 struct rb_node ** rb_link, * rb_parent;
69201 pgoff_t pgoff = addr >> PAGE_SHIFT;
69202 int error;
69203 + unsigned long charged;
69204
69205 len = PAGE_ALIGN(len);
69206 if (!len)
69207 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69208
69209 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69210
69211 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69212 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69213 + flags &= ~VM_EXEC;
69214 +
69215 +#ifdef CONFIG_PAX_MPROTECT
69216 + if (mm->pax_flags & MF_PAX_MPROTECT)
69217 + flags &= ~VM_MAYEXEC;
69218 +#endif
69219 +
69220 + }
69221 +#endif
69222 +
69223 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69224 if (error & ~PAGE_MASK)
69225 return error;
69226
69227 + charged = len >> PAGE_SHIFT;
69228 +
69229 /*
69230 * mlock MCL_FUTURE?
69231 */
69232 if (mm->def_flags & VM_LOCKED) {
69233 unsigned long locked, lock_limit;
69234 - locked = len >> PAGE_SHIFT;
69235 + locked = charged;
69236 locked += mm->locked_vm;
69237 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69238 lock_limit >>= PAGE_SHIFT;
69239 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69240 /*
69241 * Clear old maps. this also does some error checking for us
69242 */
69243 - munmap_back:
69244 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69245 if (vma && vma->vm_start < addr + len) {
69246 if (do_munmap(mm, addr, len))
69247 return -ENOMEM;
69248 - goto munmap_back;
69249 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69250 + BUG_ON(vma && vma->vm_start < addr + len);
69251 }
69252
69253 /* Check against address space limits *after* clearing old maps... */
69254 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69255 + if (!may_expand_vm(mm, charged))
69256 return -ENOMEM;
69257
69258 if (mm->map_count > sysctl_max_map_count)
69259 return -ENOMEM;
69260
69261 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69262 + if (security_vm_enough_memory(charged))
69263 return -ENOMEM;
69264
69265 /* Can we just expand an old private anonymous mapping? */
69266 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69267 */
69268 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69269 if (!vma) {
69270 - vm_unacct_memory(len >> PAGE_SHIFT);
69271 + vm_unacct_memory(charged);
69272 return -ENOMEM;
69273 }
69274
69275 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69276 vma->vm_page_prot = vm_get_page_prot(flags);
69277 vma_link(mm, vma, prev, rb_link, rb_parent);
69278 out:
69279 - mm->total_vm += len >> PAGE_SHIFT;
69280 + mm->total_vm += charged;
69281 if (flags & VM_LOCKED) {
69282 if (!mlock_vma_pages_range(vma, addr, addr + len))
69283 - mm->locked_vm += (len >> PAGE_SHIFT);
69284 + mm->locked_vm += charged;
69285 }
69286 + track_exec_limit(mm, addr, addr + len, flags);
69287 return addr;
69288 }
69289
69290 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69291 * Walk the list again, actually closing and freeing it,
69292 * with preemption enabled, without holding any MM locks.
69293 */
69294 - while (vma)
69295 + while (vma) {
69296 + vma->vm_mirror = NULL;
69297 vma = remove_vma(vma);
69298 + }
69299
69300 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69301 }
69302 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69303 struct vm_area_struct * __vma, * prev;
69304 struct rb_node ** rb_link, * rb_parent;
69305
69306 +#ifdef CONFIG_PAX_SEGMEXEC
69307 + struct vm_area_struct *vma_m = NULL;
69308 +#endif
69309 +
69310 /*
69311 * The vm_pgoff of a purely anonymous vma should be irrelevant
69312 * until its first write fault, when page's anon_vma and index
69313 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69314 if ((vma->vm_flags & VM_ACCOUNT) &&
69315 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69316 return -ENOMEM;
69317 +
69318 +#ifdef CONFIG_PAX_SEGMEXEC
69319 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69320 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69321 + if (!vma_m)
69322 + return -ENOMEM;
69323 + }
69324 +#endif
69325 +
69326 vma_link(mm, vma, prev, rb_link, rb_parent);
69327 +
69328 +#ifdef CONFIG_PAX_SEGMEXEC
69329 + if (vma_m)
69330 + pax_mirror_vma(vma_m, vma);
69331 +#endif
69332 +
69333 return 0;
69334 }
69335
69336 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69337 struct rb_node **rb_link, *rb_parent;
69338 struct mempolicy *pol;
69339
69340 + BUG_ON(vma->vm_mirror);
69341 +
69342 /*
69343 * If anonymous vma has not yet been faulted, update new pgoff
69344 * to match new location, to increase its chance of merging.
69345 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69346 return new_vma;
69347 }
69348
69349 +#ifdef CONFIG_PAX_SEGMEXEC
69350 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69351 +{
69352 + struct vm_area_struct *prev_m;
69353 + struct rb_node **rb_link_m, *rb_parent_m;
69354 + struct mempolicy *pol_m;
69355 +
69356 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69357 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69358 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69359 + *vma_m = *vma;
69360 + pol_m = vma_policy(vma_m);
69361 + mpol_get(pol_m);
69362 + vma_set_policy(vma_m, pol_m);
69363 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69364 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69365 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69366 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69367 + if (vma_m->vm_file)
69368 + get_file(vma_m->vm_file);
69369 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69370 + vma_m->vm_ops->open(vma_m);
69371 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69372 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69373 + vma_m->vm_mirror = vma;
69374 + vma->vm_mirror = vma_m;
69375 +}
69376 +#endif
69377 +
69378 /*
69379 * Return true if the calling process may expand its vm space by the passed
69380 * number of pages
69381 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69382 unsigned long lim;
69383
69384 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69385 -
69386 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69387 if (cur + npages > lim)
69388 return 0;
69389 return 1;
69390 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69391 vma->vm_start = addr;
69392 vma->vm_end = addr + len;
69393
69394 +#ifdef CONFIG_PAX_MPROTECT
69395 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69396 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69397 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69398 + return -EPERM;
69399 + if (!(vm_flags & VM_EXEC))
69400 + vm_flags &= ~VM_MAYEXEC;
69401 +#else
69402 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69403 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69404 +#endif
69405 + else
69406 + vm_flags &= ~VM_MAYWRITE;
69407 + }
69408 +#endif
69409 +
69410 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69411 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69412
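
Taken together, the mm/mmap.c changes above thread a SEGMEXEC mirror vma through mmap_region(), split_vma(), insert_vm_struct() and the new pax_mirror_vma(): every executable mapping gains a non-writable twin shifted up by SEGMEXEC_TASK_SIZE and cross-linked through vm_mirror, so the segment-based NX emulation can fetch instructions from the upper half while data accesses stay in the lower half. A compressed user-space model of the cloning step and of the invariants pax_find_mirror_vma() asserts; the struct, flag bits and shift constant below are illustrative assumptions, not kernel definitions.

    #include <assert.h>
    #include <stdio.h>

    #define F_WRITE 0x1
    #define F_EXEC  0x2
    /* Illustrative shift; the real SEGMEXEC_TASK_SIZE is arch-defined. */
    #define SEGMEXEC_SHIFT 0x60000000UL

    struct toy_vma {
        unsigned long start, end, flags;
        struct toy_vma *mirror;
    };

    /* Model of pax_mirror_vma(): copy, shift into the upper region, strip
     * write permission (the kernel clears VM_WRITE | VM_MAYWRITE | VM_ACCOUNT
     * | VM_LOCKED), and cross-link the pair. */
    static void mirror_vma(struct toy_vma *m, struct toy_vma *v)
    {
        *m = *v;                             /* like "*vma_m = *vma" */
        m->start += SEGMEXEC_SHIFT;
        m->end   += SEGMEXEC_SHIFT;
        m->flags &= ~(unsigned long)F_WRITE;
        m->mirror = v;
        v->mirror = m;
    }

    int main(void)
    {
        /* A writable+executable source is used only to show the stripping. */
        struct toy_vma text = { 0x08048000UL, 0x08050000UL, F_EXEC | F_WRITE, NULL };
        struct toy_vma twin;

        mirror_vma(&twin, &text);

        /* The invariants pax_find_mirror_vma() checks with BUG_ON(): */
        assert(text.mirror == &twin && twin.mirror == &text);
        assert(twin.end - twin.start == text.end - text.start);
        assert(!(twin.flags & F_WRITE));

        printf("mirror at %#lx-%#lx\n", twin.start, twin.end);
        return 0;
    }
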
69413 diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69414 --- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69415 +++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69416 @@ -24,10 +24,16 @@
69417 #include <linux/mmu_notifier.h>
69418 #include <linux/migrate.h>
69419 #include <linux/perf_event.h>
69420 +
69421 +#ifdef CONFIG_PAX_MPROTECT
69422 +#include <linux/elf.h>
69423 +#endif
69424 +
69425 #include <asm/uaccess.h>
69426 #include <asm/pgtable.h>
69427 #include <asm/cacheflush.h>
69428 #include <asm/tlbflush.h>
69429 +#include <asm/mmu_context.h>
69430
69431 #ifndef pgprot_modify
69432 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69433 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
69434 flush_tlb_range(vma, start, end);
69435 }
69436
69437 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69438 +/* called while holding the mmap semaphore for writing except stack expansion */
69439 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69440 +{
69441 + unsigned long oldlimit, newlimit = 0UL;
69442 +
69443 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69444 + return;
69445 +
69446 + spin_lock(&mm->page_table_lock);
69447 + oldlimit = mm->context.user_cs_limit;
69448 + if ((prot & VM_EXEC) && oldlimit < end)
69449 + /* USER_CS limit moved up */
69450 + newlimit = end;
69451 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69452 + /* USER_CS limit moved down */
69453 + newlimit = start;
69454 +
69455 + if (newlimit) {
69456 + mm->context.user_cs_limit = newlimit;
69457 +
69458 +#ifdef CONFIG_SMP
69459 + wmb();
69460 + cpus_clear(mm->context.cpu_user_cs_mask);
69461 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69462 +#endif
69463 +
69464 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69465 + }
69466 + spin_unlock(&mm->page_table_lock);
69467 + if (newlimit == end) {
69468 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69469 +
69470 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69471 + if (is_vm_hugetlb_page(vma))
69472 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69473 + else
69474 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69475 + }
69476 +}
69477 +#endif
69478 +
69479 int
69480 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69481 unsigned long start, unsigned long end, unsigned long newflags)
69482 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69483 int error;
69484 int dirty_accountable = 0;
69485
69486 +#ifdef CONFIG_PAX_SEGMEXEC
69487 + struct vm_area_struct *vma_m = NULL;
69488 + unsigned long start_m, end_m;
69489 +
69490 + start_m = start + SEGMEXEC_TASK_SIZE;
69491 + end_m = end + SEGMEXEC_TASK_SIZE;
69492 +#endif
69493 +
69494 if (newflags == oldflags) {
69495 *pprev = vma;
69496 return 0;
69497 }
69498
69499 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69500 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69501 +
69502 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69503 + return -ENOMEM;
69504 +
69505 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69506 + return -ENOMEM;
69507 + }
69508 +
69509 /*
69510 * If we make a private mapping writable we increase our commit;
69511 * but (without finer accounting) cannot reduce our commit if we
69512 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69513 }
69514 }
69515
69516 +#ifdef CONFIG_PAX_SEGMEXEC
69517 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69518 + if (start != vma->vm_start) {
69519 + error = split_vma(mm, vma, start, 1);
69520 + if (error)
69521 + goto fail;
69522 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69523 + *pprev = (*pprev)->vm_next;
69524 + }
69525 +
69526 + if (end != vma->vm_end) {
69527 + error = split_vma(mm, vma, end, 0);
69528 + if (error)
69529 + goto fail;
69530 + }
69531 +
69532 + if (pax_find_mirror_vma(vma)) {
69533 + error = __do_munmap(mm, start_m, end_m - start_m);
69534 + if (error)
69535 + goto fail;
69536 + } else {
69537 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69538 + if (!vma_m) {
69539 + error = -ENOMEM;
69540 + goto fail;
69541 + }
69542 + vma->vm_flags = newflags;
69543 + pax_mirror_vma(vma_m, vma);
69544 + }
69545 + }
69546 +#endif
69547 +
69548 /*
69549 * First try to merge with previous and/or next vma.
69550 */
69551 @@ -195,9 +293,21 @@ success:
69552 * vm_flags and vm_page_prot are protected by the mmap_sem
69553 * held in write mode.
69554 */
69555 +
69556 +#ifdef CONFIG_PAX_SEGMEXEC
69557 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69558 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69559 +#endif
69560 +
69561 vma->vm_flags = newflags;
69562 +
69563 +#ifdef CONFIG_PAX_MPROTECT
69564 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69565 + mm->binfmt->handle_mprotect(vma, newflags);
69566 +#endif
69567 +
69568 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69569 - vm_get_page_prot(newflags));
69570 + vm_get_page_prot(vma->vm_flags));
69571
69572 if (vma_wants_writenotify(vma)) {
69573 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69574 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69575 end = start + len;
69576 if (end <= start)
69577 return -ENOMEM;
69578 +
69579 +#ifdef CONFIG_PAX_SEGMEXEC
69580 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69581 + if (end > SEGMEXEC_TASK_SIZE)
69582 + return -EINVAL;
69583 + } else
69584 +#endif
69585 +
69586 + if (end > TASK_SIZE)
69587 + return -EINVAL;
69588 +
69589 if (!arch_validate_prot(prot))
69590 return -EINVAL;
69591
69592 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69593 /*
69594 * Does the application expect PROT_READ to imply PROT_EXEC:
69595 */
69596 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69597 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69598 prot |= PROT_EXEC;
69599
69600 vm_flags = calc_vm_prot_bits(prot);
69601 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69602 if (start > vma->vm_start)
69603 prev = vma;
69604
69605 +#ifdef CONFIG_PAX_MPROTECT
69606 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69607 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69608 +#endif
69609 +
69610 for (nstart = start ; ; ) {
69611 unsigned long newflags;
69612
69613 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69614
69615 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69616 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69617 + if (prot & (PROT_WRITE | PROT_EXEC))
69618 + gr_log_rwxmprotect(vma->vm_file);
69619 +
69620 + error = -EACCES;
69621 + goto out;
69622 + }
69623 +
69624 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69625 error = -EACCES;
69626 goto out;
69627 }
69628 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69629 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69630 if (error)
69631 goto out;
69632 +
69633 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
69634 +
69635 nstart = tmp;
69636
69637 if (nstart < prev->vm_end)
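
track_exec_limit(), added above under CONFIG_ARCH_TRACK_EXEC_LIMIT, keeps mm->context.user_cs_limit at the top of the highest executable mapping: the limit is raised when an executable range ends above it and lowered to the start of a range that loses exec permission across it. The sketch below reproduces only that decision as a pure function; the descriptor reload, locking and vma re-protection of the real code are omitted, and new_exec_limit() plus the sample addresses are invented for the demo.

    #include <stdio.h>

    /* Returns the new exec limit, or 0 when it stays where it is. */
    static unsigned long new_exec_limit(unsigned long oldlimit, unsigned long start,
                                        unsigned long end, int is_exec)
    {
        if (is_exec && oldlimit < end)
            return end;          /* exec mapping above the limit: move it up */
        if (!is_exec && start < oldlimit && oldlimit <= end)
            return start;        /* exec dropped across the limit: move it down */
        return 0;
    }

    int main(void)
    {
        unsigned long limit = 0x08050000UL;

        printf("%#lx\n", new_exec_limit(limit, 0x40000000UL, 0x40010000UL, 1));
        printf("%#lx\n", new_exec_limit(limit, 0x08048000UL, 0x08060000UL, 0));
        printf("%#lx\n", new_exec_limit(limit, 0x40000000UL, 0x40010000UL, 0));
        return 0;
    }

Only a raised limit triggers the follow-up pass that re-protects the vmas between the old and new limit, which is why the kernel function re-walks the vma list solely in the newlimit == end case.
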
69638 diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69639 --- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69640 +++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69641 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69642 continue;
69643 pte = ptep_clear_flush(vma, old_addr, old_pte);
69644 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69645 +
69646 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69647 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69648 + pte = pte_exprotect(pte);
69649 +#endif
69650 +
69651 set_pte_at(mm, new_addr, new_pte, pte);
69652 }
69653
69654 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69655 if (is_vm_hugetlb_page(vma))
69656 goto Einval;
69657
69658 +#ifdef CONFIG_PAX_SEGMEXEC
69659 + if (pax_find_mirror_vma(vma))
69660 + goto Einval;
69661 +#endif
69662 +
69663 /* We can't remap across vm area boundaries */
69664 if (old_len > vma->vm_end - addr)
69665 goto Efault;
69666 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69667 unsigned long ret = -EINVAL;
69668 unsigned long charged = 0;
69669 unsigned long map_flags;
69670 + unsigned long pax_task_size = TASK_SIZE;
69671
69672 if (new_addr & ~PAGE_MASK)
69673 goto out;
69674
69675 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69676 +#ifdef CONFIG_PAX_SEGMEXEC
69677 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69678 + pax_task_size = SEGMEXEC_TASK_SIZE;
69679 +#endif
69680 +
69681 + pax_task_size -= PAGE_SIZE;
69682 +
69683 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69684 goto out;
69685
69686 /* Check if the location we're moving into overlaps the
69687 * old location at all, and fail if it does.
69688 */
69689 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
69690 - goto out;
69691 -
69692 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
69693 + if (addr + old_len > new_addr && new_addr + new_len > addr)
69694 goto out;
69695
69696 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69697 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69698 struct vm_area_struct *vma;
69699 unsigned long ret = -EINVAL;
69700 unsigned long charged = 0;
69701 + unsigned long pax_task_size = TASK_SIZE;
69702
69703 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69704 goto out;
69705 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69706 if (!new_len)
69707 goto out;
69708
69709 +#ifdef CONFIG_PAX_SEGMEXEC
69710 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69711 + pax_task_size = SEGMEXEC_TASK_SIZE;
69712 +#endif
69713 +
69714 + pax_task_size -= PAGE_SIZE;
69715 +
69716 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69717 + old_len > pax_task_size || addr > pax_task_size-old_len)
69718 + goto out;
69719 +
69720 if (flags & MREMAP_FIXED) {
69721 if (flags & MREMAP_MAYMOVE)
69722 ret = mremap_to(addr, old_len, new_addr, new_len);
69723 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69724 addr + new_len);
69725 }
69726 ret = addr;
69727 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69728 goto out;
69729 }
69730 }
69731 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69732 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69733 if (ret)
69734 goto out;
69735 +
69736 + map_flags = vma->vm_flags;
69737 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69738 + if (!(ret & ~PAGE_MASK)) {
69739 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69740 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69741 + }
69742 }
69743 out:
69744 if (ret & ~PAGE_MASK)
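
The mremap_to() hunk above replaces the two ordered overlap tests with the single closed-form check addr + old_len > new_addr && new_addr + new_len > addr, the standard half-open interval overlap condition. The brute-force program below, written only for this document, confirms the old and new forms agree for every non-empty range in a small test grid.

    #include <stdio.h>

    /* Pre-patch test: two ordered cases. */
    static int overlaps_old(unsigned long addr, unsigned long old_len,
                            unsigned long new_addr, unsigned long new_len)
    {
        return (new_addr <= addr && new_addr + new_len > addr) ||
               (addr <= new_addr && addr + old_len > new_addr);
    }

    /* Post-patch test: one interval-overlap expression. */
    static int overlaps_new(unsigned long addr, unsigned long old_len,
                            unsigned long new_addr, unsigned long new_len)
    {
        return addr + old_len > new_addr && new_addr + new_len > addr;
    }

    int main(void)
    {
        for (unsigned long a = 0; a < 16; a++)
            for (unsigned long ol = 1; ol < 8; ol++)
                for (unsigned long n = 0; n < 16; n++)
                    for (unsigned long nl = 1; nl < 8; nl++)
                        if (overlaps_old(a, ol, n, nl) != overlaps_new(a, ol, n, nl)) {
                            printf("mismatch: %lu %lu %lu %lu\n", a, ol, n, nl);
                            return 1;
                        }
        printf("equivalent for all tested non-empty ranges\n");
        return 0;
    }
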
69745 diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69746 --- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69747 +++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69748 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69749 int sysctl_overcommit_ratio = 50; /* default is 50% */
69750 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69751 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69752 -int heap_stack_gap = 0;
69753
69754 atomic_long_t mmap_pages_allocated;
69755
69756 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69757 EXPORT_SYMBOL(find_vma);
69758
69759 /*
69760 - * find a VMA
69761 - * - we don't extend stack VMAs under NOMMU conditions
69762 - */
69763 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69764 -{
69765 - return find_vma(mm, addr);
69766 -}
69767 -
69768 -/*
69769 * expand a stack to a given address
69770 * - not supported under NOMMU conditions
69771 */
69772 diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69773 --- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69774 +++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69775 @@ -289,7 +289,7 @@ out:
69776 * This usage means that zero-order pages may not be compound.
69777 */
69778
69779 -static void free_compound_page(struct page *page)
69780 +void free_compound_page(struct page *page)
69781 {
69782 __free_pages_ok(page, compound_order(page));
69783 }
69784 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69785 int bad = 0;
69786 int wasMlocked = __TestClearPageMlocked(page);
69787
69788 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69789 + unsigned long index = 1UL << order;
69790 +#endif
69791 +
69792 kmemcheck_free_shadow(page, order);
69793
69794 for (i = 0 ; i < (1 << order) ; ++i)
69795 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69796 debug_check_no_obj_freed(page_address(page),
69797 PAGE_SIZE << order);
69798 }
69799 +
69800 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69801 + for (; index; --index)
69802 + sanitize_highpage(page + index - 1);
69803 +#endif
69804 +
69805 arch_free_page(page, order);
69806 kernel_map_pages(page, 1 << order, 0);
69807
69808 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69809 arch_alloc_page(page, order);
69810 kernel_map_pages(page, 1 << order, 1);
69811
69812 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
69813 if (gfp_flags & __GFP_ZERO)
69814 prep_zero_page(page, order, gfp_flags);
69815 +#endif
69816
69817 if (order && (gfp_flags & __GFP_COMP))
69818 prep_compound_page(page, order);
69819 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69820 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69821 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69822 }
69823 +
69824 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69825 + sanitize_highpage(page);
69826 +#endif
69827 +
69828 arch_free_page(page, 0);
69829 kernel_map_pages(page, 1, 0);
69830
69831 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
69832 int cpu;
69833 struct zone *zone;
69834
69835 + pax_track_stack();
69836 +
69837 for_each_populated_zone(zone) {
69838 show_node(zone);
69839 printk("%s per-cpu:\n", zone->name);
69840 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69841 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69842 }
69843 #else
69844 -static void inline setup_usemap(struct pglist_data *pgdat,
69845 +static inline void setup_usemap(struct pglist_data *pgdat,
69846 struct zone *zone, unsigned long zonesize) {}
69847 #endif /* CONFIG_SPARSEMEM */
69848
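
The mm/page_alloc.c hunks above scrub pages as they are freed (sanitize_highpage() under CONFIG_PAX_MEMORY_SANITIZE) and, in exchange, skip prep_zero_page() for __GFP_ZERO allocations, since a recycled page is already clean. A toy page pool illustrating that trade; the pool, toy_alloc_zeroed() and toy_free() are made-up stand-ins for the buddy allocator.

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define TOY_PAGE_SIZE 64
    #define TOY_NR_PAGES  4

    static char pool[TOY_NR_PAGES][TOY_PAGE_SIZE];   /* zeroed at startup */
    static int free_stack[TOY_NR_PAGES] = { 0, 1, 2, 3 };
    static int free_top = TOY_NR_PAGES;

    static char *toy_alloc_zeroed(void)
    {
        /* No clearing here: with sanitize-on-free every pooled page is
         * already zero, so the __GFP_ZERO work can be dropped at alloc time. */
        assert(free_top > 0);
        return pool[free_stack[--free_top]];
    }

    static void toy_free(char *page)
    {
        /* Stand-in for sanitize_highpage(): scrub before returning to the pool. */
        memset(page, 0, TOY_PAGE_SIZE);
        free_stack[free_top++] = (int)((page - (char *)pool) / TOY_PAGE_SIZE);
    }

    int main(void)
    {
        char *p = toy_alloc_zeroed();
        strcpy(p, "stale secret");
        toy_free(p);                          /* contents gone immediately */

        char *q = toy_alloc_zeroed();         /* the same page comes back */
        printf("recycled page starts with byte %d\n", q[0]);
        return 0;
    }

Besides removing the duplicate zeroing, scrubbing at free time shortens the window in which sensitive data lingers in pages that nothing references any more.
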
69849 diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69850 --- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69851 +++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69852 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69853 static unsigned int pcpu_last_unit_cpu __read_mostly;
69854
69855 /* the address of the first chunk which starts with the kernel static area */
69856 -void *pcpu_base_addr __read_mostly;
69857 +void *pcpu_base_addr __read_only;
69858 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69859
69860 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69861 diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69862 --- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69863 +++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69864 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69865 /* page_table_lock to protect against threads */
69866 spin_lock(&mm->page_table_lock);
69867 if (likely(!vma->anon_vma)) {
69868 +
69869 +#ifdef CONFIG_PAX_SEGMEXEC
69870 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69871 +
69872 + if (vma_m) {
69873 + BUG_ON(vma_m->anon_vma);
69874 + vma_m->anon_vma = anon_vma;
69875 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69876 + }
69877 +#endif
69878 +
69879 vma->anon_vma = anon_vma;
69880 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69881 allocated = NULL;
69882 diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69883 --- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69884 +++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69885 @@ -31,7 +31,7 @@
69886 #include <linux/swap.h>
69887 #include <linux/ima.h>
69888
69889 -static struct vfsmount *shm_mnt;
69890 +struct vfsmount *shm_mnt;
69891
69892 #ifdef CONFIG_SHMEM
69893 /*
69894 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69895 goto unlock;
69896 }
69897 entry = shmem_swp_entry(info, index, NULL);
69898 + if (!entry)
69899 + goto unlock;
69900 if (entry->val) {
69901 /*
69902 * The more uptodate page coming down from a stacked
69903 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69904 struct vm_area_struct pvma;
69905 struct page *page;
69906
69907 + pax_track_stack();
69908 +
69909 spol = mpol_cond_copy(&mpol,
69910 mpol_shared_policy_lookup(&info->policy, idx));
69911
69912 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69913
69914 info = SHMEM_I(inode);
69915 inode->i_size = len-1;
69916 - if (len <= (char *)inode - (char *)info) {
69917 + if (len <= (char *)inode - (char *)info && len <= 64) {
69918 /* do it inline */
69919 memcpy(info, symname, len);
69920 inode->i_op = &shmem_symlink_inline_operations;
69921 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69922 int err = -ENOMEM;
69923
69924 /* Round up to L1_CACHE_BYTES to resist false sharing */
69925 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69926 - L1_CACHE_BYTES), GFP_KERNEL);
69927 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69928 if (!sbinfo)
69929 return -ENOMEM;
69930
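
The shmem_symlink() change above narrows the inline path: a target string is kept directly in the inode's info area only when it fits the available slack and, new with this patch, a hard 64-byte cap. A sketch of that decision with an assumed slack size; TOY_INLINE_SLACK is invented, whereas the kernel computes its bound from the inode layout as (char *)inode - (char *)info.

    #include <stdio.h>
    #include <string.h>

    #define TOY_INLINE_SLACK 96   /* assumed spare bytes ahead of the inode */
    #define TOY_INLINE_CAP   64   /* the hard cap the patch adds */

    /* Mirrors "if (len <= (char *)inode - (char *)info && len <= 64)". */
    static int store_inline(size_t len)
    {
        return len <= TOY_INLINE_SLACK && len <= TOY_INLINE_CAP;
    }

    int main(void)
    {
        const char *short_target = "/tmp/x";
        const char *long_target =
            "/a/rather/long/path/that/clearly/exceeds/sixty-four/bytes/of/storage";

        printf("%zu bytes -> %s\n", strlen(short_target) + 1,
               store_inline(strlen(short_target) + 1) ? "inline" : "separate page");
        printf("%zu bytes -> %s\n", strlen(long_target) + 1,
               store_inline(strlen(long_target) + 1) ? "inline" : "separate page");
        return 0;
    }
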
69931 diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69932 --- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69933 +++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69934 @@ -174,7 +174,7 @@
69935
69936 /* Legal flag mask for kmem_cache_create(). */
69937 #if DEBUG
69938 -# define CREATE_MASK (SLAB_RED_ZONE | \
69939 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69940 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69941 SLAB_CACHE_DMA | \
69942 SLAB_STORE_USER | \
69943 @@ -182,7 +182,7 @@
69944 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69945 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69946 #else
69947 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69948 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69949 SLAB_CACHE_DMA | \
69950 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69951 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69952 @@ -308,7 +308,7 @@ struct kmem_list3 {
69953 * Need this for bootstrapping a per node allocator.
69954 */
69955 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69956 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69957 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69958 #define CACHE_CACHE 0
69959 #define SIZE_AC MAX_NUMNODES
69960 #define SIZE_L3 (2 * MAX_NUMNODES)
69961 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69962 if ((x)->max_freeable < i) \
69963 (x)->max_freeable = i; \
69964 } while (0)
69965 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69966 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69967 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69968 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69969 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69970 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69971 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69972 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69973 #else
69974 #define STATS_INC_ACTIVE(x) do { } while (0)
69975 #define STATS_DEC_ACTIVE(x) do { } while (0)
69976 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69977 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69978 */
69979 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69980 - const struct slab *slab, void *obj)
69981 + const struct slab *slab, const void *obj)
69982 {
69983 u32 offset = (obj - slab->s_mem);
69984 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69985 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69986 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69987 sizes[INDEX_AC].cs_size,
69988 ARCH_KMALLOC_MINALIGN,
69989 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69990 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69991 NULL);
69992
69993 if (INDEX_AC != INDEX_L3) {
69994 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69995 kmem_cache_create(names[INDEX_L3].name,
69996 sizes[INDEX_L3].cs_size,
69997 ARCH_KMALLOC_MINALIGN,
69998 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69999 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70000 NULL);
70001 }
70002
70003 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
70004 sizes->cs_cachep = kmem_cache_create(names->name,
70005 sizes->cs_size,
70006 ARCH_KMALLOC_MINALIGN,
70007 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70008 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70009 NULL);
70010 }
70011 #ifdef CONFIG_ZONE_DMA
70012 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
70013 }
70014 /* cpu stats */
70015 {
70016 - unsigned long allochit = atomic_read(&cachep->allochit);
70017 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70018 - unsigned long freehit = atomic_read(&cachep->freehit);
70019 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70020 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70021 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70022 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70023 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70024
70025 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70026 allochit, allocmiss, freehit, freemiss);
70027 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
70028
70029 static int __init slab_proc_init(void)
70030 {
70031 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70032 + mode_t gr_mode = S_IRUGO;
70033 +
70034 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70035 + gr_mode = S_IRUSR;
70036 +#endif
70037 +
70038 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70039 #ifdef CONFIG_DEBUG_SLAB_LEAK
70040 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70041 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70042 #endif
70043 return 0;
70044 }
70045 module_init(slab_proc_init);
70046 #endif
70047
70048 +void check_object_size(const void *ptr, unsigned long n, bool to)
70049 +{
70050 +
70051 +#ifdef CONFIG_PAX_USERCOPY
70052 + struct page *page;
70053 + struct kmem_cache *cachep = NULL;
70054 + struct slab *slabp;
70055 + unsigned int objnr;
70056 + unsigned long offset;
70057 +
70058 + if (!n)
70059 + return;
70060 +
70061 + if (ZERO_OR_NULL_PTR(ptr))
70062 + goto report;
70063 +
70064 + if (!virt_addr_valid(ptr))
70065 + return;
70066 +
70067 + page = virt_to_head_page(ptr);
70068 +
70069 + if (!PageSlab(page)) {
70070 + if (object_is_on_stack(ptr, n) == -1)
70071 + goto report;
70072 + return;
70073 + }
70074 +
70075 + cachep = page_get_cache(page);
70076 + if (!(cachep->flags & SLAB_USERCOPY))
70077 + goto report;
70078 +
70079 + slabp = page_get_slab(page);
70080 + objnr = obj_to_index(cachep, slabp, ptr);
70081 + BUG_ON(objnr >= cachep->num);
70082 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70083 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70084 + return;
70085 +
70086 +report:
70087 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70088 +#endif
70089 +
70090 +}
70091 +EXPORT_SYMBOL(check_object_size);
70092 +
70093 /**
70094 * ksize - get the actual amount of memory allocated for a given object
70095 * @objp: Pointer to the object
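
check_object_size(), added to mm/slab.c above, is the PAX_USERCOPY hook: a copy to or from a slab object is allowed only when the cache is flagged SLAB_USERCOPY and the range lies inside a single object, i.e. offset <= obj_size && n <= obj_size - offset. The user-space sketch below exercises just that arithmetic against a fabricated object array; the cache geometry is assumed, and the stack and page-type checks of the real function are left out.

    #include <stdio.h>

    #define OBJ_SIZE 128          /* assumed object size of the toy cache */
    #define NR_OBJS  4

    static char slab[NR_OBJS][OBJ_SIZE];

    /* Model of the final test in check_object_size(): the copy must stay
     * entirely within the object that ptr points into. */
    static int copy_ok(const char *ptr, size_t n)
    {
        size_t byte_off = (size_t)(ptr - (const char *)slab);
        size_t offset;

        if (byte_off >= sizeof(slab))
            return 0;                         /* not from this toy cache */
        offset = byte_off % OBJ_SIZE;         /* offset inside its object */
        return offset <= OBJ_SIZE && n <= OBJ_SIZE - offset;
    }

    int main(void)
    {
        char *obj = slab[1];

        printf("%d\n", copy_ok(obj, OBJ_SIZE));    /* 1: the whole object   */
        printf("%d\n", copy_ok(obj + 100, 28));    /* 1: stays inside       */
        printf("%d\n", copy_ok(obj + 100, 64));    /* 0: runs into slab[2]  */
        return 0;
    }

A copy that starts inside one object but would spill into its neighbour fails the second condition, which is precisely the slab overflow the hook reports via pax_report_usercopy().
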
70096 diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
70097 --- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70098 +++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70099 @@ -29,7 +29,7 @@
70100 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70101 * alloc_pages() directly, allocating compound pages so the page order
70102 * does not have to be separately tracked, and also stores the exact
70103 - * allocation size in page->private so that it can be used to accurately
70104 + * allocation size in slob_page->size so that it can be used to accurately
70105 * provide ksize(). These objects are detected in kfree() because slob_page()
70106 * is false for them.
70107 *
70108 @@ -58,6 +58,7 @@
70109 */
70110
70111 #include <linux/kernel.h>
70112 +#include <linux/sched.h>
70113 #include <linux/slab.h>
70114 #include <linux/mm.h>
70115 #include <linux/swap.h> /* struct reclaim_state */
70116 @@ -100,7 +101,8 @@ struct slob_page {
70117 unsigned long flags; /* mandatory */
70118 atomic_t _count; /* mandatory */
70119 slobidx_t units; /* free units left in page */
70120 - unsigned long pad[2];
70121 + unsigned long pad[1];
70122 + unsigned long size; /* size when >=PAGE_SIZE */
70123 slob_t *free; /* first free slob_t in page */
70124 struct list_head list; /* linked list of free pages */
70125 };
70126 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70127 */
70128 static inline int is_slob_page(struct slob_page *sp)
70129 {
70130 - return PageSlab((struct page *)sp);
70131 + return PageSlab((struct page *)sp) && !sp->size;
70132 }
70133
70134 static inline void set_slob_page(struct slob_page *sp)
70135 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70136
70137 static inline struct slob_page *slob_page(const void *addr)
70138 {
70139 - return (struct slob_page *)virt_to_page(addr);
70140 + return (struct slob_page *)virt_to_head_page(addr);
70141 }
70142
70143 /*
70144 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70145 /*
70146 * Return the size of a slob block.
70147 */
70148 -static slobidx_t slob_units(slob_t *s)
70149 +static slobidx_t slob_units(const slob_t *s)
70150 {
70151 if (s->units > 0)
70152 return s->units;
70153 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70154 /*
70155 * Return the next free slob block pointer after this one.
70156 */
70157 -static slob_t *slob_next(slob_t *s)
70158 +static slob_t *slob_next(const slob_t *s)
70159 {
70160 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70161 slobidx_t next;
70162 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70163 /*
70164 * Returns true if s is the last free block in its page.
70165 */
70166 -static int slob_last(slob_t *s)
70167 +static int slob_last(const slob_t *s)
70168 {
70169 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70170 }
70171 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70172 if (!page)
70173 return NULL;
70174
70175 + set_slob_page(page);
70176 return page_address(page);
70177 }
70178
70179 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70180 if (!b)
70181 return NULL;
70182 sp = slob_page(b);
70183 - set_slob_page(sp);
70184
70185 spin_lock_irqsave(&slob_lock, flags);
70186 sp->units = SLOB_UNITS(PAGE_SIZE);
70187 sp->free = b;
70188 + sp->size = 0;
70189 INIT_LIST_HEAD(&sp->list);
70190 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70191 set_slob_page_free(sp, slob_list);
70192 @@ -475,10 +478,9 @@ out:
70193 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70194 #endif
70195
70196 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70197 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70198 {
70199 - unsigned int *m;
70200 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70201 + slob_t *m;
70202 void *ret;
70203
70204 lockdep_trace_alloc(gfp);
70205 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70206
70207 if (!m)
70208 return NULL;
70209 - *m = size;
70210 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70211 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70212 + m[0].units = size;
70213 + m[1].units = align;
70214 ret = (void *)m + align;
70215
70216 trace_kmalloc_node(_RET_IP_, ret,
70217 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70218
70219 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70220 if (ret) {
70221 - struct page *page;
70222 - page = virt_to_page(ret);
70223 - page->private = size;
70224 + struct slob_page *sp;
70225 + sp = slob_page(ret);
70226 + sp->size = size;
70227 }
70228
70229 trace_kmalloc_node(_RET_IP_, ret,
70230 size, PAGE_SIZE << order, gfp, node);
70231 }
70232
70233 - kmemleak_alloc(ret, size, 1, gfp);
70234 + return ret;
70235 +}
70236 +
70237 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70238 +{
70239 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70240 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70241 +
70242 + if (!ZERO_OR_NULL_PTR(ret))
70243 + kmemleak_alloc(ret, size, 1, gfp);
70244 return ret;
70245 }
70246 EXPORT_SYMBOL(__kmalloc_node);
70247 @@ -528,13 +542,88 @@ void kfree(const void *block)
70248 sp = slob_page(block);
70249 if (is_slob_page(sp)) {
70250 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70251 - unsigned int *m = (unsigned int *)(block - align);
70252 - slob_free(m, *m + align);
70253 - } else
70254 + slob_t *m = (slob_t *)(block - align);
70255 + slob_free(m, m[0].units + align);
70256 + } else {
70257 + clear_slob_page(sp);
70258 + free_slob_page(sp);
70259 + sp->size = 0;
70260 put_page(&sp->page);
70261 + }
70262 }
70263 EXPORT_SYMBOL(kfree);
70264
70265 +void check_object_size(const void *ptr, unsigned long n, bool to)
70266 +{
70267 +
70268 +#ifdef CONFIG_PAX_USERCOPY
70269 + struct slob_page *sp;
70270 + const slob_t *free;
70271 + const void *base;
70272 + unsigned long flags;
70273 +
70274 + if (!n)
70275 + return;
70276 +
70277 + if (ZERO_OR_NULL_PTR(ptr))
70278 + goto report;
70279 +
70280 + if (!virt_addr_valid(ptr))
70281 + return;
70282 +
70283 + sp = slob_page(ptr);
70284 + if (!PageSlab((struct page*)sp)) {
70285 + if (object_is_on_stack(ptr, n) == -1)
70286 + goto report;
70287 + return;
70288 + }
70289 +
70290 + if (sp->size) {
70291 + base = page_address(&sp->page);
70292 + if (base <= ptr && n <= sp->size - (ptr - base))
70293 + return;
70294 + goto report;
70295 + }
70296 +
70297 + /* some tricky double walking to find the chunk */
70298 + spin_lock_irqsave(&slob_lock, flags);
70299 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70300 + free = sp->free;
70301 +
70302 + while (!slob_last(free) && (void *)free <= ptr) {
70303 + base = free + slob_units(free);
70304 + free = slob_next(free);
70305 + }
70306 +
70307 + while (base < (void *)free) {
70308 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70309 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70310 + int offset;
70311 +
70312 + if (ptr < base + align)
70313 + break;
70314 +
70315 + offset = ptr - base - align;
70316 + if (offset >= m) {
70317 + base += size;
70318 + continue;
70319 + }
70320 +
70321 + if (n > m - offset)
70322 + break;
70323 +
70324 + spin_unlock_irqrestore(&slob_lock, flags);
70325 + return;
70326 + }
70327 +
70328 + spin_unlock_irqrestore(&slob_lock, flags);
70329 +report:
70330 + pax_report_usercopy(ptr, n, to, NULL);
70331 +#endif
70332 +
70333 +}
70334 +EXPORT_SYMBOL(check_object_size);
70335 +
70336 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70337 size_t ksize(const void *block)
70338 {
70339 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
70340 sp = slob_page(block);
70341 if (is_slob_page(sp)) {
70342 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70343 - unsigned int *m = (unsigned int *)(block - align);
70344 - return SLOB_UNITS(*m) * SLOB_UNIT;
70345 + slob_t *m = (slob_t *)(block - align);
70346 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70347 } else
70348 - return sp->page.private;
70349 + return sp->size;
70350 }
70351 EXPORT_SYMBOL(ksize);
70352
70353 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70354 {
70355 struct kmem_cache *c;
70356
70357 +#ifdef CONFIG_PAX_USERCOPY
70358 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70359 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70360 +#else
70361 c = slob_alloc(sizeof(struct kmem_cache),
70362 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70363 +#endif
70364
70365 if (c) {
70366 c->name = name;
70367 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70368 {
70369 void *b;
70370
70371 +#ifdef CONFIG_PAX_USERCOPY
70372 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70373 +#else
70374 if (c->size < PAGE_SIZE) {
70375 b = slob_alloc(c->size, flags, c->align, node);
70376 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70377 SLOB_UNITS(c->size) * SLOB_UNIT,
70378 flags, node);
70379 } else {
70380 + struct slob_page *sp;
70381 +
70382 b = slob_new_pages(flags, get_order(c->size), node);
70383 + sp = slob_page(b);
70384 + sp->size = c->size;
70385 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70386 PAGE_SIZE << get_order(c->size),
70387 flags, node);
70388 }
70389 +#endif
70390
70391 if (c->ctor)
70392 c->ctor(b);
70393 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70394
70395 static void __kmem_cache_free(void *b, int size)
70396 {
70397 - if (size < PAGE_SIZE)
70398 + struct slob_page *sp = slob_page(b);
70399 +
70400 + if (is_slob_page(sp))
70401 slob_free(b, size);
70402 - else
70403 + else {
70404 + clear_slob_page(sp);
70405 + free_slob_page(sp);
70406 + sp->size = 0;
70407 slob_free_pages(b, get_order(size));
70408 + }
70409 }
70410
70411 static void kmem_rcu_free(struct rcu_head *head)
70412 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70413
70414 void kmem_cache_free(struct kmem_cache *c, void *b)
70415 {
70416 + int size = c->size;
70417 +
70418 +#ifdef CONFIG_PAX_USERCOPY
70419 + if (size + c->align < PAGE_SIZE) {
70420 + size += c->align;
70421 + b -= c->align;
70422 + }
70423 +#endif
70424 +
70425 kmemleak_free_recursive(b, c->flags);
70426 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70427 struct slob_rcu *slob_rcu;
70428 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70429 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70430 INIT_RCU_HEAD(&slob_rcu->head);
70431 - slob_rcu->size = c->size;
70432 + slob_rcu->size = size;
70433 call_rcu(&slob_rcu->head, kmem_rcu_free);
70434 } else {
70435 - __kmem_cache_free(b, c->size);
70436 + __kmem_cache_free(b, size);
70437 }
70438
70439 +#ifdef CONFIG_PAX_USERCOPY
70440 + trace_kfree(_RET_IP_, b);
70441 +#else
70442 trace_kmem_cache_free(_RET_IP_, b);
70443 +#endif
70444 +
70445 }
70446 EXPORT_SYMBOL(kmem_cache_free);
70447
70448 diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70449 --- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70450 +++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70451 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
70452 if (!t->addr)
70453 return;
70454
70455 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70456 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70457 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70458 }
70459
70460 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70461
70462 page = virt_to_head_page(x);
70463
70464 + BUG_ON(!PageSlab(page));
70465 +
70466 slab_free(s, page, x, _RET_IP_);
70467
70468 trace_kmem_cache_free(_RET_IP_, x);
70469 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
70470 * Merge control. If this is set then no merging of slab caches will occur.
70471 * (Could be removed. This was introduced to pacify the merge skeptics.)
70472 */
70473 -static int slub_nomerge;
70474 +static int slub_nomerge = 1;
70475
70476 /*
70477 * Calculate the order of allocation given an slab object size.
70478 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70479 * list to avoid pounding the page allocator excessively.
70480 */
70481 set_min_partial(s, ilog2(s->size));
70482 - s->refcount = 1;
70483 + atomic_set(&s->refcount, 1);
70484 #ifdef CONFIG_NUMA
70485 s->remote_node_defrag_ratio = 1000;
70486 #endif
70487 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70488 void kmem_cache_destroy(struct kmem_cache *s)
70489 {
70490 down_write(&slub_lock);
70491 - s->refcount--;
70492 - if (!s->refcount) {
70493 + if (atomic_dec_and_test(&s->refcount)) {
70494 list_del(&s->list);
70495 up_write(&slub_lock);
70496 if (kmem_cache_close(s)) {
70497 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70498 __setup("slub_nomerge", setup_slub_nomerge);
70499
70500 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70501 - const char *name, int size, gfp_t gfp_flags)
70502 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70503 {
70504 - unsigned int flags = 0;
70505 -
70506 if (gfp_flags & SLUB_DMA)
70507 - flags = SLAB_CACHE_DMA;
70508 + flags |= SLAB_CACHE_DMA;
70509
70510 /*
70511 * This function is called with IRQs disabled during early-boot on
70512 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70513 EXPORT_SYMBOL(__kmalloc_node);
70514 #endif
70515
70516 +void check_object_size(const void *ptr, unsigned long n, bool to)
70517 +{
70518 +
70519 +#ifdef CONFIG_PAX_USERCOPY
70520 + struct page *page;
70521 + struct kmem_cache *s = NULL;
70522 + unsigned long offset;
70523 +
70524 + if (!n)
70525 + return;
70526 +
70527 + if (ZERO_OR_NULL_PTR(ptr))
70528 + goto report;
70529 +
70530 + if (!virt_addr_valid(ptr))
70531 + return;
70532 +
70533 + page = get_object_page(ptr);
70534 +
70535 + if (!page) {
70536 + if (object_is_on_stack(ptr, n) == -1)
70537 + goto report;
70538 + return;
70539 + }
70540 +
70541 + s = page->slab;
70542 + if (!(s->flags & SLAB_USERCOPY))
70543 + goto report;
70544 +
70545 + offset = (ptr - page_address(page)) % s->size;
70546 + if (offset <= s->objsize && n <= s->objsize - offset)
70547 + return;
70548 +
70549 +report:
70550 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70551 +#endif
70552 +
70553 +}
70554 +EXPORT_SYMBOL(check_object_size);
70555 +
70556 size_t ksize(const void *object)
70557 {
70558 struct page *page;
70559 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70560 * kmem_cache_open for slab_state == DOWN.
70561 */
70562 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70563 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
70564 - kmalloc_caches[0].refcount = -1;
70565 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70566 + atomic_set(&kmalloc_caches[0].refcount, -1);
70567 caches++;
70568
70569 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70570 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70571 /* Caches that are not of the two-to-the-power-of size */
70572 if (KMALLOC_MIN_SIZE <= 32) {
70573 create_kmalloc_cache(&kmalloc_caches[1],
70574 - "kmalloc-96", 96, GFP_NOWAIT);
70575 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70576 caches++;
70577 }
70578 if (KMALLOC_MIN_SIZE <= 64) {
70579 create_kmalloc_cache(&kmalloc_caches[2],
70580 - "kmalloc-192", 192, GFP_NOWAIT);
70581 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70582 caches++;
70583 }
70584
70585 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70586 create_kmalloc_cache(&kmalloc_caches[i],
70587 - "kmalloc", 1 << i, GFP_NOWAIT);
70588 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70589 caches++;
70590 }
70591
70592 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70593 /*
70594 * We may have set a slab to be unmergeable during bootstrap.
70595 */
70596 - if (s->refcount < 0)
70597 + if (atomic_read(&s->refcount) < 0)
70598 return 1;
70599
70600 return 0;
70601 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70602 if (s) {
70603 int cpu;
70604
70605 - s->refcount++;
70606 + atomic_inc(&s->refcount);
70607 /*
70608 * Adjust the object sizes so that we clear
70609 * the complete object on kzalloc.
70610 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70611
70612 if (sysfs_slab_alias(s, name)) {
70613 down_write(&slub_lock);
70614 - s->refcount--;
70615 + atomic_dec(&s->refcount);
70616 up_write(&slub_lock);
70617 goto err;
70618 }
70619 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70620
70621 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70622 {
70623 - return sprintf(buf, "%d\n", s->refcount - 1);
70624 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70625 }
70626 SLAB_ATTR_RO(aliases);
70627
70628 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70629 kfree(s);
70630 }
70631
70632 -static struct sysfs_ops slab_sysfs_ops = {
70633 +static const struct sysfs_ops slab_sysfs_ops = {
70634 .show = slab_attr_show,
70635 .store = slab_attr_store,
70636 };
70637 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70638 return 0;
70639 }
70640
70641 -static struct kset_uevent_ops slab_uevent_ops = {
70642 +static const struct kset_uevent_ops slab_uevent_ops = {
70643 .filter = uevent_filter,
70644 };
70645
70646 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
70647
70648 static int __init slab_proc_init(void)
70649 {
70650 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70651 + mode_t gr_mode = S_IRUGO;
70652 +
70653 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70654 + gr_mode = S_IRUSR;
70655 +#endif
70656 +
70657 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70658 return 0;
70659 }
70660 module_init(slab_proc_init);
70661 diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70662 --- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70663 +++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70664 @@ -30,6 +30,7 @@
70665 #include <linux/notifier.h>
70666 #include <linux/backing-dev.h>
70667 #include <linux/memcontrol.h>
70668 +#include <linux/hugetlb.h>
70669
70670 #include "internal.h"
70671
70672 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70673 compound_page_dtor *dtor;
70674
70675 dtor = get_compound_page_dtor(page);
70676 + if (!PageHuge(page))
70677 + BUG_ON(dtor != free_compound_page);
70678 (*dtor)(page);
70679 }
70680 }
70681 diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70682 --- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70683 +++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70684 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70685 void arch_pick_mmap_layout(struct mm_struct *mm)
70686 {
70687 mm->mmap_base = TASK_UNMAPPED_BASE;
70688 +
70689 +#ifdef CONFIG_PAX_RANDMMAP
70690 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70691 + mm->mmap_base += mm->delta_mmap;
70692 +#endif
70693 +
70694 mm->get_unmapped_area = arch_get_unmapped_area;
70695 mm->unmap_area = arch_unmap_area;
70696 }
70697 diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70698 --- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70699 +++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70700 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70701
70702 pte = pte_offset_kernel(pmd, addr);
70703 do {
70704 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70705 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70706 +
70707 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70708 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70709 + BUG_ON(!pte_exec(*pte));
70710 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70711 + continue;
70712 + }
70713 +#endif
70714 +
70715 + {
70716 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70717 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70718 + }
70719 } while (pte++, addr += PAGE_SIZE, addr != end);
70720 }
70721
70722 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70723 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70724 {
70725 pte_t *pte;
70726 + int ret = -ENOMEM;
70727
70728 /*
70729 * nr is a running index into the array which helps higher level
70730 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70731 pte = pte_alloc_kernel(pmd, addr);
70732 if (!pte)
70733 return -ENOMEM;
70734 +
70735 + pax_open_kernel();
70736 do {
70737 struct page *page = pages[*nr];
70738
70739 - if (WARN_ON(!pte_none(*pte)))
70740 - return -EBUSY;
70741 - if (WARN_ON(!page))
70742 - return -ENOMEM;
70743 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70744 + if (!(pgprot_val(prot) & _PAGE_NX))
70745 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70746 + else
70747 +#endif
70748 +
70749 + if (WARN_ON(!pte_none(*pte))) {
70750 + ret = -EBUSY;
70751 + goto out;
70752 + }
70753 + if (WARN_ON(!page)) {
70754 + ret = -ENOMEM;
70755 + goto out;
70756 + }
70757 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70758 (*nr)++;
70759 } while (pte++, addr += PAGE_SIZE, addr != end);
70760 - return 0;
70761 + ret = 0;
70762 +out:
70763 + pax_close_kernel();
70764 + return ret;
70765 }
70766
70767 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70768 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70769 * and fall back on vmalloc() if that fails. Others
70770 * just put it in the vmalloc space.
70771 */
70772 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70773 +#ifdef CONFIG_MODULES
70774 +#ifdef MODULES_VADDR
70775 unsigned long addr = (unsigned long)x;
70776 if (addr >= MODULES_VADDR && addr < MODULES_END)
70777 return 1;
70778 #endif
70779 +
70780 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70781 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70782 + return 1;
70783 +#endif
70784 +
70785 +#endif
70786 +
70787 return is_vmalloc_addr(x);
70788 }
70789
70790 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70791
70792 if (!pgd_none(*pgd)) {
70793 pud_t *pud = pud_offset(pgd, addr);
70794 +#ifdef CONFIG_X86
70795 + if (!pud_large(*pud))
70796 +#endif
70797 if (!pud_none(*pud)) {
70798 pmd_t *pmd = pmd_offset(pud, addr);
70799 +#ifdef CONFIG_X86
70800 + if (!pmd_large(*pmd))
70801 +#endif
70802 if (!pmd_none(*pmd)) {
70803 pte_t *ptep, pte;
70804
70805 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70806 struct rb_node *tmp;
70807
70808 while (*p) {
70809 - struct vmap_area *tmp;
70810 + struct vmap_area *varea;
70811
70812 parent = *p;
70813 - tmp = rb_entry(parent, struct vmap_area, rb_node);
70814 - if (va->va_start < tmp->va_end)
70815 + varea = rb_entry(parent, struct vmap_area, rb_node);
70816 + if (va->va_start < varea->va_end)
70817 p = &(*p)->rb_left;
70818 - else if (va->va_end > tmp->va_start)
70819 + else if (va->va_end > varea->va_start)
70820 p = &(*p)->rb_right;
70821 else
70822 BUG();
70823 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70824 struct vm_struct *area;
70825
70826 BUG_ON(in_interrupt());
70827 +
70828 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70829 + if (flags & VM_KERNEXEC) {
70830 + if (start != VMALLOC_START || end != VMALLOC_END)
70831 + return NULL;
70832 + start = (unsigned long)MODULES_EXEC_VADDR;
70833 + end = (unsigned long)MODULES_EXEC_END;
70834 + }
70835 +#endif
70836 +
70837 if (flags & VM_IOREMAP) {
70838 int bit = fls(size);
70839
70840 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70841 if (count > totalram_pages)
70842 return NULL;
70843
70844 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70845 + if (!(pgprot_val(prot) & _PAGE_NX))
70846 + flags |= VM_KERNEXEC;
70847 +#endif
70848 +
70849 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70850 __builtin_return_address(0));
70851 if (!area)
70852 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70853 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70854 return NULL;
70855
70856 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70857 + if (!(pgprot_val(prot) & _PAGE_NX))
70858 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70859 + node, gfp_mask, caller);
70860 + else
70861 +#endif
70862 +
70863 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70864 VMALLOC_END, node, gfp_mask, caller);
70865
70866 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70867 return addr;
70868 }
70869
70870 +#undef __vmalloc
70871 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70872 {
70873 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70874 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70875 * For tight control over page level allocator and protection flags
70876 * use __vmalloc() instead.
70877 */
70878 +#undef vmalloc
70879 void *vmalloc(unsigned long size)
70880 {
70881 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70882 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70883 * The resulting memory area is zeroed so it can be mapped to userspace
70884 * without leaking data.
70885 */
70886 +#undef vmalloc_user
70887 void *vmalloc_user(unsigned long size)
70888 {
70889 struct vm_struct *area;
70890 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70891 * For tight control over page level allocator and protection flags
70892 * use __vmalloc() instead.
70893 */
70894 +#undef vmalloc_node
70895 void *vmalloc_node(unsigned long size, int node)
70896 {
70897 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70898 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70899 * For tight control over page level allocator and protection flags
70900 * use __vmalloc() instead.
70901 */
70902 -
70903 +#undef vmalloc_exec
70904 void *vmalloc_exec(unsigned long size)
70905 {
70906 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70907 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70908 -1, __builtin_return_address(0));
70909 }
70910
70911 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70912 * Allocate enough 32bit PA addressable pages to cover @size from the
70913 * page level allocator and map them into contiguous kernel virtual space.
70914 */
70915 +#undef vmalloc_32
70916 void *vmalloc_32(unsigned long size)
70917 {
70918 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70919 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70920 * The resulting memory area is 32bit addressable and zeroed so it can be
70921 * mapped to userspace without leaking data.
70922 */
70923 +#undef vmalloc_32_user
70924 void *vmalloc_32_user(unsigned long size)
70925 {
70926 struct vm_struct *area;
70927 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70928 unsigned long uaddr = vma->vm_start;
70929 unsigned long usize = vma->vm_end - vma->vm_start;
70930
70931 + BUG_ON(vma->vm_mirror);
70932 +
70933 if ((PAGE_SIZE-1) & (unsigned long)addr)
70934 return -EINVAL;
70935
70936 diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70937 --- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70938 +++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70939 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70940 *
70941 * vm_stat contains the global counters
70942 */
70943 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70944 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70945 EXPORT_SYMBOL(vm_stat);
70946
70947 #ifdef CONFIG_SMP
70948 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70949 v = p->vm_stat_diff[i];
70950 p->vm_stat_diff[i] = 0;
70951 local_irq_restore(flags);
70952 - atomic_long_add(v, &zone->vm_stat[i]);
70953 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70954 global_diff[i] += v;
70955 #ifdef CONFIG_NUMA
70956 /* 3 seconds idle till flush */
70957 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70958
70959 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70960 if (global_diff[i])
70961 - atomic_long_add(global_diff[i], &vm_stat[i]);
70962 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70963 }
70964
70965 #endif
70966 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70967 start_cpu_timer(cpu);
70968 #endif
70969 #ifdef CONFIG_PROC_FS
70970 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70971 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70972 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70973 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70974 + {
70975 + mode_t gr_mode = S_IRUGO;
70976 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70977 + gr_mode = S_IRUSR;
70978 +#endif
70979 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70980 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70981 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70982 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70983 +#else
70984 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70985 +#endif
70986 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70987 + }
70988 #endif
70989 return 0;
70990 }
70991 diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70992 --- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70993 +++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70994 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70995 err = -EPERM;
70996 if (!capable(CAP_NET_ADMIN))
70997 break;
70998 - if ((args.u.name_type >= 0) &&
70999 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71000 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71001 struct vlan_net *vn;
71002
71003 vn = net_generic(net, vlan_net_id);
71004 diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
71005 --- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
71006 +++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
71007 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
71008 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71009 return 1;
71010 atm_return(vcc,truesize);
71011 - atomic_inc(&vcc->stats->rx_drop);
71012 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71013 return 0;
71014 }
71015
71016 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
71017 }
71018 }
71019 atm_return(vcc,guess);
71020 - atomic_inc(&vcc->stats->rx_drop);
71021 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71022 return NULL;
71023 }
71024
71025 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
71026
71027 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71028 {
71029 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71030 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71031 __SONET_ITEMS
71032 #undef __HANDLE_ITEM
71033 }
71034 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
71035
71036 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71037 {
71038 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
71039 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71040 __SONET_ITEMS
71041 #undef __HANDLE_ITEM
71042 }
71043 diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
71044 --- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
71045 +++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
71046 @@ -48,7 +48,7 @@ struct lane2_ops {
71047 const u8 *tlvs, u32 sizeoftlvs);
71048 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71049 const u8 *tlvs, u32 sizeoftlvs);
71050 -};
71051 +} __no_const;
71052
71053 /*
71054 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71055 diff -urNp linux-2.6.32.45/net/atm/mpc.c linux-2.6.32.45/net/atm/mpc.c
71056 --- linux-2.6.32.45/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
71057 +++ linux-2.6.32.45/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
71058 @@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
71059 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
71060 else {
71061 mpc->old_ops = dev->netdev_ops;
71062 - mpc->new_ops = *mpc->old_ops;
71063 - mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71064 + memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
71065 + *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71066 dev->netdev_ops = &mpc->new_ops;
71067 }
71068 }
71069 diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
71070 --- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
71071 +++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
71072 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
71073 struct timeval now;
71074 struct k_message msg;
71075
71076 + pax_track_stack();
71077 +
71078 do_gettimeofday(&now);
71079
71080 write_lock_irq(&client->egress_lock);
71081 diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
71082 --- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
71083 +++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
71084 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
71085 const struct k_atm_aal_stats *stats)
71086 {
71087 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71088 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71089 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71090 - atomic_read(&stats->rx_drop));
71091 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71092 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71093 + atomic_read_unchecked(&stats->rx_drop));
71094 }
71095
71096 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71097 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71098 {
71099 struct sock *sk = sk_atm(vcc);
71100
71101 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71102 + seq_printf(seq, "%p ", NULL);
71103 +#else
71104 seq_printf(seq, "%p ", vcc);
71105 +#endif
71106 +
71107 if (!vcc->dev)
71108 seq_printf(seq, "Unassigned ");
71109 else
71110 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71111 {
71112 if (!vcc->dev)
71113 seq_printf(seq, sizeof(void *) == 4 ?
71114 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71115 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71116 +#else
71117 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71118 +#endif
71119 else
71120 seq_printf(seq, "%3d %3d %5d ",
71121 vcc->dev->number, vcc->vpi, vcc->vci);
71122 diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
71123 --- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71124 +++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71125 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71126 static void copy_aal_stats(struct k_atm_aal_stats *from,
71127 struct atm_aal_stats *to)
71128 {
71129 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71130 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71131 __AAL_STAT_ITEMS
71132 #undef __HANDLE_ITEM
71133 }
71134 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71135 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71136 struct atm_aal_stats *to)
71137 {
71138 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71139 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71140 __AAL_STAT_ITEMS
71141 #undef __HANDLE_ITEM
71142 }
71143 diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
71144 --- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71145 +++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71146 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71147 err = -ENOTCONN;
71148 break;
71149 }
71150 -
71151 + memset(&cinfo, 0, sizeof(cinfo));
71152 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71153 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71154
71155 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71156
71157 /* Reject if config buffer is too small. */
71158 len = cmd_len - sizeof(*req);
71159 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71160 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71161 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71162 l2cap_build_conf_rsp(sk, rsp,
71163 L2CAP_CONF_REJECT, flags), rsp);
71164 diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
71165 --- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71166 +++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71167 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71168
71169 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71170
71171 + memset(&cinfo, 0, sizeof(cinfo));
71172 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71173 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71174
71175 diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
71176 --- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71177 +++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71178 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71179
71180 #ifdef CONFIG_SYSFS
71181 /* br_sysfs_if.c */
71182 -extern struct sysfs_ops brport_sysfs_ops;
71183 +extern const struct sysfs_ops brport_sysfs_ops;
71184 extern int br_sysfs_addif(struct net_bridge_port *p);
71185
71186 /* br_sysfs_br.c */
71187 diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
71188 --- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71189 +++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71190 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71191 char *envp[] = { NULL };
71192
71193 if (br->stp_enabled == BR_USER_STP) {
71194 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71195 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71196 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71197 br->dev->name, r);
71198
71199 diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71200 --- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71201 +++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71202 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71203 return ret;
71204 }
71205
71206 -struct sysfs_ops brport_sysfs_ops = {
71207 +const struct sysfs_ops brport_sysfs_ops = {
71208 .show = brport_show,
71209 .store = brport_store,
71210 };
71211 diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71212 --- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71213 +++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71214 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71215 unsigned int entries_size, nentries;
71216 char *entries;
71217
71218 + pax_track_stack();
71219 +
71220 if (cmd == EBT_SO_GET_ENTRIES) {
71221 entries_size = t->private->entries_size;
71222 nentries = t->private->nentries;
71223 diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71224 --- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71225 +++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71226 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71227 struct bcm_sock *bo = bcm_sk(sk);
71228 struct bcm_op *op;
71229
71230 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71231 + seq_printf(m, ">>> socket %p", NULL);
71232 + seq_printf(m, " / sk %p", NULL);
71233 + seq_printf(m, " / bo %p", NULL);
71234 +#else
71235 seq_printf(m, ">>> socket %p", sk->sk_socket);
71236 seq_printf(m, " / sk %p", sk);
71237 seq_printf(m, " / bo %p", bo);
71238 +#endif
71239 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71240 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71241 seq_printf(m, " <<<\n");
71242 diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71243 --- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71244 +++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71245 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71246 if (no_module && capable(CAP_NET_ADMIN))
71247 no_module = request_module("netdev-%s", name);
71248 if (no_module && capable(CAP_SYS_MODULE)) {
71249 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71250 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71251 +#else
71252 if (!request_module("%s", name))
71253 pr_err("Loading kernel module for a network device "
71254 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71255 "instead\n", name);
71256 +#endif
71257 }
71258 }
71259 EXPORT_SYMBOL(dev_load);
71260 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71261
71262 struct dev_gso_cb {
71263 void (*destructor)(struct sk_buff *skb);
71264 -};
71265 +} __no_const;
71266
71267 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71268
71269 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71270 }
71271 EXPORT_SYMBOL(netif_rx_ni);
71272
71273 -static void net_tx_action(struct softirq_action *h)
71274 +static void net_tx_action(void)
71275 {
71276 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71277
71278 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71279 EXPORT_SYMBOL(netif_napi_del);
71280
71281
71282 -static void net_rx_action(struct softirq_action *h)
71283 +static void net_rx_action(void)
71284 {
71285 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71286 unsigned long time_limit = jiffies + 2;
71287 diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71288 --- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71289 +++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71290 @@ -35,11 +35,11 @@ struct flow_cache_entry {
71291 atomic_t *object_ref;
71292 };
71293
71294 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71295 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71296
71297 static u32 flow_hash_shift;
71298 #define flow_hash_size (1 << flow_hash_shift)
71299 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71300 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71301
71302 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71303
71304 @@ -52,7 +52,7 @@ struct flow_percpu_info {
71305 u32 hash_rnd;
71306 int count;
71307 };
71308 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71309 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71310
71311 #define flow_hash_rnd_recalc(cpu) \
71312 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71313 @@ -69,7 +69,7 @@ struct flow_flush_info {
71314 atomic_t cpuleft;
71315 struct completion completion;
71316 };
71317 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71318 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71319
71320 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71321
71322 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71323 if (fle->family == family &&
71324 fle->dir == dir &&
71325 flow_key_compare(key, &fle->key) == 0) {
71326 - if (fle->genid == atomic_read(&flow_cache_genid)) {
71327 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71328 void *ret = fle->object;
71329
71330 if (ret)
71331 @@ -228,7 +228,7 @@ nocache:
71332 err = resolver(net, key, family, dir, &obj, &obj_ref);
71333
71334 if (fle && !err) {
71335 - fle->genid = atomic_read(&flow_cache_genid);
71336 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71337
71338 if (fle->object)
71339 atomic_dec(fle->object_ref);
71340 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71341
71342 fle = flow_table(cpu)[i];
71343 for (; fle; fle = fle->next) {
71344 - unsigned genid = atomic_read(&flow_cache_genid);
71345 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71346
71347 if (!fle->object || fle->genid == genid)
71348 continue;
71349 diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71350 --- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71351 +++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71352 @@ -57,7 +57,7 @@ struct rtnl_link
71353 {
71354 rtnl_doit_func doit;
71355 rtnl_dumpit_func dumpit;
71356 -};
71357 +} __no_const;
71358
71359 static DEFINE_MUTEX(rtnl_mutex);
71360
71361 diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71362 --- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71363 +++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71364 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71365 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71366
71367 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71368 - __be16 dport)
71369 + __be16 dport)
71370 {
71371 u32 secret[MD5_MESSAGE_BYTES / 4];
71372 u32 hash[MD5_DIGEST_WORDS];
71373 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71374 secret[i] = net_secret[i];
71375
71376 md5_transform(hash, secret);
71377 -
71378 return hash[0];
71379 }
71380 #endif
71381 diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71382 --- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71383 +++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71384 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71385 struct sk_buff *frag_iter;
71386 struct sock *sk = skb->sk;
71387
71388 + pax_track_stack();
71389 +
71390 /*
71391 * __skb_splice_bits() only fails if the output has no room left,
71392 * so no point in going over the frag_list for the error case.
71393 diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71394 --- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71395 +++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71396 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71397 break;
71398
71399 case SO_PEERCRED:
71400 + {
71401 + struct ucred peercred;
71402 if (len > sizeof(sk->sk_peercred))
71403 len = sizeof(sk->sk_peercred);
71404 - if (copy_to_user(optval, &sk->sk_peercred, len))
71405 + peercred = sk->sk_peercred;
71406 + if (copy_to_user(optval, &peercred, len))
71407 return -EFAULT;
71408 goto lenout;
71409 + }
71410
71411 case SO_PEERNAME:
71412 {
71413 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71414 */
71415 smp_wmb();
71416 atomic_set(&sk->sk_refcnt, 1);
71417 - atomic_set(&sk->sk_drops, 0);
71418 + atomic_set_unchecked(&sk->sk_drops, 0);
71419 }
71420 EXPORT_SYMBOL(sock_init_data);
71421
71422 diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71423 --- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71424 +++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71425 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71426
71427 if (len > *lenp) len = *lenp;
71428
71429 - if (copy_to_user(buffer, addr, len))
71430 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
71431 return -EFAULT;
71432
71433 *lenp = len;
71434 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71435
71436 if (len > *lenp) len = *lenp;
71437
71438 - if (copy_to_user(buffer, devname, len))
71439 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
71440 return -EFAULT;
71441
71442 *lenp = len;
71443 diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71444 --- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71445 +++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71446 @@ -4,7 +4,7 @@
71447
71448 config ECONET
71449 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71450 - depends on EXPERIMENTAL && INET
71451 + depends on EXPERIMENTAL && INET && BROKEN
71452 ---help---
71453 Econet is a fairly old and slow networking protocol mainly used by
71454 Acorn computers to access file and print servers. It uses native
71455 diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71456 --- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71457 +++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71458 @@ -318,7 +318,7 @@ out:
71459 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71460 {
71461 if (sock_queue_rcv_skb(sk, skb) < 0) {
71462 - atomic_inc(&sk->sk_drops);
71463 + atomic_inc_unchecked(&sk->sk_drops);
71464 kfree_skb(skb);
71465 return NET_RX_DROP;
71466 }
71467 diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71468 --- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71469 +++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71470 @@ -206,7 +206,7 @@ out:
71471 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71472 {
71473 if (sock_queue_rcv_skb(sk, skb) < 0) {
71474 - atomic_inc(&sk->sk_drops);
71475 + atomic_inc_unchecked(&sk->sk_drops);
71476 kfree_skb(skb);
71477 return NET_RX_DROP;
71478 }
71479 diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71480 --- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71481 +++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71482 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71483 r->idiag_retrans = 0;
71484
71485 r->id.idiag_if = sk->sk_bound_dev_if;
71486 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71487 + r->id.idiag_cookie[0] = 0;
71488 + r->id.idiag_cookie[1] = 0;
71489 +#else
71490 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71491 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71492 +#endif
71493
71494 r->id.idiag_sport = inet->sport;
71495 r->id.idiag_dport = inet->dport;
71496 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71497 r->idiag_family = tw->tw_family;
71498 r->idiag_retrans = 0;
71499 r->id.idiag_if = tw->tw_bound_dev_if;
71500 +
71501 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71502 + r->id.idiag_cookie[0] = 0;
71503 + r->id.idiag_cookie[1] = 0;
71504 +#else
71505 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71506 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71507 +#endif
71508 +
71509 r->id.idiag_sport = tw->tw_sport;
71510 r->id.idiag_dport = tw->tw_dport;
71511 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71512 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71513 if (sk == NULL)
71514 goto unlock;
71515
71516 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71517 err = -ESTALE;
71518 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71519 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71520 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71521 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71522 goto out;
71523 +#endif
71524
71525 err = -ENOMEM;
71526 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71527 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71528 r->idiag_retrans = req->retrans;
71529
71530 r->id.idiag_if = sk->sk_bound_dev_if;
71531 +
71532 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71533 + r->id.idiag_cookie[0] = 0;
71534 + r->id.idiag_cookie[1] = 0;
71535 +#else
71536 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71537 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71538 +#endif
71539
71540 tmo = req->expires - jiffies;
71541 if (tmo < 0)
71542 diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71543 --- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71544 +++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71545 @@ -18,12 +18,15 @@
71546 #include <linux/sched.h>
71547 #include <linux/slab.h>
71548 #include <linux/wait.h>
71549 +#include <linux/security.h>
71550
71551 #include <net/inet_connection_sock.h>
71552 #include <net/inet_hashtables.h>
71553 #include <net/secure_seq.h>
71554 #include <net/ip.h>
71555
71556 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71557 +
71558 /*
71559 * Allocate and initialize a new local port bind bucket.
71560 * The bindhash mutex for snum's hash chain must be held here.
71561 @@ -491,6 +494,8 @@ ok:
71562 }
71563 spin_unlock(&head->lock);
71564
71565 + gr_update_task_in_ip_table(current, inet_sk(sk));
71566 +
71567 if (tw) {
71568 inet_twsk_deschedule(tw, death_row);
71569 inet_twsk_put(tw);
71570 diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71571 --- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71572 +++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71573 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71574 struct inet_peer *p, *n;
71575 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71576
71577 + pax_track_stack();
71578 +
71579 /* Look up for the address quickly. */
71580 read_lock_bh(&peer_pool_lock);
71581 p = lookup(daddr, NULL);
71582 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71583 return NULL;
71584 n->v4daddr = daddr;
71585 atomic_set(&n->refcnt, 1);
71586 - atomic_set(&n->rid, 0);
71587 + atomic_set_unchecked(&n->rid, 0);
71588 n->ip_id_count = secure_ip_id(daddr);
71589 n->tcp_ts_stamp = 0;
71590
71591 diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71592 --- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71593 +++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71594 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71595 return 0;
71596
71597 start = qp->rid;
71598 - end = atomic_inc_return(&peer->rid);
71599 + end = atomic_inc_return_unchecked(&peer->rid);
71600 qp->rid = end;
71601
71602 rc = qp->q.fragments && (end - start) > max;
71603 diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71604 --- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71605 +++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71606 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71607 int val;
71608 int len;
71609
71610 + pax_track_stack();
71611 +
71612 if (level != SOL_IP)
71613 return -EOPNOTSUPP;
71614
71615 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71616 --- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71617 +++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71618 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71619 private = &tmp;
71620 }
71621 #endif
71622 + memset(&info, 0, sizeof(info));
71623 info.valid_hooks = t->valid_hooks;
71624 memcpy(info.hook_entry, private->hook_entry,
71625 sizeof(info.hook_entry));
71626 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71627 --- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71628 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71629 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71630 private = &tmp;
71631 }
71632 #endif
71633 + memset(&info, 0, sizeof(info));
71634 info.valid_hooks = t->valid_hooks;
71635 memcpy(info.hook_entry, private->hook_entry,
71636 sizeof(info.hook_entry));
71637 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71638 --- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71639 +++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71640 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71641
71642 *len = 0;
71643
71644 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71645 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71646 if (*octets == NULL) {
71647 if (net_ratelimit())
71648 printk("OOM in bsalg (%d)\n", __LINE__);
71649 diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71650 --- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71651 +++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71652 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71653 /* Charge it to the socket. */
71654
71655 if (sock_queue_rcv_skb(sk, skb) < 0) {
71656 - atomic_inc(&sk->sk_drops);
71657 + atomic_inc_unchecked(&sk->sk_drops);
71658 kfree_skb(skb);
71659 return NET_RX_DROP;
71660 }
71661 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71662 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71663 {
71664 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71665 - atomic_inc(&sk->sk_drops);
71666 + atomic_inc_unchecked(&sk->sk_drops);
71667 kfree_skb(skb);
71668 return NET_RX_DROP;
71669 }
71670 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71671
71672 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71673 {
71674 + struct icmp_filter filter;
71675 +
71676 + if (optlen < 0)
71677 + return -EINVAL;
71678 if (optlen > sizeof(struct icmp_filter))
71679 optlen = sizeof(struct icmp_filter);
71680 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71681 + if (copy_from_user(&filter, optval, optlen))
71682 return -EFAULT;
71683 + raw_sk(sk)->filter = filter;
71684 +
71685 return 0;
71686 }
71687
71688 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71689 {
71690 int len, ret = -EFAULT;
71691 + struct icmp_filter filter;
71692
71693 if (get_user(len, optlen))
71694 goto out;
71695 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71696 if (len > sizeof(struct icmp_filter))
71697 len = sizeof(struct icmp_filter);
71698 ret = -EFAULT;
71699 - if (put_user(len, optlen) ||
71700 - copy_to_user(optval, &raw_sk(sk)->filter, len))
71701 + filter = raw_sk(sk)->filter;
71702 + if (put_user(len, optlen) || len > sizeof filter ||
71703 + copy_to_user(optval, &filter, len))
71704 goto out;
71705 ret = 0;
71706 out: return ret;
71707 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71708 sk_wmem_alloc_get(sp),
71709 sk_rmem_alloc_get(sp),
71710 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71711 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71712 + atomic_read(&sp->sk_refcnt),
71713 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71714 + NULL,
71715 +#else
71716 + sp,
71717 +#endif
71718 + atomic_read_unchecked(&sp->sk_drops));
71719 }
71720
71721 static int raw_seq_show(struct seq_file *seq, void *v)
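[Editor's note] The raw_seticmpfilter() change above rejects negative lengths, clamps optlen to the filter size, and copies the user buffer into a stack-local struct icmp_filter before committing it to the socket, so a failed or short copy can never leave the socket's filter half-written. A user-space sketch of that validate-clamp-copy-commit order, with memcpy standing in for copy_from_user() and an illustrative filter type:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct filter { uint32_t data[8]; };

static int set_filter(struct filter *committed, const void *optval, int optlen)
{
    struct filter tmp;

    if (optlen < 0)
        return -EINVAL;                    /* reject bogus lengths up front */
    if ((size_t)optlen > sizeof(tmp))
        optlen = sizeof(tmp);              /* clamp to the object size */

    memset(&tmp, 0, sizeof(tmp));
    memcpy(&tmp, optval, (size_t)optlen);  /* stand-in for copy_from_user() */

    *committed = tmp;                      /* publish only after the copy succeeded */
    return 0;
}

int main(void)
{
    struct filter f;
    uint32_t user_buf[2] = { 0xdeadbeef, 0x1 };
    return set_filter(&f, user_buf, (int)sizeof(user_buf)) ? 1 : 0;
}

The matching raw_geticmpfilter() hunk applies the same idea in the other direction: snapshot the filter into a local copy and bound len before copying it out.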
71722 diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71723 --- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71724 +++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71725 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71726
71727 static inline int rt_genid(struct net *net)
71728 {
71729 - return atomic_read(&net->ipv4.rt_genid);
71730 + return atomic_read_unchecked(&net->ipv4.rt_genid);
71731 }
71732
71733 #ifdef CONFIG_PROC_FS
71734 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71735 unsigned char shuffle;
71736
71737 get_random_bytes(&shuffle, sizeof(shuffle));
71738 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71739 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71740 }
71741
71742 /*
71743 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71744
71745 static __net_init int rt_secret_timer_init(struct net *net)
71746 {
71747 - atomic_set(&net->ipv4.rt_genid,
71748 + atomic_set_unchecked(&net->ipv4.rt_genid,
71749 (int) ((num_physpages ^ (num_physpages>>8)) ^
71750 (jiffies ^ (jiffies >> 7))));
71751
71752 diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71753 --- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71754 +++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71755 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71756 int val;
71757 int err = 0;
71758
71759 + pax_track_stack();
71760 +
71761 /* This is a string value all the others are int's */
71762 if (optname == TCP_CONGESTION) {
71763 char name[TCP_CA_NAME_MAX];
71764 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71765 struct tcp_sock *tp = tcp_sk(sk);
71766 int val, len;
71767
71768 + pax_track_stack();
71769 +
71770 if (get_user(len, optlen))
71771 return -EFAULT;
71772
71773 diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71774 --- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71775 +++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
71776 @@ -85,6 +85,9 @@
71777 int sysctl_tcp_tw_reuse __read_mostly;
71778 int sysctl_tcp_low_latency __read_mostly;
71779
71780 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71781 +extern int grsec_enable_blackhole;
71782 +#endif
71783
71784 #ifdef CONFIG_TCP_MD5SIG
71785 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71786 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71787 return 0;
71788
71789 reset:
71790 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71791 + if (!grsec_enable_blackhole)
71792 +#endif
71793 tcp_v4_send_reset(rsk, skb);
71794 discard:
71795 kfree_skb(skb);
71796 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71797 TCP_SKB_CB(skb)->sacked = 0;
71798
71799 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71800 - if (!sk)
71801 + if (!sk) {
71802 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71803 + ret = 1;
71804 +#endif
71805 goto no_tcp_socket;
71806 + }
71807
71808 process:
71809 - if (sk->sk_state == TCP_TIME_WAIT)
71810 + if (sk->sk_state == TCP_TIME_WAIT) {
71811 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71812 + ret = 2;
71813 +#endif
71814 goto do_time_wait;
71815 + }
71816
71817 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71818 goto discard_and_relse;
71819 @@ -1651,6 +1665,10 @@ no_tcp_socket:
71820 bad_packet:
71821 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71822 } else {
71823 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71824 + if (!grsec_enable_blackhole || (ret == 1 &&
71825 + (skb->dev->flags & IFF_LOOPBACK)))
71826 +#endif
71827 tcp_v4_send_reset(NULL, skb);
71828 }
71829
71830 @@ -2195,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
71831 int rc = 0;
71832 struct proc_dir_entry *p;
71833
71834 - afinfo->seq_fops.open = tcp_seq_open;
71835 - afinfo->seq_fops.read = seq_read;
71836 - afinfo->seq_fops.llseek = seq_lseek;
71837 - afinfo->seq_fops.release = seq_release_net;
71838 -
71839 - afinfo->seq_ops.start = tcp_seq_start;
71840 - afinfo->seq_ops.next = tcp_seq_next;
71841 - afinfo->seq_ops.stop = tcp_seq_stop;
71842 + *(void **)&afinfo->seq_fops.open = tcp_seq_open;
71843 + *(void **)&afinfo->seq_fops.read = seq_read;
71844 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
71845 + *(void **)&afinfo->seq_fops.release = seq_release_net;
71846 +
71847 + *(void **)&afinfo->seq_ops.start = tcp_seq_start;
71848 + *(void **)&afinfo->seq_ops.next = tcp_seq_next;
71849 + *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
71850
71851 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
71852 &afinfo->seq_fops, afinfo);
71853 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71854 0, /* non standard timer */
71855 0, /* open_requests have no inode */
71856 atomic_read(&sk->sk_refcnt),
71857 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71858 + NULL,
71859 +#else
71860 req,
71861 +#endif
71862 len);
71863 }
71864
71865 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71866 sock_i_uid(sk),
71867 icsk->icsk_probes_out,
71868 sock_i_ino(sk),
71869 - atomic_read(&sk->sk_refcnt), sk,
71870 + atomic_read(&sk->sk_refcnt),
71871 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71872 + NULL,
71873 +#else
71874 + sk,
71875 +#endif
71876 jiffies_to_clock_t(icsk->icsk_rto),
71877 jiffies_to_clock_t(icsk->icsk_ack.ato),
71878 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71879 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71880 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71881 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71882 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71883 - atomic_read(&tw->tw_refcnt), tw, len);
71884 + atomic_read(&tw->tw_refcnt),
71885 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71886 + NULL,
71887 +#else
71888 + tw,
71889 +#endif
71890 + len);
71891 }
71892
71893 #define TMPSZ 150
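[Editor's note] The tcp_ipv4.c hunks above thread a ret flag through tcp_v4_rcv() so that, when the grsec_enable_blackhole toggle declared in the hunk is on, the kernel stays silent instead of answering unmatched segments with a RST, while still resetting packets that arrived over loopback so local tooling keeps working. A small sketch of that gating decision, with illustrative names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

static int blackhole_enabled = 1;   /* stand-in for grsec_enable_blackhole */

/* no_socket: the segment matched no listening or established socket */
static bool should_send_reset(bool no_socket, bool from_loopback)
{
    if (!blackhole_enabled)
        return true;                /* feature off: normal RFC behaviour */
    return no_socket && from_loopback;
}

int main(void)
{
    printf("stranger on eth0: %d\n", should_send_reset(true, false)); /* 0: drop silently */
    printf("stranger on lo:   %d\n", should_send_reset(true, true));  /* 1: still reset */
    return 0;
}

The ipv6 tcp_ipv6.c hunks later in this patch apply the identical gate around tcp_v6_send_reset().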
71894 diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71895 --- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71896 +++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71897 @@ -26,6 +26,10 @@
71898 #include <net/inet_common.h>
71899 #include <net/xfrm.h>
71900
71901 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71902 +extern int grsec_enable_blackhole;
71903 +#endif
71904 +
71905 #ifdef CONFIG_SYSCTL
71906 #define SYNC_INIT 0 /* let the user enable it */
71907 #else
71908 @@ -672,6 +676,10 @@ listen_overflow:
71909
71910 embryonic_reset:
71911 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71912 +
71913 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71914 + if (!grsec_enable_blackhole)
71915 +#endif
71916 if (!(flg & TCP_FLAG_RST))
71917 req->rsk_ops->send_reset(sk, skb);
71918
71919 diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71920 --- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71921 +++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71922 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71923 __u8 *md5_hash_location;
71924 int mss;
71925
71926 + pax_track_stack();
71927 +
71928 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71929 if (skb == NULL)
71930 return NULL;
71931 diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71932 --- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71933 +++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71934 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71935 if (cnt + width >= len)
71936 break;
71937
71938 - if (copy_to_user(buf + cnt, tbuf, width))
71939 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71940 return -EFAULT;
71941 cnt += width;
71942 }
71943 diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71944 --- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71945 +++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71946 @@ -21,6 +21,10 @@
71947 #include <linux/module.h>
71948 #include <net/tcp.h>
71949
71950 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71951 +extern int grsec_lastack_retries;
71952 +#endif
71953 +
71954 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71955 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71956 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71957 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71958 }
71959 }
71960
71961 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71962 + if ((sk->sk_state == TCP_LAST_ACK) &&
71963 + (grsec_lastack_retries > 0) &&
71964 + (grsec_lastack_retries < retry_until))
71965 + retry_until = grsec_lastack_retries;
71966 +#endif
71967 +
71968 if (retransmits_timed_out(sk, retry_until)) {
71969 /* Has it gone just too far? */
71970 tcp_write_err(sk);
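[Editor's note] The tcp_timer.c hunk above lowers the retransmission budget for sockets stuck in LAST_ACK whenever a positive grsec_lastack_retries limit is configured, bounding how long an orphaned connection can keep retransmitting. The effective limit is simply the smaller of the two values; a sketch, with names echoing the hunk but otherwise illustrative:

#include <stdio.h>

static int effective_retries(int retry_until, int lastack_limit, int in_last_ack)
{
    if (in_last_ack && lastack_limit > 0 && lastack_limit < retry_until)
        return lastack_limit;       /* the tighter grsec limit wins */
    return retry_until;             /* otherwise keep the normal budget */
}

int main(void)
{
    printf("%d\n", effective_retries(15, 4, 1));   /* prints 4 */
    printf("%d\n", effective_retries(15, 0, 1));   /* prints 15: 0 means "off" */
    return 0;
}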
71971 diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71972 --- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71973 +++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
71974 @@ -86,6 +86,7 @@
71975 #include <linux/types.h>
71976 #include <linux/fcntl.h>
71977 #include <linux/module.h>
71978 +#include <linux/security.h>
71979 #include <linux/socket.h>
71980 #include <linux/sockios.h>
71981 #include <linux/igmp.h>
71982 @@ -106,6 +107,10 @@
71983 #include <net/xfrm.h>
71984 #include "udp_impl.h"
71985
71986 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71987 +extern int grsec_enable_blackhole;
71988 +#endif
71989 +
71990 struct udp_table udp_table;
71991 EXPORT_SYMBOL(udp_table);
71992
71993 @@ -371,6 +376,9 @@ found:
71994 return s;
71995 }
71996
71997 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71998 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71999 +
72000 /*
72001 * This routine is called by the ICMP module when it gets some
72002 * sort of error condition. If err < 0 then the socket should
72003 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72004 dport = usin->sin_port;
72005 if (dport == 0)
72006 return -EINVAL;
72007 +
72008 + err = gr_search_udp_sendmsg(sk, usin);
72009 + if (err)
72010 + return err;
72011 } else {
72012 if (sk->sk_state != TCP_ESTABLISHED)
72013 return -EDESTADDRREQ;
72014 +
72015 + err = gr_search_udp_sendmsg(sk, NULL);
72016 + if (err)
72017 + return err;
72018 +
72019 daddr = inet->daddr;
72020 dport = inet->dport;
72021 /* Open fast path for connected socket.
72022 @@ -945,6 +962,10 @@ try_again:
72023 if (!skb)
72024 goto out;
72025
72026 + err = gr_search_udp_recvmsg(sk, skb);
72027 + if (err)
72028 + goto out_free;
72029 +
72030 ulen = skb->len - sizeof(struct udphdr);
72031 copied = len;
72032 if (copied > ulen)
72033 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
72034 if (rc == -ENOMEM) {
72035 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72036 is_udplite);
72037 - atomic_inc(&sk->sk_drops);
72038 + atomic_inc_unchecked(&sk->sk_drops);
72039 }
72040 goto drop;
72041 }
72042 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72043 goto csum_error;
72044
72045 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72046 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72047 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72048 +#endif
72049 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72050
72051 /*
72052 @@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
72053 struct proc_dir_entry *p;
72054 int rc = 0;
72055
72056 - afinfo->seq_fops.open = udp_seq_open;
72057 - afinfo->seq_fops.read = seq_read;
72058 - afinfo->seq_fops.llseek = seq_lseek;
72059 - afinfo->seq_fops.release = seq_release_net;
72060 -
72061 - afinfo->seq_ops.start = udp_seq_start;
72062 - afinfo->seq_ops.next = udp_seq_next;
72063 - afinfo->seq_ops.stop = udp_seq_stop;
72064 + *(void **)&afinfo->seq_fops.open = udp_seq_open;
72065 + *(void **)&afinfo->seq_fops.read = seq_read;
72066 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72067 + *(void **)&afinfo->seq_fops.release = seq_release_net;
72068 +
72069 + *(void **)&afinfo->seq_ops.start = udp_seq_start;
72070 + *(void **)&afinfo->seq_ops.next = udp_seq_next;
72071 + *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
72072
72073 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72074 &afinfo->seq_fops, afinfo);
72075 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
72076 sk_wmem_alloc_get(sp),
72077 sk_rmem_alloc_get(sp),
72078 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72079 - atomic_read(&sp->sk_refcnt), sp,
72080 - atomic_read(&sp->sk_drops), len);
72081 + atomic_read(&sp->sk_refcnt),
72082 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72083 + NULL,
72084 +#else
72085 + sp,
72086 +#endif
72087 + atomic_read_unchecked(&sp->sk_drops), len);
72088 }
72089
72090 int udp4_seq_show(struct seq_file *seq, void *v)
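[Editor's note] The udp.c hunks above call the external gr_search_udp_sendmsg()/gr_search_udp_recvmsg() policy hooks before the socket code commits to transmitting or delivering anything, and propagate a non-zero return straight back as the syscall's error. A sketch of that check-early-and-propagate shape, with a stand-in hook rather than the real grsecurity functions:

/* Stand-in for gr_search_udp_sendmsg(); a real policy would inspect the
 * socket and the destination address. Returns 0 to allow, -errno to refuse. */
static int policy_allows_send(int dest_is_explicit)
{
    (void)dest_is_explicit;
    return 0;
}

static int sendmsg_path(int dest_is_explicit)
{
    int err = policy_allows_send(dest_is_explicit);
    if (err)
        return err;                 /* refuse before touching routing or queues */
    /* ... normal transmit path would follow ... */
    return 0;
}

int main(void)
{
    return sendmsg_path(1);
}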
72091 diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
72092 --- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72093 +++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72094 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72095 #ifdef CONFIG_XFRM
72096 {
72097 struct rt6_info *rt = (struct rt6_info *)dst;
72098 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72099 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72100 }
72101 #endif
72102 }
72103 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72104 #ifdef CONFIG_XFRM
72105 if (dst) {
72106 struct rt6_info *rt = (struct rt6_info *)dst;
72107 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72108 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72109 sk->sk_dst_cache = NULL;
72110 dst_release(dst);
72111 dst = NULL;
72112 diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
72113 --- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
72114 +++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72115 @@ -119,7 +119,7 @@ out:
72116 }
72117 EXPORT_SYMBOL(__inet6_lookup_established);
72118
72119 -static int inline compute_score(struct sock *sk, struct net *net,
72120 +static inline int compute_score(struct sock *sk, struct net *net,
72121 const unsigned short hnum,
72122 const struct in6_addr *daddr,
72123 const int dif)
72124 diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
72125 --- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72126 +++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72127 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72128 int val, valbool;
72129 int retv = -ENOPROTOOPT;
72130
72131 + pax_track_stack();
72132 +
72133 if (optval == NULL)
72134 val=0;
72135 else {
72136 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72137 int len;
72138 int val;
72139
72140 + pax_track_stack();
72141 +
72142 if (ip6_mroute_opt(optname))
72143 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72144
72145 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
72146 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72147 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72148 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72149 private = &tmp;
72150 }
72151 #endif
72152 + memset(&info, 0, sizeof(info));
72153 info.valid_hooks = t->valid_hooks;
72154 memcpy(info.hook_entry, private->hook_entry,
72155 sizeof(info.hook_entry));
72156 diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
72157 --- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72158 +++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
72159 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72160 {
72161 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72162 skb_checksum_complete(skb)) {
72163 - atomic_inc(&sk->sk_drops);
72164 + atomic_inc_unchecked(&sk->sk_drops);
72165 kfree_skb(skb);
72166 return NET_RX_DROP;
72167 }
72168
72169 /* Charge it to the socket. */
72170 if (sock_queue_rcv_skb(sk,skb)<0) {
72171 - atomic_inc(&sk->sk_drops);
72172 + atomic_inc_unchecked(&sk->sk_drops);
72173 kfree_skb(skb);
72174 return NET_RX_DROP;
72175 }
72176 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72177 struct raw6_sock *rp = raw6_sk(sk);
72178
72179 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72180 - atomic_inc(&sk->sk_drops);
72181 + atomic_inc_unchecked(&sk->sk_drops);
72182 kfree_skb(skb);
72183 return NET_RX_DROP;
72184 }
72185 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72186
72187 if (inet->hdrincl) {
72188 if (skb_checksum_complete(skb)) {
72189 - atomic_inc(&sk->sk_drops);
72190 + atomic_inc_unchecked(&sk->sk_drops);
72191 kfree_skb(skb);
72192 return NET_RX_DROP;
72193 }
72194 @@ -518,7 +518,7 @@ csum_copy_err:
72195 as some normal condition.
72196 */
72197 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72198 - atomic_inc(&sk->sk_drops);
72199 + atomic_inc_unchecked(&sk->sk_drops);
72200 goto out;
72201 }
72202
72203 @@ -600,7 +600,7 @@ out:
72204 return err;
72205 }
72206
72207 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72208 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72209 struct flowi *fl, struct rt6_info *rt,
72210 unsigned int flags)
72211 {
72212 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72213 u16 proto;
72214 int err;
72215
72216 + pax_track_stack();
72217 +
72218 /* Rough check on arithmetic overflow,
72219 better check is made in ip6_append_data().
72220 */
72221 @@ -916,12 +918,17 @@ do_confirm:
72222 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72223 char __user *optval, int optlen)
72224 {
72225 + struct icmp6_filter filter;
72226 +
72227 switch (optname) {
72228 case ICMPV6_FILTER:
72229 + if (optlen < 0)
72230 + return -EINVAL;
72231 if (optlen > sizeof(struct icmp6_filter))
72232 optlen = sizeof(struct icmp6_filter);
72233 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72234 + if (copy_from_user(&filter, optval, optlen))
72235 return -EFAULT;
72236 + raw6_sk(sk)->filter = filter;
72237 return 0;
72238 default:
72239 return -ENOPROTOOPT;
72240 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72241 char __user *optval, int __user *optlen)
72242 {
72243 int len;
72244 + struct icmp6_filter filter;
72245
72246 switch (optname) {
72247 case ICMPV6_FILTER:
72248 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72249 len = sizeof(struct icmp6_filter);
72250 if (put_user(len, optlen))
72251 return -EFAULT;
72252 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72253 + filter = raw6_sk(sk)->filter;
72254 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
72255 return -EFAULT;
72256 return 0;
72257 default:
72258 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72259 0, 0L, 0,
72260 sock_i_uid(sp), 0,
72261 sock_i_ino(sp),
72262 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72263 + atomic_read(&sp->sk_refcnt),
72264 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72265 + NULL,
72266 +#else
72267 + sp,
72268 +#endif
72269 + atomic_read_unchecked(&sp->sk_drops));
72270 }
72271
72272 static int raw6_seq_show(struct seq_file *seq, void *v)
72273 diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72274 --- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72275 +++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72276 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72277 }
72278 #endif
72279
72280 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72281 +extern int grsec_enable_blackhole;
72282 +#endif
72283 +
72284 static void tcp_v6_hash(struct sock *sk)
72285 {
72286 if (sk->sk_state != TCP_CLOSE) {
72287 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72288 return 0;
72289
72290 reset:
72291 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72292 + if (!grsec_enable_blackhole)
72293 +#endif
72294 tcp_v6_send_reset(sk, skb);
72295 discard:
72296 if (opt_skb)
72297 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72298 TCP_SKB_CB(skb)->sacked = 0;
72299
72300 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72301 - if (!sk)
72302 + if (!sk) {
72303 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72304 + ret = 1;
72305 +#endif
72306 goto no_tcp_socket;
72307 + }
72308
72309 process:
72310 - if (sk->sk_state == TCP_TIME_WAIT)
72311 + if (sk->sk_state == TCP_TIME_WAIT) {
72312 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72313 + ret = 2;
72314 +#endif
72315 goto do_time_wait;
72316 + }
72317
72318 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72319 goto discard_and_relse;
72320 @@ -1701,6 +1716,10 @@ no_tcp_socket:
72321 bad_packet:
72322 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72323 } else {
72324 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72325 + if (!grsec_enable_blackhole || (ret == 1 &&
72326 + (skb->dev->flags & IFF_LOOPBACK)))
72327 +#endif
72328 tcp_v6_send_reset(NULL, skb);
72329 }
72330
72331 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72332 uid,
72333 0, /* non standard timer */
72334 0, /* open_requests have no inode */
72335 - 0, req);
72336 + 0,
72337 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72338 + NULL
72339 +#else
72340 + req
72341 +#endif
72342 + );
72343 }
72344
72345 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72346 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72347 sock_i_uid(sp),
72348 icsk->icsk_probes_out,
72349 sock_i_ino(sp),
72350 - atomic_read(&sp->sk_refcnt), sp,
72351 + atomic_read(&sp->sk_refcnt),
72352 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72353 + NULL,
72354 +#else
72355 + sp,
72356 +#endif
72357 jiffies_to_clock_t(icsk->icsk_rto),
72358 jiffies_to_clock_t(icsk->icsk_ack.ato),
72359 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72360 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72361 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72362 tw->tw_substate, 0, 0,
72363 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72364 - atomic_read(&tw->tw_refcnt), tw);
72365 + atomic_read(&tw->tw_refcnt),
72366 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72367 + NULL
72368 +#else
72369 + tw
72370 +#endif
72371 + );
72372 }
72373
72374 static int tcp6_seq_show(struct seq_file *seq, void *v)
72375 diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72376 --- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72377 +++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72378 @@ -49,6 +49,10 @@
72379 #include <linux/seq_file.h>
72380 #include "udp_impl.h"
72381
72382 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72383 +extern int grsec_enable_blackhole;
72384 +#endif
72385 +
72386 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72387 {
72388 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72389 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72390 if (rc == -ENOMEM) {
72391 UDP6_INC_STATS_BH(sock_net(sk),
72392 UDP_MIB_RCVBUFERRORS, is_udplite);
72393 - atomic_inc(&sk->sk_drops);
72394 + atomic_inc_unchecked(&sk->sk_drops);
72395 }
72396 goto drop;
72397 }
72398 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72399 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72400 proto == IPPROTO_UDPLITE);
72401
72402 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72403 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72404 +#endif
72405 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72406
72407 kfree_skb(skb);
72408 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72409 0, 0L, 0,
72410 sock_i_uid(sp), 0,
72411 sock_i_ino(sp),
72412 - atomic_read(&sp->sk_refcnt), sp,
72413 - atomic_read(&sp->sk_drops));
72414 + atomic_read(&sp->sk_refcnt),
72415 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72416 + NULL,
72417 +#else
72418 + sp,
72419 +#endif
72420 + atomic_read_unchecked(&sp->sk_drops));
72421 }
72422
72423 int udp6_seq_show(struct seq_file *seq, void *v)
72424 diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72425 --- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72426 +++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72427 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72428 add_wait_queue(&self->open_wait, &wait);
72429
72430 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72431 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72432 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72433
72434 /* As far as I can see, we protect open_count - Jean II */
72435 spin_lock_irqsave(&self->spinlock, flags);
72436 if (!tty_hung_up_p(filp)) {
72437 extra_count = 1;
72438 - self->open_count--;
72439 + local_dec(&self->open_count);
72440 }
72441 spin_unlock_irqrestore(&self->spinlock, flags);
72442 - self->blocked_open++;
72443 + local_inc(&self->blocked_open);
72444
72445 while (1) {
72446 if (tty->termios->c_cflag & CBAUD) {
72447 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72448 }
72449
72450 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72451 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72452 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72453
72454 schedule();
72455 }
72456 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72457 if (extra_count) {
72458 /* ++ is not atomic, so this should be protected - Jean II */
72459 spin_lock_irqsave(&self->spinlock, flags);
72460 - self->open_count++;
72461 + local_inc(&self->open_count);
72462 spin_unlock_irqrestore(&self->spinlock, flags);
72463 }
72464 - self->blocked_open--;
72465 + local_dec(&self->blocked_open);
72466
72467 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72468 - __FILE__,__LINE__, tty->driver->name, self->open_count);
72469 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72470
72471 if (!retval)
72472 self->flags |= ASYNC_NORMAL_ACTIVE;
72473 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72474 }
72475 /* ++ is not atomic, so this should be protected - Jean II */
72476 spin_lock_irqsave(&self->spinlock, flags);
72477 - self->open_count++;
72478 + local_inc(&self->open_count);
72479
72480 tty->driver_data = self;
72481 self->tty = tty;
72482 spin_unlock_irqrestore(&self->spinlock, flags);
72483
72484 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72485 - self->line, self->open_count);
72486 + self->line, local_read(&self->open_count));
72487
72488 /* Not really used by us, but lets do it anyway */
72489 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72490 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72491 return;
72492 }
72493
72494 - if ((tty->count == 1) && (self->open_count != 1)) {
72495 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72496 /*
72497 * Uh, oh. tty->count is 1, which means that the tty
72498 * structure will be freed. state->count should always
72499 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72500 */
72501 IRDA_DEBUG(0, "%s(), bad serial port count; "
72502 "tty->count is 1, state->count is %d\n", __func__ ,
72503 - self->open_count);
72504 - self->open_count = 1;
72505 + local_read(&self->open_count));
72506 + local_set(&self->open_count, 1);
72507 }
72508
72509 - if (--self->open_count < 0) {
72510 + if (local_dec_return(&self->open_count) < 0) {
72511 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72512 - __func__, self->line, self->open_count);
72513 - self->open_count = 0;
72514 + __func__, self->line, local_read(&self->open_count));
72515 + local_set(&self->open_count, 0);
72516 }
72517 - if (self->open_count) {
72518 + if (local_read(&self->open_count)) {
72519 spin_unlock_irqrestore(&self->spinlock, flags);
72520
72521 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72522 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72523 tty->closing = 0;
72524 self->tty = NULL;
72525
72526 - if (self->blocked_open) {
72527 + if (local_read(&self->blocked_open)) {
72528 if (self->close_delay)
72529 schedule_timeout_interruptible(self->close_delay);
72530 wake_up_interruptible(&self->open_wait);
72531 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72532 spin_lock_irqsave(&self->spinlock, flags);
72533 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72534 self->tty = NULL;
72535 - self->open_count = 0;
72536 + local_set(&self->open_count, 0);
72537 spin_unlock_irqrestore(&self->spinlock, flags);
72538
72539 wake_up_interruptible(&self->open_wait);
72540 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72541 seq_putc(m, '\n');
72542
72543 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72544 - seq_printf(m, "Open count: %d\n", self->open_count);
72545 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72546 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72547 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72548
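[Editor's note] The ircomm_tty.c hunks above (and the mac80211 open_count changes that follow) replace raw ++/-- and direct reads of per-port counters with local_t accessors, so every update goes through an explicit atomic primitive rather than a read-modify-write that can race. A user-space analogue using C11 atomics, not the kernel's local_t API, assuming an illustrative port structure:

#include <stdatomic.h>
#include <stdio.h>

struct port {
    atomic_int open_count;
    atomic_int blocked_open;
};

static void port_open(struct port *p)
{
    atomic_fetch_add(&p->open_count, 1);          /* was: self->open_count++ */
}

static void port_close(struct port *p)
{
    if (atomic_fetch_sub(&p->open_count, 1) - 1 < 0)
        atomic_store(&p->open_count, 0);          /* mirror the "bad count" reset */
}

int main(void)
{
    struct port p = { .open_count = 0, .blocked_open = 0 };
    port_open(&p);
    port_close(&p);
    printf("open_count=%d\n", atomic_load(&p.open_count));
    return 0;
}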
72549 diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72550 --- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72551 +++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72552 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72553
72554 write_lock_bh(&iucv_sk_list.lock);
72555
72556 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72557 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72558 while (__iucv_get_sock_by_name(name)) {
72559 sprintf(name, "%08x",
72560 - atomic_inc_return(&iucv_sk_list.autobind_name));
72561 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72562 }
72563
72564 write_unlock_bh(&iucv_sk_list.lock);
72565 diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72566 --- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72567 +++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72568 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72569 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72570 struct xfrm_kmaddress k;
72571
72572 + pax_track_stack();
72573 +
72574 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72575 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72576 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72577 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72578 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72579 else
72580 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72581 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72582 + NULL,
72583 +#else
72584 s,
72585 +#endif
72586 atomic_read(&s->sk_refcnt),
72587 sk_rmem_alloc_get(s),
72588 sk_wmem_alloc_get(s),
72589 diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72590 --- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72591 +++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72592 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72593 goto out;
72594
72595 lapb->dev = dev;
72596 - lapb->callbacks = *callbacks;
72597 + lapb->callbacks = callbacks;
72598
72599 __lapb_insert_cb(lapb);
72600
72601 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72602
72603 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72604 {
72605 - if (lapb->callbacks.connect_confirmation)
72606 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
72607 + if (lapb->callbacks->connect_confirmation)
72608 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
72609 }
72610
72611 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72612 {
72613 - if (lapb->callbacks.connect_indication)
72614 - lapb->callbacks.connect_indication(lapb->dev, reason);
72615 + if (lapb->callbacks->connect_indication)
72616 + lapb->callbacks->connect_indication(lapb->dev, reason);
72617 }
72618
72619 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72620 {
72621 - if (lapb->callbacks.disconnect_confirmation)
72622 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72623 + if (lapb->callbacks->disconnect_confirmation)
72624 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72625 }
72626
72627 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72628 {
72629 - if (lapb->callbacks.disconnect_indication)
72630 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
72631 + if (lapb->callbacks->disconnect_indication)
72632 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
72633 }
72634
72635 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72636 {
72637 - if (lapb->callbacks.data_indication)
72638 - return lapb->callbacks.data_indication(lapb->dev, skb);
72639 + if (lapb->callbacks->data_indication)
72640 + return lapb->callbacks->data_indication(lapb->dev, skb);
72641
72642 kfree_skb(skb);
72643 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72644 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72645 {
72646 int used = 0;
72647
72648 - if (lapb->callbacks.data_transmit) {
72649 - lapb->callbacks.data_transmit(lapb->dev, skb);
72650 + if (lapb->callbacks->data_transmit) {
72651 + lapb->callbacks->data_transmit(lapb->dev, skb);
72652 used = 1;
72653 }
72654
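[Editor's note] The lapb_iface.c hunk above stops copying the caller's callback structure into each control block and stores a pointer to it instead, so one const table can back every user and the function pointers themselves can live in read-only memory. A compact sketch of the before/after shape, with illustrative types:

#include <stdio.h>

struct ops {
    void (*connect_indication)(int reason);
    void (*disconnect_indication)(int reason);
};

struct ctrl_block {
    const struct ops *callbacks;     /* was: struct ops callbacks (a copy) */
};

static void on_connect(int reason) { printf("connected: %d\n", reason); }

static const struct ops my_ops = {
    .connect_indication = on_connect,
    /* .disconnect_indication intentionally left NULL */
};

static void notify_connect(struct ctrl_block *cb, int reason)
{
    if (cb->callbacks && cb->callbacks->connect_indication)
        cb->callbacks->connect_indication(reason);
}

int main(void)
{
    struct ctrl_block cb = { .callbacks = &my_ops };
    notify_connect(&cb, 0);
    return 0;
}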
72655 diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72656 --- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72657 +++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72658 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72659 return err;
72660 }
72661
72662 -struct cfg80211_ops mac80211_config_ops = {
72663 +const struct cfg80211_ops mac80211_config_ops = {
72664 .add_virtual_intf = ieee80211_add_iface,
72665 .del_virtual_intf = ieee80211_del_iface,
72666 .change_virtual_intf = ieee80211_change_iface,
72667 diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72668 --- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72669 +++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72670 @@ -4,6 +4,6 @@
72671 #ifndef __CFG_H
72672 #define __CFG_H
72673
72674 -extern struct cfg80211_ops mac80211_config_ops;
72675 +extern const struct cfg80211_ops mac80211_config_ops;
72676
72677 #endif /* __CFG_H */
72678 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72679 --- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72680 +++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72681 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72682 size_t count, loff_t *ppos)
72683 {
72684 struct ieee80211_key *key = file->private_data;
72685 - int i, res, bufsize = 2 * key->conf.keylen + 2;
72686 + int i, bufsize = 2 * key->conf.keylen + 2;
72687 char *buf = kmalloc(bufsize, GFP_KERNEL);
72688 char *p = buf;
72689 + ssize_t res;
72690 +
72691 + if (buf == NULL)
72692 + return -ENOMEM;
72693
72694 for (i = 0; i < key->conf.keylen; i++)
72695 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72696 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72697 --- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72698 +++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72699 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72700 int i;
72701 struct sta_info *sta = file->private_data;
72702
72703 + pax_track_stack();
72704 +
72705 spin_lock_bh(&sta->lock);
72706 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72707 sta->ampdu_mlme.dialog_token_allocator + 1);
72708 diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72709 --- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72710 +++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72711 @@ -25,6 +25,7 @@
72712 #include <linux/etherdevice.h>
72713 #include <net/cfg80211.h>
72714 #include <net/mac80211.h>
72715 +#include <asm/local.h>
72716 #include "key.h"
72717 #include "sta_info.h"
72718
72719 @@ -635,7 +636,7 @@ struct ieee80211_local {
72720 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72721 spinlock_t queue_stop_reason_lock;
72722
72723 - int open_count;
72724 + local_t open_count;
72725 int monitors, cooked_mntrs;
72726 /* number of interfaces with corresponding FIF_ flags */
72727 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72728 diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72729 --- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72730 +++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72731 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72732 break;
72733 }
72734
72735 - if (local->open_count == 0) {
72736 + if (local_read(&local->open_count) == 0) {
72737 res = drv_start(local);
72738 if (res)
72739 goto err_del_bss;
72740 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72741 * Validate the MAC address for this device.
72742 */
72743 if (!is_valid_ether_addr(dev->dev_addr)) {
72744 - if (!local->open_count)
72745 + if (!local_read(&local->open_count))
72746 drv_stop(local);
72747 return -EADDRNOTAVAIL;
72748 }
72749 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72750
72751 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72752
72753 - local->open_count++;
72754 + local_inc(&local->open_count);
72755 if (hw_reconf_flags) {
72756 ieee80211_hw_config(local, hw_reconf_flags);
72757 /*
72758 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72759 err_del_interface:
72760 drv_remove_interface(local, &conf);
72761 err_stop:
72762 - if (!local->open_count)
72763 + if (!local_read(&local->open_count))
72764 drv_stop(local);
72765 err_del_bss:
72766 sdata->bss = NULL;
72767 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72768 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72769 }
72770
72771 - local->open_count--;
72772 + local_dec(&local->open_count);
72773
72774 switch (sdata->vif.type) {
72775 case NL80211_IFTYPE_AP_VLAN:
72776 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72777
72778 ieee80211_recalc_ps(local, -1);
72779
72780 - if (local->open_count == 0) {
72781 + if (local_read(&local->open_count) == 0) {
72782 ieee80211_clear_tx_pending(local);
72783 ieee80211_stop_device(local);
72784
72785 diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72786 --- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72787 +++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72788 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72789 local->hw.conf.power_level = power;
72790 }
72791
72792 - if (changed && local->open_count) {
72793 + if (changed && local_read(&local->open_count)) {
72794 ret = drv_config(local, changed);
72795 /*
72796 * Goal:
72797 diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72798 --- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72799 +++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72800 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72801 bool have_higher_than_11mbit = false, newsta = false;
72802 u16 ap_ht_cap_flags;
72803
72804 + pax_track_stack();
72805 +
72806 /*
72807 * AssocResp and ReassocResp have identical structure, so process both
72808 * of them in this function.
72809 diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72810 --- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72811 +++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72812 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72813 }
72814
72815 /* stop hardware - this must stop RX */
72816 - if (local->open_count)
72817 + if (local_read(&local->open_count))
72818 ieee80211_stop_device(local);
72819
72820 local->suspended = true;
72821 diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72822 --- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72823 +++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72824 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72825 struct rate_control_ref *ref, *old;
72826
72827 ASSERT_RTNL();
72828 - if (local->open_count)
72829 + if (local_read(&local->open_count))
72830 return -EBUSY;
72831
72832 ref = rate_control_alloc(name, local);
72833 diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72834 --- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72835 +++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72836 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72837 return cpu_to_le16(dur);
72838 }
72839
72840 -static int inline is_ieee80211_device(struct ieee80211_local *local,
72841 +static inline int is_ieee80211_device(struct ieee80211_local *local,
72842 struct net_device *dev)
72843 {
72844 return local == wdev_priv(dev->ieee80211_ptr);
72845 diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72846 --- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72847 +++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72848 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72849 local->resuming = true;
72850
72851 /* restart hardware */
72852 - if (local->open_count) {
72853 + if (local_read(&local->open_count)) {
72854 /*
72855 * Upon resume hardware can sometimes be goofy due to
72856 * various platform / driver / bus issues, so restarting
72857 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72858 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72859 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72860 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
72861 .open = ip_vs_app_open,
72862 .read = seq_read,
72863 .llseek = seq_lseek,
72864 - .release = seq_release,
72865 + .release = seq_release_net,
72866 };
72867 #endif
72868
72869 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72870 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72871 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72872 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72873 /* if the connection is not template and is created
72874 * by sync, preserve the activity flag.
72875 */
72876 - cp->flags |= atomic_read(&dest->conn_flags) &
72877 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72878 (~IP_VS_CONN_F_INACTIVE);
72879 else
72880 - cp->flags |= atomic_read(&dest->conn_flags);
72881 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72882 cp->dest = dest;
72883
72884 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72885 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72886 atomic_set(&cp->refcnt, 1);
72887
72888 atomic_set(&cp->n_control, 0);
72889 - atomic_set(&cp->in_pkts, 0);
72890 + atomic_set_unchecked(&cp->in_pkts, 0);
72891
72892 atomic_inc(&ip_vs_conn_count);
72893 if (flags & IP_VS_CONN_F_NO_CPORT)
72894 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
72895 .open = ip_vs_conn_open,
72896 .read = seq_read,
72897 .llseek = seq_lseek,
72898 - .release = seq_release,
72899 + .release = seq_release_net,
72900 };
72901
72902 static const char *ip_vs_origin_name(unsigned flags)
72903 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
72904 .open = ip_vs_conn_sync_open,
72905 .read = seq_read,
72906 .llseek = seq_lseek,
72907 - .release = seq_release,
72908 + .release = seq_release_net,
72909 };
72910
72911 #endif
72912 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72913
72914 /* Don't drop the entry if its number of incoming packets is not
72915 located in [0, 8] */
72916 - i = atomic_read(&cp->in_pkts);
72917 + i = atomic_read_unchecked(&cp->in_pkts);
72918 if (i > 8 || i < 0) return 0;
72919
72920 if (!todrop_rate[i]) return 0;
72921 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72922 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72923 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72924 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72925 ret = cp->packet_xmit(skb, cp, pp);
72926 /* do not touch skb anymore */
72927
72928 - atomic_inc(&cp->in_pkts);
72929 + atomic_inc_unchecked(&cp->in_pkts);
72930 ip_vs_conn_put(cp);
72931 return ret;
72932 }
72933 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72934 * Sync connection if it is about to close to
72935 * encorage the standby servers to update the connections timeout
72936 */
72937 - pkts = atomic_add_return(1, &cp->in_pkts);
72938 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72939 if (af == AF_INET &&
72940 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72941 (((cp->protocol != IPPROTO_TCP ||
72942 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72943 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72944 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72945 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72946 ip_vs_rs_hash(dest);
72947 write_unlock_bh(&__ip_vs_rs_lock);
72948 }
72949 - atomic_set(&dest->conn_flags, conn_flags);
72950 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
72951
72952 /* bind the service */
72953 if (!dest->svc) {
72954 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72955 " %-7s %-6d %-10d %-10d\n",
72956 &dest->addr.in6,
72957 ntohs(dest->port),
72958 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72959 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72960 atomic_read(&dest->weight),
72961 atomic_read(&dest->activeconns),
72962 atomic_read(&dest->inactconns));
72963 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72964 "%-7s %-6d %-10d %-10d\n",
72965 ntohl(dest->addr.ip),
72966 ntohs(dest->port),
72967 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72968 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72969 atomic_read(&dest->weight),
72970 atomic_read(&dest->activeconns),
72971 atomic_read(&dest->inactconns));
72972 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72973 .open = ip_vs_info_open,
72974 .read = seq_read,
72975 .llseek = seq_lseek,
72976 - .release = seq_release_private,
72977 + .release = seq_release_net,
72978 };
72979
72980 #endif
72981 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72982 .open = ip_vs_stats_seq_open,
72983 .read = seq_read,
72984 .llseek = seq_lseek,
72985 - .release = single_release,
72986 + .release = single_release_net,
72987 };
72988
72989 #endif
72990 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72991
72992 entry.addr = dest->addr.ip;
72993 entry.port = dest->port;
72994 - entry.conn_flags = atomic_read(&dest->conn_flags);
72995 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72996 entry.weight = atomic_read(&dest->weight);
72997 entry.u_threshold = dest->u_threshold;
72998 entry.l_threshold = dest->l_threshold;
72999 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
73000 unsigned char arg[128];
73001 int ret = 0;
73002
73003 + pax_track_stack();
73004 +
73005 if (!capable(CAP_NET_ADMIN))
73006 return -EPERM;
73007
73008 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
73009 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73010
73011 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73012 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73013 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73014 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73015 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73016 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73017 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
73018 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
73019 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
73020 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
73021
73022 if (opt)
73023 memcpy(&cp->in_seq, opt, sizeof(*opt));
73024 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73025 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73026 cp->state = state;
73027 cp->old_state = cp->state;
73028 /*
73029 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
73030 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
73031 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
73032 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73033 else
73034 rc = NF_ACCEPT;
73035 /* do not touch skb anymore */
73036 - atomic_inc(&cp->in_pkts);
73037 + atomic_inc_unchecked(&cp->in_pkts);
73038 goto out;
73039 }
73040
73041 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73042 else
73043 rc = NF_ACCEPT;
73044 /* do not touch skb anymore */
73045 - atomic_inc(&cp->in_pkts);
73046 + atomic_inc_unchecked(&cp->in_pkts);
73047 goto out;
73048 }
73049
73050 diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
73051 --- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
73052 +++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
73053 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
73054
73055 To compile it as a module, choose M here. If unsure, say N.
73056
73057 +config NETFILTER_XT_MATCH_GRADM
73058 + tristate '"gradm" match support'
73059 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73060 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73061 + ---help---
73062 + The gradm match allows matching on whether the grsecurity RBAC
73063 + system is enabled. It is useful when iptables rules are applied
73064 + early during boot to block connections to the machine (except from
73065 + a trusted host) while the RBAC system is still disabled.
73066 +
73067 config NETFILTER_XT_MATCH_HASHLIMIT
73068 tristate '"hashlimit" match support'
73069 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73070 diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
73071 --- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
73072 +++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
73073 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
73074 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73075 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73076 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73077 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73078 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73079 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73080 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73081 diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
73082 --- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
73083 +++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
73084 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
73085 static int
73086 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73087 struct nf_conntrack_tuple *tuple,
73088 - enum ctattr_tuple type, u_int8_t l3num)
73089 + enum ctattr_type type, u_int8_t l3num)
73090 {
73091 struct nlattr *tb[CTA_TUPLE_MAX+1];
73092 int err;
73093 diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
73094 --- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73095 +++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73096 @@ -68,7 +68,7 @@ struct nfulnl_instance {
73097 };
73098
73099 static DEFINE_RWLOCK(instances_lock);
73100 -static atomic_t global_seq;
73101 +static atomic_unchecked_t global_seq;
73102
73103 #define INSTANCE_BUCKETS 16
73104 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73105 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73106 /* global sequence number */
73107 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73108 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73109 - htonl(atomic_inc_return(&global_seq)));
73110 + htonl(atomic_inc_return_unchecked(&global_seq)));
73111
73112 if (data_len) {
73113 struct nlattr *nla;
73114 diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
73115 --- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73116 +++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73117 @@ -0,0 +1,51 @@
73118 +/*
73119 + * gradm match for netfilter
73120 + * Copyright © Zbigniew Krzystolik, 2010
73121 + *
73122 + * This program is free software; you can redistribute it and/or modify
73123 + * it under the terms of the GNU General Public License; either version
73124 + * 2 or 3 as published by the Free Software Foundation.
73125 + */
73126 +#include <linux/module.h>
73127 +#include <linux/moduleparam.h>
73128 +#include <linux/skbuff.h>
73129 +#include <linux/netfilter/x_tables.h>
73130 +#include <linux/grsecurity.h>
73131 +#include <linux/netfilter/xt_gradm.h>
73132 +
73133 +static bool
73134 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
73135 +{
73136 + const struct xt_gradm_mtinfo *info = par->matchinfo;
73137 + bool retval = false;
73138 + if (gr_acl_is_enabled())
73139 + retval = true;
73140 + return retval ^ info->invflags;
73141 +}
73142 +
73143 +static struct xt_match gradm_mt_reg __read_mostly = {
73144 + .name = "gradm",
73145 + .revision = 0,
73146 + .family = NFPROTO_UNSPEC,
73147 + .match = gradm_mt,
73148 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
73149 + .me = THIS_MODULE,
73150 +};
73151 +
73152 +static int __init gradm_mt_init(void)
73153 +{
73154 + return xt_register_match(&gradm_mt_reg);
73155 +}
73156 +
73157 +static void __exit gradm_mt_exit(void)
73158 +{
73159 + xt_unregister_match(&gradm_mt_reg);
73160 +}
73161 +
73162 +module_init(gradm_mt_init);
73163 +module_exit(gradm_mt_exit);
73164 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
73165 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
73166 +MODULE_LICENSE("GPL");
73167 +MODULE_ALIAS("ipt_gradm");
73168 +MODULE_ALIAS("ip6t_gradm");
73169 diff -urNp linux-2.6.32.45/net/netlink/af_netlink.c linux-2.6.32.45/net/netlink/af_netlink.c
73170 --- linux-2.6.32.45/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
73171 +++ linux-2.6.32.45/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
73172 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
73173 sk->sk_error_report(sk);
73174 }
73175 }
73176 - atomic_inc(&sk->sk_drops);
73177 + atomic_inc_unchecked(&sk->sk_drops);
73178 }
73179
73180 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
73181 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
73182 struct netlink_sock *nlk = nlk_sk(s);
73183
73184 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
73185 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73186 + NULL,
73187 +#else
73188 s,
73189 +#endif
73190 s->sk_protocol,
73191 nlk->pid,
73192 nlk->groups ? (u32)nlk->groups[0] : 0,
73193 sk_rmem_alloc_get(s),
73194 sk_wmem_alloc_get(s),
73195 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73196 + NULL,
73197 +#else
73198 nlk->cb,
73199 +#endif
73200 atomic_read(&s->sk_refcnt),
73201 - atomic_read(&s->sk_drops)
73202 + atomic_read_unchecked(&s->sk_drops)
73203 );
73204
73205 }
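
The netlink_seq_show changes above print NULL in place of the socket and callback addresses when CONFIG_GRKERNSEC_HIDESYM is enabled, so readers of the proc file cannot harvest kernel pointers. A minimal userspace sketch of the same pattern follows; HIDE_KERNEL_POINTERS is a hypothetical stand-in for the config option.

#include <stdio.h>

/* When the switch is set, the row prints NULL instead of the object's
 * address: the formatted output stays parseable but leaks no pointers. */
#define HIDE_KERNEL_POINTERS 1

static void show_row(void *obj, int proto)
{
#if HIDE_KERNEL_POINTERS
        (void)obj;                              /* deliberately unused */
        printf("%p %-3d\n", (void *)NULL, proto);
#else
        printf("%p %-3d\n", obj, proto);
#endif
}

int main(void)
{
        int dummy = 0;

        show_row(&dummy, 16);                   /* address withheld */
        return 0;
}
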
73206 diff -urNp linux-2.6.32.45/net/netrom/af_netrom.c linux-2.6.32.45/net/netrom/af_netrom.c
73207 --- linux-2.6.32.45/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
73208 +++ linux-2.6.32.45/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
73209 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
73210 struct sock *sk = sock->sk;
73211 struct nr_sock *nr = nr_sk(sk);
73212
73213 + memset(sax, 0, sizeof(*sax));
73214 lock_sock(sk);
73215 if (peer != 0) {
73216 if (sk->sk_state != TCP_ESTABLISHED) {
73217 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
73218 *uaddr_len = sizeof(struct full_sockaddr_ax25);
73219 } else {
73220 sax->fsa_ax25.sax25_family = AF_NETROM;
73221 - sax->fsa_ax25.sax25_ndigis = 0;
73222 sax->fsa_ax25.sax25_call = nr->source_addr;
73223 *uaddr_len = sizeof(struct sockaddr_ax25);
73224 }
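
The nr_getname change above zeroes the whole address structure up front and drops the now-redundant per-field clear, so padding and unfilled members can no longer carry uninitialized kernel stack bytes back to userspace. A small standalone sketch of the memset-before-fill pattern; demo_addr is a hypothetical stand-in for full_sockaddr_ax25.

#include <stdio.h>
#include <string.h>

/* demo_addr is illustrative; the point is the pattern, not the real layout. */
struct demo_addr {
        unsigned short family;
        unsigned char  ndigis;
        char           call[7];
};

static void fill_addr(struct demo_addr *a, int peer_known)
{
        memset(a, 0, sizeof(*a));       /* no field or padding left stale */
        a->family = 3;
        if (peer_known)
                memcpy(a->call, "DEMO-0", 7);
        /* when peer_known is false, call[] and ndigis stay zeroed */
}

int main(void)
{
        struct demo_addr a;

        fill_addr(&a, 0);
        printf("family=%u ndigis=%u call=\"%s\"\n",
               (unsigned)a.family, (unsigned)a.ndigis, a.call);
        return 0;
}
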
73225 diff -urNp linux-2.6.32.45/net/packet/af_packet.c linux-2.6.32.45/net/packet/af_packet.c
73226 --- linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
73227 +++ linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
73228 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
73229
73230 seq_printf(seq,
73231 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
73232 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73233 + NULL,
73234 +#else
73235 s,
73236 +#endif
73237 atomic_read(&s->sk_refcnt),
73238 s->sk_type,
73239 ntohs(po->num),
73240 diff -urNp linux-2.6.32.45/net/phonet/af_phonet.c linux-2.6.32.45/net/phonet/af_phonet.c
73241 --- linux-2.6.32.45/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
73242 +++ linux-2.6.32.45/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
73243 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73244 {
73245 struct phonet_protocol *pp;
73246
73247 - if (protocol >= PHONET_NPROTO)
73248 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73249 return NULL;
73250
73251 spin_lock(&proto_tab_lock);
73252 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
73253 {
73254 int err = 0;
73255
73256 - if (protocol >= PHONET_NPROTO)
73257 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73258 return -EINVAL;
73259
73260 err = proto_register(pp->prot, 1);
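
Both af_phonet.c hunks above extend the existing upper-bound test with a protocol < 0 check, since a negative value could otherwise be used to index in front of the protocol table. A minimal sketch of the combined check; NPROTO and proto_tab here are illustrative, not the phonet definitions.

#include <stdio.h>

/* Reject both ends of a signed index before it touches the table. */
#define NPROTO 16

static const char *proto_tab[NPROTO] = { [2] = "pipe" };

static const char *proto_lookup(int protocol)
{
        if (protocol < 0 || protocol >= NPROTO) /* the added lower bound */
                return NULL;
        return proto_tab[protocol];
}

int main(void)
{
        printf("-1: %s\n", proto_lookup(-1) ? "found" : "rejected");
        printf(" 2: %s\n", proto_lookup(2)  ? "found" : "rejected");
        return 0;
}
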
73261 diff -urNp linux-2.6.32.45/net/phonet/datagram.c linux-2.6.32.45/net/phonet/datagram.c
73262 --- linux-2.6.32.45/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
73263 +++ linux-2.6.32.45/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
73264 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
73265 if (err < 0) {
73266 kfree_skb(skb);
73267 if (err == -ENOMEM)
73268 - atomic_inc(&sk->sk_drops);
73269 + atomic_inc_unchecked(&sk->sk_drops);
73270 }
73271 return err ? NET_RX_DROP : NET_RX_SUCCESS;
73272 }
73273 diff -urNp linux-2.6.32.45/net/phonet/pep.c linux-2.6.32.45/net/phonet/pep.c
73274 --- linux-2.6.32.45/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
73275 +++ linux-2.6.32.45/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
73276 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
73277
73278 case PNS_PEP_CTRL_REQ:
73279 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
73280 - atomic_inc(&sk->sk_drops);
73281 + atomic_inc_unchecked(&sk->sk_drops);
73282 break;
73283 }
73284 __skb_pull(skb, 4);
73285 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
73286 if (!err)
73287 return 0;
73288 if (err == -ENOMEM)
73289 - atomic_inc(&sk->sk_drops);
73290 + atomic_inc_unchecked(&sk->sk_drops);
73291 break;
73292 }
73293
73294 if (pn->rx_credits == 0) {
73295 - atomic_inc(&sk->sk_drops);
73296 + atomic_inc_unchecked(&sk->sk_drops);
73297 err = -ENOBUFS;
73298 break;
73299 }
73300 diff -urNp linux-2.6.32.45/net/phonet/socket.c linux-2.6.32.45/net/phonet/socket.c
73301 --- linux-2.6.32.45/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
73302 +++ linux-2.6.32.45/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
73303 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
73304 sk->sk_state,
73305 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
73306 sock_i_uid(sk), sock_i_ino(sk),
73307 - atomic_read(&sk->sk_refcnt), sk,
73308 - atomic_read(&sk->sk_drops), &len);
73309 + atomic_read(&sk->sk_refcnt),
73310 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73311 + NULL,
73312 +#else
73313 + sk,
73314 +#endif
73315 + atomic_read_unchecked(&sk->sk_drops), &len);
73316 }
73317 seq_printf(seq, "%*s\n", 127 - len, "");
73318 return 0;
73319 diff -urNp linux-2.6.32.45/net/rds/cong.c linux-2.6.32.45/net/rds/cong.c
73320 --- linux-2.6.32.45/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
73321 +++ linux-2.6.32.45/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
73322 @@ -77,7 +77,7 @@
73323 * finds that the saved generation number is smaller than the global generation
73324 * number, it wakes up the process.
73325 */
73326 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
73327 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
73328
73329 /*
73330 * Congestion monitoring
73331 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
73332 rdsdebug("waking map %p for %pI4\n",
73333 map, &map->m_addr);
73334 rds_stats_inc(s_cong_update_received);
73335 - atomic_inc(&rds_cong_generation);
73336 + atomic_inc_unchecked(&rds_cong_generation);
73337 if (waitqueue_active(&map->m_waitq))
73338 wake_up(&map->m_waitq);
73339 if (waitqueue_active(&rds_poll_waitq))
73340 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
73341
73342 int rds_cong_updated_since(unsigned long *recent)
73343 {
73344 - unsigned long gen = atomic_read(&rds_cong_generation);
73345 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
73346
73347 if (likely(*recent == gen))
73348 return 0;
73349 diff -urNp linux-2.6.32.45/net/rds/iw_rdma.c linux-2.6.32.45/net/rds/iw_rdma.c
73350 --- linux-2.6.32.45/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
73351 +++ linux-2.6.32.45/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
73352 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
73353 struct rdma_cm_id *pcm_id;
73354 int rc;
73355
73356 + pax_track_stack();
73357 +
73358 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
73359 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
73360
73361 diff -urNp linux-2.6.32.45/net/rds/Kconfig linux-2.6.32.45/net/rds/Kconfig
73362 --- linux-2.6.32.45/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
73363 +++ linux-2.6.32.45/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
73364 @@ -1,7 +1,7 @@
73365
73366 config RDS
73367 tristate "The RDS Protocol (EXPERIMENTAL)"
73368 - depends on INET && EXPERIMENTAL
73369 + depends on INET && EXPERIMENTAL && BROKEN
73370 ---help---
73371 The RDS (Reliable Datagram Sockets) protocol provides reliable,
73372 sequenced delivery of datagrams over Infiniband, iWARP,
73373 diff -urNp linux-2.6.32.45/net/rxrpc/af_rxrpc.c linux-2.6.32.45/net/rxrpc/af_rxrpc.c
73374 --- linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
73375 +++ linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
73376 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
73377 __be32 rxrpc_epoch;
73378
73379 /* current debugging ID */
73380 -atomic_t rxrpc_debug_id;
73381 +atomic_unchecked_t rxrpc_debug_id;
73382
73383 /* count of skbs currently in use */
73384 atomic_t rxrpc_n_skbs;
73385 diff -urNp linux-2.6.32.45/net/rxrpc/ar-ack.c linux-2.6.32.45/net/rxrpc/ar-ack.c
73386 --- linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
73387 +++ linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
73388 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
73389
73390 _enter("{%d,%d,%d,%d},",
73391 call->acks_hard, call->acks_unacked,
73392 - atomic_read(&call->sequence),
73393 + atomic_read_unchecked(&call->sequence),
73394 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
73395
73396 stop = 0;
73397 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
73398
73399 /* each Tx packet has a new serial number */
73400 sp->hdr.serial =
73401 - htonl(atomic_inc_return(&call->conn->serial));
73402 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
73403
73404 hdr = (struct rxrpc_header *) txb->head;
73405 hdr->serial = sp->hdr.serial;
73406 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
73407 */
73408 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
73409 {
73410 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
73411 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
73412 }
73413
73414 /*
73415 @@ -627,7 +627,7 @@ process_further:
73416
73417 latest = ntohl(sp->hdr.serial);
73418 hard = ntohl(ack.firstPacket);
73419 - tx = atomic_read(&call->sequence);
73420 + tx = atomic_read_unchecked(&call->sequence);
73421
73422 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73423 latest,
73424 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
73425 u32 abort_code = RX_PROTOCOL_ERROR;
73426 u8 *acks = NULL;
73427
73428 + pax_track_stack();
73429 +
73430 //printk("\n--------------------\n");
73431 _enter("{%d,%s,%lx} [%lu]",
73432 call->debug_id, rxrpc_call_states[call->state], call->events,
73433 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
73434 goto maybe_reschedule;
73435
73436 send_ACK_with_skew:
73437 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
73438 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
73439 ntohl(ack.serial));
73440 send_ACK:
73441 mtu = call->conn->trans->peer->if_mtu;
73442 @@ -1171,7 +1173,7 @@ send_ACK:
73443 ackinfo.rxMTU = htonl(5692);
73444 ackinfo.jumbo_max = htonl(4);
73445
73446 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73447 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73448 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73449 ntohl(hdr.serial),
73450 ntohs(ack.maxSkew),
73451 @@ -1189,7 +1191,7 @@ send_ACK:
73452 send_message:
73453 _debug("send message");
73454
73455 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73456 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73457 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
73458 send_message_2:
73459
73460 diff -urNp linux-2.6.32.45/net/rxrpc/ar-call.c linux-2.6.32.45/net/rxrpc/ar-call.c
73461 --- linux-2.6.32.45/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
73462 +++ linux-2.6.32.45/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
73463 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
73464 spin_lock_init(&call->lock);
73465 rwlock_init(&call->state_lock);
73466 atomic_set(&call->usage, 1);
73467 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
73468 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73469 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
73470
73471 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
73472 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connection.c linux-2.6.32.45/net/rxrpc/ar-connection.c
73473 --- linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
73474 +++ linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
73475 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
73476 rwlock_init(&conn->lock);
73477 spin_lock_init(&conn->state_lock);
73478 atomic_set(&conn->usage, 1);
73479 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
73480 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73481 conn->avail_calls = RXRPC_MAXCALLS;
73482 conn->size_align = 4;
73483 conn->header_size = sizeof(struct rxrpc_header);
73484 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connevent.c linux-2.6.32.45/net/rxrpc/ar-connevent.c
73485 --- linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
73486 +++ linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
73487 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
73488
73489 len = iov[0].iov_len + iov[1].iov_len;
73490
73491 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73492 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73493 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
73494
73495 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73496 diff -urNp linux-2.6.32.45/net/rxrpc/ar-input.c linux-2.6.32.45/net/rxrpc/ar-input.c
73497 --- linux-2.6.32.45/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
73498 +++ linux-2.6.32.45/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
73499 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
73500 /* track the latest serial number on this connection for ACK packet
73501 * information */
73502 serial = ntohl(sp->hdr.serial);
73503 - hi_serial = atomic_read(&call->conn->hi_serial);
73504 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
73505 while (serial > hi_serial)
73506 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
73507 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
73508 serial);
73509
73510 /* request ACK generation for any ACK or DATA packet that requests
73511 diff -urNp linux-2.6.32.45/net/rxrpc/ar-internal.h linux-2.6.32.45/net/rxrpc/ar-internal.h
73512 --- linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
73513 +++ linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
73514 @@ -272,8 +272,8 @@ struct rxrpc_connection {
73515 int error; /* error code for local abort */
73516 int debug_id; /* debug ID for printks */
73517 unsigned call_counter; /* call ID counter */
73518 - atomic_t serial; /* packet serial number counter */
73519 - atomic_t hi_serial; /* highest serial number received */
73520 + atomic_unchecked_t serial; /* packet serial number counter */
73521 + atomic_unchecked_t hi_serial; /* highest serial number received */
73522 u8 avail_calls; /* number of calls available */
73523 u8 size_align; /* data size alignment (for security) */
73524 u8 header_size; /* rxrpc + security header size */
73525 @@ -346,7 +346,7 @@ struct rxrpc_call {
73526 spinlock_t lock;
73527 rwlock_t state_lock; /* lock for state transition */
73528 atomic_t usage;
73529 - atomic_t sequence; /* Tx data packet sequence counter */
73530 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
73531 u32 abort_code; /* local/remote abort code */
73532 enum { /* current state of call */
73533 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
73534 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
73535 */
73536 extern atomic_t rxrpc_n_skbs;
73537 extern __be32 rxrpc_epoch;
73538 -extern atomic_t rxrpc_debug_id;
73539 +extern atomic_unchecked_t rxrpc_debug_id;
73540 extern struct workqueue_struct *rxrpc_workqueue;
73541
73542 /*
73543 diff -urNp linux-2.6.32.45/net/rxrpc/ar-key.c linux-2.6.32.45/net/rxrpc/ar-key.c
73544 --- linux-2.6.32.45/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
73545 +++ linux-2.6.32.45/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
73546 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
73547 return ret;
73548
73549 plen -= sizeof(*token);
73550 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73551 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73552 if (!token)
73553 return -ENOMEM;
73554
73555 - token->kad = kmalloc(plen, GFP_KERNEL);
73556 + token->kad = kzalloc(plen, GFP_KERNEL);
73557 if (!token->kad) {
73558 kfree(token);
73559 return -ENOMEM;
73560 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
73561 goto error;
73562
73563 ret = -ENOMEM;
73564 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73565 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73566 if (!token)
73567 goto error;
73568 - token->kad = kmalloc(plen, GFP_KERNEL);
73569 + token->kad = kzalloc(plen, GFP_KERNEL);
73570 if (!token->kad)
73571 goto error_free;
73572
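
The ar-key.c hunks swap kmalloc for kzalloc when allocating the token structures, so any field that is not filled in before an error path (or before the structure is otherwise read) holds zeroes rather than stale heap data. A userspace sketch of the idea; kzalloc_model and token_model are hypothetical stand-ins, not the rxrpc types.

#include <stdlib.h>

/* Zeroed allocation leaves nothing stale in fields that are never written. */
static void *kzalloc_model(size_t n)
{
        return calloc(1, n);            /* zeroed, unlike plain malloc() */
}

struct token_model {
        int  kvno;
        char ticket[64];
};

int main(void)
{
        struct token_model *t = kzalloc_model(sizeof(*t));

        if (!t)
                return 1;
        t->kvno = 2;                    /* ticket[] remains all zeroes */
        free(t);
        return 0;
}
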
73573 diff -urNp linux-2.6.32.45/net/rxrpc/ar-local.c linux-2.6.32.45/net/rxrpc/ar-local.c
73574 --- linux-2.6.32.45/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
73575 +++ linux-2.6.32.45/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
73576 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
73577 spin_lock_init(&local->lock);
73578 rwlock_init(&local->services_lock);
73579 atomic_set(&local->usage, 1);
73580 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
73581 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73582 memcpy(&local->srx, srx, sizeof(*srx));
73583 }
73584
73585 diff -urNp linux-2.6.32.45/net/rxrpc/ar-output.c linux-2.6.32.45/net/rxrpc/ar-output.c
73586 --- linux-2.6.32.45/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
73587 +++ linux-2.6.32.45/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
73588 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
73589 sp->hdr.cid = call->cid;
73590 sp->hdr.callNumber = call->call_id;
73591 sp->hdr.seq =
73592 - htonl(atomic_inc_return(&call->sequence));
73593 + htonl(atomic_inc_return_unchecked(&call->sequence));
73594 sp->hdr.serial =
73595 - htonl(atomic_inc_return(&conn->serial));
73596 + htonl(atomic_inc_return_unchecked(&conn->serial));
73597 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
73598 sp->hdr.userStatus = 0;
73599 sp->hdr.securityIndex = conn->security_ix;
73600 diff -urNp linux-2.6.32.45/net/rxrpc/ar-peer.c linux-2.6.32.45/net/rxrpc/ar-peer.c
73601 --- linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
73602 +++ linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
73603 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
73604 INIT_LIST_HEAD(&peer->error_targets);
73605 spin_lock_init(&peer->lock);
73606 atomic_set(&peer->usage, 1);
73607 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
73608 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73609 memcpy(&peer->srx, srx, sizeof(*srx));
73610
73611 rxrpc_assess_MTU_size(peer);
73612 diff -urNp linux-2.6.32.45/net/rxrpc/ar-proc.c linux-2.6.32.45/net/rxrpc/ar-proc.c
73613 --- linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
73614 +++ linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
73615 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
73616 atomic_read(&conn->usage),
73617 rxrpc_conn_states[conn->state],
73618 key_serial(conn->key),
73619 - atomic_read(&conn->serial),
73620 - atomic_read(&conn->hi_serial));
73621 + atomic_read_unchecked(&conn->serial),
73622 + atomic_read_unchecked(&conn->hi_serial));
73623
73624 return 0;
73625 }
73626 diff -urNp linux-2.6.32.45/net/rxrpc/ar-transport.c linux-2.6.32.45/net/rxrpc/ar-transport.c
73627 --- linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
73628 +++ linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
73629 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
73630 spin_lock_init(&trans->client_lock);
73631 rwlock_init(&trans->conn_lock);
73632 atomic_set(&trans->usage, 1);
73633 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
73634 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73635
73636 if (peer->srx.transport.family == AF_INET) {
73637 switch (peer->srx.transport_type) {
73638 diff -urNp linux-2.6.32.45/net/rxrpc/rxkad.c linux-2.6.32.45/net/rxrpc/rxkad.c
73639 --- linux-2.6.32.45/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
73640 +++ linux-2.6.32.45/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
73641 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
73642 u16 check;
73643 int nsg;
73644
73645 + pax_track_stack();
73646 +
73647 sp = rxrpc_skb(skb);
73648
73649 _enter("");
73650 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
73651 u16 check;
73652 int nsg;
73653
73654 + pax_track_stack();
73655 +
73656 _enter("");
73657
73658 sp = rxrpc_skb(skb);
73659 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
73660
73661 len = iov[0].iov_len + iov[1].iov_len;
73662
73663 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73664 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73665 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
73666
73667 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73668 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
73669
73670 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
73671
73672 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
73673 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73674 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
73675
73676 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
73677 diff -urNp linux-2.6.32.45/net/sctp/proc.c linux-2.6.32.45/net/sctp/proc.c
73678 --- linux-2.6.32.45/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
73679 +++ linux-2.6.32.45/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
73680 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
73681 sctp_for_each_hentry(epb, node, &head->chain) {
73682 ep = sctp_ep(epb);
73683 sk = epb->sk;
73684 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
73685 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
73686 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73687 + NULL, NULL,
73688 +#else
73689 + ep, sk,
73690 +#endif
73691 sctp_sk(sk)->type, sk->sk_state, hash,
73692 epb->bind_addr.port,
73693 sock_i_uid(sk), sock_i_ino(sk));
73694 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
73695 seq_printf(seq,
73696 "%8p %8p %-3d %-3d %-2d %-4d "
73697 "%4d %8d %8d %7d %5lu %-5d %5d ",
73698 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
73699 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73700 + NULL, NULL,
73701 +#else
73702 + assoc, sk,
73703 +#endif
73704 + sctp_sk(sk)->type, sk->sk_state,
73705 assoc->state, hash,
73706 assoc->assoc_id,
73707 assoc->sndbuf_used,
73708 diff -urNp linux-2.6.32.45/net/sctp/socket.c linux-2.6.32.45/net/sctp/socket.c
73709 --- linux-2.6.32.45/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
73710 +++ linux-2.6.32.45/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
73711 @@ -5802,7 +5802,6 @@ pp_found:
73712 */
73713 int reuse = sk->sk_reuse;
73714 struct sock *sk2;
73715 - struct hlist_node *node;
73716
73717 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
73718 if (pp->fastreuse && sk->sk_reuse &&
73719 diff -urNp linux-2.6.32.45/net/socket.c linux-2.6.32.45/net/socket.c
73720 --- linux-2.6.32.45/net/socket.c 2011-03-27 14:31:47.000000000 -0400
73721 +++ linux-2.6.32.45/net/socket.c 2011-05-16 21:46:57.000000000 -0400
73722 @@ -87,6 +87,7 @@
73723 #include <linux/wireless.h>
73724 #include <linux/nsproxy.h>
73725 #include <linux/magic.h>
73726 +#include <linux/in.h>
73727
73728 #include <asm/uaccess.h>
73729 #include <asm/unistd.h>
73730 @@ -97,6 +98,21 @@
73731 #include <net/sock.h>
73732 #include <linux/netfilter.h>
73733
73734 +extern void gr_attach_curr_ip(const struct sock *sk);
73735 +extern int gr_handle_sock_all(const int family, const int type,
73736 + const int protocol);
73737 +extern int gr_handle_sock_server(const struct sockaddr *sck);
73738 +extern int gr_handle_sock_server_other(const struct sock *sck);
73739 +extern int gr_handle_sock_client(const struct sockaddr *sck);
73740 +extern int gr_search_connect(struct socket * sock,
73741 + struct sockaddr_in * addr);
73742 +extern int gr_search_bind(struct socket * sock,
73743 + struct sockaddr_in * addr);
73744 +extern int gr_search_listen(struct socket * sock);
73745 +extern int gr_search_accept(struct socket * sock);
73746 +extern int gr_search_socket(const int domain, const int type,
73747 + const int protocol);
73748 +
73749 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
73750 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
73751 unsigned long nr_segs, loff_t pos);
73752 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
73753 mnt);
73754 }
73755
73756 -static struct vfsmount *sock_mnt __read_mostly;
73757 +struct vfsmount *sock_mnt __read_mostly;
73758
73759 static struct file_system_type sock_fs_type = {
73760 .name = "sockfs",
73761 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
73762 return -EAFNOSUPPORT;
73763 if (type < 0 || type >= SOCK_MAX)
73764 return -EINVAL;
73765 + if (protocol < 0)
73766 + return -EINVAL;
73767
73768 /* Compatibility.
73769
73770 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
73771 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
73772 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
73773
73774 + if(!gr_search_socket(family, type, protocol)) {
73775 + retval = -EACCES;
73776 + goto out;
73777 + }
73778 +
73779 + if (gr_handle_sock_all(family, type, protocol)) {
73780 + retval = -EACCES;
73781 + goto out;
73782 + }
73783 +
73784 retval = sock_create(family, type, protocol, &sock);
73785 if (retval < 0)
73786 goto out;
73787 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73788 if (sock) {
73789 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
73790 if (err >= 0) {
73791 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
73792 + err = -EACCES;
73793 + goto error;
73794 + }
73795 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
73796 + if (err)
73797 + goto error;
73798 +
73799 err = security_socket_bind(sock,
73800 (struct sockaddr *)&address,
73801 addrlen);
73802 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73803 (struct sockaddr *)
73804 &address, addrlen);
73805 }
73806 +error:
73807 fput_light(sock->file, fput_needed);
73808 }
73809 return err;
73810 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
73811 if ((unsigned)backlog > somaxconn)
73812 backlog = somaxconn;
73813
73814 + if (gr_handle_sock_server_other(sock->sk)) {
73815 + err = -EPERM;
73816 + goto error;
73817 + }
73818 +
73819 + err = gr_search_listen(sock);
73820 + if (err)
73821 + goto error;
73822 +
73823 err = security_socket_listen(sock, backlog);
73824 if (!err)
73825 err = sock->ops->listen(sock, backlog);
73826
73827 +error:
73828 fput_light(sock->file, fput_needed);
73829 }
73830 return err;
73831 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73832 newsock->type = sock->type;
73833 newsock->ops = sock->ops;
73834
73835 + if (gr_handle_sock_server_other(sock->sk)) {
73836 + err = -EPERM;
73837 + sock_release(newsock);
73838 + goto out_put;
73839 + }
73840 +
73841 + err = gr_search_accept(sock);
73842 + if (err) {
73843 + sock_release(newsock);
73844 + goto out_put;
73845 + }
73846 +
73847 /*
73848 * We don't need try_module_get here, as the listening socket (sock)
73849 * has the protocol module (sock->ops->owner) held.
73850 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73851 fd_install(newfd, newfile);
73852 err = newfd;
73853
73854 + gr_attach_curr_ip(newsock->sk);
73855 +
73856 out_put:
73857 fput_light(sock->file, fput_needed);
73858 out:
73859 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73860 int, addrlen)
73861 {
73862 struct socket *sock;
73863 + struct sockaddr *sck;
73864 struct sockaddr_storage address;
73865 int err, fput_needed;
73866
73867 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73868 if (err < 0)
73869 goto out_put;
73870
73871 + sck = (struct sockaddr *)&address;
73872 +
73873 + if (gr_handle_sock_client(sck)) {
73874 + err = -EACCES;
73875 + goto out_put;
73876 + }
73877 +
73878 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
73879 + if (err)
73880 + goto out_put;
73881 +
73882 err =
73883 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
73884 if (err)
73885 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
73886 int err, ctl_len, iov_size, total_len;
73887 int fput_needed;
73888
73889 + pax_track_stack();
73890 +
73891 err = -EFAULT;
73892 if (MSG_CMSG_COMPAT & flags) {
73893 if (get_compat_msghdr(&msg_sys, msg_compat))
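
The socket.c changes above consult grsecurity policy hooks (gr_search_socket, gr_handle_sock_* and friends) before the corresponding socket operation proceeds, turning a denial into -EACCES or -EPERM before any resources are committed, and additionally reject negative protocol numbers in __sock_create. A minimal sketch of that check-before-act pattern; policy_allows_socket is a hypothetical stand-in, not the gr_* implementation.

#include <errno.h>
#include <stdio.h>

/* The stand-in policy also folds in the protocol < 0 rejection. */
static int policy_allows_socket(int family, int type, int protocol)
{
        (void)family;
        (void)type;
        return protocol >= 0;
}

static int create_socket(int family, int type, int protocol)
{
        if (!policy_allows_socket(family, type, protocol))
                return -EACCES;         /* deny before allocating anything */
        /* ... the real creation work would only start here ... */
        return 0;
}

int main(void)
{
        printf("denied:  %d\n", create_socket(2, 1, -1));
        printf("allowed: %d\n", create_socket(2, 1, 0));
        return 0;
}
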
73894 diff -urNp linux-2.6.32.45/net/sunrpc/sched.c linux-2.6.32.45/net/sunrpc/sched.c
73895 --- linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:35:30.000000000 -0400
73896 +++ linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:34:01.000000000 -0400
73897 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
73898 #ifdef RPC_DEBUG
73899 static void rpc_task_set_debuginfo(struct rpc_task *task)
73900 {
73901 - static atomic_t rpc_pid;
73902 + static atomic_unchecked_t rpc_pid;
73903
73904 task->tk_magic = RPC_TASK_MAGIC_ID;
73905 - task->tk_pid = atomic_inc_return(&rpc_pid);
73906 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
73907 }
73908 #else
73909 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
73910 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c
73911 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
73912 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
73913 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
73914 static unsigned int min_max_inline = 4096;
73915 static unsigned int max_max_inline = 65536;
73916
73917 -atomic_t rdma_stat_recv;
73918 -atomic_t rdma_stat_read;
73919 -atomic_t rdma_stat_write;
73920 -atomic_t rdma_stat_sq_starve;
73921 -atomic_t rdma_stat_rq_starve;
73922 -atomic_t rdma_stat_rq_poll;
73923 -atomic_t rdma_stat_rq_prod;
73924 -atomic_t rdma_stat_sq_poll;
73925 -atomic_t rdma_stat_sq_prod;
73926 +atomic_unchecked_t rdma_stat_recv;
73927 +atomic_unchecked_t rdma_stat_read;
73928 +atomic_unchecked_t rdma_stat_write;
73929 +atomic_unchecked_t rdma_stat_sq_starve;
73930 +atomic_unchecked_t rdma_stat_rq_starve;
73931 +atomic_unchecked_t rdma_stat_rq_poll;
73932 +atomic_unchecked_t rdma_stat_rq_prod;
73933 +atomic_unchecked_t rdma_stat_sq_poll;
73934 +atomic_unchecked_t rdma_stat_sq_prod;
73935
73936 /* Temporary NFS request map and context caches */
73937 struct kmem_cache *svc_rdma_map_cachep;
73938 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
73939 len -= *ppos;
73940 if (len > *lenp)
73941 len = *lenp;
73942 - if (len && copy_to_user(buffer, str_buf, len))
73943 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
73944 return -EFAULT;
73945 *lenp = len;
73946 *ppos += len;
73947 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
73948 {
73949 .procname = "rdma_stat_read",
73950 .data = &rdma_stat_read,
73951 - .maxlen = sizeof(atomic_t),
73952 + .maxlen = sizeof(atomic_unchecked_t),
73953 .mode = 0644,
73954 .proc_handler = &read_reset_stat,
73955 },
73956 {
73957 .procname = "rdma_stat_recv",
73958 .data = &rdma_stat_recv,
73959 - .maxlen = sizeof(atomic_t),
73960 + .maxlen = sizeof(atomic_unchecked_t),
73961 .mode = 0644,
73962 .proc_handler = &read_reset_stat,
73963 },
73964 {
73965 .procname = "rdma_stat_write",
73966 .data = &rdma_stat_write,
73967 - .maxlen = sizeof(atomic_t),
73968 + .maxlen = sizeof(atomic_unchecked_t),
73969 .mode = 0644,
73970 .proc_handler = &read_reset_stat,
73971 },
73972 {
73973 .procname = "rdma_stat_sq_starve",
73974 .data = &rdma_stat_sq_starve,
73975 - .maxlen = sizeof(atomic_t),
73976 + .maxlen = sizeof(atomic_unchecked_t),
73977 .mode = 0644,
73978 .proc_handler = &read_reset_stat,
73979 },
73980 {
73981 .procname = "rdma_stat_rq_starve",
73982 .data = &rdma_stat_rq_starve,
73983 - .maxlen = sizeof(atomic_t),
73984 + .maxlen = sizeof(atomic_unchecked_t),
73985 .mode = 0644,
73986 .proc_handler = &read_reset_stat,
73987 },
73988 {
73989 .procname = "rdma_stat_rq_poll",
73990 .data = &rdma_stat_rq_poll,
73991 - .maxlen = sizeof(atomic_t),
73992 + .maxlen = sizeof(atomic_unchecked_t),
73993 .mode = 0644,
73994 .proc_handler = &read_reset_stat,
73995 },
73996 {
73997 .procname = "rdma_stat_rq_prod",
73998 .data = &rdma_stat_rq_prod,
73999 - .maxlen = sizeof(atomic_t),
74000 + .maxlen = sizeof(atomic_unchecked_t),
74001 .mode = 0644,
74002 .proc_handler = &read_reset_stat,
74003 },
74004 {
74005 .procname = "rdma_stat_sq_poll",
74006 .data = &rdma_stat_sq_poll,
74007 - .maxlen = sizeof(atomic_t),
74008 + .maxlen = sizeof(atomic_unchecked_t),
74009 .mode = 0644,
74010 .proc_handler = &read_reset_stat,
74011 },
74012 {
74013 .procname = "rdma_stat_sq_prod",
74014 .data = &rdma_stat_sq_prod,
74015 - .maxlen = sizeof(atomic_t),
74016 + .maxlen = sizeof(atomic_unchecked_t),
74017 .mode = 0644,
74018 .proc_handler = &read_reset_stat,
74019 },
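
Besides widening the RDMA statistics counters to atomic_unchecked_t, the hunk above adds a len > sizeof str_buf guard in read_reset_stat so the copy to userspace can never read past the local buffer, whatever length the caller supplied. A standalone sketch of that clamping, with illustrative names only.

#include <stdio.h>
#include <string.h>

/* The essential line is the comparison against sizeof(str_buf) before the copy. */
static int read_stat(char *dst, size_t dst_len, size_t requested, int value)
{
        char str_buf[32];
        size_t len = (size_t)snprintf(str_buf, sizeof(str_buf), "%d\n", value);

        if (len > requested)
                len = requested;        /* honor the caller's smaller request */
        if (len > sizeof(str_buf) || len > dst_len)
                return -1;              /* refuse rather than over-read/-write */
        memcpy(dst, str_buf, len);
        return (int)len;
}

int main(void)
{
        char out[64];

        printf("copied %d bytes\n", read_stat(out, sizeof(out), 1000, 42));
        return 0;
}
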
74020 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
74021 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
74022 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
74023 @@ -495,7 +495,7 @@ next_sge:
74024 svc_rdma_put_context(ctxt, 0);
74025 goto out;
74026 }
74027 - atomic_inc(&rdma_stat_read);
74028 + atomic_inc_unchecked(&rdma_stat_read);
74029
74030 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
74031 chl_map->ch[ch_no].count -= read_wr.num_sge;
74032 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74033 dto_q);
74034 list_del_init(&ctxt->dto_q);
74035 } else {
74036 - atomic_inc(&rdma_stat_rq_starve);
74037 + atomic_inc_unchecked(&rdma_stat_rq_starve);
74038 clear_bit(XPT_DATA, &xprt->xpt_flags);
74039 ctxt = NULL;
74040 }
74041 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74042 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
74043 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
74044 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
74045 - atomic_inc(&rdma_stat_recv);
74046 + atomic_inc_unchecked(&rdma_stat_recv);
74047
74048 /* Build up the XDR from the receive buffers. */
74049 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
74050 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c
74051 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
74052 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
74053 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
74054 write_wr.wr.rdma.remote_addr = to;
74055
74056 /* Post It */
74057 - atomic_inc(&rdma_stat_write);
74058 + atomic_inc_unchecked(&rdma_stat_write);
74059 if (svc_rdma_send(xprt, &write_wr))
74060 goto err;
74061 return 0;
74062 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c
74063 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
74064 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
74065 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
74066 return;
74067
74068 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
74069 - atomic_inc(&rdma_stat_rq_poll);
74070 + atomic_inc_unchecked(&rdma_stat_rq_poll);
74071
74072 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
74073 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
74074 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
74075 }
74076
74077 if (ctxt)
74078 - atomic_inc(&rdma_stat_rq_prod);
74079 + atomic_inc_unchecked(&rdma_stat_rq_prod);
74080
74081 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
74082 /*
74083 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
74084 return;
74085
74086 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
74087 - atomic_inc(&rdma_stat_sq_poll);
74088 + atomic_inc_unchecked(&rdma_stat_sq_poll);
74089 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
74090 if (wc.status != IB_WC_SUCCESS)
74091 /* Close the transport */
74092 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
74093 }
74094
74095 if (ctxt)
74096 - atomic_inc(&rdma_stat_sq_prod);
74097 + atomic_inc_unchecked(&rdma_stat_sq_prod);
74098 }
74099
74100 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
74101 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
74102 spin_lock_bh(&xprt->sc_lock);
74103 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
74104 spin_unlock_bh(&xprt->sc_lock);
74105 - atomic_inc(&rdma_stat_sq_starve);
74106 + atomic_inc_unchecked(&rdma_stat_sq_starve);
74107
74108 /* See if we can opportunistically reap SQ WR to make room */
74109 sq_cq_reap(xprt);
74110 diff -urNp linux-2.6.32.45/net/sysctl_net.c linux-2.6.32.45/net/sysctl_net.c
74111 --- linux-2.6.32.45/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
74112 +++ linux-2.6.32.45/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
74113 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
74114 struct ctl_table *table)
74115 {
74116 /* Allow network administrator to have same access as root. */
74117 - if (capable(CAP_NET_ADMIN)) {
74118 + if (capable_nolog(CAP_NET_ADMIN)) {
74119 int mode = (table->mode >> 6) & 7;
74120 return (mode << 6) | (mode << 3) | mode;
74121 }
74122 diff -urNp linux-2.6.32.45/net/unix/af_unix.c linux-2.6.32.45/net/unix/af_unix.c
74123 --- linux-2.6.32.45/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
74124 +++ linux-2.6.32.45/net/unix/af_unix.c 2011-07-18 18:17:33.000000000 -0400
74125 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
74126 err = -ECONNREFUSED;
74127 if (!S_ISSOCK(inode->i_mode))
74128 goto put_fail;
74129 +
74130 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
74131 + err = -EACCES;
74132 + goto put_fail;
74133 + }
74134 +
74135 u = unix_find_socket_byinode(net, inode);
74136 if (!u)
74137 goto put_fail;
74138 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
74139 if (u) {
74140 struct dentry *dentry;
74141 dentry = unix_sk(u)->dentry;
74142 +
74143 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
74144 + err = -EPERM;
74145 + sock_put(u);
74146 + goto fail;
74147 + }
74148 +
74149 if (dentry)
74150 touch_atime(unix_sk(u)->mnt, dentry);
74151 } else
74152 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
74153 err = security_path_mknod(&nd.path, dentry, mode, 0);
74154 if (err)
74155 goto out_mknod_drop_write;
74156 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
74157 + err = -EACCES;
74158 + goto out_mknod_drop_write;
74159 + }
74160 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
74161 out_mknod_drop_write:
74162 mnt_drop_write(nd.path.mnt);
74163 if (err)
74164 goto out_mknod_dput;
74165 +
74166 + gr_handle_create(dentry, nd.path.mnt);
74167 +
74168 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
74169 dput(nd.path.dentry);
74170 nd.path.dentry = dentry;
74171 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file
74172 unix_state_lock(s);
74173
74174 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
74175 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74176 + NULL,
74177 +#else
74178 s,
74179 +#endif
74180 atomic_read(&s->sk_refcnt),
74181 0,
74182 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
74183 diff -urNp linux-2.6.32.45/net/wireless/core.c linux-2.6.32.45/net/wireless/core.c
74184 --- linux-2.6.32.45/net/wireless/core.c 2011-03-27 14:31:47.000000000 -0400
74185 +++ linux-2.6.32.45/net/wireless/core.c 2011-08-05 20:33:55.000000000 -0400
74186 @@ -367,7 +367,7 @@ struct wiphy *wiphy_new(const struct cfg
74187
74188 wiphy_net_set(&rdev->wiphy, &init_net);
74189
74190 - rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74191 + *(void **)&rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74192 rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
74193 &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
74194 &rdev->rfkill_ops, rdev);
74195 @@ -505,7 +505,7 @@ void wiphy_rfkill_start_polling(struct w
74196
74197 if (!rdev->ops->rfkill_poll)
74198 return;
74199 - rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74200 + *(void **)&rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74201 rfkill_resume_polling(rdev->rfkill);
74202 }
74203 EXPORT_SYMBOL(wiphy_rfkill_start_polling);
74204 diff -urNp linux-2.6.32.45/net/wireless/wext.c linux-2.6.32.45/net/wireless/wext.c
74205 --- linux-2.6.32.45/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
74206 +++ linux-2.6.32.45/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
74207 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
74208 */
74209
74210 /* Support for very large requests */
74211 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
74212 - (user_length > descr->max_tokens)) {
74213 + if (user_length > descr->max_tokens) {
74214 /* Allow userspace to GET more than max so
74215 * we can support any size GET requests.
74216 * There is still a limit : -ENOMEM.
74217 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
74218 }
74219 }
74220
74221 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
74222 - /*
74223 - * If this is a GET, but not NOMAX, it means that the extra
74224 - * data is not bounded by userspace, but by max_tokens. Thus
74225 - * set the length to max_tokens. This matches the extra data
74226 - * allocation.
74227 - * The driver should fill it with the number of tokens it
74228 - * provided, and it may check iwp->length rather than having
74229 - * knowledge of max_tokens. If the driver doesn't change the
74230 - * iwp->length, this ioctl just copies back max_token tokens
74231 - * filled with zeroes. Hopefully the driver isn't claiming
74232 - * them to be valid data.
74233 - */
74234 - iwp->length = descr->max_tokens;
74235 - }
74236 -
74237 err = handler(dev, info, (union iwreq_data *) iwp, extra);
74238
74239 iwp->length += essid_compat;
74240 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_policy.c linux-2.6.32.45/net/xfrm/xfrm_policy.c
74241 --- linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
74242 +++ linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
74243 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
74244 hlist_add_head(&policy->bydst, chain);
74245 xfrm_pol_hold(policy);
74246 net->xfrm.policy_count[dir]++;
74247 - atomic_inc(&flow_cache_genid);
74248 + atomic_inc_unchecked(&flow_cache_genid);
74249 if (delpol)
74250 __xfrm_policy_unlink(delpol, dir);
74251 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74252 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
74253 write_unlock_bh(&xfrm_policy_lock);
74254
74255 if (ret && delete) {
74256 - atomic_inc(&flow_cache_genid);
74257 + atomic_inc_unchecked(&flow_cache_genid);
74258 xfrm_policy_kill(ret);
74259 }
74260 return ret;
74261 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
74262 write_unlock_bh(&xfrm_policy_lock);
74263
74264 if (ret && delete) {
74265 - atomic_inc(&flow_cache_genid);
74266 + atomic_inc_unchecked(&flow_cache_genid);
74267 xfrm_policy_kill(ret);
74268 }
74269 return ret;
74270 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
74271 }
74272
74273 }
74274 - atomic_inc(&flow_cache_genid);
74275 + atomic_inc_unchecked(&flow_cache_genid);
74276 out:
74277 write_unlock_bh(&xfrm_policy_lock);
74278 return err;
74279 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
74280 write_unlock_bh(&xfrm_policy_lock);
74281 if (pol) {
74282 if (dir < XFRM_POLICY_MAX)
74283 - atomic_inc(&flow_cache_genid);
74284 + atomic_inc_unchecked(&flow_cache_genid);
74285 xfrm_policy_kill(pol);
74286 return 0;
74287 }
74288 @@ -1477,7 +1477,7 @@ free_dst:
74289 goto out;
74290 }
74291
74292 -static int inline
74293 +static inline int
74294 xfrm_dst_alloc_copy(void **target, void *src, int size)
74295 {
74296 if (!*target) {
74297 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
74298 return 0;
74299 }
74300
74301 -static int inline
74302 +static inline int
74303 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
74304 {
74305 #ifdef CONFIG_XFRM_SUB_POLICY
74306 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
74307 #endif
74308 }
74309
74310 -static int inline
74311 +static inline int
74312 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
74313 {
74314 #ifdef CONFIG_XFRM_SUB_POLICY
74315 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
74316 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
74317
74318 restart:
74319 - genid = atomic_read(&flow_cache_genid);
74320 + genid = atomic_read_unchecked(&flow_cache_genid);
74321 policy = NULL;
74322 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
74323 pols[pi] = NULL;
74324 @@ -1680,7 +1680,7 @@ restart:
74325 goto error;
74326 }
74327 if (nx == -EAGAIN ||
74328 - genid != atomic_read(&flow_cache_genid)) {
74329 + genid != atomic_read_unchecked(&flow_cache_genid)) {
74330 xfrm_pols_put(pols, npols);
74331 goto restart;
74332 }
74333 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_user.c linux-2.6.32.45/net/xfrm/xfrm_user.c
74334 --- linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
74335 +++ linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
74336 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
74337 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
74338 int i;
74339
74340 + pax_track_stack();
74341 +
74342 if (xp->xfrm_nr == 0)
74343 return 0;
74344
74345 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
74346 int err;
74347 int n = 0;
74348
74349 + pax_track_stack();
74350 +
74351 if (attrs[XFRMA_MIGRATE] == NULL)
74352 return -EINVAL;
74353
74354 diff -urNp linux-2.6.32.45/samples/kobject/kset-example.c linux-2.6.32.45/samples/kobject/kset-example.c
74355 --- linux-2.6.32.45/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
74356 +++ linux-2.6.32.45/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
74357 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
74358 }
74359
74360 /* Our custom sysfs_ops that we will associate with our ktype later on */
74361 -static struct sysfs_ops foo_sysfs_ops = {
74362 +static const struct sysfs_ops foo_sysfs_ops = {
74363 .show = foo_attr_show,
74364 .store = foo_attr_store,
74365 };
74366 diff -urNp linux-2.6.32.45/scripts/basic/fixdep.c linux-2.6.32.45/scripts/basic/fixdep.c
74367 --- linux-2.6.32.45/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
74368 +++ linux-2.6.32.45/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
74369 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
74370
74371 static void parse_config_file(char *map, size_t len)
74372 {
74373 - int *end = (int *) (map + len);
74374 + unsigned int *end = (unsigned int *) (map + len);
74375 /* start at +1, so that p can never be < map */
74376 - int *m = (int *) map + 1;
74377 + unsigned int *m = (unsigned int *) map + 1;
74378 char *p, *q;
74379
74380 for (; m < end; m++) {
74381 @@ -371,7 +371,7 @@ static void print_deps(void)
74382 static void traps(void)
74383 {
74384 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
74385 - int *p = (int *)test;
74386 + unsigned int *p = (unsigned int *)test;
74387
74388 if (*p != INT_CONF) {
74389 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
74390 diff -urNp linux-2.6.32.45/scripts/gcc-plugin.sh linux-2.6.32.45/scripts/gcc-plugin.sh
74391 --- linux-2.6.32.45/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
74392 +++ linux-2.6.32.45/scripts/gcc-plugin.sh 2011-08-05 20:33:55.000000000 -0400
74393 @@ -0,0 +1,3 @@
74394 +#!/bin/sh
74395 +
74396 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
74397 diff -urNp linux-2.6.32.45/scripts/Makefile.build linux-2.6.32.45/scripts/Makefile.build
74398 --- linux-2.6.32.45/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
74399 +++ linux-2.6.32.45/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
74400 @@ -59,7 +59,7 @@ endif
74401 endif
74402
74403 # Do not include host rules unless needed
74404 -ifneq ($(hostprogs-y)$(hostprogs-m),)
74405 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
74406 include scripts/Makefile.host
74407 endif
74408
74409 diff -urNp linux-2.6.32.45/scripts/Makefile.clean linux-2.6.32.45/scripts/Makefile.clean
74410 --- linux-2.6.32.45/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
74411 +++ linux-2.6.32.45/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
74412 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
74413 __clean-files := $(extra-y) $(always) \
74414 $(targets) $(clean-files) \
74415 $(host-progs) \
74416 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
74417 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
74418 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
74419
74420 # as clean-files is given relative to the current directory, this adds
74421 # a $(obj) prefix, except for absolute paths
74422 diff -urNp linux-2.6.32.45/scripts/Makefile.host linux-2.6.32.45/scripts/Makefile.host
74423 --- linux-2.6.32.45/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
74424 +++ linux-2.6.32.45/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
74425 @@ -31,6 +31,7 @@
74426 # Note: Shared libraries consisting of C++ files are not supported
74427
74428 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
74429 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
74430
74431 # C code
74432 # Executables compiled from a single .c file
74433 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
74434 # Shared libaries (only .c supported)
74435 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
74436 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
74437 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
74438 # Remove .so files from "xxx-objs"
74439 host-cobjs := $(filter-out %.so,$(host-cobjs))
74440
74441 diff -urNp linux-2.6.32.45/scripts/mod/file2alias.c linux-2.6.32.45/scripts/mod/file2alias.c
74442 --- linux-2.6.32.45/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
74443 +++ linux-2.6.32.45/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
74444 @@ -72,7 +72,7 @@ static void device_id_check(const char *
74445 unsigned long size, unsigned long id_size,
74446 void *symval)
74447 {
74448 - int i;
74449 + unsigned int i;
74450
74451 if (size % id_size || size < id_size) {
74452 if (cross_build != 0)
74453 @@ -102,7 +102,7 @@ static void device_id_check(const char *
74454 /* USB is special because the bcdDevice can be matched against a numeric range */
74455 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
74456 static void do_usb_entry(struct usb_device_id *id,
74457 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
74458 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
74459 unsigned char range_lo, unsigned char range_hi,
74460 struct module *mod)
74461 {
74462 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
74463 for (i = 0; i < count; i++) {
74464 const char *id = (char *)devs[i].id;
74465 char acpi_id[sizeof(devs[0].id)];
74466 - int j;
74467 + unsigned int j;
74468
74469 buf_printf(&mod->dev_table_buf,
74470 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74471 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
74472
74473 for (j = 0; j < PNP_MAX_DEVICES; j++) {
74474 const char *id = (char *)card->devs[j].id;
74475 - int i2, j2;
74476 + unsigned int i2, j2;
74477 int dup = 0;
74478
74479 if (!id[0])
74480 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
74481 /* add an individual alias for every device entry */
74482 if (!dup) {
74483 char acpi_id[sizeof(card->devs[0].id)];
74484 - int k;
74485 + unsigned int k;
74486
74487 buf_printf(&mod->dev_table_buf,
74488 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74489 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
74490 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
74491 char *alias)
74492 {
74493 - int i, j;
74494 + unsigned int i, j;
74495
74496 sprintf(alias, "dmi*");
74497
74498 diff -urNp linux-2.6.32.45/scripts/mod/modpost.c linux-2.6.32.45/scripts/mod/modpost.c
74499 --- linux-2.6.32.45/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
74500 +++ linux-2.6.32.45/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
74501 @@ -835,6 +835,7 @@ enum mismatch {
74502 INIT_TO_EXIT,
74503 EXIT_TO_INIT,
74504 EXPORT_TO_INIT_EXIT,
74505 + DATA_TO_TEXT
74506 };
74507
74508 struct sectioncheck {
74509 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
74510 .fromsec = { "__ksymtab*", NULL },
74511 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
74512 .mismatch = EXPORT_TO_INIT_EXIT
74513 +},
74514 +/* Do not reference code from writable data */
74515 +{
74516 + .fromsec = { DATA_SECTIONS, NULL },
74517 + .tosec = { TEXT_SECTIONS, NULL },
74518 + .mismatch = DATA_TO_TEXT
74519 }
74520 };
74521
74522 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
74523 continue;
74524 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
74525 continue;
74526 - if (sym->st_value == addr)
74527 - return sym;
74528 /* Find a symbol nearby - addr are maybe negative */
74529 d = sym->st_value - addr;
74530 + if (d == 0)
74531 + return sym;
74532 if (d < 0)
74533 d = addr - sym->st_value;
74534 if (d < distance) {
74535 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
74536 "Fix this by removing the %sannotation of %s "
74537 "or drop the export.\n",
74538 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
74539 + case DATA_TO_TEXT:
74540 +/*
74541 + fprintf(stderr,
74542 + "The variable %s references\n"
74543 + "the %s %s%s%s\n",
74544 + fromsym, to, sec2annotation(tosec), tosym, to_p);
74545 +*/
74546 + break;
74547 case NO_MISMATCH:
74548 /* To get warnings on missing members */
74549 break;
74550 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
74551 static void check_sec_ref(struct module *mod, const char *modname,
74552 struct elf_info *elf)
74553 {
74554 - int i;
74555 + unsigned int i;
74556 Elf_Shdr *sechdrs = elf->sechdrs;
74557
74558 /* Walk through all sections */
74559 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
74560 va_end(ap);
74561 }
74562
74563 -void buf_write(struct buffer *buf, const char *s, int len)
74564 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
74565 {
74566 if (buf->size - buf->pos < len) {
74567 buf->size += len + SZ;
74568 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
74569 if (fstat(fileno(file), &st) < 0)
74570 goto close_write;
74571
74572 - if (st.st_size != b->pos)
74573 + if (st.st_size != (off_t)b->pos)
74574 goto close_write;
74575
74576 tmp = NOFAIL(malloc(b->pos));
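
Editor's note: the DATA_TO_TEXT entry added above teaches modpost to warn when a writable data section references executable text. A minimal sketch of the kind of construct such a warning points at (purely illustrative C, not taken from the patch):

/* Illustrative only: a writable table of code pointers lands in .data and
 * is what the new DATA_TO_TEXT check reports; making the table const moves
 * it to a read-only section and avoids the warning. */
static void handler_a(void) { }
static void handler_b(void) { }

static void (*handlers_writable[])(void) = { handler_a, handler_b }; /* .data   -> .text */
static void (*const handlers_ro[])(void) = { handler_a, handler_b }; /* .rodata -> .text */
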
74577 diff -urNp linux-2.6.32.45/scripts/mod/modpost.h linux-2.6.32.45/scripts/mod/modpost.h
74578 --- linux-2.6.32.45/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
74579 +++ linux-2.6.32.45/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
74580 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
74581
74582 struct buffer {
74583 char *p;
74584 - int pos;
74585 - int size;
74586 + unsigned int pos;
74587 + unsigned int size;
74588 };
74589
74590 void __attribute__((format(printf, 2, 3)))
74591 buf_printf(struct buffer *buf, const char *fmt, ...);
74592
74593 void
74594 -buf_write(struct buffer *buf, const char *s, int len);
74595 +buf_write(struct buffer *buf, const char *s, unsigned int len);
74596
74597 struct module {
74598 struct module *next;
74599 diff -urNp linux-2.6.32.45/scripts/mod/sumversion.c linux-2.6.32.45/scripts/mod/sumversion.c
74600 --- linux-2.6.32.45/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
74601 +++ linux-2.6.32.45/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
74602 @@ -455,7 +455,7 @@ static void write_version(const char *fi
74603 goto out;
74604 }
74605
74606 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
74607 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
74608 warn("writing sum in %s failed: %s\n",
74609 filename, strerror(errno));
74610 goto out;
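
Editor's note: the cast above exists because write() returns a signed ssize_t while strlen() yields an unsigned size_t, and comparing the two directly drags the signed return value (possibly -1) into unsigned arithmetic. A small userspace sketch of the same comparison, illustrative rather than taken from sumversion.c:

/* Demonstrates the signed/unsigned comparison the cast avoids. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *sum = "deadbeef";
        ssize_t n = write(STDOUT_FILENO, sum, strlen(sum) + 1);

        /* without the cast, n would be converted to size_t and -1 would
         * compare as a huge positive value instead of an error */
        if (n != (ssize_t)(strlen(sum) + 1))
                fprintf(stderr, "short or failed write\n");
        return 0;
}
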
74611 diff -urNp linux-2.6.32.45/scripts/package/mkspec linux-2.6.32.45/scripts/package/mkspec
74612 --- linux-2.6.32.45/scripts/package/mkspec 2011-03-27 14:31:47.000000000 -0400
74613 +++ linux-2.6.32.45/scripts/package/mkspec 2011-07-19 18:19:12.000000000 -0400
74614 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM
74615 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
74616 echo "%endif"
74617
74618 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
74619 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
74620 echo "%ifarch ia64"
74621 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
74622 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
74623 diff -urNp linux-2.6.32.45/scripts/pnmtologo.c linux-2.6.32.45/scripts/pnmtologo.c
74624 --- linux-2.6.32.45/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
74625 +++ linux-2.6.32.45/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
74626 @@ -237,14 +237,14 @@ static void write_header(void)
74627 fprintf(out, " * Linux logo %s\n", logoname);
74628 fputs(" */\n\n", out);
74629 fputs("#include <linux/linux_logo.h>\n\n", out);
74630 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
74631 + fprintf(out, "static unsigned char %s_data[] = {\n",
74632 logoname);
74633 }
74634
74635 static void write_footer(void)
74636 {
74637 fputs("\n};\n\n", out);
74638 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
74639 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
74640 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
74641 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
74642 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
74643 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
74644 fputs("\n};\n\n", out);
74645
74646 /* write logo clut */
74647 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
74648 + fprintf(out, "static unsigned char %s_clut[] = {\n",
74649 logoname);
74650 write_hex_cnt = 0;
74651 for (i = 0; i < logo_clutsize; i++) {
74652 diff -urNp linux-2.6.32.45/scripts/tags.sh linux-2.6.32.45/scripts/tags.sh
74653 --- linux-2.6.32.45/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
74654 +++ linux-2.6.32.45/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
74655 @@ -93,6 +93,11 @@ docscope()
74656 cscope -b -f cscope.out
74657 }
74658
74659 +dogtags()
74660 +{
74661 + all_sources | gtags -f -
74662 +}
74663 +
74664 exuberant()
74665 {
74666 all_sources | xargs $1 -a \
74667 @@ -164,6 +169,10 @@ case "$1" in
74668 docscope
74669 ;;
74670
74671 + "gtags")
74672 + dogtags
74673 + ;;
74674 +
74675 "tags")
74676 rm -f tags
74677 xtags ctags
74678 diff -urNp linux-2.6.32.45/security/capability.c linux-2.6.32.45/security/capability.c
74679 --- linux-2.6.32.45/security/capability.c 2011-03-27 14:31:47.000000000 -0400
74680 +++ linux-2.6.32.45/security/capability.c 2011-04-17 15:56:46.000000000 -0400
74681 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
74682 }
74683 #endif /* CONFIG_AUDIT */
74684
74685 -struct security_operations default_security_ops = {
74686 +struct security_operations default_security_ops __read_only = {
74687 .name = "default",
74688 };
74689
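
Editor's note: default_security_ops is one of many ops tables this patch moves into read-only memory via the PaX __read_only attribute. A rough sketch of the underlying idea, using plain const as the closest standard-C stand-in (struct my_ops and the handlers are made up for illustration):

/* A function-pointer table the kernel only reads after init is a classic
 * target for an attacker with a kernel write primitive; keeping it out of
 * writable memory removes that target. */
struct my_ops {
        int (*check)(int arg);
};

static int default_check(int arg) { return arg != 0; }

/* writable: a single stray write can redirect ->check to attacker code */
static struct my_ops ops_writable = { .check = default_check };

/* read-only: the same table, no longer a useful target */
static const struct my_ops ops_ro = { .check = default_check };
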
74690 diff -urNp linux-2.6.32.45/security/commoncap.c linux-2.6.32.45/security/commoncap.c
74691 --- linux-2.6.32.45/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
74692 +++ linux-2.6.32.45/security/commoncap.c 2011-08-17 19:22:13.000000000 -0400
74693 @@ -27,7 +27,7 @@
74694 #include <linux/sched.h>
74695 #include <linux/prctl.h>
74696 #include <linux/securebits.h>
74697 -
74698 +#include <net/sock.h>
74699 /*
74700 * If a non-root user executes a setuid-root binary in
74701 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
74702 @@ -50,9 +50,18 @@ static void warn_setuid_and_fcaps_mixed(
74703 }
74704 }
74705
74706 +#ifdef CONFIG_NET
74707 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
74708 +#endif
74709 +
74710 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
74711 {
74712 +#ifdef CONFIG_NET
74713 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
74714 +#else
74715 NETLINK_CB(skb).eff_cap = current_cap();
74716 +#endif
74717 +
74718 return 0;
74719 }
74720
74721 @@ -582,6 +591,9 @@ int cap_bprm_secureexec(struct linux_bin
74722 {
74723 const struct cred *cred = current_cred();
74724
74725 + if (gr_acl_enable_at_secure())
74726 + return 1;
74727 +
74728 if (cred->uid != 0) {
74729 if (bprm->cap_effective)
74730 return 1;
74731 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_api.c linux-2.6.32.45/security/integrity/ima/ima_api.c
74732 --- linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
74733 +++ linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
74734 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
74735 int result;
74736
74737 /* can overflow, only indicator */
74738 - atomic_long_inc(&ima_htable.violations);
74739 + atomic_long_inc_unchecked(&ima_htable.violations);
74740
74741 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
74742 if (!entry) {
74743 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_fs.c linux-2.6.32.45/security/integrity/ima/ima_fs.c
74744 --- linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
74745 +++ linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
74746 @@ -27,12 +27,12 @@
74747 static int valid_policy = 1;
74748 #define TMPBUFLEN 12
74749 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
74750 - loff_t *ppos, atomic_long_t *val)
74751 + loff_t *ppos, atomic_long_unchecked_t *val)
74752 {
74753 char tmpbuf[TMPBUFLEN];
74754 ssize_t len;
74755
74756 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
74757 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
74758 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
74759 }
74760
74761 diff -urNp linux-2.6.32.45/security/integrity/ima/ima.h linux-2.6.32.45/security/integrity/ima/ima.h
74762 --- linux-2.6.32.45/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
74763 +++ linux-2.6.32.45/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
74764 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
74765 extern spinlock_t ima_queue_lock;
74766
74767 struct ima_h_table {
74768 - atomic_long_t len; /* number of stored measurements in the list */
74769 - atomic_long_t violations;
74770 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
74771 + atomic_long_unchecked_t violations;
74772 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
74773 };
74774 extern struct ima_h_table ima_htable;
74775 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_queue.c linux-2.6.32.45/security/integrity/ima/ima_queue.c
74776 --- linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
74777 +++ linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
74778 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
74779 INIT_LIST_HEAD(&qe->later);
74780 list_add_tail_rcu(&qe->later, &ima_measurements);
74781
74782 - atomic_long_inc(&ima_htable.len);
74783 + atomic_long_inc_unchecked(&ima_htable.len);
74784 key = ima_hash_key(entry->digest);
74785 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
74786 return 0;
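
Editor's note: the IMA counters above are explicitly allowed to wrap ("can overflow, only indicator"), so they are switched to the *_unchecked atomics that PAX_REFCOUNT leaves alone. A short sketch of that split; it assumes the kernel's atomic headers plus the _unchecked types introduced elsewhere in this patch, and struct obj/struct stats are illustrative:

/* Checked atomics keep overflow detection for object lifetimes; pure
 * statistics that may legitimately wrap use the _unchecked variants,
 * as done for ima_htable above. */
struct obj {
        atomic_t refcount;              /* overflow is trapped by PAX_REFCOUNT */
};

struct stats {
        atomic_long_unchecked_t events; /* wrap-around is acceptable here */
};

static void record_event(struct stats *s)
{
        atomic_long_inc_unchecked(&s->events); /* "only indicator" counter */
}
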
74787 diff -urNp linux-2.6.32.45/security/Kconfig linux-2.6.32.45/security/Kconfig
74788 --- linux-2.6.32.45/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
74789 +++ linux-2.6.32.45/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
74790 @@ -4,6 +4,555 @@
74791
74792 menu "Security options"
74793
74794 +source grsecurity/Kconfig
74795 +
74796 +menu "PaX"
74797 +
74798 + config ARCH_TRACK_EXEC_LIMIT
74799 + bool
74800 +
74801 + config PAX_PER_CPU_PGD
74802 + bool
74803 +
74804 + config TASK_SIZE_MAX_SHIFT
74805 + int
74806 + depends on X86_64
74807 + default 47 if !PAX_PER_CPU_PGD
74808 + default 42 if PAX_PER_CPU_PGD
74809 +
74810 + config PAX_ENABLE_PAE
74811 + bool
74812 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
74813 +
74814 +config PAX
74815 + bool "Enable various PaX features"
74816 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
74817 + help
74818 + This allows you to enable various PaX features. PaX adds
74819 + intrusion prevention mechanisms to the kernel that reduce
74820 + the risks posed by exploitable memory corruption bugs.
74821 +
74822 +menu "PaX Control"
74823 + depends on PAX
74824 +
74825 +config PAX_SOFTMODE
74826 + bool 'Support soft mode'
74827 + select PAX_PT_PAX_FLAGS
74828 + help
74829 + Enabling this option will allow you to run PaX in soft mode, that
74830 + is, PaX features will not be enforced by default, only on executables
74831 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
74832 + is the only way to mark executables for soft mode use.
74833 +
74834 + Soft mode can be activated by using the "pax_softmode=1" kernel command
74835 + line option on boot. Furthermore you can control various PaX features
74836 + at runtime via the entries in /proc/sys/kernel/pax.
74837 +
74838 +config PAX_EI_PAX
74839 + bool 'Use legacy ELF header marking'
74840 + help
74841 + Enabling this option will allow you to control PaX features on
74842 + a per executable basis via the 'chpax' utility available at
74843 + http://pax.grsecurity.net/. The control flags will be read from
74844 + an otherwise reserved part of the ELF header. This marking has
74845 + numerous drawbacks (no support for soft mode, the toolchain does not
74846 + know about the non-standard use of the ELF header); therefore it
74847 + has been deprecated in favour of PT_PAX_FLAGS support.
74848 +
74849 + Note that if you enable PT_PAX_FLAGS marking support as well,
74850 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
74851 +
74852 +config PAX_PT_PAX_FLAGS
74853 + bool 'Use ELF program header marking'
74854 + help
74855 + Enabling this option will allow you to control PaX features on
74856 + a per executable basis via the 'paxctl' utility available at
74857 + http://pax.grsecurity.net/. The control flags will be read from
74858 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
74859 + has the benefits of both supporting soft mode and being fully
74860 + integrated into the toolchain (the binutils patch is available
74861 + from http://pax.grsecurity.net).
74862 +
74863 + If your toolchain does not support PT_PAX_FLAGS markings,
74864 + you can create one in most cases with 'paxctl -C'.
74865 +
74866 + Note that if you enable the legacy EI_PAX marking support as well,
74867 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
74868 +
74869 +choice
74870 + prompt 'MAC system integration'
74871 + default PAX_HAVE_ACL_FLAGS
74872 + help
74873 + Mandatory Access Control systems have the option of controlling
74874 + PaX flags on a per executable basis, choose the method supported
74875 + by your particular system.
74876 +
74877 + - "none": if your MAC system does not interact with PaX,
74878 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
74879 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
74880 +
74881 + NOTE: this option is for developers/integrators only.
74882 +
74883 + config PAX_NO_ACL_FLAGS
74884 + bool 'none'
74885 +
74886 + config PAX_HAVE_ACL_FLAGS
74887 + bool 'direct'
74888 +
74889 + config PAX_HOOK_ACL_FLAGS
74890 + bool 'hook'
74891 +endchoice
74892 +
74893 +endmenu
74894 +
74895 +menu "Non-executable pages"
74896 + depends on PAX
74897 +
74898 +config PAX_NOEXEC
74899 + bool "Enforce non-executable pages"
74900 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
74901 + help
74902 + By design some architectures do not allow memory pages to be
74903 + protected against execution, and even when they do, Linux does not make
74904 + use of this feature. In practice this means that if a page is
74905 + readable (such as the stack or heap) it is also executable.
74906 +
74907 + There is a well known exploit technique that makes use of this
74908 + fact and a common programming mistake where an attacker can
74909 + introduce code of his choice somewhere in the attacked program's
74910 + memory (typically the stack or the heap) and then execute it.
74911 +
74912 + If the attacked program was running with different (typically
74913 + higher) privileges than that of the attacker, then he can elevate
74914 + his own privilege level (e.g. get a root shell, write to files for
74915 + which he does not have write access, etc).
74916 +
74917 + Enabling this option will let you choose from various features
74918 + that prevent the injection and execution of 'foreign' code in
74919 + a program.
74920 +
74921 + This will also break programs that rely on the old behaviour and
74922 + expect that memory allocated dynamically via the malloc() family
74923 + of functions is executable (which it is not). Notable examples
74924 + are the XFree86 4.x server, the java runtime and wine.
74925 +
74926 +config PAX_PAGEEXEC
74927 + bool "Paging based non-executable pages"
74928 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
74929 + select S390_SWITCH_AMODE if S390
74930 + select S390_EXEC_PROTECT if S390
74931 + select ARCH_TRACK_EXEC_LIMIT if X86_32
74932 + help
74933 + This implementation is based on the paging feature of the CPU.
74934 + On i386 without hardware non-executable bit support there is a
74935 + variable but usually low performance impact, however on Intel's
74936 + P4 core based CPUs it is very high so you should not enable this
74937 + for kernels meant to be used on such CPUs.
74938 +
74939 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
74940 + with hardware non-executable bit support there is no performance
74941 + impact, on ppc the impact is negligible.
74942 +
74943 + Note that several architectures require various emulations due to
74944 + badly designed userland ABIs; this causes a performance impact
74945 + that will disappear as soon as userland is fixed. For example, ppc
74946 + userland MUST have been built with secure-plt by a recent toolchain.
74947 +
74948 +config PAX_SEGMEXEC
74949 + bool "Segmentation based non-executable pages"
74950 + depends on PAX_NOEXEC && X86_32
74951 + help
74952 + This implementation is based on the segmentation feature of the
74953 + CPU and has a very small performance impact, however applications
74954 + will be limited to a 1.5 GB address space instead of the normal
74955 + 3 GB.
74956 +
74957 +config PAX_EMUTRAMP
74958 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
74959 + default y if PARISC
74960 + help
74961 + There are some programs and libraries that for one reason or
74962 + another attempt to execute special small code snippets from
74963 + non-executable memory pages. Most notable examples are the
74964 + signal handler return code generated by the kernel itself and
74965 + the GCC trampolines.
74966 +
74967 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
74968 + such programs will no longer work under your kernel.
74969 +
74970 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
74971 + utilities to enable trampoline emulation for the affected programs
74972 + yet still have the protection provided by the non-executable pages.
74973 +
74974 + On parisc you MUST enable this option and EMUSIGRT as well; otherwise
74975 + your system will not even boot.
74976 +
74977 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
74978 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
74979 + for the affected files.
74980 +
74981 + NOTE: enabling this feature *may* open up a loophole in the
74982 + protection provided by non-executable pages that an attacker
74983 + could abuse. Therefore the best solution is to not have any
74984 + files on your system that would require this option. This can
74985 + be achieved by not using libc5 (which relies on the kernel
74986 + signal handler return code) and not using or rewriting programs
74987 + that make use of the nested function implementation of GCC.
74988 + Skilled users can just fix GCC itself so that it implements
74989 + nested function calls in a way that does not interfere with PaX.
74990 +
74991 +config PAX_EMUSIGRT
74992 + bool "Automatically emulate sigreturn trampolines"
74993 + depends on PAX_EMUTRAMP && PARISC
74994 + default y
74995 + help
74996 + Enabling this option will have the kernel automatically detect
74997 + and emulate signal return trampolines executing on the stack
74998 + that would otherwise lead to task termination.
74999 +
75000 + This solution is intended as a temporary one for users with
75001 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
75002 + Modula-3 runtime, etc) or executables linked to such, basically
75003 + everything that does not specify its own SA_RESTORER function in
75004 + normal executable memory like glibc 2.1+ does.
75005 +
75006 + On parisc you MUST enable this option; otherwise your system will
75007 + not even boot.
75008 +
75009 + NOTE: this feature cannot be disabled on a per executable basis
75010 + and since it *does* open up a loophole in the protection provided
75011 + by non-executable pages, the best solution is to not have any
75012 + files on your system that would require this option.
75013 +
75014 +config PAX_MPROTECT
75015 + bool "Restrict mprotect()"
75016 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
75017 + help
75018 + Enabling this option will prevent programs from
75019 + - changing the executable status of memory pages that were
75020 + not originally created as executable,
75021 + - making read-only executable pages writable again,
75022 + - creating executable pages from anonymous memory,
75023 + - making read-only-after-relocations (RELRO) data pages writable again.
75024 +
75025 + You should say Y here to complete the protection provided by
75026 + the enforcement of non-executable pages.
75027 +
75028 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75029 + this feature on a per file basis.
75030 +
75031 +config PAX_MPROTECT_COMPAT
75032 + bool "Use legacy/compat protection demoting (read help)"
75033 + depends on PAX_MPROTECT
75034 + default n
75035 + help
75036 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
75037 + by sending the proper error code to the application. For some broken
75038 + userland, this can cause problems with Python or other applications. The
75039 + current implementation however allows for applications like clamav to
75040 + detect if JIT compilation/execution is allowed and to fall back gracefully
75041 + to an interpreter-based mode if it is not. While we encourage everyone
75042 + to use the current implementation as-is and push upstream to fix broken
75043 + userland (note that the RWX logging option can assist with this), in some
75044 + environments this may not be possible. Having to disable MPROTECT
75045 + completely on certain binaries reduces the security benefit of PaX,
75046 + so this option is provided for those environments to revert to the old
75047 + behavior.
75048 +
75049 +config PAX_ELFRELOCS
75050 + bool "Allow ELF text relocations (read help)"
75051 + depends on PAX_MPROTECT
75052 + default n
75053 + help
75054 + Non-executable pages and mprotect() restrictions are effective
75055 + in preventing the introduction of new executable code into an
75056 + attacked task's address space. There remain only two avenues
75057 + for this kind of attack: if the attacker can execute already
75058 + existing code in the attacked task then he can either have it
75059 + create and mmap() a file containing his code or have it mmap()
75060 + an already existing ELF library that does not have position
75061 + independent code in it and use mprotect() on it to make it
75062 + writable and copy his code there. While protecting against
75063 + the former approach is beyond PaX, the latter can be prevented
75064 + by having only PIC ELF libraries on one's system (which do not
75065 + need to relocate their code). If you are sure this is your case,
75066 + as is the case with all modern Linux distributions, then leave
75067 + this option disabled. You should say 'n' here.
75068 +
75069 +config PAX_ETEXECRELOCS
75070 + bool "Allow ELF ET_EXEC text relocations"
75071 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
75072 + select PAX_ELFRELOCS
75073 + default y
75074 + help
75075 + On some architectures there are incorrectly created applications
75076 + that require text relocations and would not work without enabling
75077 + this option. If you are an alpha, ia64 or parisc user, you should
75078 + enable this option and disable it once you have made sure that
75079 + none of your applications need it.
75080 +
75081 +config PAX_EMUPLT
75082 + bool "Automatically emulate ELF PLT"
75083 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
75084 + default y
75085 + help
75086 + Enabling this option will have the kernel automatically detect
75087 + and emulate the Procedure Linkage Table entries in ELF files.
75088 + On some architectures such entries are in writable memory, and
75089 + become non-executable, leading to task termination. Therefore
75090 + it is mandatory that you enable this option on alpha, parisc,
75091 + sparc and sparc64; otherwise your system would not even boot.
75092 +
75093 + NOTE: this feature *does* open up a loophole in the protection
75094 + provided by the non-executable pages, therefore the proper
75095 + solution is to modify the toolchain to produce a PLT that does
75096 + not need to be writable.
75097 +
75098 +config PAX_DLRESOLVE
75099 + bool 'Emulate old glibc resolver stub'
75100 + depends on PAX_EMUPLT && SPARC
75101 + default n
75102 + help
75103 + This option is needed if userland has an old glibc (before 2.4)
75104 + that puts a 'save' instruction into the runtime generated resolver
75105 + stub that needs special emulation.
75106 +
75107 +config PAX_KERNEXEC
75108 + bool "Enforce non-executable kernel pages"
75109 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
75110 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
75111 + help
75112 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
75113 + that is, enabling this option will make it harder to inject
75114 + and execute 'foreign' code in kernel memory itself.
75115 +
75116 + Note that on x86_64 kernels there is a known regression when
75117 + this feature and KVM/VMX are both enabled in the host kernel.
75118 +
75119 +config PAX_KERNEXEC_MODULE_TEXT
75120 + int "Minimum amount of memory reserved for module code"
75121 + default "4"
75122 + depends on PAX_KERNEXEC && X86_32 && MODULES
75123 + help
75124 + Due to implementation details the kernel must reserve a fixed
75125 + amount of memory for module code at compile time that cannot be
75126 + changed at runtime. Here you can specify the minimum amount
75127 + in MB that will be reserved. Due to the same implementation
75128 + details this size will always be rounded up to the next 2/4 MB
75129 + boundary (depending on PAE), so the memory actually available for
75130 + module code will usually be more than this minimum.
75131 +
75132 + The default 4 MB should be enough for most users but if you have
75133 + an excessive number of modules (e.g., most distribution configs
75134 + compile many drivers as modules) or use huge modules such as
75135 + nvidia's kernel driver, you will need to adjust this amount.
75136 + A good rule of thumb is to look at your currently loaded kernel
75137 + modules and add up their sizes.
75138 +
75139 +endmenu
75140 +
75141 +menu "Address Space Layout Randomization"
75142 + depends on PAX
75143 +
75144 +config PAX_ASLR
75145 + bool "Address Space Layout Randomization"
75146 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
75147 + help
75148 + Many if not most exploit techniques rely on the knowledge of
75149 + certain addresses in the attacked program. The following options
75150 + will allow the kernel to apply a certain amount of randomization
75151 + to specific parts of the program thereby forcing an attacker to
75152 + guess them in most cases. Any failed guess will most likely crash
75153 + the attacked program, which allows the kernel to detect such attempts
75154 + and react to them. PaX itself provides no reaction mechanisms;
75155 + instead it is strongly encouraged that you make use of Nergal's
75156 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
75157 + (http://www.grsecurity.net/) built-in crash detection features or
75158 + develop one yourself.
75159 +
75160 + By saying Y here you can choose to randomize the following areas:
75161 + - top of the task's kernel stack
75162 + - top of the task's userland stack
75163 + - base address for mmap() requests that do not specify one
75164 + (this includes all libraries)
75165 + - base address of the main executable
75166 +
75167 + It is strongly recommended to say Y here as address space layout
75168 + randomization has negligible impact on performance yet it provides
75169 + a very effective protection.
75170 +
75171 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75172 + this feature on a per file basis.
75173 +
75174 +config PAX_RANDKSTACK
75175 + bool "Randomize kernel stack base"
75176 + depends on PAX_ASLR && X86_TSC && X86
75177 + help
75178 + By saying Y here the kernel will randomize every task's kernel
75179 + stack on every system call. This will not only force an attacker
75180 + to guess it but also prevent him from making use of possible
75181 + leaked information about it.
75182 +
75183 + Since the kernel stack is a rather scarce resource, randomization
75184 + may cause unexpected stack overflows, therefore you should very
75185 + carefully test your system. Note that once enabled in the kernel
75186 + configuration, this feature cannot be disabled on a per file basis.
75187 +
75188 +config PAX_RANDUSTACK
75189 + bool "Randomize user stack base"
75190 + depends on PAX_ASLR
75191 + help
75192 + By saying Y here the kernel will randomize every task's userland
75193 + stack. The randomization is done in two steps where the second
75194 + one may apply a large shift to the top of the stack and
75195 + cause problems for programs that want to use lots of memory (more
75196 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
75197 + For this reason the second step can be controlled by 'chpax' or
75198 + 'paxctl' on a per file basis.
75199 +
75200 +config PAX_RANDMMAP
75201 + bool "Randomize mmap() base"
75202 + depends on PAX_ASLR
75203 + help
75204 + By saying Y here the kernel will use a randomized base address for
75205 + mmap() requests that do not specify one themselves. As a result
75206 + all dynamically loaded libraries will appear at random addresses
75207 + and therefore be harder to exploit by a technique where an attacker
75208 + attempts to execute library code for his purposes (e.g. spawn a
75209 + shell from an exploited program that is running at an elevated
75210 + privilege level).
75211 +
75212 + Furthermore, if a program is relinked as a dynamic ELF file, its
75213 + base address will be randomized as well, completing the full
75214 + randomization of the address space layout. Attacking such programs
75215 + becomes a guessing game. You can find an example of doing this at
75216 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
75217 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
75218 +
75219 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
75220 + feature on a per file basis.
75221 +
75222 +endmenu
75223 +
75224 +menu "Miscellaneous hardening features"
75225 +
75226 +config PAX_MEMORY_SANITIZE
75227 + bool "Sanitize all freed memory"
75228 + help
75229 + By saying Y here the kernel will erase memory pages as soon as they
75230 + are freed. This in turn reduces the lifetime of data stored in the
75231 + pages, making it less likely that sensitive information such as
75232 + passwords, cryptographic secrets, etc stay in memory for too long.
75233 +
75234 + This is especially useful for programs whose runtime is short; long
75235 + lived processes and the kernel itself also benefit from this as long as
75236 + they operate on whole memory pages and ensure timely freeing of pages
75237 + that may hold sensitive information.
75238 +
75239 + The tradeoff is a performance impact: on a single CPU system, kernel
75240 + compilation sees a 3% slowdown; other systems and workloads may vary,
75241 + and you are advised to test this feature on your expected workload
75242 + before deploying it.
75243 +
75244 + Note that this feature does not protect data stored in live pages,
75245 + e.g., process memory swapped to disk may stay there for a long time.
75246 +
75247 +config PAX_MEMORY_STACKLEAK
75248 + bool "Sanitize kernel stack"
75249 + depends on X86
75250 + help
75251 + By saying Y here the kernel will erase the kernel stack before it
75252 + returns from a system call. This in turn reduces the information
75253 + that a kernel stack leak bug can reveal.
75254 +
75255 + Note that such a bug can still leak information that was put on
75256 + the stack by the current system call (the one eventually triggering
75257 + the bug) but traces of earlier system calls on the kernel stack
75258 + cannot leak anymore.
75259 +
75260 + The tradeoff is a performance impact: on a single CPU system, kernel
75261 + compilation sees a 1% slowdown; other systems and workloads may vary,
75262 + and you are advised to test this feature on your expected workload
75263 + before deploying it.
75264 +
75265 + Note: full support for this feature requires gcc with plugin support,
75266 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
75267 + is not supported). Using older gcc versions means that functions
75268 + with large enough stack frames may leave uninitialized memory behind
75269 + that may be exposed to a later syscall, leaking stack contents.
75270 +
75271 +config PAX_MEMORY_UDEREF
75272 + bool "Prevent invalid userland pointer dereference"
75273 + depends on X86 && !UML_X86 && !XEN
75274 + select PAX_PER_CPU_PGD if X86_64
75275 + help
75276 + By saying Y here the kernel will be prevented from dereferencing
75277 + userland pointers in contexts where the kernel expects only kernel
75278 + pointers. This is both a useful runtime debugging feature and a
75279 + security measure that prevents exploiting a class of kernel bugs.
75280 +
75281 + The tradeoff is that some virtualization solutions may experience
75282 + a huge slowdown and therefore you should not enable this feature
75283 + for kernels meant to run in such environments. Whether a given VM
75284 + solution is affected or not is best determined by simply trying it
75285 + out; the performance impact will be obvious right on boot as this
75286 + mechanism engages from very early on. A good rule of thumb is that
75287 + VMs running on CPUs without hardware virtualization support (i.e.,
75288 + the majority of IA-32 CPUs) will likely experience the slowdown.
75289 +
75290 +config PAX_REFCOUNT
75291 + bool "Prevent various kernel object reference counter overflows"
75292 + depends on GRKERNSEC && (X86 || SPARC64)
75293 + help
75294 + By saying Y here the kernel will detect and prevent overflowing
75295 + various (but not all) kinds of object reference counters. Such
75296 + overflows can normally occur due to bugs only and are often, if
75297 + not always, exploitable.
75298 +
75299 + The tradeoff is that data structures protected by an overflowed
75300 + refcount will never be freed and therefore will leak memory. Note
75301 + that this leak also happens even without this protection, but in
75302 + that case the overflow can eventually trigger the freeing of the
75303 + data structure while it is still being used elsewhere, resulting
75304 + in the exploitable situation that this feature prevents.
75305 +
75306 + Since this has a negligible performance impact, you should enable
75307 + this feature.
75308 +
75309 +config PAX_USERCOPY
75310 + bool "Harden heap object copies between kernel and userland"
75311 + depends on X86 || PPC || SPARC || ARM
75312 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
75313 + help
75314 + By saying Y here the kernel will enforce the size of heap objects
75315 + when they are copied in either direction between the kernel and
75316 + userland, even if only a part of the heap object is copied.
75317 +
75318 + Specifically, this checking prevents information leaking from the
75319 + kernel heap during kernel to userland copies (if the kernel heap
75320 + object is otherwise fully initialized) and prevents kernel heap
75321 + overflows during userland to kernel copies.
75322 +
75323 + Note that the current implementation provides the strictest bounds
75324 + checks for the SLUB allocator.
75325 +
75326 + Enabling this option also enables per-slab cache protection against
75327 + data in a given cache being copied to or from userland via the
75328 + copy accessors. Though the whitelist of regions will be reduced over
75329 + time, it notably protects important data structures like task structs.
75330 +
75331 +
75332 + If frame pointers are enabled on x86, this option will also
75333 + restrict copies into and out of the kernel stack to local variables
75334 + within a single frame.
75335 +
75336 + Since this has a negligible performance impact, you should enable
75337 + this feature.
75338 +
75339 +endmenu
75340 +
75341 +endmenu
75342 +
75343 config KEYS
75344 bool "Enable access key retention support"
75345 help
75346 @@ -146,7 +695,7 @@ config INTEL_TXT
75347 config LSM_MMAP_MIN_ADDR
75348 int "Low address space for LSM to protect from user allocation"
75349 depends on SECURITY && SECURITY_SELINUX
75350 - default 65536
75351 + default 32768
75352 help
75353 This is the portion of low virtual memory which should be protected
75354 from userspace allocation. Keeping a user from writing to low pages
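
Editor's note: to make the PAX_MPROTECT help text above concrete, here is a small userspace sketch (not part of the patch) of the request class it rejects: turning writable anonymous memory executable. The exact errno returned under PaX may differ from a vanilla kernel.

/* "Creating executable pages from anonymous memory" test, illustrative only. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0)
                printf("mprotect(PROT_EXEC) denied: %s\n", strerror(errno));
        else
                printf("mprotect(PROT_EXEC) allowed\n");

        munmap(p, len);
        return 0;
}
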
75355 diff -urNp linux-2.6.32.45/security/keys/keyring.c linux-2.6.32.45/security/keys/keyring.c
75356 --- linux-2.6.32.45/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
75357 +++ linux-2.6.32.45/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
75358 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
75359 ret = -EFAULT;
75360
75361 for (loop = 0; loop < klist->nkeys; loop++) {
75362 + key_serial_t serial;
75363 key = klist->keys[loop];
75364 + serial = key->serial;
75365
75366 tmp = sizeof(key_serial_t);
75367 if (tmp > buflen)
75368 tmp = buflen;
75369
75370 - if (copy_to_user(buffer,
75371 - &key->serial,
75372 - tmp) != 0)
75373 + if (copy_to_user(buffer, &serial, tmp))
75374 goto error;
75375
75376 buflen -= tmp;
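
Editor's note: the keyring change above copies key->serial into a stack local before handing it to copy_to_user(), which appears to fit the PAX_USERCOPY bounds checking described in the Kconfig help: the copy source becomes an object of exactly the size being copied rather than a pointer into a larger kernel structure. A sketch of that general pattern; struct big_obj and copy_serial are illustrative, and kernel headers (<linux/uaccess.h>, <linux/errno.h>) are assumed:

struct big_obj {
        long serial;
        char secret[64];        /* must never reach userland */
};

static long copy_serial(void __user *buffer, const struct big_obj *obj,
                        unsigned long buflen)
{
        long serial = obj->serial;      /* local copy, exactly sizeof(long) */
        unsigned long n = sizeof(serial);

        if (n > buflen)
                n = buflen;
        /* the usercopy checks now see a plain stack variable of size n */
        return copy_to_user(buffer, &serial, n) ? -EFAULT : (long)n;
}
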
75377 diff -urNp linux-2.6.32.45/security/min_addr.c linux-2.6.32.45/security/min_addr.c
75378 --- linux-2.6.32.45/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
75379 +++ linux-2.6.32.45/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
75380 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
75381 */
75382 static void update_mmap_min_addr(void)
75383 {
75384 +#ifndef SPARC
75385 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
75386 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
75387 mmap_min_addr = dac_mmap_min_addr;
75388 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
75389 #else
75390 mmap_min_addr = dac_mmap_min_addr;
75391 #endif
75392 +#endif
75393 }
75394
75395 /*
75396 diff -urNp linux-2.6.32.45/security/root_plug.c linux-2.6.32.45/security/root_plug.c
75397 --- linux-2.6.32.45/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
75398 +++ linux-2.6.32.45/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
75399 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
75400 return 0;
75401 }
75402
75403 -static struct security_operations rootplug_security_ops = {
75404 +static struct security_operations rootplug_security_ops __read_only = {
75405 .bprm_check_security = rootplug_bprm_check_security,
75406 };
75407
75408 diff -urNp linux-2.6.32.45/security/security.c linux-2.6.32.45/security/security.c
75409 --- linux-2.6.32.45/security/security.c 2011-03-27 14:31:47.000000000 -0400
75410 +++ linux-2.6.32.45/security/security.c 2011-04-17 15:56:46.000000000 -0400
75411 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
75412 extern struct security_operations default_security_ops;
75413 extern void security_fixup_ops(struct security_operations *ops);
75414
75415 -struct security_operations *security_ops; /* Initialized to NULL */
75416 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
75417
75418 static inline int verify(struct security_operations *ops)
75419 {
75420 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
75421 * If there is already a security module registered with the kernel,
75422 * an error will be returned. Otherwise %0 is returned on success.
75423 */
75424 -int register_security(struct security_operations *ops)
75425 +int __init register_security(struct security_operations *ops)
75426 {
75427 if (verify(ops)) {
75428 printk(KERN_DEBUG "%s could not verify "
75429 diff -urNp linux-2.6.32.45/security/selinux/hooks.c linux-2.6.32.45/security/selinux/hooks.c
75430 --- linux-2.6.32.45/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
75431 +++ linux-2.6.32.45/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
75432 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
75433 * Minimal support for a secondary security module,
75434 * just to allow the use of the capability module.
75435 */
75436 -static struct security_operations *secondary_ops;
75437 +static struct security_operations *secondary_ops __read_only;
75438
75439 /* Lists of inode and superblock security structures initialized
75440 before the policy was loaded. */
75441 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
75442
75443 #endif
75444
75445 -static struct security_operations selinux_ops = {
75446 +static struct security_operations selinux_ops __read_only = {
75447 .name = "selinux",
75448
75449 .ptrace_access_check = selinux_ptrace_access_check,
75450 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
75451 avc_disable();
75452
75453 /* Reset security_ops to the secondary module, dummy or capability. */
75454 + pax_open_kernel();
75455 security_ops = secondary_ops;
75456 + pax_close_kernel();
75457
75458 /* Unregister netfilter hooks. */
75459 selinux_nf_ip_exit();
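
Editor's note: because security_ops is now __read_only, the SELinux disable path wraps its one legitimate store in pax_open_kernel()/pax_close_kernel(). A condensed sketch of that pattern; the helpers and __read_only come from this patch, while struct my_ops and switch_ops are illustrative:

struct my_ops;

static struct my_ops *active_ops __read_only;

static void switch_ops(struct my_ops *newops)
{
        pax_open_kernel();      /* briefly lift the write protection    */
        active_ops = newops;    /* the single, audited store to RO data */
        pax_close_kernel();     /* re-arm the read-only protection      */
}
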
75460 diff -urNp linux-2.6.32.45/security/selinux/include/xfrm.h linux-2.6.32.45/security/selinux/include/xfrm.h
75461 --- linux-2.6.32.45/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
75462 +++ linux-2.6.32.45/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
75463 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
75464
75465 static inline void selinux_xfrm_notify_policyload(void)
75466 {
75467 - atomic_inc(&flow_cache_genid);
75468 + atomic_inc_unchecked(&flow_cache_genid);
75469 }
75470 #else
75471 static inline int selinux_xfrm_enabled(void)
75472 diff -urNp linux-2.6.32.45/security/selinux/ss/services.c linux-2.6.32.45/security/selinux/ss/services.c
75473 --- linux-2.6.32.45/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
75474 +++ linux-2.6.32.45/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
75475 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
75476 int rc = 0;
75477 struct policy_file file = { data, len }, *fp = &file;
75478
75479 + pax_track_stack();
75480 +
75481 if (!ss_initialized) {
75482 avtab_cache_init();
75483 if (policydb_read(&policydb, fp)) {
75484 diff -urNp linux-2.6.32.45/security/smack/smack_lsm.c linux-2.6.32.45/security/smack/smack_lsm.c
75485 --- linux-2.6.32.45/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
75486 +++ linux-2.6.32.45/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
75487 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
75488 return 0;
75489 }
75490
75491 -struct security_operations smack_ops = {
75492 +struct security_operations smack_ops __read_only = {
75493 .name = "smack",
75494
75495 .ptrace_access_check = smack_ptrace_access_check,
75496 diff -urNp linux-2.6.32.45/security/tomoyo/tomoyo.c linux-2.6.32.45/security/tomoyo/tomoyo.c
75497 --- linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
75498 +++ linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
75499 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
75500 * tomoyo_security_ops is a "struct security_operations" which is used for
75501 * registering TOMOYO.
75502 */
75503 -static struct security_operations tomoyo_security_ops = {
75504 +static struct security_operations tomoyo_security_ops __read_only = {
75505 .name = "tomoyo",
75506 .cred_alloc_blank = tomoyo_cred_alloc_blank,
75507 .cred_prepare = tomoyo_cred_prepare,
75508 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.c linux-2.6.32.45/sound/aoa/codecs/onyx.c
75509 --- linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
75510 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
75511 @@ -53,7 +53,7 @@ struct onyx {
75512 spdif_locked:1,
75513 analog_locked:1,
75514 original_mute:2;
75515 - int open_count;
75516 + local_t open_count;
75517 struct codec_info *codec_info;
75518
75519 /* mutex serializes concurrent access to the device
75520 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
75521 struct onyx *onyx = cii->codec_data;
75522
75523 mutex_lock(&onyx->mutex);
75524 - onyx->open_count++;
75525 + local_inc(&onyx->open_count);
75526 mutex_unlock(&onyx->mutex);
75527
75528 return 0;
75529 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
75530 struct onyx *onyx = cii->codec_data;
75531
75532 mutex_lock(&onyx->mutex);
75533 - onyx->open_count--;
75534 - if (!onyx->open_count)
75535 + if (local_dec_and_test(&onyx->open_count))
75536 onyx->spdif_locked = onyx->analog_locked = 0;
75537 mutex_unlock(&onyx->mutex);
75538
75539 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.h linux-2.6.32.45/sound/aoa/codecs/onyx.h
75540 --- linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
75541 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
75542 @@ -11,6 +11,7 @@
75543 #include <linux/i2c.h>
75544 #include <asm/pmac_low_i2c.h>
75545 #include <asm/prom.h>
75546 +#include <asm/local.h>
75547
75548 /* PCM3052 register definitions */
75549
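
Editor's note: the onyx driver's open_count moves from a plain int to a local_t updated with local_inc()/local_dec_and_test(), the same conversion applied to mts64 and portman2x4 below. A minimal sketch of that <asm/local.h> API; struct dev_state and the dev_* helpers are illustrative:

#include <asm/local.h>

struct dev_state {
        local_t open_count;
};

static void dev_open(struct dev_state *s)
{
        local_inc(&s->open_count);
}

static int dev_close(struct dev_state *s)
{
        /* local_dec_and_test() returns true when the count hits zero */
        return local_dec_and_test(&s->open_count);
}

static int dev_is_open(struct dev_state *s)
{
        return local_read(&s->open_count) != 0;
}
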
75550 diff -urNp linux-2.6.32.45/sound/core/seq/seq_device.c linux-2.6.32.45/sound/core/seq/seq_device.c
75551 --- linux-2.6.32.45/sound/core/seq/seq_device.c 2011-03-27 14:31:47.000000000 -0400
75552 +++ linux-2.6.32.45/sound/core/seq/seq_device.c 2011-08-05 20:33:55.000000000 -0400
75553 @@ -63,7 +63,7 @@ struct ops_list {
75554 int argsize; /* argument size */
75555
75556 /* operators */
75557 - struct snd_seq_dev_ops ops;
75558 + struct snd_seq_dev_ops *ops;
75559
75560 /* registred devices */
75561 struct list_head dev_list; /* list of devices */
75562 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
75563
75564 mutex_lock(&ops->reg_mutex);
75565 /* copy driver operators */
75566 - ops->ops = *entry;
75567 + ops->ops = entry;
75568 ops->driver |= DRIVER_LOADED;
75569 ops->argsize = argsize;
75570
75571 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
75572 dev->name, ops->id, ops->argsize, dev->argsize);
75573 return -EINVAL;
75574 }
75575 - if (ops->ops.init_device(dev) >= 0) {
75576 + if (ops->ops->init_device(dev) >= 0) {
75577 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
75578 ops->num_init_devices++;
75579 } else {
75580 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
75581 dev->name, ops->id, ops->argsize, dev->argsize);
75582 return -EINVAL;
75583 }
75584 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
75585 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
75586 dev->status = SNDRV_SEQ_DEVICE_FREE;
75587 dev->driver_data = NULL;
75588 ops->num_init_devices--;
75589 diff -urNp linux-2.6.32.45/sound/drivers/mts64.c linux-2.6.32.45/sound/drivers/mts64.c
75590 --- linux-2.6.32.45/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
75591 +++ linux-2.6.32.45/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
75592 @@ -27,6 +27,7 @@
75593 #include <sound/initval.h>
75594 #include <sound/rawmidi.h>
75595 #include <sound/control.h>
75596 +#include <asm/local.h>
75597
75598 #define CARD_NAME "Miditerminal 4140"
75599 #define DRIVER_NAME "MTS64"
75600 @@ -65,7 +66,7 @@ struct mts64 {
75601 struct pardevice *pardev;
75602 int pardev_claimed;
75603
75604 - int open_count;
75605 + local_t open_count;
75606 int current_midi_output_port;
75607 int current_midi_input_port;
75608 u8 mode[MTS64_NUM_INPUT_PORTS];
75609 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
75610 {
75611 struct mts64 *mts = substream->rmidi->private_data;
75612
75613 - if (mts->open_count == 0) {
75614 + if (local_read(&mts->open_count) == 0) {
75615 /* We don't need a spinlock here, because this is just called
75616 if the device has not been opened before.
75617 So there aren't any IRQs from the device */
75618 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
75619
75620 msleep(50);
75621 }
75622 - ++(mts->open_count);
75623 + local_inc(&mts->open_count);
75624
75625 return 0;
75626 }
75627 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
75628 struct mts64 *mts = substream->rmidi->private_data;
75629 unsigned long flags;
75630
75631 - --(mts->open_count);
75632 - if (mts->open_count == 0) {
75633 + if (local_dec_return(&mts->open_count) == 0) {
75634 /* We need the spinlock_irqsave here because we can still
75635 have IRQs at this point */
75636 spin_lock_irqsave(&mts->lock, flags);
75637 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
75638
75639 msleep(500);
75640
75641 - } else if (mts->open_count < 0)
75642 - mts->open_count = 0;
75643 + } else if (local_read(&mts->open_count) < 0)
75644 + local_set(&mts->open_count, 0);
75645
75646 return 0;
75647 }
75648 diff -urNp linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c
75649 --- linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-03-27 14:31:47.000000000 -0400
75650 +++ linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:33:55.000000000 -0400
75651 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
75652 MODULE_DESCRIPTION("OPL4 driver");
75653 MODULE_LICENSE("GPL");
75654
75655 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
75656 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
75657 {
75658 int timeout = 10;
75659 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
75660 diff -urNp linux-2.6.32.45/sound/drivers/portman2x4.c linux-2.6.32.45/sound/drivers/portman2x4.c
75661 --- linux-2.6.32.45/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
75662 +++ linux-2.6.32.45/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
75663 @@ -46,6 +46,7 @@
75664 #include <sound/initval.h>
75665 #include <sound/rawmidi.h>
75666 #include <sound/control.h>
75667 +#include <asm/local.h>
75668
75669 #define CARD_NAME "Portman 2x4"
75670 #define DRIVER_NAME "portman"
75671 @@ -83,7 +84,7 @@ struct portman {
75672 struct pardevice *pardev;
75673 int pardev_claimed;
75674
75675 - int open_count;
75676 + local_t open_count;
75677 int mode[PORTMAN_NUM_INPUT_PORTS];
75678 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
75679 };
75680 diff -urNp linux-2.6.32.45/sound/isa/cmi8330.c linux-2.6.32.45/sound/isa/cmi8330.c
75681 --- linux-2.6.32.45/sound/isa/cmi8330.c 2011-03-27 14:31:47.000000000 -0400
75682 +++ linux-2.6.32.45/sound/isa/cmi8330.c 2011-08-05 20:33:55.000000000 -0400
75683 @@ -455,16 +455,16 @@ static int __devinit snd_cmi8330_pcm(str
75684
75685 /* SB16 */
75686 ops = snd_sb16dsp_get_pcm_ops(CMI_SB_STREAM);
75687 - chip->streams[CMI_SB_STREAM].ops = *ops;
75688 + memcpy((void *)&chip->streams[CMI_SB_STREAM].ops, ops, sizeof(*ops));
75689 chip->streams[CMI_SB_STREAM].open = ops->open;
75690 - chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
75691 + *(void **)&chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
75692 chip->streams[CMI_SB_STREAM].private_data = chip->sb;
75693
75694 /* AD1848 */
75695 ops = snd_wss_get_pcm_ops(CMI_AD_STREAM);
75696 - chip->streams[CMI_AD_STREAM].ops = *ops;
75697 + memcpy((void *)&chip->streams[CMI_AD_STREAM].ops, ops, sizeof(*ops));
75698 chip->streams[CMI_AD_STREAM].open = ops->open;
75699 - chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
75700 + *(void **)&chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
75701 chip->streams[CMI_AD_STREAM].private_data = chip->wss;
75702
75703 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK].ops);
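
Editor's note: the cmi8330 change replaces a struct assignment with memcpy() through a cast and patches the open callback via *(void **)&..., the usual workaround once the ops template is treated as const. A small illustrative sketch of the idiom; struct pcm_ops and the callbacks are made up, and the store relies on GNU C accepting function-pointer-to-void* conversions, mirroring the patch's own code:

#include <string.h>

struct pcm_ops {
        int (*open)(void);
};

static int generic_open(void) { return 0; }
static int custom_open(void)  { return 1; }

static const struct pcm_ops template_ops = { .open = generic_open };

static void install_custom_open(const struct pcm_ops *dst)
{
        /* copy the const template wholesale ... */
        memcpy((void *)dst, &template_ops, sizeof(*dst));
        /* ... then override one callback through an explicit cast */
        *(void **)&dst->open = (void *)custom_open;
}
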
75704 diff -urNp linux-2.6.32.45/sound/oss/sb_audio.c linux-2.6.32.45/sound/oss/sb_audio.c
75705 --- linux-2.6.32.45/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
75706 +++ linux-2.6.32.45/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
75707 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
75708 buf16 = (signed short *)(localbuf + localoffs);
75709 while (c)
75710 {
75711 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75712 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75713 if (copy_from_user(lbuf8,
75714 userbuf+useroffs + p,
75715 locallen))
75716 diff -urNp linux-2.6.32.45/sound/oss/swarm_cs4297a.c linux-2.6.32.45/sound/oss/swarm_cs4297a.c
75717 --- linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
75718 +++ linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
75719 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
75720 {
75721 struct cs4297a_state *s;
75722 u32 pwr, id;
75723 - mm_segment_t fs;
75724 int rval;
75725 #ifndef CONFIG_BCM_CS4297A_CSWARM
75726 u64 cfg;
75727 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
75728 if (!rval) {
75729 char *sb1250_duart_present;
75730
75731 +#if 0
75732 + mm_segment_t fs;
75733 fs = get_fs();
75734 set_fs(KERNEL_DS);
75735 -#if 0
75736 val = SOUND_MASK_LINE;
75737 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
75738 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
75739 val = initvol[i].vol;
75740 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
75741 }
75742 + set_fs(fs);
75743 // cs4297a_write_ac97(s, 0x18, 0x0808);
75744 #else
75745 // cs4297a_write_ac97(s, 0x5e, 0x180);
75746 cs4297a_write_ac97(s, 0x02, 0x0808);
75747 cs4297a_write_ac97(s, 0x18, 0x0808);
75748 #endif
75749 - set_fs(fs);
75750
75751 list_add(&s->list, &cs4297a_devs);
75752
75753 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_codec.c linux-2.6.32.45/sound/pci/ac97/ac97_codec.c
75754 --- linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
75755 +++ linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
75756 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
75757 }
75758
75759 /* build_ops to do nothing */
75760 -static struct snd_ac97_build_ops null_build_ops;
75761 +static const struct snd_ac97_build_ops null_build_ops;
75762
75763 #ifdef CONFIG_SND_AC97_POWER_SAVE
75764 static void do_update_power(struct work_struct *work)
75765 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_patch.c linux-2.6.32.45/sound/pci/ac97/ac97_patch.c
75766 --- linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
75767 +++ linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
75768 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
75769 return 0;
75770 }
75771
75772 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75773 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75774 .build_spdif = patch_yamaha_ymf743_build_spdif,
75775 .build_3d = patch_yamaha_ymf7x3_3d,
75776 };
75777 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
75778 return 0;
75779 }
75780
75781 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75782 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75783 .build_3d = patch_yamaha_ymf7x3_3d,
75784 .build_post_spdif = patch_yamaha_ymf753_post_spdif
75785 };
75786 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
75787 return 0;
75788 }
75789
75790 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75791 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75792 .build_specific = patch_wolfson_wm9703_specific,
75793 };
75794
75795 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
75796 return 0;
75797 }
75798
75799 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75800 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75801 .build_specific = patch_wolfson_wm9704_specific,
75802 };
75803
75804 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
75805 return 0;
75806 }
75807
75808 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75809 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75810 .build_specific = patch_wolfson_wm9705_specific,
75811 };
75812
75813 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
75814 return 0;
75815 }
75816
75817 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75818 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75819 .build_specific = patch_wolfson_wm9711_specific,
75820 };
75821
75822 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
75823 }
75824 #endif
75825
75826 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75827 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75828 .build_specific = patch_wolfson_wm9713_specific,
75829 .build_3d = patch_wolfson_wm9713_3d,
75830 #ifdef CONFIG_PM
75831 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
75832 return 0;
75833 }
75834
75835 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75836 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75837 .build_3d = patch_sigmatel_stac9700_3d,
75838 .build_specific = patch_sigmatel_stac97xx_specific
75839 };
75840 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
75841 return patch_sigmatel_stac97xx_specific(ac97);
75842 }
75843
75844 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75845 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75846 .build_3d = patch_sigmatel_stac9708_3d,
75847 .build_specific = patch_sigmatel_stac9708_specific
75848 };
75849 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
75850 return 0;
75851 }
75852
75853 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75854 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75855 .build_3d = patch_sigmatel_stac9700_3d,
75856 .build_specific = patch_sigmatel_stac9758_specific
75857 };
75858 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
75859 return 0;
75860 }
75861
75862 -static struct snd_ac97_build_ops patch_cirrus_ops = {
75863 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
75864 .build_spdif = patch_cirrus_build_spdif
75865 };
75866
75867 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
75868 return 0;
75869 }
75870
75871 -static struct snd_ac97_build_ops patch_conexant_ops = {
75872 +static const struct snd_ac97_build_ops patch_conexant_ops = {
75873 .build_spdif = patch_conexant_build_spdif
75874 };
75875
75876 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
75877 }
75878 }
75879
75880 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
75881 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
75882 #ifdef CONFIG_PM
75883 .resume = ad18xx_resume
75884 #endif
75885 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
75886 return 0;
75887 }
75888
75889 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
75890 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
75891 .build_specific = &patch_ad1885_specific,
75892 #ifdef CONFIG_PM
75893 .resume = ad18xx_resume
75894 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
75895 return 0;
75896 }
75897
75898 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
75899 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
75900 .build_specific = &patch_ad1886_specific,
75901 #ifdef CONFIG_PM
75902 .resume = ad18xx_resume
75903 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
75904 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75905 }
75906
75907 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75908 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75909 .build_post_spdif = patch_ad198x_post_spdif,
75910 .build_specific = patch_ad1981a_specific,
75911 #ifdef CONFIG_PM
75912 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
75913 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75914 }
75915
75916 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75917 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75918 .build_post_spdif = patch_ad198x_post_spdif,
75919 .build_specific = patch_ad1981b_specific,
75920 #ifdef CONFIG_PM
75921 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
75922 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
75923 }
75924
75925 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
75926 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
75927 .build_post_spdif = patch_ad198x_post_spdif,
75928 .build_specific = patch_ad1888_specific,
75929 #ifdef CONFIG_PM
75930 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
75931 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
75932 }
75933
75934 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
75935 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
75936 .build_post_spdif = patch_ad198x_post_spdif,
75937 .build_specific = patch_ad1980_specific,
75938 #ifdef CONFIG_PM
75939 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
75940 ARRAY_SIZE(snd_ac97_ad1985_controls));
75941 }
75942
75943 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
75944 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
75945 .build_post_spdif = patch_ad198x_post_spdif,
75946 .build_specific = patch_ad1985_specific,
75947 #ifdef CONFIG_PM
75948 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
75949 ARRAY_SIZE(snd_ac97_ad1985_controls));
75950 }
75951
75952 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
75953 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
75954 .build_post_spdif = patch_ad198x_post_spdif,
75955 .build_specific = patch_ad1986_specific,
75956 #ifdef CONFIG_PM
75957 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
75958 return 0;
75959 }
75960
75961 -static struct snd_ac97_build_ops patch_alc650_ops = {
75962 +static const struct snd_ac97_build_ops patch_alc650_ops = {
75963 .build_specific = patch_alc650_specific,
75964 .update_jacks = alc650_update_jacks
75965 };
75966 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
75967 return 0;
75968 }
75969
75970 -static struct snd_ac97_build_ops patch_alc655_ops = {
75971 +static const struct snd_ac97_build_ops patch_alc655_ops = {
75972 .build_specific = patch_alc655_specific,
75973 .update_jacks = alc655_update_jacks
75974 };
75975 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
75976 return 0;
75977 }
75978
75979 -static struct snd_ac97_build_ops patch_alc850_ops = {
75980 +static const struct snd_ac97_build_ops patch_alc850_ops = {
75981 .build_specific = patch_alc850_specific,
75982 .update_jacks = alc850_update_jacks
75983 };
75984 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
75985 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
75986 }
75987
75988 -static struct snd_ac97_build_ops patch_cm9738_ops = {
75989 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
75990 .build_specific = patch_cm9738_specific,
75991 .update_jacks = cm9738_update_jacks
75992 };
75993 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
75994 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
75995 }
75996
75997 -static struct snd_ac97_build_ops patch_cm9739_ops = {
75998 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
75999 .build_specific = patch_cm9739_specific,
76000 .build_post_spdif = patch_cm9739_post_spdif,
76001 .update_jacks = cm9739_update_jacks
76002 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
76003 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
76004 }
76005
76006 -static struct snd_ac97_build_ops patch_cm9761_ops = {
76007 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
76008 .build_specific = patch_cm9761_specific,
76009 .build_post_spdif = patch_cm9761_post_spdif,
76010 .update_jacks = cm9761_update_jacks
76011 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
76012 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
76013 }
76014
76015 -static struct snd_ac97_build_ops patch_cm9780_ops = {
76016 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
76017 .build_specific = patch_cm9780_specific,
76018 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
76019 };
76020 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
76021 return 0;
76022 }
76023
76024 -static struct snd_ac97_build_ops patch_vt1616_ops = {
76025 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
76026 .build_specific = patch_vt1616_specific
76027 };
76028
76029 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
76030 return 0;
76031 }
76032
76033 -static struct snd_ac97_build_ops patch_it2646_ops = {
76034 +static const struct snd_ac97_build_ops patch_it2646_ops = {
76035 .build_specific = patch_it2646_specific,
76036 .update_jacks = it2646_update_jacks
76037 };
76038 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
76039 return 0;
76040 }
76041
76042 -static struct snd_ac97_build_ops patch_si3036_ops = {
76043 +static const struct snd_ac97_build_ops patch_si3036_ops = {
76044 .build_specific = patch_si3036_specific,
76045 };
76046
76047 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
76048 return 0;
76049 }
76050
76051 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
76052 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
76053 .build_specific = patch_ucb1400_specific,
76054 };
76055
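Every hunk in ac97_patch.c above makes the same one-word change: the per-codec snd_ac97_build_ops tables gain a const qualifier, so they are emitted into read-only data and any later store through them is rejected at compile time. A minimal sketch of the pattern, using illustrative names rather than the real ALSA definitions:

#include <stdio.h>

struct build_ops {
	int (*build_specific)(void);
	int (*build_spdif)(void);
};

static int demo_build_specific(void)
{
	return 0;
}

/* const: the table lands in .rodata; overwriting a callback at
 * run time becomes a compile-time error instead. */
static const struct build_ops demo_ops = {
	.build_specific = demo_build_specific,
};

int main(void)
{
	/* demo_ops.build_spdif = NULL;  -- would no longer compile */
	return demo_ops.build_specific();
}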
76056 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_codec.h linux-2.6.32.45/sound/pci/hda/hda_codec.h
76057 --- linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-03-27 14:31:47.000000000 -0400
76058 +++ linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-08-05 20:33:55.000000000 -0400
76059 @@ -580,7 +580,7 @@ struct hda_bus_ops {
76060 /* notify power-up/down from codec to controller */
76061 void (*pm_notify)(struct hda_bus *bus);
76062 #endif
76063 -};
76064 +} __no_const;
76065
76066 /* template to pass to the bus constructor */
76067 struct hda_bus_template {
76068 @@ -705,7 +705,7 @@ struct hda_pcm_ops {
76069 struct snd_pcm_substream *substream);
76070 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
76071 struct snd_pcm_substream *substream);
76072 -};
76073 +} __no_const;
76074
76075 /* PCM information for each substream */
76076 struct hda_pcm_stream {
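The two ops structures above are left writable on purpose: their callbacks are filled in per driver at run time, so they are tagged __no_const to opt out of the automatic constification done by the gcc plugin added at the end of this patch. Outside a plugin-enabled build the macro has to expand to nothing. A hedged sketch of how such a macro is usually wired up (the config symbol below is an assumption, not quoted from this patch; only the attribute name no_const comes from constify_plugin.c):

/* sketch of a compiler.h-style definition, assumed names */
#ifdef CONFIG_CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* attribute registered by the plugin */
#else
#define __no_const
#endif

struct demo_bus_ops {
	int  (*command)(unsigned int verb);
	void (*pm_notify)(void);
} __no_const;	/* callbacks assigned at probe time, so keep the type writable */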
76077 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_generic.c linux-2.6.32.45/sound/pci/hda/hda_generic.c
76078 --- linux-2.6.32.45/sound/pci/hda/hda_generic.c 2011-03-27 14:31:47.000000000 -0400
76079 +++ linux-2.6.32.45/sound/pci/hda/hda_generic.c 2011-08-05 20:33:55.000000000 -0400
76080 @@ -1097,7 +1097,7 @@ int snd_hda_parse_generic_codec(struct h
76081 (err = parse_output(codec)) < 0)
76082 goto error;
76083
76084 - codec->patch_ops = generic_patch_ops;
76085 + memcpy((void *)&codec->patch_ops, &generic_patch_ops, sizeof(generic_patch_ops));
76086
76087 return 0;
76088
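Because the patch_ops structures become const once constified, plain struct assignment and direct member stores such as codec->patch_ops.init = ... no longer compile; the patch therefore rewrites them as a memcpy through a (void *) cast and as pointer stores through *(void **)&. A minimal sketch of both idioms with illustrative types (it relies, as the patch does, on the target object living in writable memory even though the member's type is const-qualified):

#include <string.h>

struct patch_ops {
	int (*init)(void);
	int (*build_pcms)(void);
};

struct codec {
	const struct patch_ops patch_ops;	/* const-qualified, as after constification */
};

static const struct patch_ops generic_patch_ops;

static int demo_init(void)
{
	return 0;
}

static void install_ops(struct codec *codec)
{
	/* whole-struct copy: byte copy through a void * cast instead of
	 * assignment, since the destination member is const-qualified */
	memcpy((void *)&codec->patch_ops, &generic_patch_ops, sizeof(generic_patch_ops));

	/* single-member override: store the function pointer through a
	 * void ** alias of the const member */
	*(void **)&codec->patch_ops.init = demo_init;
}

int main(void)
{
	struct codec c = { { 0, 0 } };	/* stack object: writable storage despite the const member */

	install_ops(&c);
	return c.patch_ops.init();
}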
76089 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_analog.c linux-2.6.32.45/sound/pci/hda/patch_analog.c
76090 --- linux-2.6.32.45/sound/pci/hda/patch_analog.c 2011-03-27 14:31:47.000000000 -0400
76091 +++ linux-2.6.32.45/sound/pci/hda/patch_analog.c 2011-08-05 20:33:55.000000000 -0400
76092 @@ -1069,7 +1069,7 @@ static int patch_ad1986a(struct hda_code
76093 #endif
76094 spec->vmaster_nid = 0x1b;
76095
76096 - codec->patch_ops = ad198x_patch_ops;
76097 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76098
76099 /* override some parameters */
76100 board_config = snd_hda_check_board_config(codec, AD1986A_MODELS,
76101 @@ -1120,8 +1120,8 @@ static int patch_ad1986a(struct hda_code
76102 if (!is_jack_available(codec, 0x25))
76103 spec->multiout.dig_out_nid = 0;
76104 spec->input_mux = &ad1986a_automic_capture_source;
76105 - codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76106 - codec->patch_ops.init = ad1986a_automic_init;
76107 + *(void **)&codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76108 + *(void **)&codec->patch_ops.init = ad1986a_automic_init;
76109 break;
76110 case AD1986A_SAMSUNG_P50:
76111 spec->num_mixers = 2;
76112 @@ -1137,8 +1137,8 @@ static int patch_ad1986a(struct hda_code
76113 if (!is_jack_available(codec, 0x25))
76114 spec->multiout.dig_out_nid = 0;
76115 spec->input_mux = &ad1986a_automic_capture_source;
76116 - codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76117 - codec->patch_ops.init = ad1986a_samsung_p50_init;
76118 + *(void **)&codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76119 + *(void **)&codec->patch_ops.init = ad1986a_samsung_p50_init;
76120 break;
76121 case AD1986A_LAPTOP_AUTOMUTE:
76122 spec->num_mixers = 3;
76123 @@ -1154,8 +1154,8 @@ static int patch_ad1986a(struct hda_code
76124 if (!is_jack_available(codec, 0x25))
76125 spec->multiout.dig_out_nid = 0;
76126 spec->input_mux = &ad1986a_laptop_eapd_capture_source;
76127 - codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76128 - codec->patch_ops.init = ad1986a_hp_init;
76129 + *(void **)&codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76130 + *(void **)&codec->patch_ops.init = ad1986a_hp_init;
76131 /* Lenovo N100 seems to report the reversed bit
76132 * for HP jack-sensing
76133 */
76134 @@ -1363,7 +1363,7 @@ static int patch_ad1983(struct hda_codec
76135 #endif
76136 spec->vmaster_nid = 0x05;
76137
76138 - codec->patch_ops = ad198x_patch_ops;
76139 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76140
76141 return 0;
76142 }
76143 @@ -1769,7 +1769,7 @@ static int patch_ad1981(struct hda_codec
76144 #endif
76145 spec->vmaster_nid = 0x05;
76146
76147 - codec->patch_ops = ad198x_patch_ops;
76148 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76149
76150 /* override some parameters */
76151 board_config = snd_hda_check_board_config(codec, AD1981_MODELS,
76152 @@ -1783,8 +1783,8 @@ static int patch_ad1981(struct hda_codec
76153 spec->multiout.dig_out_nid = 0;
76154 spec->input_mux = &ad1981_hp_capture_source;
76155
76156 - codec->patch_ops.init = ad1981_hp_init;
76157 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76158 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76159 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76160 break;
76161 case AD1981_THINKPAD:
76162 spec->mixers[0] = ad1981_thinkpad_mixers;
76163 @@ -1805,8 +1805,8 @@ static int patch_ad1981(struct hda_codec
76164 spec->init_verbs[1] = ad1981_toshiba_init_verbs;
76165 spec->multiout.dig_out_nid = 0;
76166 spec->input_mux = &ad1981_hp_capture_source;
76167 - codec->patch_ops.init = ad1981_hp_init;
76168 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76169 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76170 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76171 break;
76172 }
76173 return 0;
76174 @@ -3096,14 +3096,14 @@ static int patch_ad1988(struct hda_codec
76175 if (spec->dig_in_nid && codec->vendor_id < 0x11d4989a)
76176 spec->mixers[spec->num_mixers++] = ad1988_spdif_in_mixers;
76177
76178 - codec->patch_ops = ad198x_patch_ops;
76179 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76180 switch (board_config) {
76181 case AD1988_AUTO:
76182 - codec->patch_ops.init = ad1988_auto_init;
76183 + *(void **)&codec->patch_ops.init = ad1988_auto_init;
76184 break;
76185 case AD1988_LAPTOP:
76186 case AD1988_LAPTOP_DIG:
76187 - codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76188 + *(void **)&codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76189 break;
76190 }
76191 #ifdef CONFIG_SND_HDA_POWER_SAVE
76192 @@ -3321,7 +3321,7 @@ static int patch_ad1884(struct hda_codec
76193 /* we need to cover all playback volumes */
76194 spec->slave_vols = ad1884_slave_vols;
76195
76196 - codec->patch_ops = ad198x_patch_ops;
76197 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76198
76199 return 0;
76200 }
76201 @@ -3529,7 +3529,7 @@ static int patch_ad1984(struct hda_codec
76202 case AD1984_BASIC:
76203 /* additional digital mics */
76204 spec->mixers[spec->num_mixers++] = ad1984_dmic_mixers;
76205 - codec->patch_ops.build_pcms = ad1984_build_pcms;
76206 + *(void **)&codec->patch_ops.build_pcms = ad1984_build_pcms;
76207 break;
76208 case AD1984_THINKPAD:
76209 spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
76210 @@ -4229,7 +4229,7 @@ static int patch_ad1884a(struct hda_code
76211 #ifdef CONFIG_SND_HDA_POWER_SAVE
76212 spec->loopback.amplist = ad1884a_loopbacks;
76213 #endif
76214 - codec->patch_ops = ad198x_patch_ops;
76215 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76216
76217 /* override some parameters */
76218 board_config = snd_hda_check_board_config(codec, AD1884A_MODELS,
76219 @@ -4240,8 +4240,8 @@ static int patch_ad1884a(struct hda_code
76220 spec->mixers[0] = ad1884a_laptop_mixers;
76221 spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
76222 spec->multiout.dig_out_nid = 0;
76223 - codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76224 - codec->patch_ops.init = ad1884a_laptop_init;
76225 + *(void **)&codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76226 + *(void **)&codec->patch_ops.init = ad1884a_laptop_init;
76227 /* set the upper-limit for mixer amp to 0dB for avoiding the
76228 * possible damage by overloading
76229 */
76230 @@ -4255,8 +4255,8 @@ static int patch_ad1884a(struct hda_code
76231 spec->mixers[0] = ad1884a_mobile_mixers;
76232 spec->init_verbs[0] = ad1884a_mobile_verbs;
76233 spec->multiout.dig_out_nid = 0;
76234 - codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76235 - codec->patch_ops.init = ad1884a_hp_init;
76236 + *(void **)&codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76237 + *(void **)&codec->patch_ops.init = ad1884a_hp_init;
76238 /* set the upper-limit for mixer amp to 0dB for avoiding the
76239 * possible damage by overloading
76240 */
76241 @@ -4272,15 +4272,15 @@ static int patch_ad1884a(struct hda_code
76242 ad1984a_thinkpad_verbs;
76243 spec->multiout.dig_out_nid = 0;
76244 spec->input_mux = &ad1984a_thinkpad_capture_source;
76245 - codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76246 - codec->patch_ops.init = ad1984a_thinkpad_init;
76247 + *(void **)&codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76248 + *(void **)&codec->patch_ops.init = ad1984a_thinkpad_init;
76249 break;
76250 case AD1984A_TOUCHSMART:
76251 spec->mixers[0] = ad1984a_touchsmart_mixers;
76252 spec->init_verbs[0] = ad1984a_touchsmart_verbs;
76253 spec->multiout.dig_out_nid = 0;
76254 - codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76255 - codec->patch_ops.init = ad1984a_touchsmart_init;
76256 + *(void **)&codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76257 + *(void **)&codec->patch_ops.init = ad1984a_touchsmart_init;
76258 /* set the upper-limit for mixer amp to 0dB for avoiding the
76259 * possible damage by overloading
76260 */
76261 @@ -4607,7 +4607,7 @@ static int patch_ad1882(struct hda_codec
76262 #endif
76263 spec->vmaster_nid = 0x04;
76264
76265 - codec->patch_ops = ad198x_patch_ops;
76266 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76267
76268 /* override some parameters */
76269 board_config = snd_hda_check_board_config(codec, AD1882_MODELS,
76270 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c
76271 --- linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-03-27 14:31:47.000000000 -0400
76272 +++ linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-08-05 20:33:55.000000000 -0400
76273 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_code
76274 */
76275 spec->multiout.dig_out_nid = CVT_NID;
76276
76277 - codec->patch_ops = atihdmi_patch_ops;
76278 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
76279
76280 return 0;
76281 }
76282 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_ca0110.c linux-2.6.32.45/sound/pci/hda/patch_ca0110.c
76283 --- linux-2.6.32.45/sound/pci/hda/patch_ca0110.c 2011-03-27 14:31:47.000000000 -0400
76284 +++ linux-2.6.32.45/sound/pci/hda/patch_ca0110.c 2011-08-05 20:33:55.000000000 -0400
76285 @@ -525,7 +525,7 @@ static int patch_ca0110(struct hda_codec
76286 if (err < 0)
76287 goto error;
76288
76289 - codec->patch_ops = ca0110_patch_ops;
76290 + memcpy((void *)&codec->patch_ops, &ca0110_patch_ops, sizeof(ca0110_patch_ops));
76291
76292 return 0;
76293
76294 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_cirrus.c linux-2.6.32.45/sound/pci/hda/patch_cirrus.c
76295 --- linux-2.6.32.45/sound/pci/hda/patch_cirrus.c 2011-05-10 22:12:02.000000000 -0400
76296 +++ linux-2.6.32.45/sound/pci/hda/patch_cirrus.c 2011-08-05 20:33:55.000000000 -0400
76297 @@ -1191,7 +1191,7 @@ static int patch_cs420x(struct hda_codec
76298 if (err < 0)
76299 goto error;
76300
76301 - codec->patch_ops = cs_patch_ops;
76302 + memcpy((void *)&codec->patch_ops, &cs_patch_ops, sizeof(cs_patch_ops));
76303
76304 return 0;
76305
76306 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_cmedia.c linux-2.6.32.45/sound/pci/hda/patch_cmedia.c
76307 --- linux-2.6.32.45/sound/pci/hda/patch_cmedia.c 2011-03-27 14:31:47.000000000 -0400
76308 +++ linux-2.6.32.45/sound/pci/hda/patch_cmedia.c 2011-08-05 20:33:55.000000000 -0400
76309 @@ -728,7 +728,7 @@ static int patch_cmi9880(struct hda_code
76310
76311 spec->adc_nids = cmi9880_adc_nids;
76312
76313 - codec->patch_ops = cmi9880_patch_ops;
76314 + memcpy((void *)&codec->patch_ops, &cmi9880_patch_ops, sizeof(cmi9880_patch_ops));
76315
76316 return 0;
76317 }
76318 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_conexant.c linux-2.6.32.45/sound/pci/hda/patch_conexant.c
76319 --- linux-2.6.32.45/sound/pci/hda/patch_conexant.c 2011-03-27 14:31:47.000000000 -0400
76320 +++ linux-2.6.32.45/sound/pci/hda/patch_conexant.c 2011-08-05 20:33:55.000000000 -0400
76321 @@ -1119,55 +1119,55 @@ static int patch_cxt5045(struct hda_code
76322 spec->channel_mode = cxt5045_modes,
76323
76324
76325 - codec->patch_ops = conexant_patch_ops;
76326 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76327
76328 board_config = snd_hda_check_board_config(codec, CXT5045_MODELS,
76329 cxt5045_models,
76330 cxt5045_cfg_tbl);
76331 switch (board_config) {
76332 case CXT5045_LAPTOP_HPSENSE:
76333 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76334 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76335 spec->input_mux = &cxt5045_capture_source;
76336 spec->num_init_verbs = 2;
76337 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76338 spec->mixers[0] = cxt5045_mixers;
76339 - codec->patch_ops.init = cxt5045_init;
76340 + *(void **)&codec->patch_ops.init = cxt5045_init;
76341 break;
76342 case CXT5045_LAPTOP_MICSENSE:
76343 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76344 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76345 spec->input_mux = &cxt5045_capture_source;
76346 spec->num_init_verbs = 2;
76347 spec->init_verbs[1] = cxt5045_mic_sense_init_verbs;
76348 spec->mixers[0] = cxt5045_mixers;
76349 - codec->patch_ops.init = cxt5045_init;
76350 + *(void **)&codec->patch_ops.init = cxt5045_init;
76351 break;
76352 default:
76353 case CXT5045_LAPTOP_HPMICSENSE:
76354 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76355 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76356 spec->input_mux = &cxt5045_capture_source;
76357 spec->num_init_verbs = 3;
76358 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76359 spec->init_verbs[2] = cxt5045_mic_sense_init_verbs;
76360 spec->mixers[0] = cxt5045_mixers;
76361 - codec->patch_ops.init = cxt5045_init;
76362 + *(void **)&codec->patch_ops.init = cxt5045_init;
76363 break;
76364 case CXT5045_BENQ:
76365 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76366 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76367 spec->input_mux = &cxt5045_capture_source_benq;
76368 spec->num_init_verbs = 1;
76369 spec->init_verbs[0] = cxt5045_benq_init_verbs;
76370 spec->mixers[0] = cxt5045_mixers;
76371 spec->mixers[1] = cxt5045_benq_mixers;
76372 spec->num_mixers = 2;
76373 - codec->patch_ops.init = cxt5045_init;
76374 + *(void **)&codec->patch_ops.init = cxt5045_init;
76375 break;
76376 case CXT5045_LAPTOP_HP530:
76377 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76378 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76379 spec->input_mux = &cxt5045_capture_source_hp530;
76380 spec->num_init_verbs = 2;
76381 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76382 spec->mixers[0] = cxt5045_mixers_hp530;
76383 - codec->patch_ops.init = cxt5045_init;
76384 + *(void **)&codec->patch_ops.init = cxt5045_init;
76385 break;
76386 #ifdef CONFIG_SND_DEBUG
76387 case CXT5045_TEST:
76388 @@ -1556,7 +1556,7 @@ static int patch_cxt5047(struct hda_code
76389 spec->num_channel_mode = ARRAY_SIZE(cxt5047_modes),
76390 spec->channel_mode = cxt5047_modes,
76391
76392 - codec->patch_ops = conexant_patch_ops;
76393 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76394
76395 board_config = snd_hda_check_board_config(codec, CXT5047_MODELS,
76396 cxt5047_models,
76397 @@ -1565,13 +1565,13 @@ static int patch_cxt5047(struct hda_code
76398 case CXT5047_LAPTOP:
76399 spec->num_mixers = 2;
76400 spec->mixers[1] = cxt5047_hp_spk_mixers;
76401 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76402 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76403 break;
76404 case CXT5047_LAPTOP_HP:
76405 spec->num_mixers = 2;
76406 spec->mixers[1] = cxt5047_hp_only_mixers;
76407 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76408 - codec->patch_ops.init = cxt5047_hp_init;
76409 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76410 + *(void **)&codec->patch_ops.init = cxt5047_hp_init;
76411 break;
76412 case CXT5047_LAPTOP_EAPD:
76413 spec->input_mux = &cxt5047_toshiba_capture_source;
76414 @@ -1579,14 +1579,14 @@ static int patch_cxt5047(struct hda_code
76415 spec->mixers[1] = cxt5047_hp_spk_mixers;
76416 spec->num_init_verbs = 2;
76417 spec->init_verbs[1] = cxt5047_toshiba_init_verbs;
76418 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76419 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76420 break;
76421 #ifdef CONFIG_SND_DEBUG
76422 case CXT5047_TEST:
76423 spec->input_mux = &cxt5047_test_capture_source;
76424 spec->mixers[0] = cxt5047_test_mixer;
76425 spec->init_verbs[0] = cxt5047_test_init_verbs;
76426 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76427 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76428 #endif
76429 }
76430 spec->vmaster_nid = 0x13;
76431 @@ -1904,8 +1904,8 @@ static int patch_cxt5051(struct hda_code
76432 codec->spec = spec;
76433 codec->pin_amp_workaround = 1;
76434
76435 - codec->patch_ops = conexant_patch_ops;
76436 - codec->patch_ops.init = cxt5051_init;
76437 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76438 + *(void **)&codec->patch_ops.init = cxt5051_init;
76439
76440 spec->multiout.max_channels = 2;
76441 spec->multiout.num_dacs = ARRAY_SIZE(cxt5051_dac_nids);
76442 @@ -1923,7 +1923,7 @@ static int patch_cxt5051(struct hda_code
76443 spec->cur_adc = 0;
76444 spec->cur_adc_idx = 0;
76445
76446 - codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
76447 + *(void **)&codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
76448
76449 board_config = snd_hda_check_board_config(codec, CXT5051_MODELS,
76450 cxt5051_models,
76451 @@ -2372,8 +2372,8 @@ static int patch_cxt5066(struct hda_code
76452 return -ENOMEM;
76453 codec->spec = spec;
76454
76455 - codec->patch_ops = conexant_patch_ops;
76456 - codec->patch_ops.init = cxt5066_init;
76457 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76458 + *(void **)&codec->patch_ops.init = cxt5066_init;
76459
76460 spec->dell_automute = 0;
76461 spec->multiout.max_channels = 2;
76462 @@ -2413,7 +2413,7 @@ static int patch_cxt5066(struct hda_code
76463 spec->dell_automute = 1;
76464 break;
76465 case CXT5066_OLPC_XO_1_5:
76466 - codec->patch_ops.unsol_event = cxt5066_unsol_event;
76467 + *(void **)&codec->patch_ops.unsol_event = cxt5066_unsol_event;
76468 spec->init_verbs[0] = cxt5066_init_verbs_olpc;
76469 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
76470 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
76471 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c
76472 --- linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
76473 +++ linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-08-05 20:33:55.000000000 -0400
76474 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
76475 cp_ready);
76476
76477 /* TODO */
76478 - if (cp_state)
76479 - ;
76480 - if (cp_ready)
76481 - ;
76482 + if (cp_state) {
76483 + }
76484 + if (cp_ready) {
76485 + }
76486 }
76487
76488
76489 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hd
76490 spec->multiout.dig_out_nid = cvt_nid;
76491
76492 codec->spec = spec;
76493 - codec->patch_ops = intel_hdmi_patch_ops;
76494 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
76495
76496 snd_hda_eld_proc_new(codec, &spec->sink_eld);
76497
76498 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c
76499 --- linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-03-27 14:31:47.000000000 -0400
76500 +++ linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-08-05 20:33:55.000000000 -0400
76501 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_c
76502 spec->multiout.max_channels = 8;
76503 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
76504
76505 - codec->patch_ops = nvhdmi_patch_ops_8ch;
76506 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
76507
76508 return 0;
76509 }
76510 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_c
76511 spec->multiout.max_channels = 2;
76512 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
76513
76514 - codec->patch_ops = nvhdmi_patch_ops_2ch;
76515 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
76516
76517 return 0;
76518 }
76519 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_realtek.c linux-2.6.32.45/sound/pci/hda/patch_realtek.c
76520 --- linux-2.6.32.45/sound/pci/hda/patch_realtek.c 2011-06-25 12:55:35.000000000 -0400
76521 +++ linux-2.6.32.45/sound/pci/hda/patch_realtek.c 2011-08-05 20:33:55.000000000 -0400
76522 @@ -4856,7 +4856,7 @@ static int patch_alc880(struct hda_codec
76523
76524 spec->vmaster_nid = 0x0c;
76525
76526 - codec->patch_ops = alc_patch_ops;
76527 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76528 if (board_config == ALC880_AUTO)
76529 spec->init_hook = alc880_auto_init;
76530 #ifdef CONFIG_SND_HDA_POWER_SAVE
76531 @@ -6479,7 +6479,7 @@ static int patch_alc260(struct hda_codec
76532
76533 spec->vmaster_nid = 0x08;
76534
76535 - codec->patch_ops = alc_patch_ops;
76536 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76537 if (board_config == ALC260_AUTO)
76538 spec->init_hook = alc260_auto_init;
76539 #ifdef CONFIG_SND_HDA_POWER_SAVE
76540 @@ -9997,7 +9997,7 @@ static int patch_alc882(struct hda_codec
76541
76542 spec->vmaster_nid = 0x0c;
76543
76544 - codec->patch_ops = alc_patch_ops;
76545 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76546 if (board_config == ALC882_AUTO)
76547 spec->init_hook = alc882_auto_init;
76548 #ifdef CONFIG_SND_HDA_POWER_SAVE
76549 @@ -11871,7 +11871,7 @@ static int patch_alc262(struct hda_codec
76550
76551 spec->vmaster_nid = 0x0c;
76552
76553 - codec->patch_ops = alc_patch_ops;
76554 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76555 if (board_config == ALC262_AUTO)
76556 spec->init_hook = alc262_auto_init;
76557 #ifdef CONFIG_SND_HDA_POWER_SAVE
76558 @@ -12950,7 +12950,7 @@ static int patch_alc268(struct hda_codec
76559
76560 spec->vmaster_nid = 0x02;
76561
76562 - codec->patch_ops = alc_patch_ops;
76563 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76564 if (board_config == ALC268_AUTO)
76565 spec->init_hook = alc268_auto_init;
76566
76567 @@ -13636,7 +13636,7 @@ static int patch_alc269(struct hda_codec
76568
76569 spec->vmaster_nid = 0x02;
76570
76571 - codec->patch_ops = alc_patch_ops;
76572 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76573 if (board_config == ALC269_AUTO)
76574 spec->init_hook = alc269_auto_init;
76575 #ifdef CONFIG_SND_HDA_POWER_SAVE
76576 @@ -14741,7 +14741,7 @@ static int patch_alc861(struct hda_codec
76577
76578 spec->vmaster_nid = 0x03;
76579
76580 - codec->patch_ops = alc_patch_ops;
76581 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76582 if (board_config == ALC861_AUTO)
76583 spec->init_hook = alc861_auto_init;
76584 #ifdef CONFIG_SND_HDA_POWER_SAVE
76585 @@ -15727,7 +15727,7 @@ static int patch_alc861vd(struct hda_cod
76586
76587 spec->vmaster_nid = 0x02;
76588
76589 - codec->patch_ops = alc_patch_ops;
76590 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76591
76592 if (board_config == ALC861VD_AUTO)
76593 spec->init_hook = alc861vd_auto_init;
76594 @@ -17652,7 +17652,7 @@ static int patch_alc662(struct hda_codec
76595
76596 spec->vmaster_nid = 0x02;
76597
76598 - codec->patch_ops = alc_patch_ops;
76599 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76600 if (board_config == ALC662_AUTO)
76601 spec->init_hook = alc662_auto_init;
76602 #ifdef CONFIG_SND_HDA_POWER_SAVE
76603 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_si3054.c linux-2.6.32.45/sound/pci/hda/patch_si3054.c
76604 --- linux-2.6.32.45/sound/pci/hda/patch_si3054.c 2011-03-27 14:31:47.000000000 -0400
76605 +++ linux-2.6.32.45/sound/pci/hda/patch_si3054.c 2011-08-05 20:33:55.000000000 -0400
76606 @@ -275,7 +275,7 @@ static int patch_si3054(struct hda_codec
76607 if (spec == NULL)
76608 return -ENOMEM;
76609 codec->spec = spec;
76610 - codec->patch_ops = si3054_patch_ops;
76611 + memcpy((void *)&codec->patch_ops, &si3054_patch_ops, sizeof(si3054_patch_ops));
76612 return 0;
76613 }
76614
76615 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c
76616 --- linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-06-25 12:55:35.000000000 -0400
76617 +++ linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-08-05 20:33:55.000000000 -0400
76618 @@ -4899,7 +4899,7 @@ static int patch_stac9200(struct hda_cod
76619 if (spec->board_config == STAC_9200_PANASONIC)
76620 spec->hp_detect = 0;
76621
76622 - codec->patch_ops = stac92xx_patch_ops;
76623 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76624
76625 return 0;
76626 }
76627 @@ -4981,7 +4981,7 @@ static int patch_stac925x(struct hda_cod
76628 return err;
76629 }
76630
76631 - codec->patch_ops = stac92xx_patch_ops;
76632 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76633
76634 return 0;
76635 }
76636 @@ -5125,7 +5125,7 @@ again:
76637 if (spec->board_config == STAC_92HD73XX_NO_JD)
76638 spec->hp_detect = 0;
76639
76640 - codec->patch_ops = stac92xx_patch_ops;
76641 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76642
76643 codec->proc_widget_hook = stac92hd7x_proc_hook;
76644
76645 @@ -5220,7 +5220,7 @@ again:
76646 snd_hda_codec_write_cache(codec, nid, 0,
76647 AC_VERB_SET_CONNECT_SEL, num_dacs);
76648
76649 - codec->patch_ops = stac92xx_patch_ops;
76650 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76651
76652 codec->proc_widget_hook = stac92hd_proc_hook;
76653
76654 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hd
76655 return -ENOMEM;
76656
76657 codec->spec = spec;
76658 - codec->patch_ops = stac92xx_patch_ops;
76659 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76660 spec->num_pins = STAC92HD71BXX_NUM_PINS;
76661 switch (codec->vendor_id) {
76662 case 0x111d76b6:
76663 @@ -5515,7 +5515,7 @@ again:
76664 spec->gpio_dir |= spec->gpio_led;
76665 spec->gpio_data |= spec->gpio_led;
76666 /* register check_power_status callback. */
76667 - codec->patch_ops.check_power_status =
76668 + *(void **)&codec->patch_ops.check_power_status =
76669 stac92xx_hp_check_power_status;
76670 }
76671 #endif
76672 @@ -5634,7 +5634,7 @@ static int patch_stac922x(struct hda_cod
76673 return err;
76674 }
76675
76676 - codec->patch_ops = stac92xx_patch_ops;
76677 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76678
76679 /* Fix Mux capture level; max to 2 */
76680 snd_hda_override_amp_caps(codec, 0x12, HDA_OUTPUT,
76681 @@ -5757,7 +5757,7 @@ static int patch_stac927x(struct hda_cod
76682 return err;
76683 }
76684
76685 - codec->patch_ops = stac92xx_patch_ops;
76686 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76687
76688 codec->proc_widget_hook = stac927x_proc_hook;
76689
76690 @@ -5880,7 +5880,7 @@ static int patch_stac9205(struct hda_cod
76691 return err;
76692 }
76693
76694 - codec->patch_ops = stac92xx_patch_ops;
76695 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76696
76697 codec->proc_widget_hook = stac9205_proc_hook;
76698
76699 @@ -5974,7 +5974,7 @@ static int patch_stac9872(struct hda_cod
76700 return -EINVAL;
76701 }
76702 spec->input_mux = &spec->private_imux;
76703 - codec->patch_ops = stac92xx_patch_ops;
76704 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76705 return 0;
76706 }
76707
76708 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_via.c linux-2.6.32.45/sound/pci/hda/patch_via.c
76709 --- linux-2.6.32.45/sound/pci/hda/patch_via.c 2011-03-27 14:31:47.000000000 -0400
76710 +++ linux-2.6.32.45/sound/pci/hda/patch_via.c 2011-08-05 20:33:55.000000000 -0400
76711 @@ -1399,9 +1399,9 @@ static int patch_vt1708(struct hda_codec
76712 spec->num_mixers++;
76713 }
76714
76715 - codec->patch_ops = via_patch_ops;
76716 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76717
76718 - codec->patch_ops.init = via_auto_init;
76719 + *(void **)&codec->patch_ops.init = via_auto_init;
76720 #ifdef CONFIG_SND_HDA_POWER_SAVE
76721 spec->loopback.amplist = vt1708_loopbacks;
76722 #endif
76723 @@ -1870,10 +1870,10 @@ static int patch_vt1709_10ch(struct hda_
76724 spec->num_mixers++;
76725 }
76726
76727 - codec->patch_ops = via_patch_ops;
76728 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76729
76730 - codec->patch_ops.init = via_auto_init;
76731 - codec->patch_ops.unsol_event = via_unsol_event;
76732 + *(void **)&codec->patch_ops.init = via_auto_init;
76733 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76734 #ifdef CONFIG_SND_HDA_POWER_SAVE
76735 spec->loopback.amplist = vt1709_loopbacks;
76736 #endif
76737 @@ -1964,10 +1964,10 @@ static int patch_vt1709_6ch(struct hda_c
76738 spec->num_mixers++;
76739 }
76740
76741 - codec->patch_ops = via_patch_ops;
76742 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76743
76744 - codec->patch_ops.init = via_auto_init;
76745 - codec->patch_ops.unsol_event = via_unsol_event;
76746 + *(void **)&codec->patch_ops.init = via_auto_init;
76747 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76748 #ifdef CONFIG_SND_HDA_POWER_SAVE
76749 spec->loopback.amplist = vt1709_loopbacks;
76750 #endif
76751 @@ -2418,10 +2418,10 @@ static int patch_vt1708B_8ch(struct hda_
76752 spec->num_mixers++;
76753 }
76754
76755 - codec->patch_ops = via_patch_ops;
76756 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76757
76758 - codec->patch_ops.init = via_auto_init;
76759 - codec->patch_ops.unsol_event = via_unsol_event;
76760 + *(void **)&codec->patch_ops.init = via_auto_init;
76761 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76762 #ifdef CONFIG_SND_HDA_POWER_SAVE
76763 spec->loopback.amplist = vt1708B_loopbacks;
76764 #endif
76765 @@ -2470,10 +2470,10 @@ static int patch_vt1708B_4ch(struct hda_
76766 spec->num_mixers++;
76767 }
76768
76769 - codec->patch_ops = via_patch_ops;
76770 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76771
76772 - codec->patch_ops.init = via_auto_init;
76773 - codec->patch_ops.unsol_event = via_unsol_event;
76774 + *(void **)&codec->patch_ops.init = via_auto_init;
76775 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76776 #ifdef CONFIG_SND_HDA_POWER_SAVE
76777 spec->loopback.amplist = vt1708B_loopbacks;
76778 #endif
76779 @@ -2905,10 +2905,10 @@ static int patch_vt1708S(struct hda_code
76780 spec->num_mixers++;
76781 }
76782
76783 - codec->patch_ops = via_patch_ops;
76784 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76785
76786 - codec->patch_ops.init = via_auto_init;
76787 - codec->patch_ops.unsol_event = via_unsol_event;
76788 + *(void **)&codec->patch_ops.init = via_auto_init;
76789 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76790 #ifdef CONFIG_SND_HDA_POWER_SAVE
76791 spec->loopback.amplist = vt1708S_loopbacks;
76792 #endif
76793 @@ -3223,10 +3223,10 @@ static int patch_vt1702(struct hda_codec
76794 spec->num_mixers++;
76795 }
76796
76797 - codec->patch_ops = via_patch_ops;
76798 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76799
76800 - codec->patch_ops.init = via_auto_init;
76801 - codec->patch_ops.unsol_event = via_unsol_event;
76802 + *(void **)&codec->patch_ops.init = via_auto_init;
76803 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76804 #ifdef CONFIG_SND_HDA_POWER_SAVE
76805 spec->loopback.amplist = vt1702_loopbacks;
76806 #endif
76807 diff -urNp linux-2.6.32.45/sound/pci/ice1712/ice1712.h linux-2.6.32.45/sound/pci/ice1712/ice1712.h
76808 --- linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-03-27 14:31:47.000000000 -0400
76809 +++ linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-08-05 20:33:55.000000000 -0400
76810 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
76811 unsigned int mask_flags; /* total mask bits */
76812 struct snd_akm4xxx_ops {
76813 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
76814 - } ops;
76815 + } __no_const ops;
76816 };
76817
76818 struct snd_ice1712_spdif {
76819 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
76820 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76821 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76822 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76823 - } ops;
76824 + } __no_const ops;
76825 };
76826
76827
76828 diff -urNp linux-2.6.32.45/sound/pci/intel8x0m.c linux-2.6.32.45/sound/pci/intel8x0m.c
76829 --- linux-2.6.32.45/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
76830 +++ linux-2.6.32.45/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
76831 @@ -1264,7 +1264,7 @@ static struct shortname_table {
76832 { 0x5455, "ALi M5455" },
76833 { 0x746d, "AMD AMD8111" },
76834 #endif
76835 - { 0 },
76836 + { 0, },
76837 };
76838
76839 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
76840 diff -urNp linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c
76841 --- linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
76842 +++ linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
76843 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
76844 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
76845 break;
76846 }
76847 - if (atomic_read(&chip->interrupt_sleep_count)) {
76848 - atomic_set(&chip->interrupt_sleep_count, 0);
76849 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76850 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76851 wake_up(&chip->interrupt_sleep);
76852 }
76853 __end:
76854 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
76855 continue;
76856 init_waitqueue_entry(&wait, current);
76857 add_wait_queue(&chip->interrupt_sleep, &wait);
76858 - atomic_inc(&chip->interrupt_sleep_count);
76859 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
76860 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
76861 remove_wait_queue(&chip->interrupt_sleep, &wait);
76862 }
76863 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
76864 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
76865 spin_unlock(&chip->reg_lock);
76866
76867 - if (atomic_read(&chip->interrupt_sleep_count)) {
76868 - atomic_set(&chip->interrupt_sleep_count, 0);
76869 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76870 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76871 wake_up(&chip->interrupt_sleep);
76872 }
76873 }
76874 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
76875 spin_lock_init(&chip->reg_lock);
76876 spin_lock_init(&chip->voice_lock);
76877 init_waitqueue_head(&chip->interrupt_sleep);
76878 - atomic_set(&chip->interrupt_sleep_count, 0);
76879 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76880 chip->card = card;
76881 chip->pci = pci;
76882 chip->irq = -1;
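The interrupt_sleep_count changes above follow the PaX REFCOUNT convention used throughout this patch: counters that are allowed to wrap are switched to an atomic_unchecked_t whose helpers bypass the overflow checks that the hardened atomic_t operations gain. In a kernel built without that hardening the unchecked variants simply alias the plain ones. A hedged sketch of that fallback mapping (the definitions below are an assumption for illustration, not lines quoted from this patch; only the call names match the hunks above):

/* sketch: non-hardened fallback, unchecked API == plain atomic API */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#define atomic_inc_unchecked(v)		atomic_inc(v)
#endif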
76883 diff -urNp linux-2.6.32.45/sound/soc/soc-core.c linux-2.6.32.45/sound/soc/soc-core.c
76884 --- linux-2.6.32.45/sound/soc/soc-core.c 2011-03-27 14:31:47.000000000 -0400
76885 +++ linux-2.6.32.45/sound/soc/soc-core.c 2011-08-05 20:33:55.000000000 -0400
76886 @@ -1107,13 +1107,13 @@ static int soc_new_pcm(struct snd_soc_de
76887
76888 dai_link->pcm = pcm;
76889 pcm->private_data = rtd;
76890 - soc_pcm_ops.mmap = platform->pcm_ops->mmap;
76891 - soc_pcm_ops.pointer = platform->pcm_ops->pointer;
76892 - soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
76893 - soc_pcm_ops.copy = platform->pcm_ops->copy;
76894 - soc_pcm_ops.silence = platform->pcm_ops->silence;
76895 - soc_pcm_ops.ack = platform->pcm_ops->ack;
76896 - soc_pcm_ops.page = platform->pcm_ops->page;
76897 + *(void **)&soc_pcm_ops.mmap = platform->pcm_ops->mmap;
76898 + *(void **)&soc_pcm_ops.pointer = platform->pcm_ops->pointer;
76899 + *(void **)&soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
76900 + *(void **)&soc_pcm_ops.copy = platform->pcm_ops->copy;
76901 + *(void **)&soc_pcm_ops.silence = platform->pcm_ops->silence;
76902 + *(void **)&soc_pcm_ops.ack = platform->pcm_ops->ack;
76903 + *(void **)&soc_pcm_ops.page = platform->pcm_ops->page;
76904
76905 if (playback)
76906 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);
76907 diff -urNp linux-2.6.32.45/sound/usb/usbaudio.c linux-2.6.32.45/sound/usb/usbaudio.c
76908 --- linux-2.6.32.45/sound/usb/usbaudio.c 2011-03-27 14:31:47.000000000 -0400
76909 +++ linux-2.6.32.45/sound/usb/usbaudio.c 2011-08-05 20:33:55.000000000 -0400
76910 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(
76911 switch (cmd) {
76912 case SNDRV_PCM_TRIGGER_START:
76913 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76914 - subs->ops.prepare = prepare_playback_urb;
76915 + *(void **)&subs->ops.prepare = prepare_playback_urb;
76916 return 0;
76917 case SNDRV_PCM_TRIGGER_STOP:
76918 return deactivate_urbs(subs, 0, 0);
76919 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76920 - subs->ops.prepare = prepare_nodata_playback_urb;
76921 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76922 return 0;
76923 default:
76924 return -EINVAL;
76925 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(s
76926
76927 switch (cmd) {
76928 case SNDRV_PCM_TRIGGER_START:
76929 - subs->ops.retire = retire_capture_urb;
76930 + *(void **)&subs->ops.retire = retire_capture_urb;
76931 return start_urbs(subs, substream->runtime);
76932 case SNDRV_PCM_TRIGGER_STOP:
76933 return deactivate_urbs(subs, 0, 0);
76934 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76935 - subs->ops.retire = retire_paused_capture_urb;
76936 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
76937 return 0;
76938 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76939 - subs->ops.retire = retire_capture_urb;
76940 + *(void **)&subs->ops.retire = retire_capture_urb;
76941 return 0;
76942 default:
76943 return -EINVAL;
76944 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct sn
76945 /* for playback, submit the URBs now; otherwise, the first hwptr_done
76946 * updates for all URBs would happen at the same time when starting */
76947 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
76948 - subs->ops.prepare = prepare_nodata_playback_urb;
76949 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76950 return start_urbs(subs, runtime);
76951 } else
76952 return 0;
76953 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_us
76954 subs->direction = stream;
76955 subs->dev = as->chip->dev;
76956 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
76957 - subs->ops = audio_urb_ops[stream];
76958 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
76959 } else {
76960 - subs->ops = audio_urb_ops_high_speed[stream];
76961 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
76962 switch (as->chip->usb_id) {
76963 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
76964 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
76965 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
76966 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76967 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76968 break;
76969 }
76970 }
76971 diff -urNp linux-2.6.32.45/tools/gcc/constify_plugin.c linux-2.6.32.45/tools/gcc/constify_plugin.c
76972 --- linux-2.6.32.45/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
76973 +++ linux-2.6.32.45/tools/gcc/constify_plugin.c 2011-08-11 19:12:51.000000000 -0400
76974 @@ -0,0 +1,189 @@
76975 +/*
76976 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
76977 + * Licensed under the GPL v2, or (at your option) v3
76978 + *
76979 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
76980 + *
76981 + * Usage:
76982 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
76983 + * $ gcc -fplugin=constify_plugin.so test.c -O2
76984 + */
76985 +
76986 +#include "gcc-plugin.h"
76987 +#include "config.h"
76988 +#include "system.h"
76989 +#include "coretypes.h"
76990 +#include "tree.h"
76991 +#include "tree-pass.h"
76992 +#include "intl.h"
76993 +#include "plugin-version.h"
76994 +#include "tm.h"
76995 +#include "toplev.h"
76996 +#include "function.h"
76997 +#include "tree-flow.h"
76998 +#include "plugin.h"
76999 +
77000 +int plugin_is_GPL_compatible;
77001 +
77002 +static struct plugin_info const_plugin_info = {
77003 + .version = "20110721",
77004 + .help = "no-constify\tturn off constification\n",
77005 +};
77006 +
77007 +static bool walk_struct(tree node);
77008 +
77009 +static void deconstify_node(tree node)
77010 +{
77011 + tree field;
77012 +
77013 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77014 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
77015 + if (code == RECORD_TYPE || code == UNION_TYPE)
77016 + deconstify_node(TREE_TYPE(field));
77017 + TREE_READONLY(field) = 0;
77018 + TREE_READONLY(TREE_TYPE(field)) = 0;
77019 + }
77020 +}
77021 +
77022 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77023 +{
77024 + if (TREE_CODE(*node) == FUNCTION_DECL) {
77025 + error("%qE attribute does not apply to functions", name);
77026 + *no_add_attrs = true;
77027 + return NULL_TREE;
77028 + }
77029 +
77030 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
77031 + error("%qE attribute is already applied to the type", name);
77032 + *no_add_attrs = true;
77033 + return NULL_TREE;
77034 + }
77035 +
77036 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
77037 + error("%qE attribute used on type that is not constified", name);
77038 + *no_add_attrs = true;
77039 + return NULL_TREE;
77040 + }
77041 +
77042 + if (TREE_CODE(*node) == TYPE_DECL) {
77043 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
77044 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
77045 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
77046 + TREE_READONLY(TREE_TYPE(*node)) = 0;
77047 + deconstify_node(TREE_TYPE(*node));
77048 + return NULL_TREE;
77049 + }
77050 +
77051 + return NULL_TREE;
77052 +}
77053 +
77054 +static struct attribute_spec no_const_attr = {
77055 + .name = "no_const",
77056 + .min_length = 0,
77057 + .max_length = 0,
77058 + .decl_required = false,
77059 + .type_required = false,
77060 + .function_type_required = false,
77061 + .handler = handle_no_const_attribute
77062 +};
77063 +
77064 +static void register_attributes(void *event_data, void *data)
77065 +{
77066 + register_attribute(&no_const_attr);
77067 +}
77068 +
77069 +/*
77070 +static void printnode(char *prefix, tree node)
77071 +{
77072 + enum tree_code code;
77073 + enum tree_code_class tclass;
77074 +
77075 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
77076 +
77077 + code = TREE_CODE(node);
77078 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
77079 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
77080 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
77081 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
77082 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
77083 +}
77084 +*/
77085 +
77086 +static void constify_node(tree node)
77087 +{
77088 + TREE_READONLY(node) = 1;
77089 +}
77090 +
77091 +static bool is_fptr(tree field)
77092 +{
77093 + tree ptr = TREE_TYPE(field);
77094 +
77095 + if (TREE_CODE(ptr) != POINTER_TYPE)
77096 + return false;
77097 +
77098 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77099 +}
77100 +
77101 +static bool walk_struct(tree node)
77102 +{
77103 + tree field;
77104 +
77105 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77106 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
77107 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77108 + if (!(walk_struct(TREE_TYPE(field))))
77109 + return false;
77110 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
77111 + return false;
77112 + }
77113 + return true;
77114 +}
77115 +
77116 +static void finish_type(void *event_data, void *data)
77117 +{
77118 + tree node = (tree)event_data;
77119 +
77120 + if (node == NULL_TREE)
77121 + return;
77122 +
77123 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77124 + return;
77125 +
77126 + if (TREE_READONLY(node))
77127 + return;
77128 +
77129 + if (TYPE_FIELDS(node) == NULL_TREE)
77130 + return;
77131 +
77132 + if (walk_struct(node))
77133 + constify_node(node);
77134 +}
77135 +
77136 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77137 +{
77138 + const char * const plugin_name = plugin_info->base_name;
77139 + const int argc = plugin_info->argc;
77140 + const struct plugin_argument * const argv = plugin_info->argv;
77141 + int i;
77142 + bool constify = true;
77143 +
77144 + if (!plugin_default_version_check(version, &gcc_version)) {
77145 + error(G_("incompatible gcc/plugin versions"));
77146 + return 1;
77147 + }
77148 +
77149 + for (i = 0; i < argc; ++i) {
77150 + if (!(strcmp(argv[i].key, "no-constify"))) {
77151 + constify = false;
77152 + continue;
77153 + }
77154 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77155 + }
77156 +
77157 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77158 + if (constify)
77159 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77160 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77161 +
77162 + return 0;
77163 +}
77164 Binary files linux-2.6.32.45/tools/gcc/constify_plugin.so and linux-2.6.32.45/tools/gcc/constify_plugin.so differ
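A quick way to see the plugin above in action is a tiny input file: a structure whose fields are all function pointers is constified by finish_type() as if it had been declared const, while the no_const attribute registered by the plugin opts a structure back out. A small test file in the spirit of the test.c mentioned in the usage comment (this exact file is illustrative, not part of the patch):

/* test.c -- build the plugin as shown in its usage comment, then:
 *   gcc -fplugin=./constify_plugin.so -O2 -c test.c
 */
struct all_fptrs {			/* only function pointers ... */
	int  (*open)(void);		/* ... so finish_type() constifies the type */
	void (*close)(void);
};

struct opted_out {
	int  (*open)(void);
	void (*close)(void);
} __attribute__((no_const));		/* accepted by handle_no_const_attribute() */

static struct all_fptrs table;
static struct opted_out mutable_table;

void assign(void)
{
	mutable_table.open = 0;		/* still writable */
	if (table.open)			/* reading the constified table is fine */
		table.open();
	/* table.open = 0; */		/* rejected: assignment to a read-only object */
}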
77165 diff -urNp linux-2.6.32.45/tools/gcc/Makefile linux-2.6.32.45/tools/gcc/Makefile
77166 --- linux-2.6.32.45/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
77167 +++ linux-2.6.32.45/tools/gcc/Makefile 2011-08-05 20:33:55.000000000 -0400
77168 @@ -0,0 +1,12 @@
77169 +#CC := gcc
77170 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77171 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77172 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
77173 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77174 +
77175 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77176 +
77177 +hostlibs-y := stackleak_plugin.so constify_plugin.so
77178 +always := $(hostlibs-y)
77179 +stackleak_plugin-objs := stackleak_plugin.o
77180 +constify_plugin-objs := constify_plugin.o
77181 Binary files linux-2.6.32.45/tools/gcc/pax_plugin.so and linux-2.6.32.45/tools/gcc/pax_plugin.so differ
77182 diff -urNp linux-2.6.32.45/tools/gcc/stackleak_plugin.c linux-2.6.32.45/tools/gcc/stackleak_plugin.c
77183 --- linux-2.6.32.45/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
77184 +++ linux-2.6.32.45/tools/gcc/stackleak_plugin.c 2011-08-05 20:33:55.000000000 -0400
77185 @@ -0,0 +1,243 @@
77186 +/*
77187 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77188 + * Licensed under the GPL v2
77189 + *
77190 + * Note: the choice of the license means that the compilation process is
77191 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77192 + * but for the kernel it doesn't matter since it doesn't link against
77193 + * any of the gcc libraries
77194 + *
77195 + * gcc plugin to help implement various PaX features
77196 + *
77197 + * - track lowest stack pointer
77198 + *
77199 + * TODO:
77200 + * - initialize all local variables
77201 + *
77202 + * BUGS:
77203 + * - cloned functions are instrumented twice
77204 + */
77205 +#include "gcc-plugin.h"
77206 +#include "plugin-version.h"
77207 +#include "config.h"
77208 +#include "system.h"
77209 +#include "coretypes.h"
77210 +#include "tm.h"
77211 +#include "toplev.h"
77212 +#include "basic-block.h"
77213 +#include "gimple.h"
77214 +//#include "expr.h" where are you...
77215 +#include "diagnostic.h"
77216 +#include "rtl.h"
77217 +#include "emit-rtl.h"
77218 +#include "function.h"
77219 +#include "tree.h"
77220 +#include "tree-pass.h"
77221 +#include "intl.h"
77222 +
77223 +int plugin_is_GPL_compatible;
77224 +
77225 +static int track_frame_size = -1;
77226 +static const char track_function[] = "pax_track_stack";
77227 +static bool init_locals;
77228 +
77229 +static struct plugin_info stackleak_plugin_info = {
77230 + .version = "201106030000",
77231 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
77232 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
77233 +};
77234 +
77235 +static bool gate_stackleak_track_stack(void);
77236 +static unsigned int execute_stackleak_tree_instrument(void);
77237 +static unsigned int execute_stackleak_final(void);
77238 +
77239 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
77240 + .pass = {
77241 + .type = GIMPLE_PASS,
77242 + .name = "stackleak_tree_instrument",
77243 + .gate = gate_stackleak_track_stack,
77244 + .execute = execute_stackleak_tree_instrument,
77245 + .sub = NULL,
77246 + .next = NULL,
77247 + .static_pass_number = 0,
77248 + .tv_id = TV_NONE,
77249 + .properties_required = PROP_gimple_leh | PROP_cfg,
77250 + .properties_provided = 0,
77251 + .properties_destroyed = 0,
77252 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
77253 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
77254 + }
77255 +};
77256 +
77257 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
77258 + .pass = {
77259 + .type = RTL_PASS,
77260 + .name = "stackleak_final",
77261 + .gate = gate_stackleak_track_stack,
77262 + .execute = execute_stackleak_final,
77263 + .sub = NULL,
77264 + .next = NULL,
77265 + .static_pass_number = 0,
77266 + .tv_id = TV_NONE,
77267 + .properties_required = 0,
77268 + .properties_provided = 0,
77269 + .properties_destroyed = 0,
77270 + .todo_flags_start = 0,
77271 + .todo_flags_finish = 0
77272 + }
77273 +};
77274 +
77275 +static bool gate_stackleak_track_stack(void)
77276 +{
77277 + return track_frame_size >= 0;
77278 +}
77279 +
77280 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
77281 +{
77282 + gimple call;
77283 + tree decl, type;
77284 +
77285 + // insert call to void pax_track_stack(void)
77286 + type = build_function_type_list(void_type_node, NULL_TREE);
77287 + decl = build_fn_decl(track_function, type);
77288 + DECL_ASSEMBLER_NAME(decl); // for LTO
77289 + call = gimple_build_call(decl, 0);
77290 + if (before)
77291 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
77292 + else
77293 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
77294 +}
77295 +
77296 +static unsigned int execute_stackleak_tree_instrument(void)
77297 +{
77298 + basic_block bb;
77299 + gimple_stmt_iterator gsi;
77300 +
77301 + // 1. loop through BBs and GIMPLE statements
77302 + FOR_EACH_BB(bb) {
77303 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77304 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
77305 + tree decl;
77306 + gimple stmt = gsi_stmt(gsi);
77307 +
77308 + if (!is_gimple_call(stmt))
77309 + continue;
77310 + decl = gimple_call_fndecl(stmt);
77311 + if (!decl)
77312 + continue;
77313 + if (TREE_CODE(decl) != FUNCTION_DECL)
77314 + continue;
77315 + if (!DECL_BUILT_IN(decl))
77316 + continue;
77317 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
77318 + continue;
77319 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
77320 + continue;
77321 +
77322 + // 2. insert track call after each __builtin_alloca call
77323 + stackleak_add_instrumentation(&gsi, false);
77324 +// print_node(stderr, "pax", decl, 4);
77325 + }
77326 + }
77327 +
77328 + // 3. insert track call at the beginning
77329 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
77330 + gsi = gsi_start_bb(bb);
77331 + stackleak_add_instrumentation(&gsi, true);
77332 +
77333 + return 0;
77334 +}
77335 +
77336 +static unsigned int execute_stackleak_final(void)
77337 +{
77338 + rtx insn;
77339 +
77340 + if (cfun->calls_alloca)
77341 + return 0;
77342 +
77343 + // 1. find pax_track_stack calls
77344 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
77345 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
77346 + rtx body;
77347 +
77348 + if (!CALL_P(insn))
77349 + continue;
77350 + body = PATTERN(insn);
77351 + if (GET_CODE(body) != CALL)
77352 + continue;
77353 + body = XEXP(body, 0);
77354 + if (GET_CODE(body) != MEM)
77355 + continue;
77356 + body = XEXP(body, 0);
77357 + if (GET_CODE(body) != SYMBOL_REF)
77358 + continue;
77359 + if (strcmp(XSTR(body, 0), track_function))
77360 + continue;
77361 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77362 + // 2. delete call if function frame is not big enough
77363 + if (get_frame_size() >= track_frame_size)
77364 + continue;
77365 + delete_insn_and_edges(insn);
77366 + }
77367 +
77368 +// print_simple_rtl(stderr, get_insns());
77369 +// print_rtl(stderr, get_insns());
77370 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77371 +
77372 + return 0;
77373 +}
77374 +
77375 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77376 +{
77377 + const char * const plugin_name = plugin_info->base_name;
77378 + const int argc = plugin_info->argc;
77379 + const struct plugin_argument * const argv = plugin_info->argv;
77380 + int i;
77381 + struct register_pass_info stackleak_tree_instrument_pass_info = {
77382 + .pass = &stackleak_tree_instrument_pass.pass,
77383 +// .reference_pass_name = "tree_profile",
77384 + .reference_pass_name = "optimized",
77385 + .ref_pass_instance_number = 0,
77386 + .pos_op = PASS_POS_INSERT_AFTER
77387 + };
77388 + struct register_pass_info stackleak_final_pass_info = {
77389 + .pass = &stackleak_final_rtl_opt_pass.pass,
77390 + .reference_pass_name = "final",
77391 + .ref_pass_instance_number = 0,
77392 + .pos_op = PASS_POS_INSERT_BEFORE
77393 + };
77394 +
77395 + if (!plugin_default_version_check(version, &gcc_version)) {
77396 + error(G_("incompatible gcc/plugin versions"));
77397 + return 1;
77398 + }
77399 +
77400 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
77401 +
77402 + for (i = 0; i < argc; ++i) {
77403 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
77404 + if (!argv[i].value) {
77405 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77406 + continue;
77407 + }
77408 + track_frame_size = atoi(argv[i].value);
77409 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
77410 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77411 + continue;
77412 + }
77413 + if (!strcmp(argv[i].key, "initialize-locals")) {
77414 + if (argv[i].value) {
77415 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77416 + continue;
77417 + }
77418 + init_locals = true;
77419 + continue;
77420 + }
77421 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77422 + }
77423 +
77424 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
77425 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
77426 +
77427 + return 0;
77428 +}
77429 Binary files linux-2.6.32.45/tools/gcc/stackleak_plugin.so and linux-2.6.32.45/tools/gcc/stackleak_plugin.so differ
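Taken together, the two passes registered above work as follows: the GIMPLE pass inserts a call to pax_track_stack() at the start of every function and after every __builtin_alloca call, and the RTL pass, placed just before "final", deletes those calls again in functions that neither call alloca nor have a frame of at least track-lowest-sp bytes. The sketch below is a hypothetical before/after view: the function names and the 512-byte threshold are made up, and pax_track_stack() itself is expected to be defined elsewhere in the kernel rather than by this plugin.

/* Editor's sketch, not part of the patch: conceptual result of compiling with
 * -fplugin-arg-stackleak_plugin-track-lowest-sp=512 (illustrative only; the
 * real transformation happens on GIMPLE and RTL, not in C source). */
void pax_track_stack(void);		/* assumed to be provided by the kernel */

void big_frame(unsigned long n)
{
	char buf[1024];			/* frame >= 512 bytes, so the calls survive     */
	void *p;

	pax_track_stack();		/* inserted at function entry by the GIMPLE pass */
	p = __builtin_alloca(n);
	pax_track_stack();		/* inserted after each __builtin_alloca call     */
	buf[0] = *(char *)p;		/* ... use buf and p ... */
}

void small_frame(void)
{
	int x = 0;			/* tiny frame, no alloca: the entry call is      */
					/* inserted at GIMPLE time but removed again     */
					/* by the RTL pass right before "final"          */
	(void)x;
}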
77430 diff -urNp linux-2.6.32.45/usr/gen_init_cpio.c linux-2.6.32.45/usr/gen_init_cpio.c
77431 --- linux-2.6.32.45/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
77432 +++ linux-2.6.32.45/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
77433 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
77434 int retval;
77435 int rc = -1;
77436 int namesize;
77437 - int i;
77438 + unsigned int i;
77439
77440 mode |= S_IFREG;
77441
77442 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
77443 *env_var = *expanded = '\0';
77444 strncat(env_var, start + 2, end - start - 2);
77445 strncat(expanded, new_location, start - new_location);
77446 - strncat(expanded, getenv(env_var), PATH_MAX);
77447 - strncat(expanded, end + 1, PATH_MAX);
77448 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
77449 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
77450 strncpy(new_location, expanded, PATH_MAX);
77451 + new_location[PATH_MAX] = 0;
77452 } else
77453 break;
77454 }
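The strncat() hunk above is a bounds fix: strncat's size argument caps how many bytes are appended from the source; it says nothing about how full the destination already is, so a fixed PATH_MAX bound can write past the end of an almost-full buffer. A minimal stand-alone illustration of the safe pattern (the buffer and its size are made up):

/* Editor's sketch, not part of the patch. */
#include <string.h>

static char dst[16];

static void append(const char *src)
{
	/* bound by the space still left in dst, keeping one byte for the
	 * terminating NUL that strncat always writes */
	strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
}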
77455 diff -urNp linux-2.6.32.45/virt/kvm/kvm_main.c linux-2.6.32.45/virt/kvm/kvm_main.c
77456 --- linux-2.6.32.45/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
77457 +++ linux-2.6.32.45/virt/kvm/kvm_main.c 2011-08-05 20:33:55.000000000 -0400
77458 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_rebo
77459 if (kvm_rebooting)
77460 /* spin while reset goes on */
77461 while (true)
77462 - ;
77463 + cpu_relax();
77464 /* Fault while not rebooting. We want the trace. */
77465 BUG();
77466 }
77467 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt
77468 kvm_arch_vcpu_put(vcpu);
77469 }
77470
77471 -int kvm_init(void *opaque, unsigned int vcpu_size,
77472 +int kvm_init(const void *opaque, unsigned int vcpu_size,
77473 struct module *module)
77474 {
77475 int r;
77476 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int
77477 /* A kmem cache lets us meet the alignment requirements of fx_save. */
77478 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
77479 __alignof__(struct kvm_vcpu),
77480 - 0, NULL);
77481 + SLAB_USERCOPY, NULL);
77482 if (!kvm_vcpu_cache) {
77483 r = -ENOMEM;
77484 goto out_free_5;
77485 }
77486
77487 - kvm_chardev_ops.owner = module;
77488 - kvm_vm_fops.owner = module;
77489 - kvm_vcpu_fops.owner = module;
77490 + pax_open_kernel();
77491 + *(void **)&kvm_chardev_ops.owner = module;
77492 + *(void **)&kvm_vm_fops.owner = module;
77493 + *(void **)&kvm_vcpu_fops.owner = module;
77494 + pax_close_kernel();
77495
77496 r = misc_register(&kvm_dev);
77497 if (r) {