test/grsecurity-2.2.2-2.6.32.45-201108172006.patch
1 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40 --- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53 +++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86 --- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277 @@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
282 +
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286 @@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294 +
295 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296 +{
297 + if (!__builtin_constant_p(n))
298 + check_object_size(to, n, false);
299 + return ___copy_from_user(to, from, n);
300 +}
301 +
302 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303 +{
304 + if (!__builtin_constant_p(n))
305 + check_object_size(from, n, true);
306 + return ___copy_to_user(to, from, n);
307 +}
308 +
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316 + if ((long)n < 0)
317 + return n;
318 +
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326 + if ((long)n < 0)
327 + return n;
328 +
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332 diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333 --- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334 +++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339 -EXPORT_SYMBOL(__copy_from_user);
340 -EXPORT_SYMBOL(__copy_to_user);
341 +EXPORT_SYMBOL(___copy_from_user);
342 +EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346 diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347 --- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348 +++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353 -struct kgdb_arch arch_kgdb_ops = {
354 +const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358 diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359 --- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360 +++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365 +extern void gr_handle_kernel_exploit(void);
366 +
367 /*
368 * This function is protected against re-entrancy.
369 */
370 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374 + gr_handle_kernel_exploit();
375 +
376 do_exit(SIGSEGV);
377 }
378
379 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380 --- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381 +++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382 @@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386 - * size_t __copy_from_user(void *to, const void *from, size_t n)
387 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391 @@ -84,11 +84,11 @@
392
393 .text
394
395 -ENTRY(__copy_from_user)
396 +ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400 -ENDPROC(__copy_from_user)
401 +ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406 --- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408 @@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412 - * size_t __copy_to_user(void *to, const void *from, size_t n)
413 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417 @@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421 -WEAK(__copy_to_user)
422 +WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426 -ENDPROC(__copy_to_user)
427 +ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432 --- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434 @@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447 -ENTRY(__copy_to_user)
448 +ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456 -ENDPROC(__copy_to_user)
457 +ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473 -ENTRY(__copy_from_user)
474 +ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482 -ENDPROC(__copy_from_user)
483 +ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488 --- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489 +++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490 @@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494 -__copy_to_user(void __user *to, const void *from, unsigned long n)
495 +___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499 diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500 --- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501 +++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506 -static struct platform_suspend_ops at91_pm_ops ={
507 +static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511 diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512 --- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513 +++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518 -static struct platform_suspend_ops omap_pm_ops ={
519 +static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524 --- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530 -static struct platform_suspend_ops omap_pm_ops = {
531 +static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536 --- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542 -static struct platform_suspend_ops omap_pm_ops = {
543 +static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547 diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548 --- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549 +++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554 -static struct platform_suspend_ops pnx4008_pm_ops = {
555 +static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560 --- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561 +++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566 -static struct platform_suspend_ops pxa_pm_ops = {
567 +static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572 --- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573 +++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578 -static struct platform_suspend_ops sharpsl_pm_ops = {
579 +static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583 diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584 --- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585 +++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590 -static struct platform_suspend_ops sa11x0_pm_ops = {
591 +static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595 diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596 --- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597 +++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602 +#ifdef CONFIG_PAX_PAGEEXEC
603 + if (fsr & FSR_LNX_PF) {
604 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605 + do_group_exit(SIGKILL);
606 + }
607 +#endif
608 +
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616 +#ifdef CONFIG_PAX_PAGEEXEC
617 +void pax_report_insns(void *pc, void *sp)
618 +{
619 + long i;
620 +
621 + printk(KERN_ERR "PAX: bytes at PC: ");
622 + for (i = 0; i < 20; i++) {
623 + unsigned char c;
624 + if (get_user(c, (__force unsigned char __user *)pc+i))
625 + printk(KERN_CONT "?? ");
626 + else
627 + printk(KERN_CONT "%02x ", c);
628 + }
629 + printk("\n");
630 +
631 + printk(KERN_ERR "PAX: bytes at SP-4: ");
632 + for (i = -1; i < 20; i++) {
633 + unsigned long c;
634 + if (get_user(c, (__force unsigned long __user *)sp+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08lx ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642 +
643 /*
644 * First Level Translation Fault Handler
645 *
646 diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647 --- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648 +++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653 +#ifdef CONFIG_PAX_RANDMMAP
654 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655 +#endif
656 +
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664 - if (TASK_SIZE - len >= addr &&
665 - (!vma || addr + len <= vma->vm_start))
666 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670 - start_addr = addr = mm->free_area_cache;
671 + start_addr = addr = mm->free_area_cache;
672 } else {
673 - start_addr = addr = TASK_UNMAPPED_BASE;
674 - mm->cached_hole_size = 0;
675 + start_addr = addr = mm->mmap_base;
676 + mm->cached_hole_size = 0;
677 }
678
679 full_search:
680 @@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684 - if (start_addr != TASK_UNMAPPED_BASE) {
685 - start_addr = addr = TASK_UNMAPPED_BASE;
686 + if (start_addr != mm->mmap_base) {
687 + start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693 - if (!vma || addr + len <= vma->vm_start) {
694 + if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698 diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699 --- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705 -static struct platform_suspend_ops s3c_pm_ops = {
706 +static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711 --- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712 +++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN 15
724 +#define PAX_DELTA_STACK_LEN 15
725 +#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730 --- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731 +++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736 -D(14) KM_TYPE_NR
737 +D(14) KM_CLEARPAGE,
738 +D(15) KM_TYPE_NR
739 };
740
741 #undef D
742 diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743 --- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744 +++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745 @@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749 -static struct platform_suspend_ops avr32_pm_ops = {
750 +static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754 diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755 --- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756 +++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761 +#ifdef CONFIG_PAX_PAGEEXEC
762 +void pax_report_insns(void *pc, void *sp)
763 +{
764 + unsigned long i;
765 +
766 + printk(KERN_ERR "PAX: bytes at PC: ");
767 + for (i = 0; i < 20; i++) {
768 + unsigned char c;
769 + if (get_user(c, (unsigned char *)pc+i))
770 + printk(KERN_CONT "???????? ");
771 + else
772 + printk(KERN_CONT "%02x ", c);
773 + }
774 + printk("\n");
775 +}
776 +#endif
777 +
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781 @@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785 +
786 +#ifdef CONFIG_PAX_PAGEEXEC
787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790 + do_group_exit(SIGKILL);
791 + }
792 + }
793 +#endif
794 +
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798 diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799 --- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805 -struct kgdb_arch arch_kgdb_ops = {
806 +const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810 diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811 --- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812 +++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817 -struct platform_suspend_ops bfin_pm_ops = {
818 +const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822 diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823 --- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824 +++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825 @@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829 + KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833 diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834 --- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835 +++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840 - if (TASK_SIZE - len >= addr &&
841 - (!vma || addr + len <= vma->vm_start))
842 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850 - if (addr + len <= vma->vm_start)
851 + if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859 - if (addr + len <= vma->vm_start)
860 + if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865 --- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866 +++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867 @@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886 --- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892 -extern struct dma_map_ops swiotlb_dma_ops;
893 +extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901 -struct dma_map_ops sba_dma_ops = {
902 +const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906 diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907 --- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908 +++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913 +#ifdef CONFIG_PAX_ASLR
914 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915 +
916 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918 +#endif
919 +
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923 diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924 --- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
931 +#ifdef CONFIG_PAX_RANDUSTACK
932 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
933 +#else
934 +#define __IA32_DELTA_STACK 0UL
935 +#endif
936 +
937 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938 +
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943 --- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944 +++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945 @@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949 -extern struct dma_map_ops *dma_ops;
950 +extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
959 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
968 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
977 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
984 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989 --- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990 +++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991 @@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995 +#ifdef CONFIG_PAX_ASLR
996 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997 +
998 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000 +#endif
1001 +
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006 --- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007 +++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021 -extern struct dma_map_ops *dma_get_ops(struct device *);
1022 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027 --- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033 -
1034 +#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038 @@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042 +
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047 +#else
1048 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050 +# define PAGE_COPY_NOEXEC PAGE_COPY
1051 +#endif
1052 +
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057 --- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058 +++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069 --- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070 +++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089 diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090 --- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091 +++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092 @@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096 -struct dma_map_ops *dma_ops;
1097 +const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105 -struct dma_map_ops *dma_get_ops(struct device *dev)
1106 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110 diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111 --- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112 +++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117 - if (mod && mod->arch.init_unw_table &&
1118 - module_region == mod->module_init) {
1119 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127 +in_init_rx (const struct module *mod, uint64_t addr)
1128 +{
1129 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130 +}
1131 +
1132 +static inline int
1133 +in_init_rw (const struct module *mod, uint64_t addr)
1134 +{
1135 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136 +}
1137 +
1138 +static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141 - return addr - (uint64_t) mod->module_init < mod->init_size;
1142 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143 +}
1144 +
1145 +static inline int
1146 +in_core_rx (const struct module *mod, uint64_t addr)
1147 +{
1148 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149 +}
1150 +
1151 +static inline int
1152 +in_core_rw (const struct module *mod, uint64_t addr)
1153 +{
1154 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160 - return addr - (uint64_t) mod->module_core < mod->core_size;
1161 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170 + if (in_init_rx(mod, val))
1171 + val -= (uint64_t) mod->module_init_rx;
1172 + else if (in_init_rw(mod, val))
1173 + val -= (uint64_t) mod->module_init_rw;
1174 + else if (in_core_rx(mod, val))
1175 + val -= (uint64_t) mod->module_core_rx;
1176 + else if (in_core_rw(mod, val))
1177 + val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185 - if (mod->core_size > MAX_LTOFF)
1186 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191 - gp = mod->core_size - MAX_LTOFF / 2;
1192 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194 - gp = mod->core_size / 2;
1195 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202 --- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203 +++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208 -extern struct dma_map_ops intel_dma_ops;
1209 +extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224 +
1225 +static const struct dma_map_ops intel_iommu_dma_ops = {
1226 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227 + .alloc_coherent = intel_alloc_coherent,
1228 + .free_coherent = intel_free_coherent,
1229 + .map_sg = intel_map_sg,
1230 + .unmap_sg = intel_unmap_sg,
1231 + .map_page = intel_map_page,
1232 + .unmap_page = intel_unmap_page,
1233 + .mapping_error = intel_mapping_error,
1234 +
1235 + .sync_single_for_cpu = machvec_dma_sync_single,
1236 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1237 + .sync_single_for_device = machvec_dma_sync_single,
1238 + .sync_sg_for_device = machvec_dma_sync_sg,
1239 + .dma_supported = iommu_dma_supported,
1240 +};
1241 +
1242 void __init pci_iommu_alloc(void)
1243 {
1244 - dma_ops = &intel_dma_ops;
1245 -
1246 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250 - dma_ops->dma_supported = iommu_dma_supported;
1251 + dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256 --- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257 +++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262 -struct dma_map_ops swiotlb_dma_ops = {
1263 +const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267 diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268 --- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269 +++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274 +
1275 +#ifdef CONFIG_PAX_RANDMMAP
1276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1277 + addr = mm->free_area_cache;
1278 + else
1279 +#endif
1280 +
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288 - if (start_addr != TASK_UNMAPPED_BASE) {
1289 + if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291 - addr = TASK_UNMAPPED_BASE;
1292 + addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297 - if (!vma || addr + len <= vma->vm_start) {
1298 + if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302 diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303 --- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304 +++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309 -static struct sysfs_ops cache_sysfs_ops = {
1310 +static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314 diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315 --- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316 +++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317 @@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321 - __phys_per_cpu_start = __per_cpu_load;
1322 + __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326 diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327 --- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328 +++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333 +#ifdef CONFIG_PAX_PAGEEXEC
1334 +void pax_report_insns(void *pc, void *sp)
1335 +{
1336 + unsigned long i;
1337 +
1338 + printk(KERN_ERR "PAX: bytes at PC: ");
1339 + for (i = 0; i < 8; i++) {
1340 + unsigned int c;
1341 + if (get_user(c, (unsigned int *)pc+i))
1342 + printk(KERN_CONT "???????? ");
1343 + else
1344 + printk(KERN_CONT "%08x ", c);
1345 + }
1346 + printk("\n");
1347 +}
1348 +#endif
1349 +
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357 - if ((vma->vm_flags & mask) != mask)
1358 + if ((vma->vm_flags & mask) != mask) {
1359 +
1360 +#ifdef CONFIG_PAX_PAGEEXEC
1361 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363 + goto bad_area;
1364 +
1365 + up_read(&mm->mmap_sem);
1366 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367 + do_group_exit(SIGKILL);
1368 + }
1369 +#endif
1370 +
1371 goto bad_area;
1372
1373 + }
1374 +
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378 diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379 --- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385 - if (!vmm || (addr + len) <= vmm->vm_start)
1386 + if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390 diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391 --- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392 +++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400 + vma->vm_flags &= ~VM_EXEC;
1401 +
1402 +#ifdef CONFIG_PAX_MPROTECT
1403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404 + vma->vm_flags &= ~VM_MAYEXEC;
1405 +#endif
1406 +
1407 + }
1408 +#endif
1409 +
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413 diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414 --- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415 +++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420 -static struct dma_map_ops sn_dma_ops = {
1421 +static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
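This and the many similar "static const struct ... ops" hunks below serve one purpose: a const-qualified table of function pointers can be placed in a read-only section, so a kernel-mode arbitrary write can no longer be turned into control flow by overwriting an ops pointer. A minimal, hypothetical illustration of the pattern (not code from the patch):

struct ops_sketch {
	int (*start)(void);
	int (*stop)(void);
};

static int demo_start(void) { return 0; }
static int demo_stop(void)  { return 0; }

/* The const qualifier lets the whole table live in .rodata; a later
 * assignment such as demo_ops.start = evil is a compile-time error. */
static const struct ops_sketch demo_ops = {
	.start = demo_start,
	.stop  = demo_stop,
};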
1425 diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426 --- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427 +++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428 @@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432 + if ((long)n < 0)
1433 + return n;
1434 +
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442 + if ((long)n < 0)
1443 + return n;
1444 +
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
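The "(long)n < 0" guard added to both copy helpers is a cheap length sanity check: a size with its sign bit set (2 GB or more on a 32-bit machine) is almost certainly a negative or underflowed length that was converted to unsigned, so it is rejected before access_ok() and before any bytes move. A small standalone illustration of what the guard catches:

#include <stdio.h>

/* Mirrors the added check: a copy helper returns the number of bytes
 * it did not copy, so returning n means "nothing was copied". */
static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;
	/* ... the real copy would run here ... */
	return 0;
}

int main(void)
{
	unsigned long bogus = (unsigned long)-4;	/* e.g. len = x - y with y > x */

	printf("left uncopied: %lu\n", guarded_copy(bogus));
	return 0;
}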
1448 diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449 --- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450 +++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455 -static struct platform_suspend_ops db1x_pm_ops = {
1456 +static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460 diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461 --- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462 +++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467 +#ifdef CONFIG_PAX_ASLR
1468 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469 +
1470 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472 +#endif
1473 +
1474 #endif /* _ASM_ELF_H */
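PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX applies at page granularity under PAX_ASLR; with 4 KB pages, 27 - PAGE_SHIFT is 15 bits, i.e. a 2^15-page (128 MB) window for the mmap base. A rough sketch of how such a delta is consumed, with helper and constant names assumed for illustration only:

/* 27 - 12 = 15 random bits -> 1 << 15 = 32768 pages -> 128 MB window */
#define PAGE_SHIFT_SKETCH	12
#define DELTA_MMAP_LEN_SKETCH	(27 - PAGE_SHIFT_SKETCH)

static unsigned long randomize_base_sketch(unsigned long base, unsigned long rnd)
{
	unsigned long delta = rnd & ((1UL << DELTA_MMAP_LEN_SKETCH) - 1);

	return base + (delta << PAGE_SHIFT_SKETCH);	/* page-aligned offset */
}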
1475 diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476 --- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477 +++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487 diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1488 --- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1489 +++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1490 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1491 */
1492 #define __ARCH_WANT_UNLOCKED_CTXSW
1493
1494 -extern unsigned long arch_align_stack(unsigned long sp);
1495 +#define arch_align_stack(x) ((x) & ~0xfUL)
1496
1497 #endif /* _ASM_SYSTEM_H */
1498 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1499 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1500 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1501 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1502 #undef ELF_ET_DYN_BASE
1503 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1504
1505 +#ifdef CONFIG_PAX_ASLR
1506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507 +
1508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510 +#endif
1511 +
1512 #include <asm/processor.h>
1513 #include <linux/module.h>
1514 #include <linux/elfcore.h>
1515 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1516 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1517 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1518 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1519 #undef ELF_ET_DYN_BASE
1520 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1521
1522 +#ifdef CONFIG_PAX_ASLR
1523 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1524 +
1525 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1526 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1527 +#endif
1528 +
1529 #include <asm/processor.h>
1530
1531 /*
1532 diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1533 --- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1534 +++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1535 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1536 return -1;
1537 }
1538
1539 +/* cannot be const */
1540 struct kgdb_arch arch_kgdb_ops;
1541
1542 /*
1543 diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1544 --- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1545 +++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1546 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1547 out:
1548 return pc;
1549 }
1550 -
1551 -/*
1552 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1553 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1554 - */
1555 -unsigned long arch_align_stack(unsigned long sp)
1556 -{
1557 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1558 - sp -= get_random_int() & ~PAGE_MASK;
1559 -
1560 - return sp & ALMASK;
1561 -}
1562 diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1563 --- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1564 +++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1565 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1566 do_color_align = 0;
1567 if (filp || (flags & MAP_SHARED))
1568 do_color_align = 1;
1569 +
1570 +#ifdef CONFIG_PAX_RANDMMAP
1571 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1572 +#endif
1573 +
1574 if (addr) {
1575 if (do_color_align)
1576 addr = COLOUR_ALIGN(addr, pgoff);
1577 else
1578 addr = PAGE_ALIGN(addr);
1579 vmm = find_vma(current->mm, addr);
1580 - if (task_size - len >= addr &&
1581 - (!vmm || addr + len <= vmm->vm_start))
1582 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1583 return addr;
1584 }
1585 - addr = TASK_UNMAPPED_BASE;
1586 + addr = current->mm->mmap_base;
1587 if (do_color_align)
1588 addr = COLOUR_ALIGN(addr, pgoff);
1589 else
1590 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1591 /* At this point: (!vmm || addr < vmm->vm_end). */
1592 if (task_size - len < addr)
1593 return -ENOMEM;
1594 - if (!vmm || addr + len <= vmm->vm_start)
1595 + if (check_heap_stack_gap(vmm, addr, len))
1596 return addr;
1597 addr = vmm->vm_end;
1598 if (do_color_align)
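Two related changes in this routine: with PAX_RANDMMAP set the caller's address hint is ignored (the "if (addr)" fast path is skipped), and the linear search now starts from current->mm->mmap_base rather than the fixed TASK_UNMAPPED_BASE, because under PaX ASLR mmap_base already carries the per-process random offset. A simplified sketch of the difference in search origin (delta_mmap stands in for the random offset PaX keeps in the mm; not the patch's exact code):

static unsigned long search_origin_sketch(int randmmap,
					  unsigned long task_unmapped_base,
					  unsigned long delta_mmap)
{
	if (randmmap)
		return task_unmapped_base + delta_mmap;	/* randomized mmap_base */
	return task_unmapped_base;			/* legacy fixed base */
}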
1599 diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1600 --- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1601 +++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1602 @@ -26,6 +26,23 @@
1603 #include <asm/ptrace.h>
1604 #include <asm/highmem.h> /* For VMALLOC_END */
1605
1606 +#ifdef CONFIG_PAX_PAGEEXEC
1607 +void pax_report_insns(void *pc, void *sp)
1608 +{
1609 + unsigned long i;
1610 +
1611 + printk(KERN_ERR "PAX: bytes at PC: ");
1612 + for (i = 0; i < 5; i++) {
1613 + unsigned int c;
1614 + if (get_user(c, (unsigned int *)pc+i))
1615 + printk(KERN_CONT "???????? ");
1616 + else
1617 + printk(KERN_CONT "%08x ", c);
1618 + }
1619 + printk("\n");
1620 +}
1621 +#endif
1622 +
1623 /*
1624 * This routine handles page faults. It determines the address,
1625 * and the problem, and then passes it off to one of the appropriate
1626 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1627 --- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1628 +++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1629 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1630
1631 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1632
1633 +#ifdef CONFIG_PAX_ASLR
1634 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1635 +
1636 +#define PAX_DELTA_MMAP_LEN 16
1637 +#define PAX_DELTA_STACK_LEN 16
1638 +#endif
1639 +
1640 /* This yields a mask that user programs can use to figure out what
1641 instruction set this CPU supports. This could be done in user space,
1642 but it's not easy, and we've already done it here. */
1643 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1644 --- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1645 +++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1646 @@ -207,6 +207,17 @@
1647 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1648 #define PAGE_COPY PAGE_EXECREAD
1649 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1650 +
1651 +#ifdef CONFIG_PAX_PAGEEXEC
1652 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1653 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1654 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1655 +#else
1656 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1657 +# define PAGE_COPY_NOEXEC PAGE_COPY
1658 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1659 +#endif
1660 +
1661 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1662 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1663 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1664 diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1665 --- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1666 +++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1667 @@ -95,16 +95,38 @@
1668
1669 /* three functions to determine where in the module core
1670 * or init pieces the location is */
1671 +static inline int in_init_rx(struct module *me, void *loc)
1672 +{
1673 + return (loc >= me->module_init_rx &&
1674 + loc < (me->module_init_rx + me->init_size_rx));
1675 +}
1676 +
1677 +static inline int in_init_rw(struct module *me, void *loc)
1678 +{
1679 + return (loc >= me->module_init_rw &&
1680 + loc < (me->module_init_rw + me->init_size_rw));
1681 +}
1682 +
1683 static inline int in_init(struct module *me, void *loc)
1684 {
1685 - return (loc >= me->module_init &&
1686 - loc <= (me->module_init + me->init_size));
1687 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1688 +}
1689 +
1690 +static inline int in_core_rx(struct module *me, void *loc)
1691 +{
1692 + return (loc >= me->module_core_rx &&
1693 + loc < (me->module_core_rx + me->core_size_rx));
1694 +}
1695 +
1696 +static inline int in_core_rw(struct module *me, void *loc)
1697 +{
1698 + return (loc >= me->module_core_rw &&
1699 + loc < (me->module_core_rw + me->core_size_rw));
1700 }
1701
1702 static inline int in_core(struct module *me, void *loc)
1703 {
1704 - return (loc >= me->module_core &&
1705 - loc <= (me->module_core + me->core_size));
1706 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1707 }
1708
1709 static inline int in_local(struct module *me, void *loc)
1710 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1711 }
1712
1713 /* align things a bit */
1714 - me->core_size = ALIGN(me->core_size, 16);
1715 - me->arch.got_offset = me->core_size;
1716 - me->core_size += gots * sizeof(struct got_entry);
1717 -
1718 - me->core_size = ALIGN(me->core_size, 16);
1719 - me->arch.fdesc_offset = me->core_size;
1720 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1721 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1722 + me->arch.got_offset = me->core_size_rw;
1723 + me->core_size_rw += gots * sizeof(struct got_entry);
1724 +
1725 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1726 + me->arch.fdesc_offset = me->core_size_rw;
1727 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1728
1729 me->arch.got_max = gots;
1730 me->arch.fdesc_max = fdescs;
1731 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1732
1733 BUG_ON(value == 0);
1734
1735 - got = me->module_core + me->arch.got_offset;
1736 + got = me->module_core_rw + me->arch.got_offset;
1737 for (i = 0; got[i].addr; i++)
1738 if (got[i].addr == value)
1739 goto out;
1740 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1741 #ifdef CONFIG_64BIT
1742 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1743 {
1744 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1745 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1746
1747 if (!value) {
1748 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1749 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1750
1751 /* Create new one */
1752 fdesc->addr = value;
1753 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1754 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1755 return (Elf_Addr)fdesc;
1756 }
1757 #endif /* CONFIG_64BIT */
1758 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1759
1760 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1761 end = table + sechdrs[me->arch.unwind_section].sh_size;
1762 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1763 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1764
1765 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1766 me->arch.unwind_section, table, end, gp);
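The hunks above follow from splitting the stock module_core/module_init regions into paired halves: an _rx half for code (read-only and executable under KERNEXEC) and an _rw half for data, the GOT and the OPD/function descriptors. That is why the GOT and fdesc bookkeeping now accumulates into core_size_rw and why in_core()/in_init() must test both halves. A stripped-down picture of the layout, using the field names visible in the hunks but with the struct shape simplified:

/* Simplified view of the split module layout implied by the hunks above. */
struct module_layout_sketch {
	void *module_core_rx;		/* .text and other read-only/exec sections */
	unsigned long core_size_rx;
	void *module_core_rw;		/* .data, .bss, GOT, function descriptors */
	unsigned long core_size_rw;
	void *module_init_rx;		/* same split for the init image */
	unsigned long init_size_rx;
	void *module_init_rw;
	unsigned long init_size_rw;
};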
1767 diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1768 --- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1769 +++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1770 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1771 /* At this point: (!vma || addr < vma->vm_end). */
1772 if (TASK_SIZE - len < addr)
1773 return -ENOMEM;
1774 - if (!vma || addr + len <= vma->vm_start)
1775 + if (check_heap_stack_gap(vma, addr, len))
1776 return addr;
1777 addr = vma->vm_end;
1778 }
1779 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1780 /* At this point: (!vma || addr < vma->vm_end). */
1781 if (TASK_SIZE - len < addr)
1782 return -ENOMEM;
1783 - if (!vma || addr + len <= vma->vm_start)
1784 + if (check_heap_stack_gap(vma, addr, len))
1785 return addr;
1786 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1787 if (addr < vma->vm_end) /* handle wraparound */
1788 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1789 if (flags & MAP_FIXED)
1790 return addr;
1791 if (!addr)
1792 - addr = TASK_UNMAPPED_BASE;
1793 + addr = current->mm->mmap_base;
1794
1795 if (filp) {
1796 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1797 diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1798 --- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1799 +++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1800 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1801
1802 down_read(&current->mm->mmap_sem);
1803 vma = find_vma(current->mm,regs->iaoq[0]);
1804 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1805 - && (vma->vm_flags & VM_EXEC)) {
1806 -
1807 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1808 fault_address = regs->iaoq[0];
1809 fault_space = regs->iasq[0];
1810
1811 diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1812 --- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1813 +++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1814 @@ -15,6 +15,7 @@
1815 #include <linux/sched.h>
1816 #include <linux/interrupt.h>
1817 #include <linux/module.h>
1818 +#include <linux/unistd.h>
1819
1820 #include <asm/uaccess.h>
1821 #include <asm/traps.h>
1822 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1823 static unsigned long
1824 parisc_acctyp(unsigned long code, unsigned int inst)
1825 {
1826 - if (code == 6 || code == 16)
1827 + if (code == 6 || code == 7 || code == 16)
1828 return VM_EXEC;
1829
1830 switch (inst & 0xf0000000) {
1831 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1832 }
1833 #endif
1834
1835 +#ifdef CONFIG_PAX_PAGEEXEC
1836 +/*
1837 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1838 + *
1839 + * returns 1 when task should be killed
1840 + * 2 when rt_sigreturn trampoline was detected
1841 + * 3 when unpatched PLT trampoline was detected
1842 + */
1843 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1844 +{
1845 +
1846 +#ifdef CONFIG_PAX_EMUPLT
1847 + int err;
1848 +
1849 + do { /* PaX: unpatched PLT emulation */
1850 + unsigned int bl, depwi;
1851 +
1852 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1853 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1854 +
1855 + if (err)
1856 + break;
1857 +
1858 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1859 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1860 +
1861 + err = get_user(ldw, (unsigned int *)addr);
1862 + err |= get_user(bv, (unsigned int *)(addr+4));
1863 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1864 +
1865 + if (err)
1866 + break;
1867 +
1868 + if (ldw == 0x0E801096U &&
1869 + bv == 0xEAC0C000U &&
1870 + ldw2 == 0x0E881095U)
1871 + {
1872 + unsigned int resolver, map;
1873 +
1874 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1875 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1876 + if (err)
1877 + break;
1878 +
1879 + regs->gr[20] = instruction_pointer(regs)+8;
1880 + regs->gr[21] = map;
1881 + regs->gr[22] = resolver;
1882 + regs->iaoq[0] = resolver | 3UL;
1883 + regs->iaoq[1] = regs->iaoq[0] + 4;
1884 + return 3;
1885 + }
1886 + }
1887 + } while (0);
1888 +#endif
1889 +
1890 +#ifdef CONFIG_PAX_EMUTRAMP
1891 +
1892 +#ifndef CONFIG_PAX_EMUSIGRT
1893 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1894 + return 1;
1895 +#endif
1896 +
1897 + do { /* PaX: rt_sigreturn emulation */
1898 + unsigned int ldi1, ldi2, bel, nop;
1899 +
1900 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1901 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1902 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1903 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1904 +
1905 + if (err)
1906 + break;
1907 +
1908 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1909 + ldi2 == 0x3414015AU &&
1910 + bel == 0xE4008200U &&
1911 + nop == 0x08000240U)
1912 + {
1913 + regs->gr[25] = (ldi1 & 2) >> 1;
1914 + regs->gr[20] = __NR_rt_sigreturn;
1915 + regs->gr[31] = regs->iaoq[1] + 16;
1916 + regs->sr[0] = regs->iasq[1];
1917 + regs->iaoq[0] = 0x100UL;
1918 + regs->iaoq[1] = regs->iaoq[0] + 4;
1919 + regs->iasq[0] = regs->sr[2];
1920 + regs->iasq[1] = regs->sr[2];
1921 + return 2;
1922 + }
1923 + } while (0);
1924 +#endif
1925 +
1926 + return 1;
1927 +}
1928 +
1929 +void pax_report_insns(void *pc, void *sp)
1930 +{
1931 + unsigned long i;
1932 +
1933 + printk(KERN_ERR "PAX: bytes at PC: ");
1934 + for (i = 0; i < 5; i++) {
1935 + unsigned int c;
1936 + if (get_user(c, (unsigned int *)pc+i))
1937 + printk(KERN_CONT "???????? ");
1938 + else
1939 + printk(KERN_CONT "%08x ", c);
1940 + }
1941 + printk("\n");
1942 +}
1943 +#endif
1944 +
1945 int fixup_exception(struct pt_regs *regs)
1946 {
1947 const struct exception_table_entry *fix;
1948 @@ -192,8 +303,33 @@ good_area:
1949
1950 acc_type = parisc_acctyp(code,regs->iir);
1951
1952 - if ((vma->vm_flags & acc_type) != acc_type)
1953 + if ((vma->vm_flags & acc_type) != acc_type) {
1954 +
1955 +#ifdef CONFIG_PAX_PAGEEXEC
1956 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1957 + (address & ~3UL) == instruction_pointer(regs))
1958 + {
1959 + up_read(&mm->mmap_sem);
1960 + switch (pax_handle_fetch_fault(regs)) {
1961 +
1962 +#ifdef CONFIG_PAX_EMUPLT
1963 + case 3:
1964 + return;
1965 +#endif
1966 +
1967 +#ifdef CONFIG_PAX_EMUTRAMP
1968 + case 2:
1969 + return;
1970 +#endif
1971 +
1972 + }
1973 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1974 + do_group_exit(SIGKILL);
1975 + }
1976 +#endif
1977 +
1978 goto bad_area;
1979 + }
1980
1981 /*
1982 * If for any reason at all we couldn't handle the fault, make
1983 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
1984 --- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1985 +++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1986 @@ -14,7 +14,7 @@ struct dev_archdata {
1987 struct device_node *of_node;
1988
1989 /* DMA operations on that device */
1990 - struct dma_map_ops *dma_ops;
1991 + const struct dma_map_ops *dma_ops;
1992
1993 /*
1994 * When an iommu is in use, dma_data is used as a ptr to the base of the
1995 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
1996 --- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1997 +++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1998 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1999 #ifdef CONFIG_PPC64
2000 extern struct dma_map_ops dma_iommu_ops;
2001 #endif
2002 -extern struct dma_map_ops dma_direct_ops;
2003 +extern const struct dma_map_ops dma_direct_ops;
2004
2005 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2006 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2007 {
2008 /* We don't handle the NULL dev case for ISA for now. We could
2009 * do it via an out of line call but it is not needed for now. The
2010 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2011 return dev->archdata.dma_ops;
2012 }
2013
2014 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2015 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2016 {
2017 dev->archdata.dma_ops = ops;
2018 }
2019 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2020
2021 static inline int dma_supported(struct device *dev, u64 mask)
2022 {
2023 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2024 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2025
2026 if (unlikely(dma_ops == NULL))
2027 return 0;
2028 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2029
2030 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2031 {
2032 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2033 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2034
2035 if (unlikely(dma_ops == NULL))
2036 return -EIO;
2037 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2038 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2039 dma_addr_t *dma_handle, gfp_t flag)
2040 {
2041 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2042 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2043 void *cpu_addr;
2044
2045 BUG_ON(!dma_ops);
2046 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2047 static inline void dma_free_coherent(struct device *dev, size_t size,
2048 void *cpu_addr, dma_addr_t dma_handle)
2049 {
2050 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2051 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2052
2053 BUG_ON(!dma_ops);
2054
2055 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2056
2057 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2058 {
2059 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2060 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2061
2062 if (dma_ops->mapping_error)
2063 return dma_ops->mapping_error(dev, dma_addr);
2064 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2065 --- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2066 +++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2067 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2068 the loader. We need to make sure that it is out of the way of the program
2069 that it will "exec", and that there is sufficient room for the brk. */
2070
2071 -extern unsigned long randomize_et_dyn(unsigned long base);
2072 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2073 +#define ELF_ET_DYN_BASE (0x20000000)
2074 +
2075 +#ifdef CONFIG_PAX_ASLR
2076 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2077 +
2078 +#ifdef __powerpc64__
2079 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2080 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2081 +#else
2082 +#define PAX_DELTA_MMAP_LEN 15
2083 +#define PAX_DELTA_STACK_LEN 15
2084 +#endif
2085 +#endif
2086
2087 /*
2088 * Our registers are always unsigned longs, whether we're a 32 bit
2089 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2090 (0x7ff >> (PAGE_SHIFT - 12)) : \
2091 (0x3ffff >> (PAGE_SHIFT - 12)))
2092
2093 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2094 -#define arch_randomize_brk arch_randomize_brk
2095 -
2096 #endif /* __KERNEL__ */
2097
2098 /*
2099 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2100 --- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2101 +++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2102 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2103 extern void iommu_init_early_dart(void);
2104 extern void iommu_init_early_pasemi(void);
2105
2106 +/* dma-iommu.c */
2107 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2108 +
2109 #ifdef CONFIG_PCI
2110 extern void pci_iommu_init(void);
2111 extern void pci_direct_iommu_init(void);
2112 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2113 --- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2114 +++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2115 @@ -26,6 +26,7 @@ enum km_type {
2116 KM_SOFTIRQ1,
2117 KM_PPC_SYNC_PAGE,
2118 KM_PPC_SYNC_ICACHE,
2119 + KM_CLEARPAGE,
2120 KM_TYPE_NR
2121 };
2122
2123 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2124 --- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2125 +++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2126 @@ -180,15 +180,18 @@ do { \
2127 * stack by default, so in the absense of a PT_GNU_STACK program header
2128 * we turn execute permission off.
2129 */
2130 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2131 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2132 +#define VM_STACK_DEFAULT_FLAGS32 \
2133 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2134 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2135
2136 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2137 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138
2139 +#ifndef CONFIG_PAX_PAGEEXEC
2140 #define VM_STACK_DEFAULT_FLAGS \
2141 (test_thread_flag(TIF_32BIT) ? \
2142 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2143 +#endif
2144
2145 #include <asm-generic/getorder.h>
2146
2147 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2148 --- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2149 +++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2150 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 #ifndef __ASSEMBLY__
2170
2171 #undef STRICT_MM_TYPECHECKS
2172 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2173 --- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2174 +++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2175 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2176 }
2177
2178 #ifdef CONFIG_PCI
2179 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2180 -extern struct dma_map_ops *get_pci_dma_ops(void);
2181 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2182 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2183 #else /* CONFIG_PCI */
2184 #define set_pci_dma_ops(d)
2185 #define get_pci_dma_ops() NULL
2186 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2187 --- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2188 +++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2189 @@ -2,6 +2,7 @@
2190 #define _ASM_POWERPC_PGTABLE_H
2191 #ifdef __KERNEL__
2192
2193 +#include <linux/const.h>
2194 #ifndef __ASSEMBLY__
2195 #include <asm/processor.h> /* For TASK_SIZE */
2196 #include <asm/mmu.h>
2197 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2198 --- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2199 +++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2200 @@ -21,6 +21,7 @@
2201 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2202 #define _PAGE_USER 0x004 /* usermode access allowed */
2203 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2204 +#define _PAGE_EXEC _PAGE_GUARDED
2205 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2206 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2207 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2208 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2209 --- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2210 +++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2211 @@ -191,6 +191,7 @@
2212 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2213 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2214 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2215 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2216 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2217 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2218 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2219 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2220 --- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2221 +++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2222 @@ -13,7 +13,7 @@
2223
2224 #include <linux/swiotlb.h>
2225
2226 -extern struct dma_map_ops swiotlb_dma_ops;
2227 +extern const struct dma_map_ops swiotlb_dma_ops;
2228
2229 static inline void dma_mark_clean(void *addr, size_t size) {}
2230
2231 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2232 --- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2233 +++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2234 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238 -extern unsigned long arch_align_stack(unsigned long sp);
2239 +#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2244 --- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2245 +++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2246 @@ -13,6 +13,8 @@
2247 #define VERIFY_READ 0
2248 #define VERIFY_WRITE 1
2249
2250 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2251 +
2252 /*
2253 * The fs value determines whether argument validity checking should be
2254 * performed or not. If get_fs() == USER_DS, checking is performed, with
2255 @@ -327,52 +329,6 @@ do { \
2256 extern unsigned long __copy_tofrom_user(void __user *to,
2257 const void __user *from, unsigned long size);
2258
2259 -#ifndef __powerpc64__
2260 -
2261 -static inline unsigned long copy_from_user(void *to,
2262 - const void __user *from, unsigned long n)
2263 -{
2264 - unsigned long over;
2265 -
2266 - if (access_ok(VERIFY_READ, from, n))
2267 - return __copy_tofrom_user((__force void __user *)to, from, n);
2268 - if ((unsigned long)from < TASK_SIZE) {
2269 - over = (unsigned long)from + n - TASK_SIZE;
2270 - return __copy_tofrom_user((__force void __user *)to, from,
2271 - n - over) + over;
2272 - }
2273 - return n;
2274 -}
2275 -
2276 -static inline unsigned long copy_to_user(void __user *to,
2277 - const void *from, unsigned long n)
2278 -{
2279 - unsigned long over;
2280 -
2281 - if (access_ok(VERIFY_WRITE, to, n))
2282 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2283 - if ((unsigned long)to < TASK_SIZE) {
2284 - over = (unsigned long)to + n - TASK_SIZE;
2285 - return __copy_tofrom_user(to, (__force void __user *)from,
2286 - n - over) + over;
2287 - }
2288 - return n;
2289 -}
2290 -
2291 -#else /* __powerpc64__ */
2292 -
2293 -#define __copy_in_user(to, from, size) \
2294 - __copy_tofrom_user((to), (from), (size))
2295 -
2296 -extern unsigned long copy_from_user(void *to, const void __user *from,
2297 - unsigned long n);
2298 -extern unsigned long copy_to_user(void __user *to, const void *from,
2299 - unsigned long n);
2300 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2301 - unsigned long n);
2302 -
2303 -#endif /* __powerpc64__ */
2304 -
2305 static inline unsigned long __copy_from_user_inatomic(void *to,
2306 const void __user *from, unsigned long n)
2307 {
2308 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2309 if (ret == 0)
2310 return 0;
2311 }
2312 +
2313 + if (!__builtin_constant_p(n))
2314 + check_object_size(to, n, false);
2315 +
2316 return __copy_tofrom_user((__force void __user *)to, from, n);
2317 }
2318
2319 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2320 if (ret == 0)
2321 return 0;
2322 }
2323 +
2324 + if (!__builtin_constant_p(n))
2325 + check_object_size(from, n, true);
2326 +
2327 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2328 }
2329
2330 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2331 return __copy_to_user_inatomic(to, from, size);
2332 }
2333
2334 +#ifndef __powerpc64__
2335 +
2336 +static inline unsigned long __must_check copy_from_user(void *to,
2337 + const void __user *from, unsigned long n)
2338 +{
2339 + unsigned long over;
2340 +
2341 + if ((long)n < 0)
2342 + return n;
2343 +
2344 + if (access_ok(VERIFY_READ, from, n)) {
2345 + if (!__builtin_constant_p(n))
2346 + check_object_size(to, n, false);
2347 + return __copy_tofrom_user((__force void __user *)to, from, n);
2348 + }
2349 + if ((unsigned long)from < TASK_SIZE) {
2350 + over = (unsigned long)from + n - TASK_SIZE;
2351 + if (!__builtin_constant_p(n - over))
2352 + check_object_size(to, n - over, false);
2353 + return __copy_tofrom_user((__force void __user *)to, from,
2354 + n - over) + over;
2355 + }
2356 + return n;
2357 +}
2358 +
2359 +static inline unsigned long __must_check copy_to_user(void __user *to,
2360 + const void *from, unsigned long n)
2361 +{
2362 + unsigned long over;
2363 +
2364 + if ((long)n < 0)
2365 + return n;
2366 +
2367 + if (access_ok(VERIFY_WRITE, to, n)) {
2368 + if (!__builtin_constant_p(n))
2369 + check_object_size(from, n, true);
2370 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2371 + }
2372 + if ((unsigned long)to < TASK_SIZE) {
2373 + over = (unsigned long)to + n - TASK_SIZE;
2374 + if (!__builtin_constant_p(n))
2375 + check_object_size(from, n - over, true);
2376 + return __copy_tofrom_user(to, (__force void __user *)from,
2377 + n - over) + over;
2378 + }
2379 + return n;
2380 +}
2381 +
2382 +#else /* __powerpc64__ */
2383 +
2384 +#define __copy_in_user(to, from, size) \
2385 + __copy_tofrom_user((to), (from), (size))
2386 +
2387 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2388 +{
2389 + if ((long)n < 0 || n > INT_MAX)
2390 + return n;
2391 +
2392 + if (!__builtin_constant_p(n))
2393 + check_object_size(to, n, false);
2394 +
2395 + if (likely(access_ok(VERIFY_READ, from, n)))
2396 + n = __copy_from_user(to, from, n);
2397 + else
2398 + memset(to, 0, n);
2399 + return n;
2400 +}
2401 +
2402 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2403 +{
2404 + if ((long)n < 0 || n > INT_MAX)
2405 + return n;
2406 +
2407 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2408 + if (!__builtin_constant_p(n))
2409 + check_object_size(from, n, true);
2410 + n = __copy_to_user(to, from, n);
2411 + }
2412 + return n;
2413 +}
2414 +
2415 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2416 + unsigned long n);
2417 +
2418 +#endif /* __powerpc64__ */
2419 +
2420 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2421
2422 static inline unsigned long clear_user(void __user *addr, unsigned long size)
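check_object_size(), declared near the top of this header and called whenever the size is not a compile-time constant, is implemented elsewhere in this patch as part of the usercopy hardening; conceptually it verifies that a kernel buffer passed to a user copy lies entirely inside a single heap (slab) object or the current stack, and reports the offender otherwise. The containment test at the heart of such a check, as a standalone sketch (the real helper is considerably more involved):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Does [p, p+n) lie entirely inside the object [obj, obj+objlen)?
 * Written so that none of the arithmetic can wrap around. */
static bool object_contains_range(uintptr_t obj, size_t objlen,
				  uintptr_t p, size_t n)
{
	return p >= obj &&
	       p - obj <= objlen &&
	       n <= objlen - (p - obj);
}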
2423 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2424 --- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2425 +++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2426 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2427 &cache_assoc_attr,
2428 };
2429
2430 -static struct sysfs_ops cache_index_ops = {
2431 +static const struct sysfs_ops cache_index_ops = {
2432 .show = cache_index_show,
2433 };
2434
2435 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2436 --- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2437 +++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2438 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2439 }
2440 #endif
2441
2442 -struct dma_map_ops dma_direct_ops = {
2443 +const struct dma_map_ops dma_direct_ops = {
2444 .alloc_coherent = dma_direct_alloc_coherent,
2445 .free_coherent = dma_direct_free_coherent,
2446 .map_sg = dma_direct_map_sg,
2447 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2448 --- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2449 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2450 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2451 }
2452
2453 /* We support DMA to/from any memory page via the iommu */
2454 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2455 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2456 {
2457 struct iommu_table *tbl = get_iommu_table_base(dev);
2458
2459 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2460 --- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2461 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2462 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2463 * map_page, and unmap_page on highmem, use normal dma_ops
2464 * for everything else.
2465 */
2466 -struct dma_map_ops swiotlb_dma_ops = {
2467 +const struct dma_map_ops swiotlb_dma_ops = {
2468 .alloc_coherent = dma_direct_alloc_coherent,
2469 .free_coherent = dma_direct_free_coherent,
2470 .map_sg = swiotlb_map_sg_attrs,
2471 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2472 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2473 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2474 @@ -455,6 +455,7 @@ storage_fault_common:
2475 std r14,_DAR(r1)
2476 std r15,_DSISR(r1)
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478 + bl .save_nvgprs
2479 mr r4,r14
2480 mr r5,r15
2481 ld r14,PACA_EXGEN+EX_R14(r13)
2482 @@ -464,8 +465,7 @@ storage_fault_common:
2483 cmpdi r3,0
2484 bne- 1f
2485 b .ret_from_except_lite
2486 -1: bl .save_nvgprs
2487 - mr r5,r3
2488 +1: mr r5,r3
2489 addi r3,r1,STACK_FRAME_OVERHEAD
2490 ld r4,_DAR(r1)
2491 bl .bad_page_fault
2492 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2493 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2494 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2495 @@ -818,10 +818,10 @@ handle_page_fault:
2496 11: ld r4,_DAR(r1)
2497 ld r5,_DSISR(r1)
2498 addi r3,r1,STACK_FRAME_OVERHEAD
2499 + bl .save_nvgprs
2500 bl .do_page_fault
2501 cmpdi r3,0
2502 beq+ 13f
2503 - bl .save_nvgprs
2504 mr r5,r3
2505 addi r3,r1,STACK_FRAME_OVERHEAD
2506 lwz r4,_DAR(r1)
2507 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2508 --- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2509 +++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2510 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2511 return 1;
2512 }
2513
2514 -static struct dma_map_ops ibmebus_dma_ops = {
2515 +static const struct dma_map_ops ibmebus_dma_ops = {
2516 .alloc_coherent = ibmebus_alloc_coherent,
2517 .free_coherent = ibmebus_free_coherent,
2518 .map_sg = ibmebus_map_sg,
2519 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2520 --- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2521 +++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2522 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2523 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2524 return 0;
2525
2526 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2527 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2528 regs->nip += 4;
2529
2530 return 1;
2531 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2532 /*
2533 * Global data
2534 */
2535 -struct kgdb_arch arch_kgdb_ops = {
2536 +const struct kgdb_arch arch_kgdb_ops = {
2537 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2538 };
2539
2540 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2541 --- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2542 +++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2543 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2544 me->arch.core_plt_section = i;
2545 }
2546 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2547 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2548 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2549 return -ENOEXEC;
2550 }
2551
2552 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2553
2554 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2555 /* Init, or core PLT? */
2556 - if (location >= mod->module_core
2557 - && location < mod->module_core + mod->core_size)
2558 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2559 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2560 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2561 - else
2562 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2563 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2564 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2565 + else {
2566 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2567 + return ~0UL;
2568 + }
2569
2570 /* Find this entry, or if that fails, the next avail. entry */
2571 while (entry->jump[0]) {
2572 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2573 --- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2574 +++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2575 @@ -31,11 +31,24 @@
2576
2577 LIST_HEAD(module_bug_list);
2578
2579 +#ifdef CONFIG_PAX_KERNEXEC
2580 void *module_alloc(unsigned long size)
2581 {
2582 if (size == 0)
2583 return NULL;
2584
2585 + return vmalloc(size);
2586 +}
2587 +
2588 +void *module_alloc_exec(unsigned long size)
2589 +#else
2590 +void *module_alloc(unsigned long size)
2591 +#endif
2592 +
2593 +{
2594 + if (size == 0)
2595 + return NULL;
2596 +
2597 return vmalloc_exec(size);
2598 }
2599
2600 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2601 vfree(module_region);
2602 }
2603
2604 +#ifdef CONFIG_PAX_KERNEXEC
2605 +void module_free_exec(struct module *mod, void *module_region)
2606 +{
2607 + module_free(mod, module_region);
2608 +}
2609 +#endif
2610 +
2611 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2612 const Elf_Shdr *sechdrs,
2613 const char *name)
2614 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2615 --- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2616 +++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2617 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2618 unsigned int ppc_pci_flags = 0;
2619
2620
2621 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2622 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2623
2624 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2625 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2626 {
2627 pci_dma_ops = dma_ops;
2628 }
2629
2630 -struct dma_map_ops *get_pci_dma_ops(void)
2631 +const struct dma_map_ops *get_pci_dma_ops(void)
2632 {
2633 return pci_dma_ops;
2634 }
2635 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2636 --- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2637 +++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2638 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2639 * Lookup NIP late so we have the best change of getting the
2640 * above info out without failing
2641 */
2642 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2643 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2644 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2645 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2646 #endif
2647 show_stack(current, (unsigned long *) regs->gpr[1]);
2648 if (!user_mode(regs))
2649 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2650 newsp = stack[0];
2651 ip = stack[STACK_FRAME_LR_SAVE];
2652 if (!firstframe || ip != lr) {
2653 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2654 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2656 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2657 - printk(" (%pS)",
2658 + printk(" (%pA)",
2659 (void *)current->ret_stack[curr_frame].ret);
2660 curr_frame--;
2661 }
2662 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2663 struct pt_regs *regs = (struct pt_regs *)
2664 (sp + STACK_FRAME_OVERHEAD);
2665 lr = regs->link;
2666 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2667 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2668 regs->trap, (void *)regs->nip, (void *)lr);
2669 firstframe = 1;
2670 }
2671 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2672 }
2673
2674 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2675 -
2676 -unsigned long arch_align_stack(unsigned long sp)
2677 -{
2678 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2679 - sp -= get_random_int() & ~PAGE_MASK;
2680 - return sp & ~0xf;
2681 -}
2682 -
2683 -static inline unsigned long brk_rnd(void)
2684 -{
2685 - unsigned long rnd = 0;
2686 -
2687 - /* 8MB for 32bit, 1GB for 64bit */
2688 - if (is_32bit_task())
2689 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2690 - else
2691 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2692 -
2693 - return rnd << PAGE_SHIFT;
2694 -}
2695 -
2696 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2697 -{
2698 - unsigned long base = mm->brk;
2699 - unsigned long ret;
2700 -
2701 -#ifdef CONFIG_PPC_STD_MMU_64
2702 - /*
2703 - * If we are using 1TB segments and we are allowed to randomise
2704 - * the heap, we can put it above 1TB so it is backed by a 1TB
2705 - * segment. Otherwise the heap will be in the bottom 1TB
2706 - * which always uses 256MB segments and this may result in a
2707 - * performance penalty.
2708 - */
2709 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2710 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2711 -#endif
2712 -
2713 - ret = PAGE_ALIGN(base + brk_rnd());
2714 -
2715 - if (ret < mm->brk)
2716 - return mm->brk;
2717 -
2718 - return ret;
2719 -}
2720 -
2721 -unsigned long randomize_et_dyn(unsigned long base)
2722 -{
2723 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2724 -
2725 - if (ret < base)
2726 - return base;
2727 -
2728 - return ret;
2729 -}
2730 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2731 --- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2732 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2733 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2734 /* Save user registers on the stack */
2735 frame = &rt_sf->uc.uc_mcontext;
2736 addr = frame;
2737 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2738 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2739 if (save_user_regs(regs, frame, 0, 1))
2740 goto badframe;
2741 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2742 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2743 --- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2744 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2745 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2746 current->thread.fpscr.val = 0;
2747
2748 /* Set up to return from userspace. */
2749 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2750 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2751 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2752 } else {
2753 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2754 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2755 --- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2756 +++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2757 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2758 if (oldlenp) {
2759 if (!error) {
2760 if (get_user(oldlen, oldlenp) ||
2761 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2762 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2763 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2764 error = -EFAULT;
2765 }
2766 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2767 }
2768 return error;
2769 }
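The compat_sys_sysctl hunk folds the copy-out of the __unused fields into the existing error test because copy_to_user() returns the number of bytes it failed to copy; the original code discarded that result, so a faulting user pointer went unreported. The idiomatic checked form, assuming the usual <linux/uaccess.h> helpers:

/* copy_to_user() returns 0 on success or the number of bytes left
 * uncopied; any nonzero remainder is turned into -EFAULT here. */
static long put_to_user_checked(void __user *dst, const void *src, size_t len)
{
	if (copy_to_user(dst, src, len))
		return -EFAULT;
	return 0;
}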
2770 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2771 --- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2772 +++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2773 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2774 static inline void pmac_backlight_unblank(void) { }
2775 #endif
2776
2777 +extern void gr_handle_kernel_exploit(void);
2778 +
2779 int die(const char *str, struct pt_regs *regs, long err)
2780 {
2781 static struct {
2782 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2783 if (panic_on_oops)
2784 panic("Fatal exception");
2785
2786 + gr_handle_kernel_exploit();
2787 +
2788 oops_exit();
2789 do_exit(err);
2790
2791 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2792 --- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2793 +++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2794 @@ -36,6 +36,7 @@
2795 #include <asm/firmware.h>
2796 #include <asm/vdso.h>
2797 #include <asm/vdso_datapage.h>
2798 +#include <asm/mman.h>
2799
2800 #include "setup.h"
2801
2802 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2803 vdso_base = VDSO32_MBASE;
2804 #endif
2805
2806 - current->mm->context.vdso_base = 0;
2807 + current->mm->context.vdso_base = ~0UL;
2808
2809 /* vDSO has a problem and was disabled, just don't "enable" it for the
2810 * process
2811 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2812 vdso_base = get_unmapped_area(NULL, vdso_base,
2813 (vdso_pages << PAGE_SHIFT) +
2814 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2815 - 0, 0);
2816 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2817 if (IS_ERR_VALUE(vdso_base)) {
2818 rc = vdso_base;
2819 goto fail_mmapsem;
2820 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2821 --- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2822 +++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2823 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2824 vio_cmo_dealloc(viodev, alloc_size);
2825 }
2826
2827 -struct dma_map_ops vio_dma_mapping_ops = {
2828 +static const struct dma_map_ops vio_dma_mapping_ops = {
2829 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2830 .free_coherent = vio_dma_iommu_free_coherent,
2831 .map_sg = vio_dma_iommu_map_sg,
2832 .unmap_sg = vio_dma_iommu_unmap_sg,
2833 + .dma_supported = dma_iommu_dma_supported,
2834 .map_page = vio_dma_iommu_map_page,
2835 .unmap_page = vio_dma_iommu_unmap_page,
2836
2837 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2838
2839 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2840 {
2841 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2842 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2843 }
2844
2845 diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2846 --- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2847 +++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2848 @@ -9,22 +9,6 @@
2849 #include <linux/module.h>
2850 #include <asm/uaccess.h>
2851
2852 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2853 -{
2854 - if (likely(access_ok(VERIFY_READ, from, n)))
2855 - n = __copy_from_user(to, from, n);
2856 - else
2857 - memset(to, 0, n);
2858 - return n;
2859 -}
2860 -
2861 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2862 -{
2863 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2864 - n = __copy_to_user(to, from, n);
2865 - return n;
2866 -}
2867 -
2868 unsigned long copy_in_user(void __user *to, const void __user *from,
2869 unsigned long n)
2870 {
2871 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2872 return n;
2873 }
2874
2875 -EXPORT_SYMBOL(copy_from_user);
2876 -EXPORT_SYMBOL(copy_to_user);
2877 EXPORT_SYMBOL(copy_in_user);
2878
2879 diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2880 --- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2881 +++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2882 @@ -30,6 +30,10 @@
2883 #include <linux/kprobes.h>
2884 #include <linux/kdebug.h>
2885 #include <linux/perf_event.h>
2886 +#include <linux/slab.h>
2887 +#include <linux/pagemap.h>
2888 +#include <linux/compiler.h>
2889 +#include <linux/unistd.h>
2890
2891 #include <asm/firmware.h>
2892 #include <asm/page.h>
2893 @@ -40,6 +44,7 @@
2894 #include <asm/uaccess.h>
2895 #include <asm/tlbflush.h>
2896 #include <asm/siginfo.h>
2897 +#include <asm/ptrace.h>
2898
2899
2900 #ifdef CONFIG_KPROBES
2901 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2902 }
2903 #endif
2904
2905 +#ifdef CONFIG_PAX_PAGEEXEC
2906 +/*
2907 + * PaX: decide what to do with offenders (regs->nip = fault address)
2908 + *
2909 + * returns 1 when task should be killed
2910 + */
2911 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2912 +{
2913 + return 1;
2914 +}
2915 +
2916 +void pax_report_insns(void *pc, void *sp)
2917 +{
2918 + unsigned long i;
2919 +
2920 + printk(KERN_ERR "PAX: bytes at PC: ");
2921 + for (i = 0; i < 5; i++) {
2922 + unsigned int c;
2923 + if (get_user(c, (unsigned int __user *)pc+i))
2924 + printk(KERN_CONT "???????? ");
2925 + else
2926 + printk(KERN_CONT "%08x ", c);
2927 + }
2928 + printk("\n");
2929 +}
2930 +#endif
2931 +
2932 /*
2933 * Check whether the instruction at regs->nip is a store using
2934 * an update addressing form which will update r1.
2935 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2936 * indicate errors in DSISR but can validly be set in SRR1.
2937 */
2938 if (trap == 0x400)
2939 - error_code &= 0x48200000;
2940 + error_code &= 0x58200000;
2941 else
2942 is_write = error_code & DSISR_ISSTORE;
2943 #else
2944 @@ -250,7 +282,7 @@ good_area:
2945 * "undefined". Of those that can be set, this is the only
2946 * one which seems bad.
2947 */
2948 - if (error_code & 0x10000000)
2949 + if (error_code & DSISR_GUARDED)
2950 /* Guarded storage error. */
2951 goto bad_area;
2952 #endif /* CONFIG_8xx */
2953 @@ -265,7 +297,7 @@ good_area:
2954 * processors use the same I/D cache coherency mechanism
2955 * as embedded.
2956 */
2957 - if (error_code & DSISR_PROTFAULT)
2958 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2959 goto bad_area;
2960 #endif /* CONFIG_PPC_STD_MMU */
2961
2962 @@ -335,6 +367,23 @@ bad_area:
2963 bad_area_nosemaphore:
2964 /* User mode accesses cause a SIGSEGV */
2965 if (user_mode(regs)) {
2966 +
2967 +#ifdef CONFIG_PAX_PAGEEXEC
2968 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2969 +#ifdef CONFIG_PPC_STD_MMU
2970 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2971 +#else
2972 + if (is_exec && regs->nip == address) {
2973 +#endif
2974 + switch (pax_handle_fetch_fault(regs)) {
2975 + }
2976 +
2977 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2978 + do_group_exit(SIGKILL);
2979 + }
2980 + }
2981 +#endif
2982 +
2983 _exception(SIGSEGV, regs, code, address);
2984 return 0;
2985 }
2986 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
2987 --- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2988 +++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2989 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2990 */
2991 if (mmap_is_legacy()) {
2992 mm->mmap_base = TASK_UNMAPPED_BASE;
2993 +
2994 +#ifdef CONFIG_PAX_RANDMMAP
2995 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2996 + mm->mmap_base += mm->delta_mmap;
2997 +#endif
2998 +
2999 mm->get_unmapped_area = arch_get_unmapped_area;
3000 mm->unmap_area = arch_unmap_area;
3001 } else {
3002 mm->mmap_base = mmap_base();
3003 +
3004 +#ifdef CONFIG_PAX_RANDMMAP
3005 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3006 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3007 +#endif
3008 +
3009 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3010 mm->unmap_area = arch_unmap_area_topdown;
3011 }
3012 diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3013 --- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3014 +++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3015 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3016 if ((mm->task_size - len) < addr)
3017 return 0;
3018 vma = find_vma(mm, addr);
3019 - return (!vma || (addr + len) <= vma->vm_start);
3020 + return check_heap_stack_gap(vma, addr, len);
3021 }
3022
3023 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3024 @@ -256,7 +256,7 @@ full_search:
3025 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3026 continue;
3027 }
3028 - if (!vma || addr + len <= vma->vm_start) {
3029 + if (check_heap_stack_gap(vma, addr, len)) {
3030 /*
3031 * Remember the place where we stopped the search:
3032 */
3033 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3034 }
3035 }
3036
3037 - addr = mm->mmap_base;
3038 - while (addr > len) {
3039 + if (mm->mmap_base < len)
3040 + addr = -ENOMEM;
3041 + else
3042 + addr = mm->mmap_base - len;
3043 +
3044 + while (!IS_ERR_VALUE(addr)) {
3045 /* Go down by chunk size */
3046 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3047 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3048
3049 /* Check for hit with different page size */
3050 mask = slice_range_to_mask(addr, len);
3051 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3052 * return with success:
3053 */
3054 vma = find_vma(mm, addr);
3055 - if (!vma || (addr + len) <= vma->vm_start) {
3056 + if (check_heap_stack_gap(vma, addr, len)) {
3057 /* remember the address as a hint for next time */
3058 if (use_cache)
3059 mm->free_area_cache = addr;
3060 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3061 mm->cached_hole_size = vma->vm_start - addr;
3062
3063 /* try just below the current vma->vm_start */
3064 - addr = vma->vm_start;
3065 + addr = skip_heap_stack_gap(vma, len);
3066 }
3067
3068 /*
3069 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3070 if (fixed && addr > (mm->task_size - len))
3071 return -EINVAL;
3072
3073 +#ifdef CONFIG_PAX_RANDMMAP
3074 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3075 + addr = 0;
3076 +#endif
3077 +
3078 /* If hint, make sure it matches our alignment restrictions */
3079 if (!fixed && addr) {
3080 addr = _ALIGN_UP(addr, 1ul << pshift);
3081 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3082 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3083 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3084 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3085 lite5200_pm_target_state = PM_SUSPEND_ON;
3086 }
3087
3088 -static struct platform_suspend_ops lite5200_pm_ops = {
3089 +static const struct platform_suspend_ops lite5200_pm_ops = {
3090 .valid = lite5200_pm_valid,
3091 .begin = lite5200_pm_begin,
3092 .prepare = lite5200_pm_prepare,
3093 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3094 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3095 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3096 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3097 iounmap(mbar);
3098 }
3099
3100 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3101 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3102 .valid = mpc52xx_pm_valid,
3103 .prepare = mpc52xx_pm_prepare,
3104 .enter = mpc52xx_pm_enter,
3105 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3106 --- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3107 +++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3108 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3109 return ret;
3110 }
3111
3112 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3113 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3114 .valid = mpc83xx_suspend_valid,
3115 .begin = mpc83xx_suspend_begin,
3116 .enter = mpc83xx_suspend_enter,
3117 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3118 --- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3119 +++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3120 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3121
3122 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3123
3124 -struct dma_map_ops dma_iommu_fixed_ops = {
3125 +const struct dma_map_ops dma_iommu_fixed_ops = {
3126 .alloc_coherent = dma_fixed_alloc_coherent,
3127 .free_coherent = dma_fixed_free_coherent,
3128 .map_sg = dma_fixed_map_sg,
3129 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3130 --- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3131 +++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3132 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3133 return mask >= DMA_BIT_MASK(32);
3134 }
3135
3136 -static struct dma_map_ops ps3_sb_dma_ops = {
3137 +static const struct dma_map_ops ps3_sb_dma_ops = {
3138 .alloc_coherent = ps3_alloc_coherent,
3139 .free_coherent = ps3_free_coherent,
3140 .map_sg = ps3_sb_map_sg,
3141 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3142 .unmap_page = ps3_unmap_page,
3143 };
3144
3145 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3146 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3147 .alloc_coherent = ps3_alloc_coherent,
3148 .free_coherent = ps3_free_coherent,
3149 .map_sg = ps3_ioc0_map_sg,
3150 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3151 --- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3152 +++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3153 @@ -2,6 +2,8 @@ config PPC_PSERIES
3154 depends on PPC64 && PPC_BOOK3S
3155 bool "IBM pSeries & new (POWER5-based) iSeries"
3156 select MPIC
3157 + select PCI_MSI
3158 + select XICS
3159 select PPC_I8259
3160 select PPC_RTAS
3161 select RTAS_ERROR_LOGGING
3162 diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3163 --- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3164 +++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3165 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3166 that it will "exec", and that there is sufficient room for the brk. */
3167 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3168
3169 +#ifdef CONFIG_PAX_ASLR
3170 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3171 +
3172 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3173 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3174 +#endif
3175 +
3176 /* This yields a mask that user programs can use to figure out what
3177 instruction set this CPU supports. */
3178
3179 diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3180 --- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3181 +++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3182 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3183 void detect_memory_layout(struct mem_chunk chunk[]);
3184
3185 #ifdef CONFIG_S390_SWITCH_AMODE
3186 -extern unsigned int switch_amode;
3187 +#define switch_amode (1)
3188 #else
3189 #define switch_amode (0)
3190 #endif
3191
3192 #ifdef CONFIG_S390_EXEC_PROTECT
3193 -extern unsigned int s390_noexec;
3194 +#define s390_noexec (1)
3195 #else
3196 #define s390_noexec (0)
3197 #endif
3198 diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3199 --- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3200 +++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3201 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3202 copy_to_user(void __user *to, const void *from, unsigned long n)
3203 {
3204 might_fault();
3205 +
3206 + if ((long)n < 0)
3207 + return n;
3208 +
3209 if (access_ok(VERIFY_WRITE, to, n))
3210 n = __copy_to_user(to, from, n);
3211 return n;
3212 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3213 static inline unsigned long __must_check
3214 __copy_from_user(void *to, const void __user *from, unsigned long n)
3215 {
3216 + if ((long)n < 0)
3217 + return n;
3218 +
3219 if (__builtin_constant_p(n) && (n <= 256))
3220 return uaccess.copy_from_user_small(n, from, to);
3221 else
3222 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3223 copy_from_user(void *to, const void __user *from, unsigned long n)
3224 {
3225 might_fault();
3226 +
3227 + if ((long)n < 0)
3228 + return n;
3229 +
3230 if (access_ok(VERIFY_READ, from, n))
3231 n = __copy_from_user(to, from, n);
3232 else
3233 diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3234 --- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3235 +++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3236 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3237
3238 config S390_SWITCH_AMODE
3239 bool "Switch kernel/user addressing modes"
3240 + default y
3241 help
3242 This option allows to switch the addressing modes of kernel and user
3243 - space. The kernel parameter switch_amode=on will enable this feature,
3244 - default is disabled. Enabling this (via kernel parameter) on machines
3245 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3246 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3247 + will reduce system performance.
3248
3249 Note that this option will also be selected by selecting the execute
3250 - protection option below. Enabling the execute protection via the
3251 - noexec kernel parameter will also switch the addressing modes,
3252 - independent of the switch_amode kernel parameter.
3253 + protection option below. Enabling the execute protection will also
3254 + switch the addressing modes, independent of this option.
3255
3256
3257 config S390_EXEC_PROTECT
3258 bool "Data execute protection"
3259 + default y
3260 select S390_SWITCH_AMODE
3261 help
3262 This option allows to enable a buffer overflow protection for user
3263 space programs and it also selects the addressing mode option above.
3264 - The kernel parameter noexec=on will enable this feature and also
3265 - switch the addressing modes, default is disabled. Enabling this (via
3266 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3267 - will reduce system performance.
3268 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3269 + reduce system performance.
3270
3271 comment "Code generation options"
3272
3273 diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3274 --- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3275 +++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3276 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3277
3278 /* Increase core size by size of got & plt and set start
3279 offsets for got and plt. */
3280 - me->core_size = ALIGN(me->core_size, 4);
3281 - me->arch.got_offset = me->core_size;
3282 - me->core_size += me->arch.got_size;
3283 - me->arch.plt_offset = me->core_size;
3284 - me->core_size += me->arch.plt_size;
3285 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3286 + me->arch.got_offset = me->core_size_rw;
3287 + me->core_size_rw += me->arch.got_size;
3288 + me->arch.plt_offset = me->core_size_rx;
3289 + me->core_size_rx += me->arch.plt_size;
3290 return 0;
3291 }
3292
3293 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3294 if (info->got_initialized == 0) {
3295 Elf_Addr *gotent;
3296
3297 - gotent = me->module_core + me->arch.got_offset +
3298 + gotent = me->module_core_rw + me->arch.got_offset +
3299 info->got_offset;
3300 *gotent = val;
3301 info->got_initialized = 1;
3302 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3303 else if (r_type == R_390_GOTENT ||
3304 r_type == R_390_GOTPLTENT)
3305 *(unsigned int *) loc =
3306 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3307 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3308 else if (r_type == R_390_GOT64 ||
3309 r_type == R_390_GOTPLT64)
3310 *(unsigned long *) loc = val;
3311 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3312 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3313 if (info->plt_initialized == 0) {
3314 unsigned int *ip;
3315 - ip = me->module_core + me->arch.plt_offset +
3316 + ip = me->module_core_rx + me->arch.plt_offset +
3317 info->plt_offset;
3318 #ifndef CONFIG_64BIT
3319 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3320 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3321 val - loc + 0xffffUL < 0x1ffffeUL) ||
3322 (r_type == R_390_PLT32DBL &&
3323 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3324 - val = (Elf_Addr) me->module_core +
3325 + val = (Elf_Addr) me->module_core_rx +
3326 me->arch.plt_offset +
3327 info->plt_offset;
3328 val += rela->r_addend - loc;
3329 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3330 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3331 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3332 val = val + rela->r_addend -
3333 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3334 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3335 if (r_type == R_390_GOTOFF16)
3336 *(unsigned short *) loc = val;
3337 else if (r_type == R_390_GOTOFF32)
3338 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3339 break;
3340 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3341 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3342 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3343 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3344 rela->r_addend - loc;
3345 if (r_type == R_390_GOTPC)
3346 *(unsigned int *) loc = val;
3347 diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3348 --- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3349 +++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3350 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3351 early_param("mem", early_parse_mem);
3352
3353 #ifdef CONFIG_S390_SWITCH_AMODE
3354 -unsigned int switch_amode = 0;
3355 -EXPORT_SYMBOL_GPL(switch_amode);
3356 -
3357 static int set_amode_and_uaccess(unsigned long user_amode,
3358 unsigned long user32_amode)
3359 {
3360 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3361 return 0;
3362 }
3363 }
3364 -
3365 -/*
3366 - * Switch kernel/user addressing modes?
3367 - */
3368 -static int __init early_parse_switch_amode(char *p)
3369 -{
3370 - switch_amode = 1;
3371 - return 0;
3372 -}
3373 -early_param("switch_amode", early_parse_switch_amode);
3374 -
3375 #else /* CONFIG_S390_SWITCH_AMODE */
3376 static inline int set_amode_and_uaccess(unsigned long user_amode,
3377 unsigned long user32_amode)
3378 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3379 }
3380 #endif /* CONFIG_S390_SWITCH_AMODE */
3381
3382 -#ifdef CONFIG_S390_EXEC_PROTECT
3383 -unsigned int s390_noexec = 0;
3384 -EXPORT_SYMBOL_GPL(s390_noexec);
3385 -
3386 -/*
3387 - * Enable execute protection?
3388 - */
3389 -static int __init early_parse_noexec(char *p)
3390 -{
3391 - if (!strncmp(p, "off", 3))
3392 - return 0;
3393 - switch_amode = 1;
3394 - s390_noexec = 1;
3395 - return 0;
3396 -}
3397 -early_param("noexec", early_parse_noexec);
3398 -#endif /* CONFIG_S390_EXEC_PROTECT */
3399 -
3400 static void setup_addressing_mode(void)
3401 {
3402 if (s390_noexec) {
3403 diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3404 --- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3405 +++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3406 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3407 */
3408 if (mmap_is_legacy()) {
3409 mm->mmap_base = TASK_UNMAPPED_BASE;
3410 +
3411 +#ifdef CONFIG_PAX_RANDMMAP
3412 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3413 + mm->mmap_base += mm->delta_mmap;
3414 +#endif
3415 +
3416 mm->get_unmapped_area = arch_get_unmapped_area;
3417 mm->unmap_area = arch_unmap_area;
3418 } else {
3419 mm->mmap_base = mmap_base();
3420 +
3421 +#ifdef CONFIG_PAX_RANDMMAP
3422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3423 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3424 +#endif
3425 +
3426 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3427 mm->unmap_area = arch_unmap_area_topdown;
3428 }
3429 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3430 */
3431 if (mmap_is_legacy()) {
3432 mm->mmap_base = TASK_UNMAPPED_BASE;
3433 +
3434 +#ifdef CONFIG_PAX_RANDMMAP
3435 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3436 + mm->mmap_base += mm->delta_mmap;
3437 +#endif
3438 +
3439 mm->get_unmapped_area = s390_get_unmapped_area;
3440 mm->unmap_area = arch_unmap_area;
3441 } else {
3442 mm->mmap_base = mmap_base();
3443 +
3444 +#ifdef CONFIG_PAX_RANDMMAP
3445 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3446 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3447 +#endif
3448 +
3449 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3450 mm->unmap_area = arch_unmap_area_topdown;
3451 }
3452 diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3453 --- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3454 +++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3455 @@ -17,7 +17,7 @@ do { \
3456 #define finish_arch_switch(prev) do {} while (0)
3457
3458 typedef void (*vi_handler_t)(void);
3459 -extern unsigned long arch_align_stack(unsigned long sp);
3460 +#define arch_align_stack(x) (x)
3461
3462 #define mb() barrier()
3463 #define rmb() barrier()
3464 diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3465 --- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3466 +++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3467 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3468
3469 return task_pt_regs(task)->cp0_epc;
3470 }
3471 -
3472 -unsigned long arch_align_stack(unsigned long sp)
3473 -{
3474 - return sp;
3475 -}
3476 diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3477 --- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3478 +++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3479 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3480 return 0;
3481 }
3482
3483 -static struct platform_suspend_ops hp6x0_pm_ops = {
3484 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3485 .enter = hp6x0_pm_enter,
3486 .valid = suspend_valid_only_mem,
3487 };
3488 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3489 --- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3490 +++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3491 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3492 NULL,
3493 };
3494
3495 -static struct sysfs_ops sq_sysfs_ops = {
3496 +static const struct sysfs_ops sq_sysfs_ops = {
3497 .show = sq_sysfs_show,
3498 .store = sq_sysfs_store,
3499 };
3500 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3501 --- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3502 +++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3503 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3504 return 0;
3505 }
3506
3507 -static struct platform_suspend_ops sh_pm_ops = {
3508 +static const struct platform_suspend_ops sh_pm_ops = {
3509 .enter = sh_pm_enter,
3510 .valid = suspend_valid_only_mem,
3511 };
3512 diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3513 --- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3514 +++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3515 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3516 {
3517 }
3518
3519 -struct kgdb_arch arch_kgdb_ops = {
3520 +const struct kgdb_arch arch_kgdb_ops = {
3521 /* Breakpoint instruction: trapa #0x3c */
3522 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3523 .gdb_bpt_instr = { 0x3c, 0xc3 },
3524 diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3525 --- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3526 +++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3527 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3528 addr = PAGE_ALIGN(addr);
3529
3530 vma = find_vma(mm, addr);
3531 - if (TASK_SIZE - len >= addr &&
3532 - (!vma || addr + len <= vma->vm_start))
3533 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3534 return addr;
3535 }
3536
3537 @@ -106,7 +105,7 @@ full_search:
3538 }
3539 return -ENOMEM;
3540 }
3541 - if (likely(!vma || addr + len <= vma->vm_start)) {
3542 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3543 /*
3544 * Remember the place where we stopped the search:
3545 */
3546 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3547 addr = PAGE_ALIGN(addr);
3548
3549 vma = find_vma(mm, addr);
3550 - if (TASK_SIZE - len >= addr &&
3551 - (!vma || addr + len <= vma->vm_start))
3552 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3553 return addr;
3554 }
3555
3556 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3557 /* make sure it can fit in the remaining address space */
3558 if (likely(addr > len)) {
3559 vma = find_vma(mm, addr-len);
3560 - if (!vma || addr <= vma->vm_start) {
3561 + if (check_heap_stack_gap(vma, addr - len, len)) {
3562 /* remember the address as a hint for next time */
3563 return (mm->free_area_cache = addr-len);
3564 }
3565 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3566 if (unlikely(mm->mmap_base < len))
3567 goto bottomup;
3568
3569 - addr = mm->mmap_base-len;
3570 - if (do_colour_align)
3571 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3572 + addr = mm->mmap_base - len;
3573
3574 do {
3575 + if (do_colour_align)
3576 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3577 /*
3578 * Lookup failure means no vma is above this address,
3579 * else if new region fits below vma->vm_start,
3580 * return with success:
3581 */
3582 vma = find_vma(mm, addr);
3583 - if (likely(!vma || addr+len <= vma->vm_start)) {
3584 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3585 /* remember the address as a hint for next time */
3586 return (mm->free_area_cache = addr);
3587 }
3588 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3589 mm->cached_hole_size = vma->vm_start - addr;
3590
3591 /* try just below the current vma->vm_start */
3592 - addr = vma->vm_start-len;
3593 - if (do_colour_align)
3594 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3595 - } while (likely(len < vma->vm_start));
3596 + addr = skip_heap_stack_gap(vma, len);
3597 + } while (!IS_ERR_VALUE(addr));
3598
3599 bottomup:
3600 /*
3601 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3602 --- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3603 +++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-07-13 22:22:56.000000000 -0400
3604 @@ -14,18 +14,40 @@
3605 #define ATOMIC64_INIT(i) { (i) }
3606
3607 #define atomic_read(v) ((v)->counter)
3608 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3609 +{
3610 + return v->counter;
3611 +}
3612 #define atomic64_read(v) ((v)->counter)
3613 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3614 +{
3615 + return v->counter;
3616 +}
3617
3618 #define atomic_set(v, i) (((v)->counter) = i)
3619 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3620 +{
3621 + v->counter = i;
3622 +}
3623 #define atomic64_set(v, i) (((v)->counter) = i)
3624 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3625 +{
3626 + v->counter = i;
3627 +}
3628
3629 extern void atomic_add(int, atomic_t *);
3630 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3631 extern void atomic64_add(long, atomic64_t *);
3632 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3633 extern void atomic_sub(int, atomic_t *);
3634 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3635 extern void atomic64_sub(long, atomic64_t *);
3636 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3637
3638 extern int atomic_add_ret(int, atomic_t *);
3639 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3640 extern long atomic64_add_ret(long, atomic64_t *);
3641 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3642 extern int atomic_sub_ret(int, atomic_t *);
3643 extern long atomic64_sub_ret(long, atomic64_t *);
3644
3645 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3646 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3647
3648 #define atomic_inc_return(v) atomic_add_ret(1, v)
3649 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3650 +{
3651 + return atomic_add_ret_unchecked(1, v);
3652 +}
3653 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3654 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3655 +{
3656 + return atomic64_add_ret_unchecked(1, v);
3657 +}
3658
3659 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3660 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3661
3662 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3663 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3664 +{
3665 + return atomic_add_ret_unchecked(i, v);
3666 +}
3667 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3668 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3669 +{
3670 + return atomic64_add_ret_unchecked(i, v);
3671 +}
3672
3673 /*
3674 * atomic_inc_and_test - increment and test
3675 @@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
3676 * other cases.
3677 */
3678 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3679 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3680 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3681
3682 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3683 @@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
3684 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3685
3686 #define atomic_inc(v) atomic_add(1, v)
3687 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3688 +{
3689 + atomic_add_unchecked(1, v);
3690 +}
3691 #define atomic64_inc(v) atomic64_add(1, v)
3692 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3693 +{
3694 + atomic64_add_unchecked(1, v);
3695 +}
3696
3697 #define atomic_dec(v) atomic_sub(1, v)
3698 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3699 +{
3700 + atomic_sub_unchecked(1, v);
3701 +}
3702 #define atomic64_dec(v) atomic64_sub(1, v)
3703 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3704 +{
3705 + atomic64_sub_unchecked(1, v);
3706 +}
3707
3708 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3709 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3710
3711 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3712 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3713 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3714 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3715
3716 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3717 {
3718 - int c, old;
3719 + int c, old, new;
3720 c = atomic_read(v);
3721 for (;;) {
3722 - if (unlikely(c == (u)))
3723 + if (unlikely(c == u))
3724 break;
3725 - old = atomic_cmpxchg((v), c, c + (a));
3726 +
3727 + asm volatile("addcc %2, %0, %0\n"
3728 +
3729 +#ifdef CONFIG_PAX_REFCOUNT
3730 + "tvs %%icc, 6\n"
3731 +#endif
3732 +
3733 + : "=r" (new)
3734 + : "0" (c), "ir" (a)
3735 + : "cc");
3736 +
3737 + old = atomic_cmpxchg(v, c, new);
3738 if (likely(old == c))
3739 break;
3740 c = old;
3741 }
3742 - return c != (u);
3743 + return c != u;
3744 }
3745
3746 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3747 @@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3748
3749 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3750 {
3751 - long c, old;
3752 + long c, old, new;
3753 c = atomic64_read(v);
3754 for (;;) {
3755 - if (unlikely(c == (u)))
3756 + if (unlikely(c == u))
3757 break;
3758 - old = atomic64_cmpxchg((v), c, c + (a));
3759 +
3760 + asm volatile("addcc %2, %0, %0\n"
3761 +
3762 +#ifdef CONFIG_PAX_REFCOUNT
3763 + "tvs %%xcc, 6\n"
3764 +#endif
3765 +
3766 + : "=r" (new)
3767 + : "0" (c), "ir" (a)
3768 + : "cc");
3769 +
3770 + old = atomic64_cmpxchg(v, c, new);
3771 if (likely(old == c))
3772 break;
3773 c = old;
3774 }
3775 - return c != (u);
3776 + return c != u;
3777 }
3778
3779 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3780 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3781 --- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3782 +++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3783 @@ -8,7 +8,7 @@
3784 #define _SPARC_CACHE_H
3785
3786 #define L1_CACHE_SHIFT 5
3787 -#define L1_CACHE_BYTES 32
3788 +#define L1_CACHE_BYTES 32UL
3789 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3790
3791 #ifdef CONFIG_SPARC32
3792 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3793 --- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3794 +++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3795 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3796 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3797 #define dma_is_consistent(d, h) (1)
3798
3799 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3800 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3801 extern struct bus_type pci_bus_type;
3802
3803 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3804 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3805 {
3806 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3807 if (dev->bus == &pci_bus_type)
3808 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3809 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3810 dma_addr_t *dma_handle, gfp_t flag)
3811 {
3812 - struct dma_map_ops *ops = get_dma_ops(dev);
3813 + const struct dma_map_ops *ops = get_dma_ops(dev);
3814 void *cpu_addr;
3815
3816 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3817 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3818 static inline void dma_free_coherent(struct device *dev, size_t size,
3819 void *cpu_addr, dma_addr_t dma_handle)
3820 {
3821 - struct dma_map_ops *ops = get_dma_ops(dev);
3822 + const struct dma_map_ops *ops = get_dma_ops(dev);
3823
3824 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3825 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3826 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3827 --- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3828 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3829 @@ -116,6 +116,13 @@ typedef struct {
3830
3831 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3832
3833 +#ifdef CONFIG_PAX_ASLR
3834 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3835 +
3836 +#define PAX_DELTA_MMAP_LEN 16
3837 +#define PAX_DELTA_STACK_LEN 16
3838 +#endif
3839 +
3840 /* This yields a mask that user programs can use to figure out what
3841 instruction set this cpu supports. This can NOT be done in userspace
3842 on Sparc. */
3843 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3844 --- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3845 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3846 @@ -163,6 +163,12 @@ typedef struct {
3847 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3848 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3849
3850 +#ifdef CONFIG_PAX_ASLR
3851 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3852 +
3853 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3854 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3855 +#endif
3856
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this cpu supports. */
3859 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3860 --- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3861 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3862 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3863 BTFIXUPDEF_INT(page_none)
3864 BTFIXUPDEF_INT(page_copy)
3865 BTFIXUPDEF_INT(page_readonly)
3866 +
3867 +#ifdef CONFIG_PAX_PAGEEXEC
3868 +BTFIXUPDEF_INT(page_shared_noexec)
3869 +BTFIXUPDEF_INT(page_copy_noexec)
3870 +BTFIXUPDEF_INT(page_readonly_noexec)
3871 +#endif
3872 +
3873 BTFIXUPDEF_INT(page_kernel)
3874
3875 #define PMD_SHIFT SUN4C_PMD_SHIFT
3876 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3877 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3878 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3879
3880 +#ifdef CONFIG_PAX_PAGEEXEC
3881 +extern pgprot_t PAGE_SHARED_NOEXEC;
3882 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3883 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3884 +#else
3885 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3886 +# define PAGE_COPY_NOEXEC PAGE_COPY
3887 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3888 +#endif
3889 +
3890 extern unsigned long page_kernel;
3891
3892 #ifdef MODULE
3893 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
3894 --- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3895 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3896 @@ -115,6 +115,13 @@
3897 SRMMU_EXEC | SRMMU_REF)
3898 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3899 SRMMU_EXEC | SRMMU_REF)
3900 +
3901 +#ifdef CONFIG_PAX_PAGEEXEC
3902 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3903 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3904 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3905 +#endif
3906 +
3907 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3908 SRMMU_DIRTY | SRMMU_REF)
3909
3910 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
3911 --- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3912 +++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3913 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3914
3915 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3916
3917 -static void inline arch_read_lock(raw_rwlock_t *lock)
3918 +static inline void arch_read_lock(raw_rwlock_t *lock)
3919 {
3920 unsigned long tmp1, tmp2;
3921
3922 __asm__ __volatile__ (
3923 "1: ldsw [%2], %0\n"
3924 " brlz,pn %0, 2f\n"
3925 -"4: add %0, 1, %1\n"
3926 +"4: addcc %0, 1, %1\n"
3927 +
3928 +#ifdef CONFIG_PAX_REFCOUNT
3929 +" tvs %%icc, 6\n"
3930 +#endif
3931 +
3932 " cas [%2], %0, %1\n"
3933 " cmp %0, %1\n"
3934 " bne,pn %%icc, 1b\n"
3935 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3936 " .previous"
3937 : "=&r" (tmp1), "=&r" (tmp2)
3938 : "r" (lock)
3939 - : "memory");
3940 + : "memory", "cc");
3941 }
3942
3943 static int inline arch_read_trylock(raw_rwlock_t *lock)
3944 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3945 "1: ldsw [%2], %0\n"
3946 " brlz,a,pn %0, 2f\n"
3947 " mov 0, %0\n"
3948 -" add %0, 1, %1\n"
3949 +" addcc %0, 1, %1\n"
3950 +
3951 +#ifdef CONFIG_PAX_REFCOUNT
3952 +" tvs %%icc, 6\n"
3953 +#endif
3954 +
3955 " cas [%2], %0, %1\n"
3956 " cmp %0, %1\n"
3957 " bne,pn %%icc, 1b\n"
3958 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3959 return tmp1;
3960 }
3961
3962 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3963 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3964 {
3965 unsigned long tmp1, tmp2;
3966
3967 __asm__ __volatile__(
3968 "1: lduw [%2], %0\n"
3969 -" sub %0, 1, %1\n"
3970 +" subcc %0, 1, %1\n"
3971 +
3972 +#ifdef CONFIG_PAX_REFCOUNT
3973 +" tvs %%icc, 6\n"
3974 +#endif
3975 +
3976 " cas [%2], %0, %1\n"
3977 " cmp %0, %1\n"
3978 " bne,pn %%xcc, 1b\n"
3979 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3980 : "memory");
3981 }
3982
3983 -static void inline arch_write_lock(raw_rwlock_t *lock)
3984 +static inline void arch_write_lock(raw_rwlock_t *lock)
3985 {
3986 unsigned long mask, tmp1, tmp2;
3987
3988 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3989 : "memory");
3990 }
3991
3992 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3993 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3994 {
3995 __asm__ __volatile__(
3996 " stw %%g0, [%0]"
3997 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
3998 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3999 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4000 @@ -50,6 +50,8 @@ struct thread_info {
4001 unsigned long w_saved;
4002
4003 struct restart_block restart_block;
4004 +
4005 + unsigned long lowest_stack;
4006 };
4007
4008 /*
4009 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4010 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4011 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4012 @@ -68,6 +68,8 @@ struct thread_info {
4013 struct pt_regs *kern_una_regs;
4014 unsigned int kern_una_insn;
4015
4016 + unsigned long lowest_stack;
4017 +
4018 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4019 };
4020
4021 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4022 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4023 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4024 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4025
4026 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4027 {
4028 - if (n && __access_ok((unsigned long) to, n))
4029 + if ((long)n < 0)
4030 + return n;
4031 +
4032 + if (n && __access_ok((unsigned long) to, n)) {
4033 + if (!__builtin_constant_p(n))
4034 + check_object_size(from, n, true);
4035 return __copy_user(to, (__force void __user *) from, n);
4036 - else
4037 + } else
4038 return n;
4039 }
4040
4041 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4042 {
4043 + if ((long)n < 0)
4044 + return n;
4045 +
4046 + if (!__builtin_constant_p(n))
4047 + check_object_size(from, n, true);
4048 +
4049 return __copy_user(to, (__force void __user *) from, n);
4050 }
4051
4052 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4053 {
4054 - if (n && __access_ok((unsigned long) from, n))
4055 + if ((long)n < 0)
4056 + return n;
4057 +
4058 + if (n && __access_ok((unsigned long) from, n)) {
4059 + if (!__builtin_constant_p(n))
4060 + check_object_size(to, n, false);
4061 return __copy_user((__force void __user *) to, from, n);
4062 - else
4063 + } else
4064 return n;
4065 }
4066
4067 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4068 {
4069 + if ((long)n < 0)
4070 + return n;
4071 +
4072 return __copy_user((__force void __user *) to, from, n);
4073 }
4074
4075 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4076 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4077 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4078 @@ -9,6 +9,7 @@
4079 #include <linux/compiler.h>
4080 #include <linux/string.h>
4081 #include <linux/thread_info.h>
4082 +#include <linux/kernel.h>
4083 #include <asm/asi.h>
4084 #include <asm/system.h>
4085 #include <asm/spitfire.h>
4086 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4087 static inline unsigned long __must_check
4088 copy_from_user(void *to, const void __user *from, unsigned long size)
4089 {
4090 - unsigned long ret = ___copy_from_user(to, from, size);
4091 + unsigned long ret;
4092
4093 + if ((long)size < 0 || size > INT_MAX)
4094 + return size;
4095 +
4096 + if (!__builtin_constant_p(size))
4097 + check_object_size(to, size, false);
4098 +
4099 + ret = ___copy_from_user(to, from, size);
4100 if (unlikely(ret))
4101 ret = copy_from_user_fixup(to, from, size);
4102 return ret;
4103 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4104 static inline unsigned long __must_check
4105 copy_to_user(void __user *to, const void *from, unsigned long size)
4106 {
4107 - unsigned long ret = ___copy_to_user(to, from, size);
4108 + unsigned long ret;
4109 +
4110 + if ((long)size < 0 || size > INT_MAX)
4111 + return size;
4112 +
4113 + if (!__builtin_constant_p(size))
4114 + check_object_size(from, size, true);
4115
4116 + ret = ___copy_to_user(to, from, size);
4117 if (unlikely(ret))
4118 ret = copy_to_user_fixup(to, from, size);
4119 return ret;
4120 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4121 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4122 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4123 @@ -1,5 +1,13 @@
4124 #ifndef ___ASM_SPARC_UACCESS_H
4125 #define ___ASM_SPARC_UACCESS_H
4126 +
4127 +#ifdef __KERNEL__
4128 +#ifndef __ASSEMBLY__
4129 +#include <linux/types.h>
4130 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4131 +#endif
4132 +#endif
4133 +
4134 #if defined(__sparc__) && defined(__arch64__)
4135 #include <asm/uaccess_64.h>
4136 #else
4137 diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4138 --- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4139 +++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4140 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4141 spin_unlock_irqrestore(&iommu->lock, flags);
4142 }
4143
4144 -static struct dma_map_ops sun4u_dma_ops = {
4145 +static const struct dma_map_ops sun4u_dma_ops = {
4146 .alloc_coherent = dma_4u_alloc_coherent,
4147 .free_coherent = dma_4u_free_coherent,
4148 .map_page = dma_4u_map_page,
4149 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4150 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4151 };
4152
4153 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4154 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4155 EXPORT_SYMBOL(dma_ops);
4156
4157 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4158 diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4159 --- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4160 +++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4161 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4162 BUG();
4163 }
4164
4165 -struct dma_map_ops sbus_dma_ops = {
4166 +const struct dma_map_ops sbus_dma_ops = {
4167 .alloc_coherent = sbus_alloc_coherent,
4168 .free_coherent = sbus_free_coherent,
4169 .map_page = sbus_map_page,
4170 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4171 .sync_sg_for_device = sbus_sync_sg_for_device,
4172 };
4173
4174 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4175 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4176 EXPORT_SYMBOL(dma_ops);
4177
4178 static int __init sparc_register_ioport(void)
4179 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4180 }
4181 }
4182
4183 -struct dma_map_ops pci32_dma_ops = {
4184 +const struct dma_map_ops pci32_dma_ops = {
4185 .alloc_coherent = pci32_alloc_coherent,
4186 .free_coherent = pci32_free_coherent,
4187 .map_page = pci32_map_page,
4188 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4189 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4190 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4191 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4192 {
4193 }
4194
4195 -struct kgdb_arch arch_kgdb_ops = {
4196 +const struct kgdb_arch arch_kgdb_ops = {
4197 /* Breakpoint instruction: ta 0x7d */
4198 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4199 };
4200 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4201 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4202 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4203 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4204 {
4205 }
4206
4207 -struct kgdb_arch arch_kgdb_ops = {
4208 +const struct kgdb_arch arch_kgdb_ops = {
4209 /* Breakpoint instruction: ta 0x72 */
4210 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4211 };
4212 diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4213 --- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4214 +++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4215 @@ -3,7 +3,7 @@
4216 #
4217
4218 asflags-y := -ansi
4219 -ccflags-y := -Werror
4220 +#ccflags-y := -Werror
4221
4222 extra-y := head_$(BITS).o
4223 extra-y += init_task.o
4224 diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4225 --- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4226 +++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4227 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4228 spin_unlock_irqrestore(&iommu->lock, flags);
4229 }
4230
4231 -static struct dma_map_ops sun4v_dma_ops = {
4232 +static const struct dma_map_ops sun4v_dma_ops = {
4233 .alloc_coherent = dma_4v_alloc_coherent,
4234 .free_coherent = dma_4v_free_coherent,
4235 .map_page = dma_4v_map_page,
4236 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4237 --- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4238 +++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4239 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4240 rw->ins[4], rw->ins[5],
4241 rw->ins[6],
4242 rw->ins[7]);
4243 - printk("%pS\n", (void *) rw->ins[7]);
4244 + printk("%pA\n", (void *) rw->ins[7]);
4245 rw = (struct reg_window32 *) rw->ins[6];
4246 }
4247 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4248 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4249
4250 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4251 r->psr, r->pc, r->npc, r->y, print_tainted());
4252 - printk("PC: <%pS>\n", (void *) r->pc);
4253 + printk("PC: <%pA>\n", (void *) r->pc);
4254 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4255 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4256 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4257 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4258 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4259 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4260 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4261 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4262
4263 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4264 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4265 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4266 rw = (struct reg_window32 *) fp;
4267 pc = rw->ins[7];
4268 printk("[%08lx : ", pc);
4269 - printk("%pS ] ", (void *) pc);
4270 + printk("%pA ] ", (void *) pc);
4271 fp = rw->ins[6];
4272 } while (++count < 16);
4273 printk("\n");
4274 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4275 --- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4276 +++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4277 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4278 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4279 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4280 if (regs->tstate & TSTATE_PRIV)
4281 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4282 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4283 }
4284
4285 void show_regs(struct pt_regs *regs)
4286 {
4287 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4288 regs->tpc, regs->tnpc, regs->y, print_tainted());
4289 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4290 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4291 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4292 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4293 regs->u_regs[3]);
4294 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4295 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4296 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4297 regs->u_regs[15]);
4298 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4299 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4300 show_regwindow(regs);
4301 }
4302
4303 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4304 ((tp && tp->task) ? tp->task->pid : -1));
4305
4306 if (gp->tstate & TSTATE_PRIV) {
4307 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4308 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4309 (void *) gp->tpc,
4310 (void *) gp->o7,
4311 (void *) gp->i7,
4312 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4313 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4314 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4315 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4316 if (ARCH_SUN4C && len > 0x20000000)
4317 return -ENOMEM;
4318 if (!addr)
4319 - addr = TASK_UNMAPPED_BASE;
4320 + addr = current->mm->mmap_base;
4321
4322 if (flags & MAP_SHARED)
4323 addr = COLOUR_ALIGN(addr);
4324 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4325 }
4326 if (TASK_SIZE - PAGE_SIZE - len < addr)
4327 return -ENOMEM;
4328 - if (!vmm || addr + len <= vmm->vm_start)
4329 + if (check_heap_stack_gap(vmm, addr, len))
4330 return addr;
4331 addr = vmm->vm_end;
4332 if (flags & MAP_SHARED)
4333 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4334 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4335 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4336 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4337 /* We do not accept a shared mapping if it would violate
4338 * cache aliasing constraints.
4339 */
4340 - if ((flags & MAP_SHARED) &&
4341 + if ((filp || (flags & MAP_SHARED)) &&
4342 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4343 return -EINVAL;
4344 return addr;
4345 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4346 if (filp || (flags & MAP_SHARED))
4347 do_color_align = 1;
4348
4349 +#ifdef CONFIG_PAX_RANDMMAP
4350 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4351 +#endif
4352 +
4353 if (addr) {
4354 if (do_color_align)
4355 addr = COLOUR_ALIGN(addr, pgoff);
4356 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4357 addr = PAGE_ALIGN(addr);
4358
4359 vma = find_vma(mm, addr);
4360 - if (task_size - len >= addr &&
4361 - (!vma || addr + len <= vma->vm_start))
4362 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4363 return addr;
4364 }
4365
4366 if (len > mm->cached_hole_size) {
4367 - start_addr = addr = mm->free_area_cache;
4368 + start_addr = addr = mm->free_area_cache;
4369 } else {
4370 - start_addr = addr = TASK_UNMAPPED_BASE;
4371 + start_addr = addr = mm->mmap_base;
4372 mm->cached_hole_size = 0;
4373 }
4374
4375 @@ -175,14 +178,14 @@ full_search:
4376 vma = find_vma(mm, VA_EXCLUDE_END);
4377 }
4378 if (unlikely(task_size < addr)) {
4379 - if (start_addr != TASK_UNMAPPED_BASE) {
4380 - start_addr = addr = TASK_UNMAPPED_BASE;
4381 + if (start_addr != mm->mmap_base) {
4382 + start_addr = addr = mm->mmap_base;
4383 mm->cached_hole_size = 0;
4384 goto full_search;
4385 }
4386 return -ENOMEM;
4387 }
4388 - if (likely(!vma || addr + len <= vma->vm_start)) {
4389 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4390 /*
4391 * Remember the place where we stopped the search:
4392 */
4393 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4394 /* We do not accept a shared mapping if it would violate
4395 * cache aliasing constraints.
4396 */
4397 - if ((flags & MAP_SHARED) &&
4398 + if ((filp || (flags & MAP_SHARED)) &&
4399 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4400 return -EINVAL;
4401 return addr;
4402 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4403 addr = PAGE_ALIGN(addr);
4404
4405 vma = find_vma(mm, addr);
4406 - if (task_size - len >= addr &&
4407 - (!vma || addr + len <= vma->vm_start))
4408 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4409 return addr;
4410 }
4411
4412 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4413 /* make sure it can fit in the remaining address space */
4414 if (likely(addr > len)) {
4415 vma = find_vma(mm, addr-len);
4416 - if (!vma || addr <= vma->vm_start) {
4417 + if (check_heap_stack_gap(vma, addr - len, len)) {
4418 /* remember the address as a hint for next time */
4419 return (mm->free_area_cache = addr-len);
4420 }
4421 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4422 if (unlikely(mm->mmap_base < len))
4423 goto bottomup;
4424
4425 - addr = mm->mmap_base-len;
4426 - if (do_color_align)
4427 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4428 + addr = mm->mmap_base - len;
4429
4430 do {
4431 + if (do_color_align)
4432 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 /*
4434 * Lookup failure means no vma is above this address,
4435 * else if new region fits below vma->vm_start,
4436 * return with success:
4437 */
4438 vma = find_vma(mm, addr);
4439 - if (likely(!vma || addr+len <= vma->vm_start)) {
4440 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4441 /* remember the address as a hint for next time */
4442 return (mm->free_area_cache = addr);
4443 }
4444 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4445 mm->cached_hole_size = vma->vm_start - addr;
4446
4447 /* try just below the current vma->vm_start */
4448 - addr = vma->vm_start-len;
4449 - if (do_color_align)
4450 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4451 - } while (likely(len < vma->vm_start));
4452 + addr = skip_heap_stack_gap(vma, len);
4453 + } while (!IS_ERR_VALUE(addr));
4454
4455 bottomup:
4456 /*
4457 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4458 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4459 sysctl_legacy_va_layout) {
4460 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4461 +
4462 +#ifdef CONFIG_PAX_RANDMMAP
4463 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4464 + mm->mmap_base += mm->delta_mmap;
4465 +#endif
4466 +
4467 mm->get_unmapped_area = arch_get_unmapped_area;
4468 mm->unmap_area = arch_unmap_area;
4469 } else {
4470 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4471 gap = (task_size / 6 * 5);
4472
4473 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4474 +
4475 +#ifdef CONFIG_PAX_RANDMMAP
4476 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4477 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4478 +#endif
4479 +
4480 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4481 mm->unmap_area = arch_unmap_area_topdown;
4482 }
4483 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4484 --- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4485 +++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4486 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4487 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4488 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4489
4490 +extern void gr_handle_kernel_exploit(void);
4491 +
4492 void die_if_kernel(char *str, struct pt_regs *regs)
4493 {
4494 static int die_counter;
4495 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4496 count++ < 30 &&
4497 (((unsigned long) rw) >= PAGE_OFFSET) &&
4498 !(((unsigned long) rw) & 0x7)) {
4499 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4500 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4501 (void *) rw->ins[7]);
4502 rw = (struct reg_window32 *)rw->ins[6];
4503 }
4504 }
4505 printk("Instruction DUMP:");
4506 instruction_dump ((unsigned long *) regs->pc);
4507 - if(regs->psr & PSR_PS)
4508 + if(regs->psr & PSR_PS) {
4509 + gr_handle_kernel_exploit();
4510 do_exit(SIGKILL);
4511 + }
4512 do_exit(SIGSEGV);
4513 }
4514
4515 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4516 --- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4517 +++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4518 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4519 i + 1,
4520 p->trapstack[i].tstate, p->trapstack[i].tpc,
4521 p->trapstack[i].tnpc, p->trapstack[i].tt);
4522 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4523 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4524 }
4525 }
4526
4527 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4528
4529 lvl -= 0x100;
4530 if (regs->tstate & TSTATE_PRIV) {
4531 +
4532 +#ifdef CONFIG_PAX_REFCOUNT
4533 + if (lvl == 6)
4534 + pax_report_refcount_overflow(regs);
4535 +#endif
4536 +
4537 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4538 die_if_kernel(buffer, regs);
4539 }
4540 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4541 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4542 {
4543 char buffer[32];
4544 -
4545 +
4546 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4547 0, lvl, SIGTRAP) == NOTIFY_STOP)
4548 return;
4549
4550 +#ifdef CONFIG_PAX_REFCOUNT
4551 + if (lvl == 6)
4552 + pax_report_refcount_overflow(regs);
4553 +#endif
4554 +
4555 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4556
4557 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4558 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4559 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4560 printk("%s" "ERROR(%d): ",
4561 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4562 - printk("TPC<%pS>\n", (void *) regs->tpc);
4563 + printk("TPC<%pA>\n", (void *) regs->tpc);
4564 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4565 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4566 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4567 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4568 smp_processor_id(),
4569 (type & 0x1) ? 'I' : 'D',
4570 regs->tpc);
4571 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4572 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4573 panic("Irrecoverable Cheetah+ parity error.");
4574 }
4575
4576 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4577 smp_processor_id(),
4578 (type & 0x1) ? 'I' : 'D',
4579 regs->tpc);
4580 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4581 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4582 }
4583
4584 struct sun4v_error_entry {
4585 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4586
4587 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4588 regs->tpc, tl);
4589 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4590 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4591 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4592 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4593 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4594 (void *) regs->u_regs[UREG_I7]);
4595 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4596 "pte[%lx] error[%lx]\n",
4597 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4598
4599 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4600 regs->tpc, tl);
4601 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4602 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4603 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4604 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4605 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4606 (void *) regs->u_regs[UREG_I7]);
4607 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4608 "pte[%lx] error[%lx]\n",
4609 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4610 fp = (unsigned long)sf->fp + STACK_BIAS;
4611 }
4612
4613 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4614 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4615 } while (++count < 16);
4616 }
4617
4618 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4619 return (struct reg_window *) (fp + STACK_BIAS);
4620 }
4621
4622 +extern void gr_handle_kernel_exploit(void);
4623 +
4624 void die_if_kernel(char *str, struct pt_regs *regs)
4625 {
4626 static int die_counter;
4627 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4628 while (rw &&
4629 count++ < 30&&
4630 is_kernel_stack(current, rw)) {
4631 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4632 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4633 (void *) rw->ins[7]);
4634
4635 rw = kernel_stack_up(rw);
4636 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4637 }
4638 user_instruction_dump ((unsigned int __user *) regs->tpc);
4639 }
4640 - if (regs->tstate & TSTATE_PRIV)
4641 + if (regs->tstate & TSTATE_PRIV) {
4642 + gr_handle_kernel_exploit();
4643 do_exit(SIGKILL);
4644 + }
4645 +
4646 do_exit(SIGSEGV);
4647 }
4648 EXPORT_SYMBOL(die_if_kernel);
4649 diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4650 --- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4651 +++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4652 @@ -127,7 +127,7 @@ do_int_load:
4653 wr %o5, 0x0, %asi
4654 retl
4655 mov 0, %o0
4656 - .size __do_int_load, .-__do_int_load
4657 + .size do_int_load, .-do_int_load
4658
4659 .section __ex_table,"a"
4660 .word 4b, __retl_efault
4661 diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4662 --- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4663 +++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4664 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4665 if (count < 5) {
4666 last_time = jiffies;
4667 count++;
4668 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4669 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4670 regs->tpc, (void *) regs->tpc);
4671 }
4672 }
4673 diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4674 --- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4675 +++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4676 @@ -18,7 +18,12 @@
4677 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4678 BACKOFF_SETUP(%o2)
4679 1: lduw [%o1], %g1
4680 - add %g1, %o0, %g7
4681 + addcc %g1, %o0, %g7
4682 +
4683 +#ifdef CONFIG_PAX_REFCOUNT
4684 + tvs %icc, 6
4685 +#endif
4686 +
4687 cas [%o1], %g1, %g7
4688 cmp %g1, %g7
4689 bne,pn %icc, 2f
4690 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4691 2: BACKOFF_SPIN(%o2, %o3, 1b)
4692 .size atomic_add, .-atomic_add
4693
4694 + .globl atomic_add_unchecked
4695 + .type atomic_add_unchecked,#function
4696 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4697 + BACKOFF_SETUP(%o2)
4698 +1: lduw [%o1], %g1
4699 + add %g1, %o0, %g7
4700 + cas [%o1], %g1, %g7
4701 + cmp %g1, %g7
4702 + bne,pn %icc, 2f
4703 + nop
4704 + retl
4705 + nop
4706 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4707 + .size atomic_add_unchecked, .-atomic_add_unchecked
4708 +
4709 .globl atomic_sub
4710 .type atomic_sub,#function
4711 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4712 BACKOFF_SETUP(%o2)
4713 1: lduw [%o1], %g1
4714 - sub %g1, %o0, %g7
4715 + subcc %g1, %o0, %g7
4716 +
4717 +#ifdef CONFIG_PAX_REFCOUNT
4718 + tvs %icc, 6
4719 +#endif
4720 +
4721 cas [%o1], %g1, %g7
4722 cmp %g1, %g7
4723 bne,pn %icc, 2f
4724 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4725 2: BACKOFF_SPIN(%o2, %o3, 1b)
4726 .size atomic_sub, .-atomic_sub
4727
4728 + .globl atomic_sub_unchecked
4729 + .type atomic_sub_unchecked,#function
4730 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4731 + BACKOFF_SETUP(%o2)
4732 +1: lduw [%o1], %g1
4733 + sub %g1, %o0, %g7
4734 + cas [%o1], %g1, %g7
4735 + cmp %g1, %g7
4736 + bne,pn %icc, 2f
4737 + nop
4738 + retl
4739 + nop
4740 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4741 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4742 +
4743 .globl atomic_add_ret
4744 .type atomic_add_ret,#function
4745 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4746 BACKOFF_SETUP(%o2)
4747 1: lduw [%o1], %g1
4748 - add %g1, %o0, %g7
4749 + addcc %g1, %o0, %g7
4750 +
4751 +#ifdef CONFIG_PAX_REFCOUNT
4752 + tvs %icc, 6
4753 +#endif
4754 +
4755 cas [%o1], %g1, %g7
4756 cmp %g1, %g7
4757 bne,pn %icc, 2f
4758 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4759 2: BACKOFF_SPIN(%o2, %o3, 1b)
4760 .size atomic_add_ret, .-atomic_add_ret
4761
4762 + .globl atomic_add_ret_unchecked
4763 + .type atomic_add_ret_unchecked,#function
4764 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4765 + BACKOFF_SETUP(%o2)
4766 +1: lduw [%o1], %g1
4767 + addcc %g1, %o0, %g7
4768 + cas [%o1], %g1, %g7
4769 + cmp %g1, %g7
4770 + bne,pn %icc, 2f
4771 + add %g7, %o0, %g7
4772 + sra %g7, 0, %o0
4773 + retl
4774 + nop
4775 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4776 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4777 +
4778 .globl atomic_sub_ret
4779 .type atomic_sub_ret,#function
4780 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4781 BACKOFF_SETUP(%o2)
4782 1: lduw [%o1], %g1
4783 - sub %g1, %o0, %g7
4784 + subcc %g1, %o0, %g7
4785 +
4786 +#ifdef CONFIG_PAX_REFCOUNT
4787 + tvs %icc, 6
4788 +#endif
4789 +
4790 cas [%o1], %g1, %g7
4791 cmp %g1, %g7
4792 bne,pn %icc, 2f
4793 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4794 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4795 BACKOFF_SETUP(%o2)
4796 1: ldx [%o1], %g1
4797 - add %g1, %o0, %g7
4798 + addcc %g1, %o0, %g7
4799 +
4800 +#ifdef CONFIG_PAX_REFCOUNT
4801 + tvs %xcc, 6
4802 +#endif
4803 +
4804 casx [%o1], %g1, %g7
4805 cmp %g1, %g7
4806 bne,pn %xcc, 2f
4807 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4808 2: BACKOFF_SPIN(%o2, %o3, 1b)
4809 .size atomic64_add, .-atomic64_add
4810
4811 + .globl atomic64_add_unchecked
4812 + .type atomic64_add_unchecked,#function
4813 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4814 + BACKOFF_SETUP(%o2)
4815 +1: ldx [%o1], %g1
4816 + addcc %g1, %o0, %g7
4817 + casx [%o1], %g1, %g7
4818 + cmp %g1, %g7
4819 + bne,pn %xcc, 2f
4820 + nop
4821 + retl
4822 + nop
4823 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4824 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4825 +
4826 .globl atomic64_sub
4827 .type atomic64_sub,#function
4828 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4829 BACKOFF_SETUP(%o2)
4830 1: ldx [%o1], %g1
4831 - sub %g1, %o0, %g7
4832 + subcc %g1, %o0, %g7
4833 +
4834 +#ifdef CONFIG_PAX_REFCOUNT
4835 + tvs %xcc, 6
4836 +#endif
4837 +
4838 casx [%o1], %g1, %g7
4839 cmp %g1, %g7
4840 bne,pn %xcc, 2f
4841 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4842 2: BACKOFF_SPIN(%o2, %o3, 1b)
4843 .size atomic64_sub, .-atomic64_sub
4844
4845 + .globl atomic64_sub_unchecked
4846 + .type atomic64_sub_unchecked,#function
4847 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4848 + BACKOFF_SETUP(%o2)
4849 +1: ldx [%o1], %g1
4850 + subcc %g1, %o0, %g7
4851 + casx [%o1], %g1, %g7
4852 + cmp %g1, %g7
4853 + bne,pn %xcc, 2f
4854 + nop
4855 + retl
4856 + nop
4857 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4858 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4859 +
4860 .globl atomic64_add_ret
4861 .type atomic64_add_ret,#function
4862 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4863 BACKOFF_SETUP(%o2)
4864 1: ldx [%o1], %g1
4865 - add %g1, %o0, %g7
4866 + addcc %g1, %o0, %g7
4867 +
4868 +#ifdef CONFIG_PAX_REFCOUNT
4869 + tvs %xcc, 6
4870 +#endif
4871 +
4872 casx [%o1], %g1, %g7
4873 cmp %g1, %g7
4874 bne,pn %xcc, 2f
4875 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4876 2: BACKOFF_SPIN(%o2, %o3, 1b)
4877 .size atomic64_add_ret, .-atomic64_add_ret
4878
4879 + .globl atomic64_add_ret_unchecked
4880 + .type atomic64_add_ret_unchecked,#function
4881 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4882 + BACKOFF_SETUP(%o2)
4883 +1: ldx [%o1], %g1
4884 + addcc %g1, %o0, %g7
4885 + casx [%o1], %g1, %g7
4886 + cmp %g1, %g7
4887 + bne,pn %xcc, 2f
4888 + add %g7, %o0, %g7
4889 + mov %g7, %o0
4890 + retl
4891 + nop
4892 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4893 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4894 +
4895 .globl atomic64_sub_ret
4896 .type atomic64_sub_ret,#function
4897 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4898 BACKOFF_SETUP(%o2)
4899 1: ldx [%o1], %g1
4900 - sub %g1, %o0, %g7
4901 + subcc %g1, %o0, %g7
4902 +
4903 +#ifdef CONFIG_PAX_REFCOUNT
4904 + tvs %xcc, 6
4905 +#endif
4906 +
4907 casx [%o1], %g1, %g7
4908 cmp %g1, %g7
4909 bne,pn %xcc, 2f
4910 diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
4911 --- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4912 +++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4913 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4914
4915 /* Atomic counter implementation. */
4916 EXPORT_SYMBOL(atomic_add);
4917 +EXPORT_SYMBOL(atomic_add_unchecked);
4918 EXPORT_SYMBOL(atomic_add_ret);
4919 EXPORT_SYMBOL(atomic_sub);
4920 +EXPORT_SYMBOL(atomic_sub_unchecked);
4921 EXPORT_SYMBOL(atomic_sub_ret);
4922 EXPORT_SYMBOL(atomic64_add);
4923 +EXPORT_SYMBOL(atomic64_add_unchecked);
4924 EXPORT_SYMBOL(atomic64_add_ret);
4925 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4926 EXPORT_SYMBOL(atomic64_sub);
4927 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4928 EXPORT_SYMBOL(atomic64_sub_ret);
4929
4930 /* Atomic bit operations. */
4931 diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
4932 --- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4933 +++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4934 @@ -2,7 +2,7 @@
4935 #
4936
4937 asflags-y := -ansi -DST_DIV0=0x02
4938 -ccflags-y := -Werror
4939 +#ccflags-y := -Werror
4940
4941 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4942 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4943 diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
4944 --- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4945 +++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4946 @@ -11,7 +11,12 @@
4947 .globl __down_read
4948 __down_read:
4949 1: lduw [%o0], %g1
4950 - add %g1, 1, %g7
4951 + addcc %g1, 1, %g7
4952 +
4953 +#ifdef CONFIG_PAX_REFCOUNT
4954 + tvs %icc, 6
4955 +#endif
4956 +
4957 cas [%o0], %g1, %g7
4958 cmp %g1, %g7
4959 bne,pn %icc, 1b
4960 @@ -33,7 +38,12 @@ __down_read:
4961 .globl __down_read_trylock
4962 __down_read_trylock:
4963 1: lduw [%o0], %g1
4964 - add %g1, 1, %g7
4965 + addcc %g1, 1, %g7
4966 +
4967 +#ifdef CONFIG_PAX_REFCOUNT
4968 + tvs %icc, 6
4969 +#endif
4970 +
4971 cmp %g7, 0
4972 bl,pn %icc, 2f
4973 mov 0, %o1
4974 @@ -51,7 +61,12 @@ __down_write:
4975 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4976 1:
4977 lduw [%o0], %g3
4978 - add %g3, %g1, %g7
4979 + addcc %g3, %g1, %g7
4980 +
4981 +#ifdef CONFIG_PAX_REFCOUNT
4982 + tvs %icc, 6
4983 +#endif
4984 +
4985 cas [%o0], %g3, %g7
4986 cmp %g3, %g7
4987 bne,pn %icc, 1b
4988 @@ -77,7 +92,12 @@ __down_write_trylock:
4989 cmp %g3, 0
4990 bne,pn %icc, 2f
4991 mov 0, %o1
4992 - add %g3, %g1, %g7
4993 + addcc %g3, %g1, %g7
4994 +
4995 +#ifdef CONFIG_PAX_REFCOUNT
4996 + tvs %icc, 6
4997 +#endif
4998 +
4999 cas [%o0], %g3, %g7
5000 cmp %g3, %g7
5001 bne,pn %icc, 1b
5002 @@ -90,7 +110,12 @@ __down_write_trylock:
5003 __up_read:
5004 1:
5005 lduw [%o0], %g1
5006 - sub %g1, 1, %g7
5007 + subcc %g1, 1, %g7
5008 +
5009 +#ifdef CONFIG_PAX_REFCOUNT
5010 + tvs %icc, 6
5011 +#endif
5012 +
5013 cas [%o0], %g1, %g7
5014 cmp %g1, %g7
5015 bne,pn %icc, 1b
5016 @@ -118,7 +143,12 @@ __up_write:
5017 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5018 1:
5019 lduw [%o0], %g3
5020 - sub %g3, %g1, %g7
5021 + subcc %g3, %g1, %g7
5022 +
5023 +#ifdef CONFIG_PAX_REFCOUNT
5024 + tvs %icc, 6
5025 +#endif
5026 +
5027 cas [%o0], %g3, %g7
5028 cmp %g3, %g7
5029 bne,pn %icc, 1b
5030 @@ -143,7 +173,12 @@ __downgrade_write:
5031 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5032 1:
5033 lduw [%o0], %g3
5034 - sub %g3, %g1, %g7
5035 + subcc %g3, %g1, %g7
5036 +
5037 +#ifdef CONFIG_PAX_REFCOUNT
5038 + tvs %icc, 6
5039 +#endif
5040 +
5041 cas [%o0], %g3, %g7
5042 cmp %g3, %g7
5043 bne,pn %icc, 1b
5044 diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5045 --- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5046 +++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5047 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5048 # Export what is needed by arch/sparc/boot/Makefile
5049 export VMLINUX_INIT VMLINUX_MAIN
5050 VMLINUX_INIT := $(head-y) $(init-y)
5051 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5052 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5053 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5054 VMLINUX_MAIN += $(drivers-y) $(net-y)
5055
5056 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5057 --- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5058 +++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5059 @@ -21,6 +21,9 @@
5060 #include <linux/interrupt.h>
5061 #include <linux/module.h>
5062 #include <linux/kdebug.h>
5063 +#include <linux/slab.h>
5064 +#include <linux/pagemap.h>
5065 +#include <linux/compiler.h>
5066
5067 #include <asm/system.h>
5068 #include <asm/page.h>
5069 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5070 return safe_compute_effective_address(regs, insn);
5071 }
5072
5073 +#ifdef CONFIG_PAX_PAGEEXEC
5074 +#ifdef CONFIG_PAX_DLRESOLVE
5075 +static void pax_emuplt_close(struct vm_area_struct *vma)
5076 +{
5077 + vma->vm_mm->call_dl_resolve = 0UL;
5078 +}
5079 +
5080 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5081 +{
5082 + unsigned int *kaddr;
5083 +
5084 + vmf->page = alloc_page(GFP_HIGHUSER);
5085 + if (!vmf->page)
5086 + return VM_FAULT_OOM;
5087 +
5088 + kaddr = kmap(vmf->page);
5089 + memset(kaddr, 0, PAGE_SIZE);
5090 + kaddr[0] = 0x9DE3BFA8U; /* save */
5091 + flush_dcache_page(vmf->page);
5092 + kunmap(vmf->page);
5093 + return VM_FAULT_MAJOR;
5094 +}
5095 +
5096 +static const struct vm_operations_struct pax_vm_ops = {
5097 + .close = pax_emuplt_close,
5098 + .fault = pax_emuplt_fault
5099 +};
5100 +
5101 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5102 +{
5103 + int ret;
5104 +
5105 + vma->vm_mm = current->mm;
5106 + vma->vm_start = addr;
5107 + vma->vm_end = addr + PAGE_SIZE;
5108 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5109 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5110 + vma->vm_ops = &pax_vm_ops;
5111 +
5112 + ret = insert_vm_struct(current->mm, vma);
5113 + if (ret)
5114 + return ret;
5115 +
5116 + ++current->mm->total_vm;
5117 + return 0;
5118 +}
5119 +#endif
5120 +
5121 +/*
5122 + * PaX: decide what to do with offenders (regs->pc = fault address)
5123 + *
5124 + * returns 1 when task should be killed
5125 + * 2 when patched PLT trampoline was detected
5126 + * 3 when unpatched PLT trampoline was detected
5127 + */
5128 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5129 +{
5130 +
5131 +#ifdef CONFIG_PAX_EMUPLT
5132 + int err;
5133 +
5134 + do { /* PaX: patched PLT emulation #1 */
5135 + unsigned int sethi1, sethi2, jmpl;
5136 +
5137 + err = get_user(sethi1, (unsigned int *)regs->pc);
5138 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5139 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5140 +
5141 + if (err)
5142 + break;
5143 +
5144 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5145 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5146 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5147 + {
5148 + unsigned int addr;
5149 +
5150 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5151 + addr = regs->u_regs[UREG_G1];
5152 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5153 + regs->pc = addr;
5154 + regs->npc = addr+4;
5155 + return 2;
5156 + }
5157 + } while (0);
5158 +
5159 + { /* PaX: patched PLT emulation #2 */
5160 + unsigned int ba;
5161 +
5162 + err = get_user(ba, (unsigned int *)regs->pc);
5163 +
5164 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5165 + unsigned int addr;
5166 +
5167 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5168 + regs->pc = addr;
5169 + regs->npc = addr+4;
5170 + return 2;
5171 + }
5172 + }
5173 +
5174 + do { /* PaX: patched PLT emulation #3 */
5175 + unsigned int sethi, jmpl, nop;
5176 +
5177 + err = get_user(sethi, (unsigned int *)regs->pc);
5178 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5179 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5180 +
5181 + if (err)
5182 + break;
5183 +
5184 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5185 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5186 + nop == 0x01000000U)
5187 + {
5188 + unsigned int addr;
5189 +
5190 + addr = (sethi & 0x003FFFFFU) << 10;
5191 + regs->u_regs[UREG_G1] = addr;
5192 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5193 + regs->pc = addr;
5194 + regs->npc = addr+4;
5195 + return 2;
5196 + }
5197 + } while (0);
5198 +
5199 + do { /* PaX: unpatched PLT emulation step 1 */
5200 + unsigned int sethi, ba, nop;
5201 +
5202 + err = get_user(sethi, (unsigned int *)regs->pc);
5203 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5204 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5205 +
5206 + if (err)
5207 + break;
5208 +
5209 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5210 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5211 + nop == 0x01000000U)
5212 + {
5213 + unsigned int addr, save, call;
5214 +
5215 + if ((ba & 0xFFC00000U) == 0x30800000U)
5216 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5217 + else
5218 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5219 +
5220 + err = get_user(save, (unsigned int *)addr);
5221 + err |= get_user(call, (unsigned int *)(addr+4));
5222 + err |= get_user(nop, (unsigned int *)(addr+8));
5223 + if (err)
5224 + break;
5225 +
5226 +#ifdef CONFIG_PAX_DLRESOLVE
5227 + if (save == 0x9DE3BFA8U &&
5228 + (call & 0xC0000000U) == 0x40000000U &&
5229 + nop == 0x01000000U)
5230 + {
5231 + struct vm_area_struct *vma;
5232 + unsigned long call_dl_resolve;
5233 +
5234 + down_read(&current->mm->mmap_sem);
5235 + call_dl_resolve = current->mm->call_dl_resolve;
5236 + up_read(&current->mm->mmap_sem);
5237 + if (likely(call_dl_resolve))
5238 + goto emulate;
5239 +
5240 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5241 +
5242 + down_write(&current->mm->mmap_sem);
5243 + if (current->mm->call_dl_resolve) {
5244 + call_dl_resolve = current->mm->call_dl_resolve;
5245 + up_write(&current->mm->mmap_sem);
5246 + if (vma)
5247 + kmem_cache_free(vm_area_cachep, vma);
5248 + goto emulate;
5249 + }
5250 +
5251 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5252 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5253 + up_write(&current->mm->mmap_sem);
5254 + if (vma)
5255 + kmem_cache_free(vm_area_cachep, vma);
5256 + return 1;
5257 + }
5258 +
5259 + if (pax_insert_vma(vma, call_dl_resolve)) {
5260 + up_write(&current->mm->mmap_sem);
5261 + kmem_cache_free(vm_area_cachep, vma);
5262 + return 1;
5263 + }
5264 +
5265 + current->mm->call_dl_resolve = call_dl_resolve;
5266 + up_write(&current->mm->mmap_sem);
5267 +
5268 +emulate:
5269 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5270 + regs->pc = call_dl_resolve;
5271 + regs->npc = addr+4;
5272 + return 3;
5273 + }
5274 +#endif
5275 +
5276 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5277 + if ((save & 0xFFC00000U) == 0x05000000U &&
5278 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5279 + nop == 0x01000000U)
5280 + {
5281 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5282 + regs->u_regs[UREG_G2] = addr + 4;
5283 + addr = (save & 0x003FFFFFU) << 10;
5284 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5285 + regs->pc = addr;
5286 + regs->npc = addr+4;
5287 + return 3;
5288 + }
5289 + }
5290 + } while (0);
5291 +
5292 + do { /* PaX: unpatched PLT emulation step 2 */
5293 + unsigned int save, call, nop;
5294 +
5295 + err = get_user(save, (unsigned int *)(regs->pc-4));
5296 + err |= get_user(call, (unsigned int *)regs->pc);
5297 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5298 + if (err)
5299 + break;
5300 +
5301 + if (save == 0x9DE3BFA8U &&
5302 + (call & 0xC0000000U) == 0x40000000U &&
5303 + nop == 0x01000000U)
5304 + {
5305 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5306 +
5307 + regs->u_regs[UREG_RETPC] = regs->pc;
5308 + regs->pc = dl_resolve;
5309 + regs->npc = dl_resolve+4;
5310 + return 3;
5311 + }
5312 + } while (0);
5313 +#endif
5314 +
5315 + return 1;
5316 +}
5317 +
5318 +void pax_report_insns(void *pc, void *sp)
5319 +{
5320 + unsigned long i;
5321 +
5322 + printk(KERN_ERR "PAX: bytes at PC: ");
5323 + for (i = 0; i < 8; i++) {
5324 + unsigned int c;
5325 + if (get_user(c, (unsigned int *)pc+i))
5326 + printk(KERN_CONT "???????? ");
5327 + else
5328 + printk(KERN_CONT "%08x ", c);
5329 + }
5330 + printk("\n");
5331 +}
5332 +#endif
5333 +
5334 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5335 unsigned long address)
5336 {
5337 @@ -231,6 +495,24 @@ good_area:
5338 if(!(vma->vm_flags & VM_WRITE))
5339 goto bad_area;
5340 } else {
5341 +
5342 +#ifdef CONFIG_PAX_PAGEEXEC
5343 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5344 + up_read(&mm->mmap_sem);
5345 + switch (pax_handle_fetch_fault(regs)) {
5346 +
5347 +#ifdef CONFIG_PAX_EMUPLT
5348 + case 2:
5349 + case 3:
5350 + return;
5351 +#endif
5352 +
5353 + }
5354 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5355 + do_group_exit(SIGKILL);
5356 + }
5357 +#endif
5358 +
5359 /* Allow reads even for write-only mappings */
5360 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5361 goto bad_area;
5362 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5363 --- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5364 +++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5365 @@ -20,6 +20,9 @@
5366 #include <linux/kprobes.h>
5367 #include <linux/kdebug.h>
5368 #include <linux/percpu.h>
5369 +#include <linux/slab.h>
5370 +#include <linux/pagemap.h>
5371 +#include <linux/compiler.h>
5372
5373 #include <asm/page.h>
5374 #include <asm/pgtable.h>
5375 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5376 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5377 regs->tpc);
5378 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5379 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5380 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5381 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5382 dump_stack();
5383 unhandled_fault(regs->tpc, current, regs);
5384 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5385 show_regs(regs);
5386 }
5387
5388 +#ifdef CONFIG_PAX_PAGEEXEC
5389 +#ifdef CONFIG_PAX_DLRESOLVE
5390 +static void pax_emuplt_close(struct vm_area_struct *vma)
5391 +{
5392 + vma->vm_mm->call_dl_resolve = 0UL;
5393 +}
5394 +
5395 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5396 +{
5397 + unsigned int *kaddr;
5398 +
5399 + vmf->page = alloc_page(GFP_HIGHUSER);
5400 + if (!vmf->page)
5401 + return VM_FAULT_OOM;
5402 +
5403 + kaddr = kmap(vmf->page);
5404 + memset(kaddr, 0, PAGE_SIZE);
5405 + kaddr[0] = 0x9DE3BFA8U; /* save */
5406 + flush_dcache_page(vmf->page);
5407 + kunmap(vmf->page);
5408 + return VM_FAULT_MAJOR;
5409 +}
5410 +
5411 +static const struct vm_operations_struct pax_vm_ops = {
5412 + .close = pax_emuplt_close,
5413 + .fault = pax_emuplt_fault
5414 +};
5415 +
5416 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5417 +{
5418 + int ret;
5419 +
5420 + vma->vm_mm = current->mm;
5421 + vma->vm_start = addr;
5422 + vma->vm_end = addr + PAGE_SIZE;
5423 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5424 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5425 + vma->vm_ops = &pax_vm_ops;
5426 +
5427 + ret = insert_vm_struct(current->mm, vma);
5428 + if (ret)
5429 + return ret;
5430 +
5431 + ++current->mm->total_vm;
5432 + return 0;
5433 +}
5434 +#endif
5435 +
5436 +/*
5437 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5438 + *
5439 + * returns 1 when task should be killed
5440 + * 2 when patched PLT trampoline was detected
5441 + * 3 when unpatched PLT trampoline was detected
5442 + */
5443 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5444 +{
5445 +
5446 +#ifdef CONFIG_PAX_EMUPLT
5447 + int err;
5448 +
5449 + do { /* PaX: patched PLT emulation #1 */
5450 + unsigned int sethi1, sethi2, jmpl;
5451 +
5452 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5453 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5454 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5455 +
5456 + if (err)
5457 + break;
5458 +
5459 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5460 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5461 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5462 + {
5463 + unsigned long addr;
5464 +
5465 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5466 + addr = regs->u_regs[UREG_G1];
5467 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5468 +
5469 + if (test_thread_flag(TIF_32BIT))
5470 + addr &= 0xFFFFFFFFUL;
5471 +
5472 + regs->tpc = addr;
5473 + regs->tnpc = addr+4;
5474 + return 2;
5475 + }
5476 + } while (0);
5477 +
5478 + { /* PaX: patched PLT emulation #2 */
5479 + unsigned int ba;
5480 +
5481 + err = get_user(ba, (unsigned int *)regs->tpc);
5482 +
5483 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5484 + unsigned long addr;
5485 +
5486 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5487 +
5488 + if (test_thread_flag(TIF_32BIT))
5489 + addr &= 0xFFFFFFFFUL;
5490 +
5491 + regs->tpc = addr;
5492 + regs->tnpc = addr+4;
5493 + return 2;
5494 + }
5495 + }
5496 +
5497 + do { /* PaX: patched PLT emulation #3 */
5498 + unsigned int sethi, jmpl, nop;
5499 +
5500 + err = get_user(sethi, (unsigned int *)regs->tpc);
5501 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5502 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5503 +
5504 + if (err)
5505 + break;
5506 +
5507 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5508 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5509 + nop == 0x01000000U)
5510 + {
5511 + unsigned long addr;
5512 +
5513 + addr = (sethi & 0x003FFFFFU) << 10;
5514 + regs->u_regs[UREG_G1] = addr;
5515 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5516 +
5517 + if (test_thread_flag(TIF_32BIT))
5518 + addr &= 0xFFFFFFFFUL;
5519 +
5520 + regs->tpc = addr;
5521 + regs->tnpc = addr+4;
5522 + return 2;
5523 + }
5524 + } while (0);
5525 +
5526 + do { /* PaX: patched PLT emulation #4 */
5527 + unsigned int sethi, mov1, call, mov2;
5528 +
5529 + err = get_user(sethi, (unsigned int *)regs->tpc);
5530 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5531 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5532 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5533 +
5534 + if (err)
5535 + break;
5536 +
5537 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5538 + mov1 == 0x8210000FU &&
5539 + (call & 0xC0000000U) == 0x40000000U &&
5540 + mov2 == 0x9E100001U)
5541 + {
5542 + unsigned long addr;
5543 +
5544 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5545 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5546 +
5547 + if (test_thread_flag(TIF_32BIT))
5548 + addr &= 0xFFFFFFFFUL;
5549 +
5550 + regs->tpc = addr;
5551 + regs->tnpc = addr+4;
5552 + return 2;
5553 + }
5554 + } while (0);
5555 +
5556 + do { /* PaX: patched PLT emulation #5 */
5557 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5558 +
5559 + err = get_user(sethi, (unsigned int *)regs->tpc);
5560 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5561 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5562 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5563 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5564 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5565 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5566 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5567 +
5568 + if (err)
5569 + break;
5570 +
5571 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5572 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5573 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5574 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5575 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5576 + sllx == 0x83287020U &&
5577 + jmpl == 0x81C04005U &&
5578 + nop == 0x01000000U)
5579 + {
5580 + unsigned long addr;
5581 +
5582 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5583 + regs->u_regs[UREG_G1] <<= 32;
5584 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5585 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5586 + regs->tpc = addr;
5587 + regs->tnpc = addr+4;
5588 + return 2;
5589 + }
5590 + } while (0);
5591 +
5592 + do { /* PaX: patched PLT emulation #6 */
5593 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5594 +
5595 + err = get_user(sethi, (unsigned int *)regs->tpc);
5596 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5597 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5598 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5599 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5600 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5601 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5602 +
5603 + if (err)
5604 + break;
5605 +
5606 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5607 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5608 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5609 + sllx == 0x83287020U &&
5610 + (or & 0xFFFFE000U) == 0x8A116000U &&
5611 + jmpl == 0x81C04005U &&
5612 + nop == 0x01000000U)
5613 + {
5614 + unsigned long addr;
5615 +
5616 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5617 + regs->u_regs[UREG_G1] <<= 32;
5618 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5619 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5620 + regs->tpc = addr;
5621 + regs->tnpc = addr+4;
5622 + return 2;
5623 + }
5624 + } while (0);
5625 +
5626 + do { /* PaX: unpatched PLT emulation step 1 */
5627 + unsigned int sethi, ba, nop;
5628 +
5629 + err = get_user(sethi, (unsigned int *)regs->tpc);
5630 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5631 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5632 +
5633 + if (err)
5634 + break;
5635 +
5636 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5637 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5638 + nop == 0x01000000U)
5639 + {
5640 + unsigned long addr;
5641 + unsigned int save, call;
5642 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5643 +
5644 + if ((ba & 0xFFC00000U) == 0x30800000U)
5645 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5646 + else
5647 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5648 +
5649 + if (test_thread_flag(TIF_32BIT))
5650 + addr &= 0xFFFFFFFFUL;
5651 +
5652 + err = get_user(save, (unsigned int *)addr);
5653 + err |= get_user(call, (unsigned int *)(addr+4));
5654 + err |= get_user(nop, (unsigned int *)(addr+8));
5655 + if (err)
5656 + break;
5657 +
5658 +#ifdef CONFIG_PAX_DLRESOLVE
5659 + if (save == 0x9DE3BFA8U &&
5660 + (call & 0xC0000000U) == 0x40000000U &&
5661 + nop == 0x01000000U)
5662 + {
5663 + struct vm_area_struct *vma;
5664 + unsigned long call_dl_resolve;
5665 +
5666 + down_read(&current->mm->mmap_sem);
5667 + call_dl_resolve = current->mm->call_dl_resolve;
5668 + up_read(&current->mm->mmap_sem);
5669 + if (likely(call_dl_resolve))
5670 + goto emulate;
5671 +
5672 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5673 +
5674 + down_write(&current->mm->mmap_sem);
5675 + if (current->mm->call_dl_resolve) {
5676 + call_dl_resolve = current->mm->call_dl_resolve;
5677 + up_write(&current->mm->mmap_sem);
5678 + if (vma)
5679 + kmem_cache_free(vm_area_cachep, vma);
5680 + goto emulate;
5681 + }
5682 +
5683 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5684 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5685 + up_write(&current->mm->mmap_sem);
5686 + if (vma)
5687 + kmem_cache_free(vm_area_cachep, vma);
5688 + return 1;
5689 + }
5690 +
5691 + if (pax_insert_vma(vma, call_dl_resolve)) {
5692 + up_write(&current->mm->mmap_sem);
5693 + kmem_cache_free(vm_area_cachep, vma);
5694 + return 1;
5695 + }
5696 +
5697 + current->mm->call_dl_resolve = call_dl_resolve;
5698 + up_write(&current->mm->mmap_sem);
5699 +
5700 +emulate:
5701 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5702 + regs->tpc = call_dl_resolve;
5703 + regs->tnpc = addr+4;
5704 + return 3;
5705 + }
5706 +#endif
5707 +
5708 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5709 + if ((save & 0xFFC00000U) == 0x05000000U &&
5710 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5711 + nop == 0x01000000U)
5712 + {
5713 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5714 + regs->u_regs[UREG_G2] = addr + 4;
5715 + addr = (save & 0x003FFFFFU) << 10;
5716 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5717 +
5718 + if (test_thread_flag(TIF_32BIT))
5719 + addr &= 0xFFFFFFFFUL;
5720 +
5721 + regs->tpc = addr;
5722 + regs->tnpc = addr+4;
5723 + return 3;
5724 + }
5725 +
5726 + /* PaX: 64-bit PLT stub */
5727 + err = get_user(sethi1, (unsigned int *)addr);
5728 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5729 + err |= get_user(or1, (unsigned int *)(addr+8));
5730 + err |= get_user(or2, (unsigned int *)(addr+12));
5731 + err |= get_user(sllx, (unsigned int *)(addr+16));
5732 + err |= get_user(add, (unsigned int *)(addr+20));
5733 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5734 + err |= get_user(nop, (unsigned int *)(addr+28));
5735 + if (err)
5736 + break;
5737 +
5738 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5739 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5740 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5741 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5742 + sllx == 0x89293020U &&
5743 + add == 0x8A010005U &&
5744 + jmpl == 0x89C14000U &&
5745 + nop == 0x01000000U)
5746 + {
5747 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5748 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5749 + regs->u_regs[UREG_G4] <<= 32;
5750 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5751 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5752 + regs->u_regs[UREG_G4] = addr + 24;
5753 + addr = regs->u_regs[UREG_G5];
5754 + regs->tpc = addr;
5755 + regs->tnpc = addr+4;
5756 + return 3;
5757 + }
5758 + }
5759 + } while (0);
5760 +
5761 +#ifdef CONFIG_PAX_DLRESOLVE
5762 + do { /* PaX: unpatched PLT emulation step 2 */
5763 + unsigned int save, call, nop;
5764 +
5765 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5766 + err |= get_user(call, (unsigned int *)regs->tpc);
5767 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5768 + if (err)
5769 + break;
5770 +
5771 + if (save == 0x9DE3BFA8U &&
5772 + (call & 0xC0000000U) == 0x40000000U &&
5773 + nop == 0x01000000U)
5774 + {
5775 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5776 +
5777 + if (test_thread_flag(TIF_32BIT))
5778 + dl_resolve &= 0xFFFFFFFFUL;
5779 +
5780 + regs->u_regs[UREG_RETPC] = regs->tpc;
5781 + regs->tpc = dl_resolve;
5782 + regs->tnpc = dl_resolve+4;
5783 + return 3;
5784 + }
5785 + } while (0);
5786 +#endif
5787 +
5788 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5789 + unsigned int sethi, ba, nop;
5790 +
5791 + err = get_user(sethi, (unsigned int *)regs->tpc);
5792 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5793 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5794 +
5795 + if (err)
5796 + break;
5797 +
5798 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5799 + (ba & 0xFFF00000U) == 0x30600000U &&
5800 + nop == 0x01000000U)
5801 + {
5802 + unsigned long addr;
5803 +
5804 + addr = (sethi & 0x003FFFFFU) << 10;
5805 + regs->u_regs[UREG_G1] = addr;
5806 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5807 +
5808 + if (test_thread_flag(TIF_32BIT))
5809 + addr &= 0xFFFFFFFFUL;
5810 +
5811 + regs->tpc = addr;
5812 + regs->tnpc = addr+4;
5813 + return 2;
5814 + }
5815 + } while (0);
5816 +
5817 +#endif
5818 +
5819 + return 1;
5820 +}
5821 +
5822 +void pax_report_insns(void *pc, void *sp)
5823 +{
5824 + unsigned long i;
5825 +
5826 + printk(KERN_ERR "PAX: bytes at PC: ");
5827 + for (i = 0; i < 8; i++) {
5828 + unsigned int c;
5829 + if (get_user(c, (unsigned int *)pc+i))
5830 + printk(KERN_CONT "???????? ");
5831 + else
5832 + printk(KERN_CONT "%08x ", c);
5833 + }
5834 + printk("\n");
5835 +}
5836 +#endif
5837 +
5838 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5839 {
5840 struct mm_struct *mm = current->mm;
5841 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5842 if (!vma)
5843 goto bad_area;
5844
5845 +#ifdef CONFIG_PAX_PAGEEXEC
5846 + /* PaX: detect ITLB misses on non-exec pages */
5847 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5848 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5849 + {
5850 + if (address != regs->tpc)
5851 + goto good_area;
5852 +
5853 + up_read(&mm->mmap_sem);
5854 + switch (pax_handle_fetch_fault(regs)) {
5855 +
5856 +#ifdef CONFIG_PAX_EMUPLT
5857 + case 2:
5858 + case 3:
5859 + return;
5860 +#endif
5861 +
5862 + }
5863 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5864 + do_group_exit(SIGKILL);
5865 + }
5866 +#endif
5867 +
5868 /* Pure DTLB misses do not tell us whether the fault causing
5869 * load/store/atomic was a write or not, it only says that there
5870 * was no match. So in such a case we (carefully) read the
5871 diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
5872 --- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5873 +++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5874 @@ -69,7 +69,7 @@ full_search:
5875 }
5876 return -ENOMEM;
5877 }
5878 - if (likely(!vma || addr + len <= vma->vm_start)) {
5879 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5880 /*
5881 * Remember the place where we stopped the search:
5882 */
5883 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5884 /* make sure it can fit in the remaining address space */
5885 if (likely(addr > len)) {
5886 vma = find_vma(mm, addr-len);
5887 - if (!vma || addr <= vma->vm_start) {
5888 + if (check_heap_stack_gap(vma, addr - len, len)) {
5889 /* remember the address as a hint for next time */
5890 return (mm->free_area_cache = addr-len);
5891 }
5892 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5893 if (unlikely(mm->mmap_base < len))
5894 goto bottomup;
5895
5896 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5897 + addr = mm->mmap_base - len;
5898
5899 do {
5900 + addr &= HPAGE_MASK;
5901 /*
5902 * Lookup failure means no vma is above this address,
5903 * else if new region fits below vma->vm_start,
5904 * return with success:
5905 */
5906 vma = find_vma(mm, addr);
5907 - if (likely(!vma || addr+len <= vma->vm_start)) {
5908 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5909 /* remember the address as a hint for next time */
5910 return (mm->free_area_cache = addr);
5911 }
5912 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5913 mm->cached_hole_size = vma->vm_start - addr;
5914
5915 /* try just below the current vma->vm_start */
5916 - addr = (vma->vm_start-len) & HPAGE_MASK;
5917 - } while (likely(len < vma->vm_start));
5918 + addr = skip_heap_stack_gap(vma, len);
5919 + } while (!IS_ERR_VALUE(addr));
5920
5921 bottomup:
5922 /*
5923 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5924 if (addr) {
5925 addr = ALIGN(addr, HPAGE_SIZE);
5926 vma = find_vma(mm, addr);
5927 - if (task_size - len >= addr &&
5928 - (!vma || addr + len <= vma->vm_start))
5929 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5930 return addr;
5931 }
5932 if (mm->get_unmapped_area == arch_get_unmapped_area)
5933 diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
5934 --- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5935 +++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5936 @@ -317,6 +317,9 @@ extern void device_scan(void);
5937 pgprot_t PAGE_SHARED __read_mostly;
5938 EXPORT_SYMBOL(PAGE_SHARED);
5939
5940 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5941 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5942 +
5943 void __init paging_init(void)
5944 {
5945 switch(sparc_cpu_model) {
5946 @@ -345,17 +348,17 @@ void __init paging_init(void)
5947
5948 /* Initialize the protection map with non-constant, MMU dependent values. */
5949 protection_map[0] = PAGE_NONE;
5950 - protection_map[1] = PAGE_READONLY;
5951 - protection_map[2] = PAGE_COPY;
5952 - protection_map[3] = PAGE_COPY;
5953 + protection_map[1] = PAGE_READONLY_NOEXEC;
5954 + protection_map[2] = PAGE_COPY_NOEXEC;
5955 + protection_map[3] = PAGE_COPY_NOEXEC;
5956 protection_map[4] = PAGE_READONLY;
5957 protection_map[5] = PAGE_READONLY;
5958 protection_map[6] = PAGE_COPY;
5959 protection_map[7] = PAGE_COPY;
5960 protection_map[8] = PAGE_NONE;
5961 - protection_map[9] = PAGE_READONLY;
5962 - protection_map[10] = PAGE_SHARED;
5963 - protection_map[11] = PAGE_SHARED;
5964 + protection_map[9] = PAGE_READONLY_NOEXEC;
5965 + protection_map[10] = PAGE_SHARED_NOEXEC;
5966 + protection_map[11] = PAGE_SHARED_NOEXEC;
5967 protection_map[12] = PAGE_READONLY;
5968 protection_map[13] = PAGE_READONLY;
5969 protection_map[14] = PAGE_SHARED;
5970 diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
5971 --- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5972 +++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5973 @@ -2,7 +2,7 @@
5974 #
5975
5976 asflags-y := -ansi
5977 -ccflags-y := -Werror
5978 +#ccflags-y := -Werror
5979
5980 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5981 obj-y += fault_$(BITS).o
5982 diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
5983 --- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5984 +++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5985 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5986 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5987 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5988 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5989 +
5990 +#ifdef CONFIG_PAX_PAGEEXEC
5991 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5992 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5993 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5994 +#endif
5995 +
5996 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5997 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5998
5999 diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6000 --- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6001 +++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6002 @@ -23,6 +23,7 @@ enum km_type {
6003 KM_IRQ1,
6004 KM_SOFTIRQ0,
6005 KM_SOFTIRQ1,
6006 + KM_CLEARPAGE,
6007 KM_TYPE_NR
6008 };
6009
6010 diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6011 --- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6012 +++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6013 @@ -14,6 +14,9 @@
6014 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6015 #define PAGE_MASK (~(PAGE_SIZE-1))
6016
6017 +#define ktla_ktva(addr) (addr)
6018 +#define ktva_ktla(addr) (addr)
6019 +
6020 #ifndef __ASSEMBLY__
6021
6022 struct page;
6023 diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6024 --- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6025 +++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6026 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6027 return 2;
6028 }
6029
6030 -/*
6031 - * Only x86 and x86_64 have an arch_align_stack().
6032 - * All other arches have "#define arch_align_stack(x) (x)"
6033 - * in their asm/system.h
6034 - * As this is included in UML from asm-um/system-generic.h,
6035 - * we can use it to behave as the subarch does.
6036 - */
6037 -#ifndef arch_align_stack
6038 -unsigned long arch_align_stack(unsigned long sp)
6039 -{
6040 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6041 - sp -= get_random_int() % 8192;
6042 - return sp & ~0xf;
6043 -}
6044 -#endif
6045 -
6046 unsigned long get_wchan(struct task_struct *p)
6047 {
6048 unsigned long stack_page, sp, ip;
6049 diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6050 --- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6051 +++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6052 @@ -11,6 +11,21 @@
6053 #include "asm/uaccess.h"
6054 #include "asm/unistd.h"
6055
6056 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6057 +{
6058 + unsigned long pax_task_size = TASK_SIZE;
6059 +
6060 +#ifdef CONFIG_PAX_SEGMEXEC
6061 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6062 + pax_task_size = SEGMEXEC_TASK_SIZE;
6063 +#endif
6064 +
6065 + if (len > pax_task_size || addr > pax_task_size - len)
6066 + return -EINVAL;
6067 +
6068 + return 0;
6069 +}
6070 +
6071 /*
6072 * Perform the select(nd, in, out, ex, tv) and mmap() system
6073 * calls. Linux/i386 didn't use to be able to handle more than
6074 diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6075 --- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6076 +++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6077 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6078 u8 v;
6079 const u32 *p = (const u32 *)addr;
6080
6081 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6082 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6083 return v;
6084 }
6085
6086 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6087
6088 static inline void set_bit(int nr, void *addr)
6089 {
6090 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6091 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6092 }
6093
6094 #endif /* BOOT_BITOPS_H */
6095 diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6096 --- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6097 +++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6098 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6099 static inline u16 ds(void)
6100 {
6101 u16 seg;
6102 - asm("movw %%ds,%0" : "=rm" (seg));
6103 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6104 return seg;
6105 }
6106
6107 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6108 static inline int memcmp(const void *s1, const void *s2, size_t len)
6109 {
6110 u8 diff;
6111 - asm("repe; cmpsb; setnz %0"
6112 + asm volatile("repe; cmpsb; setnz %0"
6113 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6114 return diff;
6115 }
6116 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6117 --- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6118 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6119 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6120 notl %eax
6121 andl %eax, %ebx
6122 #else
6123 - movl $LOAD_PHYSICAL_ADDR, %ebx
6124 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6125 #endif
6126
6127 /* Target address to relocate to for decompression */
6128 @@ -149,7 +149,7 @@ relocated:
6129 * and where it was actually loaded.
6130 */
6131 movl %ebp, %ebx
6132 - subl $LOAD_PHYSICAL_ADDR, %ebx
6133 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6134 jz 2f /* Nothing to be done if loaded at compiled addr. */
6135 /*
6136 * Process relocations.
6137 @@ -157,8 +157,7 @@ relocated:
6138
6139 1: subl $4, %edi
6140 movl (%edi), %ecx
6141 - testl %ecx, %ecx
6142 - jz 2f
6143 + jecxz 2f
6144 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6145 jmp 1b
6146 2:
6147 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6148 --- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6149 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6150 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6151 notl %eax
6152 andl %eax, %ebx
6153 #else
6154 - movl $LOAD_PHYSICAL_ADDR, %ebx
6155 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6156 #endif
6157
6158 /* Target address to relocate to for decompression */
6159 @@ -183,7 +183,7 @@ no_longmode:
6160 hlt
6161 jmp 1b
6162
6163 -#include "../../kernel/verify_cpu_64.S"
6164 +#include "../../kernel/verify_cpu.S"
6165
6166 /*
6167 * Be careful here startup_64 needs to be at a predictable
6168 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6169 notq %rax
6170 andq %rax, %rbp
6171 #else
6172 - movq $LOAD_PHYSICAL_ADDR, %rbp
6173 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6174 #endif
6175
6176 /* Target address to relocate to for decompression */
6177 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6178 --- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6179 +++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6180 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6181 KBUILD_CFLAGS += $(cflags-y)
6182 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6183 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6184 +ifdef CONSTIFY_PLUGIN
6185 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6186 +endif
6187
6188 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6189 GCOV_PROFILE := n
6190 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6191 --- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6192 +++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6193 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6194 case PT_LOAD:
6195 #ifdef CONFIG_RELOCATABLE
6196 dest = output;
6197 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6198 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6199 #else
6200 dest = (void *)(phdr->p_paddr);
6201 #endif
6202 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6203 error("Destination address too large");
6204 #endif
6205 #ifndef CONFIG_RELOCATABLE
6206 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6207 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6208 error("Wrong destination address");
6209 #endif
6210
6211 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6212 --- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6213 +++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6214 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6215
6216 offs = (olen > ilen) ? olen - ilen : 0;
6217 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6218 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6219 + offs += 64*1024; /* Add 64K bytes slack */
6220 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6221
6222 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6223 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6224 --- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6225 +++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6226 @@ -10,8 +10,11 @@
6227 #define USE_BSD
6228 #include <endian.h>
6229
6230 +#include "../../../../include/linux/autoconf.h"
6231 +
6232 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6233 static Elf32_Ehdr ehdr;
6234 +static Elf32_Phdr *phdr;
6235 static unsigned long reloc_count, reloc_idx;
6236 static unsigned long *relocs;
6237
6238 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6239
6240 static int is_safe_abs_reloc(const char* sym_name)
6241 {
6242 - int i;
6243 + unsigned int i;
6244
6245 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6246 if (!strcmp(sym_name, safe_abs_relocs[i]))
6247 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6248 }
6249 }
6250
6251 +static void read_phdrs(FILE *fp)
6252 +{
6253 + unsigned int i;
6254 +
6255 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6256 + if (!phdr) {
6257 + die("Unable to allocate %d program headers\n",
6258 + ehdr.e_phnum);
6259 + }
6260 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6261 + die("Seek to %d failed: %s\n",
6262 + ehdr.e_phoff, strerror(errno));
6263 + }
6264 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6265 + die("Cannot read ELF program headers: %s\n",
6266 + strerror(errno));
6267 + }
6268 + for(i = 0; i < ehdr.e_phnum; i++) {
6269 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6270 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6271 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6272 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6273 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6274 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6275 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6276 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6277 + }
6278 +
6279 +}
6280 +
6281 static void read_shdrs(FILE *fp)
6282 {
6283 - int i;
6284 + unsigned int i;
6285 Elf32_Shdr shdr;
6286
6287 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6288 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6289
6290 static void read_strtabs(FILE *fp)
6291 {
6292 - int i;
6293 + unsigned int i;
6294 for (i = 0; i < ehdr.e_shnum; i++) {
6295 struct section *sec = &secs[i];
6296 if (sec->shdr.sh_type != SHT_STRTAB) {
6297 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6298
6299 static void read_symtabs(FILE *fp)
6300 {
6301 - int i,j;
6302 + unsigned int i,j;
6303 for (i = 0; i < ehdr.e_shnum; i++) {
6304 struct section *sec = &secs[i];
6305 if (sec->shdr.sh_type != SHT_SYMTAB) {
6306 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6307
6308 static void read_relocs(FILE *fp)
6309 {
6310 - int i,j;
6311 + unsigned int i,j;
6312 + uint32_t base;
6313 +
6314 for (i = 0; i < ehdr.e_shnum; i++) {
6315 struct section *sec = &secs[i];
6316 if (sec->shdr.sh_type != SHT_REL) {
6317 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6318 die("Cannot read symbol table: %s\n",
6319 strerror(errno));
6320 }
6321 + base = 0;
6322 + for (j = 0; j < ehdr.e_phnum; j++) {
6323 + if (phdr[j].p_type != PT_LOAD )
6324 + continue;
6325 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6326 + continue;
6327 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6328 + break;
6329 + }
6330 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6331 Elf32_Rel *rel = &sec->reltab[j];
6332 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6333 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6334 rel->r_info = elf32_to_cpu(rel->r_info);
6335 }
6336 }
6337 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6338
6339 static void print_absolute_symbols(void)
6340 {
6341 - int i;
6342 + unsigned int i;
6343 printf("Absolute symbols\n");
6344 printf(" Num: Value Size Type Bind Visibility Name\n");
6345 for (i = 0; i < ehdr.e_shnum; i++) {
6346 struct section *sec = &secs[i];
6347 char *sym_strtab;
6348 Elf32_Sym *sh_symtab;
6349 - int j;
6350 + unsigned int j;
6351
6352 if (sec->shdr.sh_type != SHT_SYMTAB) {
6353 continue;
6354 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6355
6356 static void print_absolute_relocs(void)
6357 {
6358 - int i, printed = 0;
6359 + unsigned int i, printed = 0;
6360
6361 for (i = 0; i < ehdr.e_shnum; i++) {
6362 struct section *sec = &secs[i];
6363 struct section *sec_applies, *sec_symtab;
6364 char *sym_strtab;
6365 Elf32_Sym *sh_symtab;
6366 - int j;
6367 + unsigned int j;
6368 if (sec->shdr.sh_type != SHT_REL) {
6369 continue;
6370 }
6371 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6372
6373 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6374 {
6375 - int i;
6376 + unsigned int i;
6377 /* Walk through the relocations */
6378 for (i = 0; i < ehdr.e_shnum; i++) {
6379 char *sym_strtab;
6380 Elf32_Sym *sh_symtab;
6381 struct section *sec_applies, *sec_symtab;
6382 - int j;
6383 + unsigned int j;
6384 struct section *sec = &secs[i];
6385
6386 if (sec->shdr.sh_type != SHT_REL) {
6387 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6388 if (sym->st_shndx == SHN_ABS) {
6389 continue;
6390 }
6391 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6392 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6393 + continue;
6394 +
6395 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6396 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6397 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6398 + continue;
6399 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6400 + continue;
6401 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6402 + continue;
6403 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6404 + continue;
6405 +#endif
6406 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6407 /*
6408 * NONE can be ignored and and PC relative
6409 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6410
6411 static void emit_relocs(int as_text)
6412 {
6413 - int i;
6414 + unsigned int i;
6415 /* Count how many relocations I have and allocate space for them. */
6416 reloc_count = 0;
6417 walk_relocs(count_reloc);
6418 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6419 fname, strerror(errno));
6420 }
6421 read_ehdr(fp);
6422 + read_phdrs(fp);
6423 read_shdrs(fp);
6424 read_strtabs(fp);
6425 read_symtabs(fp);
6426 diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6427 --- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6428 +++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6429 @@ -74,7 +74,7 @@ static int has_fpu(void)
6430 u16 fcw = -1, fsw = -1;
6431 u32 cr0;
6432
6433 - asm("movl %%cr0,%0" : "=r" (cr0));
6434 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6435 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6436 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6437 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6438 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6439 {
6440 u32 f0, f1;
6441
6442 - asm("pushfl ; "
6443 + asm volatile("pushfl ; "
6444 "pushfl ; "
6445 "popl %0 ; "
6446 "movl %0,%1 ; "
6447 @@ -115,7 +115,7 @@ static void get_flags(void)
6448 set_bit(X86_FEATURE_FPU, cpu.flags);
6449
6450 if (has_eflag(X86_EFLAGS_ID)) {
6451 - asm("cpuid"
6452 + asm volatile("cpuid"
6453 : "=a" (max_intel_level),
6454 "=b" (cpu_vendor[0]),
6455 "=d" (cpu_vendor[1]),
6456 @@ -124,7 +124,7 @@ static void get_flags(void)
6457
6458 if (max_intel_level >= 0x00000001 &&
6459 max_intel_level <= 0x0000ffff) {
6460 - asm("cpuid"
6461 + asm volatile("cpuid"
6462 : "=a" (tfms),
6463 "=c" (cpu.flags[4]),
6464 "=d" (cpu.flags[0])
6465 @@ -136,7 +136,7 @@ static void get_flags(void)
6466 cpu.model += ((tfms >> 16) & 0xf) << 4;
6467 }
6468
6469 - asm("cpuid"
6470 + asm volatile("cpuid"
6471 : "=a" (max_amd_level)
6472 : "a" (0x80000000)
6473 : "ebx", "ecx", "edx");
6474 @@ -144,7 +144,7 @@ static void get_flags(void)
6475 if (max_amd_level >= 0x80000001 &&
6476 max_amd_level <= 0x8000ffff) {
6477 u32 eax = 0x80000001;
6478 - asm("cpuid"
6479 + asm volatile("cpuid"
6480 : "+a" (eax),
6481 "=c" (cpu.flags[6]),
6482 "=d" (cpu.flags[1])
6483 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6484 u32 ecx = MSR_K7_HWCR;
6485 u32 eax, edx;
6486
6487 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6488 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6489 eax &= ~(1 << 15);
6490 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6491 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6492
6493 get_flags(); /* Make sure it really did something */
6494 err = check_flags();
6495 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6496 u32 ecx = MSR_VIA_FCR;
6497 u32 eax, edx;
6498
6499 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6500 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6501 eax |= (1<<1)|(1<<7);
6502 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6503 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6504
6505 set_bit(X86_FEATURE_CX8, cpu.flags);
6506 err = check_flags();
6507 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6508 u32 eax, edx;
6509 u32 level = 1;
6510
6511 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6512 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6513 - asm("cpuid"
6514 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6515 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6516 + asm volatile("cpuid"
6517 : "+a" (level), "=d" (cpu.flags[0])
6518 : : "ecx", "ebx");
6519 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6520 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6521
6522 err = check_flags();
6523 }
6524 diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6525 --- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6526 +++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6527 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6528 # single linked list of
6529 # struct setup_data
6530
6531 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6532 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6533
6534 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6535 #define VO_INIT_SIZE (VO__end - VO__text)
6536 diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6537 --- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6538 +++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6539 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6540 $(call cc-option, -fno-stack-protector) \
6541 $(call cc-option, -mpreferred-stack-boundary=2)
6542 KBUILD_CFLAGS += $(call cc-option, -m32)
6543 +ifdef CONSTIFY_PLUGIN
6544 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6545 +endif
6546 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6547 GCOV_PROFILE := n
6548
6549 diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6550 --- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6551 +++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6552 @@ -19,7 +19,7 @@
6553
6554 static int detect_memory_e820(void)
6555 {
6556 - int count = 0;
6557 + unsigned int count = 0;
6558 struct biosregs ireg, oreg;
6559 struct e820entry *desc = boot_params.e820_map;
6560 static struct e820entry buf; /* static so it is zeroed */
6561 diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6562 --- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6563 +++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6564 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6565 static unsigned int get_entry(void)
6566 {
6567 char entry_buf[4];
6568 - int i, len = 0;
6569 + unsigned int i, len = 0;
6570 int key;
6571 unsigned int v;
6572
6573 diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6574 --- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6575 +++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6576 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6577
6578 boot_params.screen_info.vesapm_seg = oreg.es;
6579 boot_params.screen_info.vesapm_off = oreg.di;
6580 + boot_params.screen_info.vesapm_size = oreg.cx;
6581 }
6582
6583 /*
6584 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6585 --- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6586 +++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6587 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6588 unsigned long dump_start, dump_size;
6589 struct user32 dump;
6590
6591 + memset(&dump, 0, sizeof(dump));
6592 +
6593 fs = get_fs();
6594 set_fs(KERNEL_DS);
6595 has_dumped = 1;
6596 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6597 dump_size = dump.u_ssize << PAGE_SHIFT;
6598 DUMP_WRITE(dump_start, dump_size);
6599 }
6600 - /*
6601 - * Finally dump the task struct. Not be used by gdb, but
6602 - * could be useful
6603 - */
6604 - set_fs(KERNEL_DS);
6605 - DUMP_WRITE(current, sizeof(*current));
6606 end_coredump:
6607 set_fs(fs);
6608 return has_dumped;
6609 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6610 --- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6611 +++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6612 @@ -13,6 +13,7 @@
6613 #include <asm/thread_info.h>
6614 #include <asm/segment.h>
6615 #include <asm/irqflags.h>
6616 +#include <asm/pgtable.h>
6617 #include <linux/linkage.h>
6618
6619 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6620 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6621 ENDPROC(native_irq_enable_sysexit)
6622 #endif
6623
6624 + .macro pax_enter_kernel_user
6625 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6626 + call pax_enter_kernel_user
6627 +#endif
6628 + .endm
6629 +
6630 + .macro pax_exit_kernel_user
6631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6632 + call pax_exit_kernel_user
6633 +#endif
6634 +#ifdef CONFIG_PAX_RANDKSTACK
6635 + pushq %rax
6636 + call pax_randomize_kstack
6637 + popq %rax
6638 +#endif
6639 + pax_erase_kstack
6640 + .endm
6641 +
6642 +.macro pax_erase_kstack
6643 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6644 + call pax_erase_kstack
6645 +#endif
6646 +.endm
6647 +
6648 /*
6649 * 32bit SYSENTER instruction entry.
6650 *
6651 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6652 CFI_REGISTER rsp,rbp
6653 SWAPGS_UNSAFE_STACK
6654 movq PER_CPU_VAR(kernel_stack), %rsp
6655 - addq $(KERNEL_STACK_OFFSET),%rsp
6656 + pax_enter_kernel_user
6657 /*
6658 * No need to follow this irqs on/off section: the syscall
6659 * disabled irqs, here we enable it straight after entry:
6660 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6661 pushfq
6662 CFI_ADJUST_CFA_OFFSET 8
6663 /*CFI_REL_OFFSET rflags,0*/
6664 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6665 + GET_THREAD_INFO(%r10)
6666 + movl TI_sysenter_return(%r10), %r10d
6667 CFI_REGISTER rip,r10
6668 pushq $__USER32_CS
6669 CFI_ADJUST_CFA_OFFSET 8
6670 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6671 SAVE_ARGS 0,0,1
6672 /* no need to do an access_ok check here because rbp has been
6673 32bit zero extended */
6674 +
6675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6676 + mov $PAX_USER_SHADOW_BASE,%r10
6677 + add %r10,%rbp
6678 +#endif
6679 +
6680 1: movl (%rbp),%ebp
6681 .section __ex_table,"a"
6682 .quad 1b,ia32_badarg
6683 @@ -172,6 +204,7 @@ sysenter_dispatch:
6684 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6685 jnz sysexit_audit
6686 sysexit_from_sys_call:
6687 + pax_exit_kernel_user
6688 andl $~TS_COMPAT,TI_status(%r10)
6689 /* clear IF, that popfq doesn't enable interrupts early */
6690 andl $~0x200,EFLAGS-R11(%rsp)
6691 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6692 movl %eax,%esi /* 2nd arg: syscall number */
6693 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6694 call audit_syscall_entry
6695 +
6696 + pax_erase_kstack
6697 +
6698 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6699 cmpq $(IA32_NR_syscalls-1),%rax
6700 ja ia32_badsys
6701 @@ -252,6 +288,9 @@ sysenter_tracesys:
6702 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6703 movq %rsp,%rdi /* &pt_regs -> arg1 */
6704 call syscall_trace_enter
6705 +
6706 + pax_erase_kstack
6707 +
6708 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6709 RESTORE_REST
6710 cmpq $(IA32_NR_syscalls-1),%rax
6711 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6712 ENTRY(ia32_cstar_target)
6713 CFI_STARTPROC32 simple
6714 CFI_SIGNAL_FRAME
6715 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6716 + CFI_DEF_CFA rsp,0
6717 CFI_REGISTER rip,rcx
6718 /*CFI_REGISTER rflags,r11*/
6719 SWAPGS_UNSAFE_STACK
6720 movl %esp,%r8d
6721 CFI_REGISTER rsp,r8
6722 movq PER_CPU_VAR(kernel_stack),%rsp
6723 +
6724 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6725 + pax_enter_kernel_user
6726 +#endif
6727 +
6728 /*
6729 * No need to follow this irqs on/off section: the syscall
6730 * disabled irqs and here we enable it straight after entry:
6731 */
6732 ENABLE_INTERRUPTS(CLBR_NONE)
6733 - SAVE_ARGS 8,1,1
6734 + SAVE_ARGS 8*6,1,1
6735 movl %eax,%eax /* zero extension */
6736 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6737 movq %rcx,RIP-ARGOFFSET(%rsp)
6738 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6739 /* no need to do an access_ok check here because r8 has been
6740 32bit zero extended */
6741 /* hardware stack frame is complete now */
6742 +
6743 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6744 + mov $PAX_USER_SHADOW_BASE,%r10
6745 + add %r10,%r8
6746 +#endif
6747 +
6748 1: movl (%r8),%r9d
6749 .section __ex_table,"a"
6750 .quad 1b,ia32_badarg
6751 @@ -333,6 +383,7 @@ cstar_dispatch:
6752 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6753 jnz sysretl_audit
6754 sysretl_from_sys_call:
6755 + pax_exit_kernel_user
6756 andl $~TS_COMPAT,TI_status(%r10)
6757 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6758 movl RIP-ARGOFFSET(%rsp),%ecx
6759 @@ -370,6 +421,9 @@ cstar_tracesys:
6760 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6761 movq %rsp,%rdi /* &pt_regs -> arg1 */
6762 call syscall_trace_enter
6763 +
6764 + pax_erase_kstack
6765 +
6766 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6767 RESTORE_REST
6768 xchgl %ebp,%r9d
6769 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6770 CFI_REL_OFFSET rip,RIP-RIP
6771 PARAVIRT_ADJUST_EXCEPTION_FRAME
6772 SWAPGS
6773 + pax_enter_kernel_user
6774 /*
6775 * No need to follow this irqs on/off section: the syscall
6776 * disabled irqs and here we enable it straight after entry:
6777 @@ -448,6 +503,9 @@ ia32_tracesys:
6778 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6779 movq %rsp,%rdi /* &pt_regs -> arg1 */
6780 call syscall_trace_enter
6781 +
6782 + pax_erase_kstack
6783 +
6784 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6785 RESTORE_REST
6786 cmpq $(IA32_NR_syscalls-1),%rax
6787 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6788 --- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6789 +++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6790 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6791 sp -= frame_size;
6792 /* Align the stack pointer according to the i386 ABI,
6793 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6794 - sp = ((sp + 4) & -16ul) - 4;
6795 + sp = ((sp - 12) & -16ul) - 4;
6796 return (void __user *) sp;
6797 }
6798
6799 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6800 * These are actually not used anymore, but left because some
6801 * gdb versions depend on them as a marker.
6802 */
6803 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6804 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6805 } put_user_catch(err);
6806
6807 if (err)
6808 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6809 0xb8,
6810 __NR_ia32_rt_sigreturn,
6811 0x80cd,
6812 - 0,
6813 + 0
6814 };
6815
6816 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6817 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6818
6819 if (ka->sa.sa_flags & SA_RESTORER)
6820 restorer = ka->sa.sa_restorer;
6821 + else if (current->mm->context.vdso)
6822 + /* Return stub is in 32bit vsyscall page */
6823 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6824 else
6825 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6826 - rt_sigreturn);
6827 + restorer = &frame->retcode;
6828 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6829
6830 /*
6831 * Not actually used anymore, but left because some gdb
6832 * versions need it.
6833 */
6834 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6835 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6836 } put_user_catch(err);
6837
6838 if (err)
6839 diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6840 --- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6841 +++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6842 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6843 " .byte 662b-661b\n" /* sourcelen */ \
6844 " .byte 664f-663f\n" /* replacementlen */ \
6845 ".previous\n" \
6846 - ".section .altinstr_replacement, \"ax\"\n" \
6847 + ".section .altinstr_replacement, \"a\"\n" \
6848 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6849 ".previous"
6850
6851 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6852 --- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
6853 +++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
6854 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
6855
6856 #ifdef CONFIG_X86_LOCAL_APIC
6857
6858 -extern unsigned int apic_verbosity;
6859 +extern int apic_verbosity;
6860 extern int local_apic_timer_c2_ok;
6861
6862 extern int disable_apic;
6863 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
6864 --- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6865 +++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6866 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6867 __asm__ __volatile__(APM_DO_ZERO_SEGS
6868 "pushl %%edi\n\t"
6869 "pushl %%ebp\n\t"
6870 - "lcall *%%cs:apm_bios_entry\n\t"
6871 + "lcall *%%ss:apm_bios_entry\n\t"
6872 "setc %%al\n\t"
6873 "popl %%ebp\n\t"
6874 "popl %%edi\n\t"
6875 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6876 __asm__ __volatile__(APM_DO_ZERO_SEGS
6877 "pushl %%edi\n\t"
6878 "pushl %%ebp\n\t"
6879 - "lcall *%%cs:apm_bios_entry\n\t"
6880 + "lcall *%%ss:apm_bios_entry\n\t"
6881 "setc %%bl\n\t"
6882 "popl %%ebp\n\t"
6883 "popl %%edi\n\t"
6884 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
6885 --- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6886 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6887 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6888 }
6889
6890 /**
6891 + * atomic_read_unchecked - read atomic variable
6892 + * @v: pointer of type atomic_unchecked_t
6893 + *
6894 + * Atomically reads the value of @v.
6895 + */
6896 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6897 +{
6898 + return v->counter;
6899 +}
6900 +
6901 +/**
6902 * atomic_set - set atomic variable
6903 * @v: pointer of type atomic_t
6904 * @i: required value
6905 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6906 }
6907
6908 /**
6909 + * atomic_set_unchecked - set atomic variable
6910 + * @v: pointer of type atomic_unchecked_t
6911 + * @i: required value
6912 + *
6913 + * Atomically sets the value of @v to @i.
6914 + */
6915 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6916 +{
6917 + v->counter = i;
6918 +}
6919 +
6920 +/**
6921 * atomic_add - add integer to atomic variable
6922 * @i: integer value to add
6923 * @v: pointer of type atomic_t
6924 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6925 */
6926 static inline void atomic_add(int i, atomic_t *v)
6927 {
6928 - asm volatile(LOCK_PREFIX "addl %1,%0"
6929 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6930 +
6931 +#ifdef CONFIG_PAX_REFCOUNT
6932 + "jno 0f\n"
6933 + LOCK_PREFIX "subl %1,%0\n"
6934 + "int $4\n0:\n"
6935 + _ASM_EXTABLE(0b, 0b)
6936 +#endif
6937 +
6938 + : "+m" (v->counter)
6939 + : "ir" (i));
6940 +}
6941 +
6942 +/**
6943 + * atomic_add_unchecked - add integer to atomic variable
6944 + * @i: integer value to add
6945 + * @v: pointer of type atomic_unchecked_t
6946 + *
6947 + * Atomically adds @i to @v.
6948 + */
6949 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6950 +{
6951 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6952 : "+m" (v->counter)
6953 : "ir" (i));
6954 }
6955 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6956 */
6957 static inline void atomic_sub(int i, atomic_t *v)
6958 {
6959 - asm volatile(LOCK_PREFIX "subl %1,%0"
6960 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6961 +
6962 +#ifdef CONFIG_PAX_REFCOUNT
6963 + "jno 0f\n"
6964 + LOCK_PREFIX "addl %1,%0\n"
6965 + "int $4\n0:\n"
6966 + _ASM_EXTABLE(0b, 0b)
6967 +#endif
6968 +
6969 + : "+m" (v->counter)
6970 + : "ir" (i));
6971 +}
6972 +
6973 +/**
6974 + * atomic_sub_unchecked - subtract integer from atomic variable
6975 + * @i: integer value to subtract
6976 + * @v: pointer of type atomic_unchecked_t
6977 + *
6978 + * Atomically subtracts @i from @v.
6979 + */
6980 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6981 +{
6982 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6983 : "+m" (v->counter)
6984 : "ir" (i));
6985 }
6986 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6987 {
6988 unsigned char c;
6989
6990 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6991 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6992 +
6993 +#ifdef CONFIG_PAX_REFCOUNT
6994 + "jno 0f\n"
6995 + LOCK_PREFIX "addl %2,%0\n"
6996 + "int $4\n0:\n"
6997 + _ASM_EXTABLE(0b, 0b)
6998 +#endif
6999 +
7000 + "sete %1\n"
7001 : "+m" (v->counter), "=qm" (c)
7002 : "ir" (i) : "memory");
7003 return c;
7004 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7005 */
7006 static inline void atomic_inc(atomic_t *v)
7007 {
7008 - asm volatile(LOCK_PREFIX "incl %0"
7009 + asm volatile(LOCK_PREFIX "incl %0\n"
7010 +
7011 +#ifdef CONFIG_PAX_REFCOUNT
7012 + "jno 0f\n"
7013 + LOCK_PREFIX "decl %0\n"
7014 + "int $4\n0:\n"
7015 + _ASM_EXTABLE(0b, 0b)
7016 +#endif
7017 +
7018 + : "+m" (v->counter));
7019 +}
7020 +
7021 +/**
7022 + * atomic_inc_unchecked - increment atomic variable
7023 + * @v: pointer of type atomic_unchecked_t
7024 + *
7025 + * Atomically increments @v by 1.
7026 + */
7027 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7028 +{
7029 + asm volatile(LOCK_PREFIX "incl %0\n"
7030 : "+m" (v->counter));
7031 }
7032
7033 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7034 */
7035 static inline void atomic_dec(atomic_t *v)
7036 {
7037 - asm volatile(LOCK_PREFIX "decl %0"
7038 + asm volatile(LOCK_PREFIX "decl %0\n"
7039 +
7040 +#ifdef CONFIG_PAX_REFCOUNT
7041 + "jno 0f\n"
7042 + LOCK_PREFIX "incl %0\n"
7043 + "int $4\n0:\n"
7044 + _ASM_EXTABLE(0b, 0b)
7045 +#endif
7046 +
7047 + : "+m" (v->counter));
7048 +}
7049 +
7050 +/**
7051 + * atomic_dec_unchecked - decrement atomic variable
7052 + * @v: pointer of type atomic_unchecked_t
7053 + *
7054 + * Atomically decrements @v by 1.
7055 + */
7056 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7057 +{
7058 + asm volatile(LOCK_PREFIX "decl %0\n"
7059 : "+m" (v->counter));
7060 }
7061
7062 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7063 {
7064 unsigned char c;
7065
7066 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7067 + asm volatile(LOCK_PREFIX "decl %0\n"
7068 +
7069 +#ifdef CONFIG_PAX_REFCOUNT
7070 + "jno 0f\n"
7071 + LOCK_PREFIX "incl %0\n"
7072 + "int $4\n0:\n"
7073 + _ASM_EXTABLE(0b, 0b)
7074 +#endif
7075 +
7076 + "sete %1\n"
7077 : "+m" (v->counter), "=qm" (c)
7078 : : "memory");
7079 return c != 0;
7080 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7081 {
7082 unsigned char c;
7083
7084 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7085 + asm volatile(LOCK_PREFIX "incl %0\n"
7086 +
7087 +#ifdef CONFIG_PAX_REFCOUNT
7088 + "jno 0f\n"
7089 + LOCK_PREFIX "decl %0\n"
7090 + "into\n0:\n"
7091 + _ASM_EXTABLE(0b, 0b)
7092 +#endif
7093 +
7094 + "sete %1\n"
7095 + : "+m" (v->counter), "=qm" (c)
7096 + : : "memory");
7097 + return c != 0;
7098 +}
7099 +
7100 +/**
7101 + * atomic_inc_and_test_unchecked - increment and test
7102 + * @v: pointer of type atomic_unchecked_t
7103 + *
7104 + * Atomically increments @v by 1
7105 + * and returns true if the result is zero, or false for all
7106 + * other cases.
7107 + */
7108 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7109 +{
7110 + unsigned char c;
7111 +
7112 + asm volatile(LOCK_PREFIX "incl %0\n"
7113 + "sete %1\n"
7114 : "+m" (v->counter), "=qm" (c)
7115 : : "memory");
7116 return c != 0;
7117 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7118 {
7119 unsigned char c;
7120
7121 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7122 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7123 +
7124 +#ifdef CONFIG_PAX_REFCOUNT
7125 + "jno 0f\n"
7126 + LOCK_PREFIX "subl %2,%0\n"
7127 + "int $4\n0:\n"
7128 + _ASM_EXTABLE(0b, 0b)
7129 +#endif
7130 +
7131 + "sets %1\n"
7132 : "+m" (v->counter), "=qm" (c)
7133 : "ir" (i) : "memory");
7134 return c;
7135 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7136 #endif
7137 /* Modern 486+ processor */
7138 __i = i;
7139 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7140 +
7141 +#ifdef CONFIG_PAX_REFCOUNT
7142 + "jno 0f\n"
7143 + "movl %0, %1\n"
7144 + "int $4\n0:\n"
7145 + _ASM_EXTABLE(0b, 0b)
7146 +#endif
7147 +
7148 + : "+r" (i), "+m" (v->counter)
7149 + : : "memory");
7150 + return i + __i;
7151 +
7152 +#ifdef CONFIG_M386
7153 +no_xadd: /* Legacy 386 processor */
7154 + local_irq_save(flags);
7155 + __i = atomic_read(v);
7156 + atomic_set(v, i + __i);
7157 + local_irq_restore(flags);
7158 + return i + __i;
7159 +#endif
7160 +}
7161 +
7162 +/**
7163 + * atomic_add_return_unchecked - add integer and return
7164 + * @v: pointer of type atomic_unchecked_t
7165 + * @i: integer value to add
7166 + *
7167 + * Atomically adds @i to @v and returns @i + @v
7168 + */
7169 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7170 +{
7171 + int __i;
7172 +#ifdef CONFIG_M386
7173 + unsigned long flags;
7174 + if (unlikely(boot_cpu_data.x86 <= 3))
7175 + goto no_xadd;
7176 +#endif
7177 + /* Modern 486+ processor */
7178 + __i = i;
7179 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7180 : "+r" (i), "+m" (v->counter)
7181 : : "memory");
7182 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7183 return cmpxchg(&v->counter, old, new);
7184 }
7185
7186 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7187 +{
7188 + return cmpxchg(&v->counter, old, new);
7189 +}
7190 +
7191 static inline int atomic_xchg(atomic_t *v, int new)
7192 {
7193 return xchg(&v->counter, new);
7194 }
7195
7196 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7197 +{
7198 + return xchg(&v->counter, new);
7199 +}
7200 +
7201 /**
7202 * atomic_add_unless - add unless the number is already a given value
7203 * @v: pointer of type atomic_t
7204 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7205 */
7206 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7207 {
7208 - int c, old;
7209 + int c, old, new;
7210 c = atomic_read(v);
7211 for (;;) {
7212 - if (unlikely(c == (u)))
7213 + if (unlikely(c == u))
7214 break;
7215 - old = atomic_cmpxchg((v), c, c + (a));
7216 +
7217 + asm volatile("addl %2,%0\n"
7218 +
7219 +#ifdef CONFIG_PAX_REFCOUNT
7220 + "jno 0f\n"
7221 + "subl %2,%0\n"
7222 + "int $4\n0:\n"
7223 + _ASM_EXTABLE(0b, 0b)
7224 +#endif
7225 +
7226 + : "=r" (new)
7227 + : "0" (c), "ir" (a));
7228 +
7229 + old = atomic_cmpxchg(v, c, new);
7230 if (likely(old == c))
7231 break;
7232 c = old;
7233 }
7234 - return c != (u);
7235 + return c != u;
7236 }
7237
7238 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7239
7240 #define atomic_inc_return(v) (atomic_add_return(1, v))
7241 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7242 +{
7243 + return atomic_add_return_unchecked(1, v);
7244 +}
7245 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7246
7247 /* These are x86-specific, used by some header files */
7248 @@ -266,9 +495,18 @@ typedef struct {
7249 u64 __aligned(8) counter;
7250 } atomic64_t;
7251
7252 +#ifdef CONFIG_PAX_REFCOUNT
7253 +typedef struct {
7254 + u64 __aligned(8) counter;
7255 +} atomic64_unchecked_t;
7256 +#else
7257 +typedef atomic64_t atomic64_unchecked_t;
7258 +#endif
7259 +
7260 #define ATOMIC64_INIT(val) { (val) }
7261
7262 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7263 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7264
7265 /**
7266 * atomic64_xchg - xchg atomic64 variable
7267 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7268 * the old value.
7269 */
7270 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7271 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7272
7273 /**
7274 * atomic64_set - set atomic64 variable
7275 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7276 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7277
7278 /**
7279 + * atomic64_unchecked_set - set atomic64 variable
7280 + * @ptr: pointer to type atomic64_unchecked_t
7281 + * @new_val: value to assign
7282 + *
7283 + * Atomically sets the value of @ptr to @new_val.
7284 + */
7285 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7286 +
7287 +/**
7288 * atomic64_read - read atomic64 variable
7289 * @ptr: pointer to type atomic64_t
7290 *
7291 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7292 return res;
7293 }
7294
7295 -extern u64 atomic64_read(atomic64_t *ptr);
7296 +/**
7297 + * atomic64_read_unchecked - read atomic64 variable
7298 + * @ptr: pointer to type atomic64_unchecked_t
7299 + *
7300 + * Atomically reads the value of @ptr and returns it.
7301 + */
7302 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7303 +{
7304 + u64 res;
7305 +
7306 + /*
7307 + * Note, we inline this atomic64_unchecked_t primitive because
7308 + * it only clobbers EAX/EDX and leaves the others
7309 + * untouched. We also (somewhat subtly) rely on the
7310 + * fact that cmpxchg8b returns the current 64-bit value
7311 + * of the memory location we are touching:
7312 + */
7313 + asm volatile(
7314 + "mov %%ebx, %%eax\n\t"
7315 + "mov %%ecx, %%edx\n\t"
7316 + LOCK_PREFIX "cmpxchg8b %1\n"
7317 + : "=&A" (res)
7318 + : "m" (*ptr)
7319 + );
7320 +
7321 + return res;
7322 +}
7323
7324 /**
7325 * atomic64_add_return - add and return
7326 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7327 * Other variants with different arithmetic operators:
7328 */
7329 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7330 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7331 extern u64 atomic64_inc_return(atomic64_t *ptr);
7332 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7333 extern u64 atomic64_dec_return(atomic64_t *ptr);
7334 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7335
7336 /**
7337 * atomic64_add - add integer to atomic64 variable
7338 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7339 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7340
7341 /**
7342 + * atomic64_add_unchecked - add integer to atomic64 variable
7343 + * @delta: integer value to add
7344 + * @ptr: pointer to type atomic64_unchecked_t
7345 + *
7346 + * Atomically adds @delta to @ptr.
7347 + */
7348 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7349 +
7350 +/**
7351 * atomic64_sub - subtract the atomic64 variable
7352 * @delta: integer value to subtract
7353 * @ptr: pointer to type atomic64_t
7354 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7355 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7356
7357 /**
7358 + * atomic64_sub_unchecked - subtract the atomic64 variable
7359 + * @delta: integer value to subtract
7360 + * @ptr: pointer to type atomic64_unchecked_t
7361 + *
7362 + * Atomically subtracts @delta from @ptr.
7363 + */
7364 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7365 +
7366 +/**
7367 * atomic64_sub_and_test - subtract value from variable and test result
7368 * @delta: integer value to subtract
7369 * @ptr: pointer to type atomic64_t
7370 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7371 extern void atomic64_inc(atomic64_t *ptr);
7372
7373 /**
7374 + * atomic64_inc_unchecked - increment atomic64 variable
7375 + * @ptr: pointer to type atomic64_unchecked_t
7376 + *
7377 + * Atomically increments @ptr by 1.
7378 + */
7379 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7380 +
7381 +/**
7382 * atomic64_dec - decrement atomic64 variable
7383 * @ptr: pointer to type atomic64_t
7384 *
7385 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7386 extern void atomic64_dec(atomic64_t *ptr);
7387
7388 /**
7389 + * atomic64_dec_unchecked - decrement atomic64 variable
7390 + * @ptr: pointer to type atomic64_unchecked_t
7391 + *
7392 + * Atomically decrements @ptr by 1.
7393 + */
7394 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7395 +
7396 +/**
7397 * atomic64_dec_and_test - decrement and test
7398 * @ptr: pointer to type atomic64_t
7399 *
7400 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7401 --- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7402 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7403 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7404 }
7405
7406 /**
7407 + * atomic_read_unchecked - read atomic variable
7408 + * @v: pointer of type atomic_unchecked_t
7409 + *
7410 + * Atomically reads the value of @v.
7411 + */
7412 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7413 +{
7414 + return v->counter;
7415 +}
7416 +
7417 +/**
7418 * atomic_set - set atomic variable
7419 * @v: pointer of type atomic_t
7420 * @i: required value
7421 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7422 }
7423
7424 /**
7425 + * atomic_set_unchecked - set atomic variable
7426 + * @v: pointer of type atomic_unchecked_t
7427 + * @i: required value
7428 + *
7429 + * Atomically sets the value of @v to @i.
7430 + */
7431 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7432 +{
7433 + v->counter = i;
7434 +}
7435 +
7436 +/**
7437 * atomic_add - add integer to atomic variable
7438 * @i: integer value to add
7439 * @v: pointer of type atomic_t
7440 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7441 */
7442 static inline void atomic_add(int i, atomic_t *v)
7443 {
7444 - asm volatile(LOCK_PREFIX "addl %1,%0"
7445 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7446 +
7447 +#ifdef CONFIG_PAX_REFCOUNT
7448 + "jno 0f\n"
7449 + LOCK_PREFIX "subl %1,%0\n"
7450 + "int $4\n0:\n"
7451 + _ASM_EXTABLE(0b, 0b)
7452 +#endif
7453 +
7454 + : "=m" (v->counter)
7455 + : "ir" (i), "m" (v->counter));
7456 +}
7457 +
7458 +/**
7459 + * atomic_add_unchecked - add integer to atomic variable
7460 + * @i: integer value to add
7461 + * @v: pointer of type atomic_unchecked_t
7462 + *
7463 + * Atomically adds @i to @v.
7464 + */
7465 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7466 +{
7467 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7468 : "=m" (v->counter)
7469 : "ir" (i), "m" (v->counter));
7470 }
7471 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7472 */
7473 static inline void atomic_sub(int i, atomic_t *v)
7474 {
7475 - asm volatile(LOCK_PREFIX "subl %1,%0"
7476 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7477 +
7478 +#ifdef CONFIG_PAX_REFCOUNT
7479 + "jno 0f\n"
7480 + LOCK_PREFIX "addl %1,%0\n"
7481 + "int $4\n0:\n"
7482 + _ASM_EXTABLE(0b, 0b)
7483 +#endif
7484 +
7485 + : "=m" (v->counter)
7486 + : "ir" (i), "m" (v->counter));
7487 +}
7488 +
7489 +/**
7490 + * atomic_sub_unchecked - subtract the atomic variable
7491 + * @i: integer value to subtract
7492 + * @v: pointer of type atomic_unchecked_t
7493 + *
7494 + * Atomically subtracts @i from @v.
7495 + */
7496 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7497 +{
7498 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7499 : "=m" (v->counter)
7500 : "ir" (i), "m" (v->counter));
7501 }
7502 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7503 {
7504 unsigned char c;
7505
7506 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7507 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7508 +
7509 +#ifdef CONFIG_PAX_REFCOUNT
7510 + "jno 0f\n"
7511 + LOCK_PREFIX "addl %2,%0\n"
7512 + "int $4\n0:\n"
7513 + _ASM_EXTABLE(0b, 0b)
7514 +#endif
7515 +
7516 + "sete %1\n"
7517 : "=m" (v->counter), "=qm" (c)
7518 : "ir" (i), "m" (v->counter) : "memory");
7519 return c;
7520 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7521 */
7522 static inline void atomic_inc(atomic_t *v)
7523 {
7524 - asm volatile(LOCK_PREFIX "incl %0"
7525 + asm volatile(LOCK_PREFIX "incl %0\n"
7526 +
7527 +#ifdef CONFIG_PAX_REFCOUNT
7528 + "jno 0f\n"
7529 + LOCK_PREFIX "decl %0\n"
7530 + "int $4\n0:\n"
7531 + _ASM_EXTABLE(0b, 0b)
7532 +#endif
7533 +
7534 + : "=m" (v->counter)
7535 + : "m" (v->counter));
7536 +}
7537 +
7538 +/**
7539 + * atomic_inc_unchecked - increment atomic variable
7540 + * @v: pointer of type atomic_unchecked_t
7541 + *
7542 + * Atomically increments @v by 1.
7543 + */
7544 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7545 +{
7546 + asm volatile(LOCK_PREFIX "incl %0\n"
7547 : "=m" (v->counter)
7548 : "m" (v->counter));
7549 }
7550 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7551 */
7552 static inline void atomic_dec(atomic_t *v)
7553 {
7554 - asm volatile(LOCK_PREFIX "decl %0"
7555 + asm volatile(LOCK_PREFIX "decl %0\n"
7556 +
7557 +#ifdef CONFIG_PAX_REFCOUNT
7558 + "jno 0f\n"
7559 + LOCK_PREFIX "incl %0\n"
7560 + "int $4\n0:\n"
7561 + _ASM_EXTABLE(0b, 0b)
7562 +#endif
7563 +
7564 + : "=m" (v->counter)
7565 + : "m" (v->counter));
7566 +}
7567 +
7568 +/**
7569 + * atomic_dec_unchecked - decrement atomic variable
7570 + * @v: pointer of type atomic_unchecked_t
7571 + *
7572 + * Atomically decrements @v by 1.
7573 + */
7574 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7575 +{
7576 + asm volatile(LOCK_PREFIX "decl %0\n"
7577 : "=m" (v->counter)
7578 : "m" (v->counter));
7579 }
7580 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7581 {
7582 unsigned char c;
7583
7584 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7585 + asm volatile(LOCK_PREFIX "decl %0\n"
7586 +
7587 +#ifdef CONFIG_PAX_REFCOUNT
7588 + "jno 0f\n"
7589 + LOCK_PREFIX "incl %0\n"
7590 + "int $4\n0:\n"
7591 + _ASM_EXTABLE(0b, 0b)
7592 +#endif
7593 +
7594 + "sete %1\n"
7595 : "=m" (v->counter), "=qm" (c)
7596 : "m" (v->counter) : "memory");
7597 return c != 0;
7598 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7599 {
7600 unsigned char c;
7601
7602 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7603 + asm volatile(LOCK_PREFIX "incl %0\n"
7604 +
7605 +#ifdef CONFIG_PAX_REFCOUNT
7606 + "jno 0f\n"
7607 + LOCK_PREFIX "decl %0\n"
7608 + "int $4\n0:\n"
7609 + _ASM_EXTABLE(0b, 0b)
7610 +#endif
7611 +
7612 + "sete %1\n"
7613 + : "=m" (v->counter), "=qm" (c)
7614 + : "m" (v->counter) : "memory");
7615 + return c != 0;
7616 +}
7617 +
7618 +/**
7619 + * atomic_inc_and_test_unchecked - increment and test
7620 + * @v: pointer of type atomic_unchecked_t
7621 + *
7622 + * Atomically increments @v by 1
7623 + * and returns true if the result is zero, or false for all
7624 + * other cases.
7625 + */
7626 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7627 +{
7628 + unsigned char c;
7629 +
7630 + asm volatile(LOCK_PREFIX "incl %0\n"
7631 + "sete %1\n"
7632 : "=m" (v->counter), "=qm" (c)
7633 : "m" (v->counter) : "memory");
7634 return c != 0;
7635 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7636 {
7637 unsigned char c;
7638
7639 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7640 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7641 +
7642 +#ifdef CONFIG_PAX_REFCOUNT
7643 + "jno 0f\n"
7644 + LOCK_PREFIX "subl %2,%0\n"
7645 + "int $4\n0:\n"
7646 + _ASM_EXTABLE(0b, 0b)
7647 +#endif
7648 +
7649 + "sets %1\n"
7650 : "=m" (v->counter), "=qm" (c)
7651 : "ir" (i), "m" (v->counter) : "memory");
7652 return c;
7653 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7654 static inline int atomic_add_return(int i, atomic_t *v)
7655 {
7656 int __i = i;
7657 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7658 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7659 +
7660 +#ifdef CONFIG_PAX_REFCOUNT
7661 + "jno 0f\n"
7662 + "movl %0, %1\n"
7663 + "int $4\n0:\n"
7664 + _ASM_EXTABLE(0b, 0b)
7665 +#endif
7666 +
7667 + : "+r" (i), "+m" (v->counter)
7668 + : : "memory");
7669 + return i + __i;
7670 +}
7671 +
7672 +/**
7673 + * atomic_add_return_unchecked - add and return
7674 + * @i: integer value to add
7675 + * @v: pointer of type atomic_unchecked_t
7676 + *
7677 + * Atomically adds @i to @v and returns @i + @v
7678 + */
7679 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7680 +{
7681 + int __i = i;
7682 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7683 : "+r" (i), "+m" (v->counter)
7684 : : "memory");
7685 return i + __i;
7686 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7687 }
7688
7689 #define atomic_inc_return(v) (atomic_add_return(1, v))
7690 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7691 +{
7692 + return atomic_add_return_unchecked(1, v);
7693 +}
7694 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7695
7696 /* The 64-bit atomic type */
7697 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7698 }
7699
7700 /**
7701 + * atomic64_read_unchecked - read atomic64 variable
7702 + * @v: pointer of type atomic64_unchecked_t
7703 + *
7704 + * Atomically reads the value of @v.
7705 + * Doesn't imply a read memory barrier.
7706 + */
7707 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7708 +{
7709 + return v->counter;
7710 +}
7711 +
7712 +/**
7713 * atomic64_set - set atomic64 variable
7714 * @v: pointer to type atomic64_t
7715 * @i: required value
7716 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7717 }
7718
7719 /**
7720 + * atomic64_set_unchecked - set atomic64 variable
7721 + * @v: pointer to type atomic64_unchecked_t
7722 + * @i: required value
7723 + *
7724 + * Atomically sets the value of @v to @i.
7725 + */
7726 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7727 +{
7728 + v->counter = i;
7729 +}
7730 +
7731 +/**
7732 * atomic64_add - add integer to atomic64 variable
7733 * @i: integer value to add
7734 * @v: pointer to type atomic64_t
7735 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7736 */
7737 static inline void atomic64_add(long i, atomic64_t *v)
7738 {
7739 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7740 +
7741 +#ifdef CONFIG_PAX_REFCOUNT
7742 + "jno 0f\n"
7743 + LOCK_PREFIX "subq %1,%0\n"
7744 + "int $4\n0:\n"
7745 + _ASM_EXTABLE(0b, 0b)
7746 +#endif
7747 +
7748 + : "=m" (v->counter)
7749 + : "er" (i), "m" (v->counter));
7750 +}
7751 +
7752 +/**
7753 + * atomic64_add_unchecked - add integer to atomic64 variable
7754 + * @i: integer value to add
7755 + * @v: pointer to type atomic64_unchecked_t
7756 + *
7757 + * Atomically adds @i to @v.
7758 + */
7759 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7760 +{
7761 asm volatile(LOCK_PREFIX "addq %1,%0"
7762 : "=m" (v->counter)
7763 : "er" (i), "m" (v->counter));
7764 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7765 */
7766 static inline void atomic64_sub(long i, atomic64_t *v)
7767 {
7768 - asm volatile(LOCK_PREFIX "subq %1,%0"
7769 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7770 +
7771 +#ifdef CONFIG_PAX_REFCOUNT
7772 + "jno 0f\n"
7773 + LOCK_PREFIX "addq %1,%0\n"
7774 + "int $4\n0:\n"
7775 + _ASM_EXTABLE(0b, 0b)
7776 +#endif
7777 +
7778 : "=m" (v->counter)
7779 : "er" (i), "m" (v->counter));
7780 }
7781 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7782 {
7783 unsigned char c;
7784
7785 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7786 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7787 +
7788 +#ifdef CONFIG_PAX_REFCOUNT
7789 + "jno 0f\n"
7790 + LOCK_PREFIX "addq %2,%0\n"
7791 + "int $4\n0:\n"
7792 + _ASM_EXTABLE(0b, 0b)
7793 +#endif
7794 +
7795 + "sete %1\n"
7796 : "=m" (v->counter), "=qm" (c)
7797 : "er" (i), "m" (v->counter) : "memory");
7798 return c;
7799 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7800 */
7801 static inline void atomic64_inc(atomic64_t *v)
7802 {
7803 + asm volatile(LOCK_PREFIX "incq %0\n"
7804 +
7805 +#ifdef CONFIG_PAX_REFCOUNT
7806 + "jno 0f\n"
7807 + LOCK_PREFIX "decq %0\n"
7808 + "int $4\n0:\n"
7809 + _ASM_EXTABLE(0b, 0b)
7810 +#endif
7811 +
7812 + : "=m" (v->counter)
7813 + : "m" (v->counter));
7814 +}
7815 +
7816 +/**
7817 + * atomic64_inc_unchecked - increment atomic64 variable
7818 + * @v: pointer to type atomic64_unchecked_t
7819 + *
7820 + * Atomically increments @v by 1.
7821 + */
7822 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7823 +{
7824 asm volatile(LOCK_PREFIX "incq %0"
7825 : "=m" (v->counter)
7826 : "m" (v->counter));
7827 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7828 */
7829 static inline void atomic64_dec(atomic64_t *v)
7830 {
7831 - asm volatile(LOCK_PREFIX "decq %0"
7832 + asm volatile(LOCK_PREFIX "decq %0\n"
7833 +
7834 +#ifdef CONFIG_PAX_REFCOUNT
7835 + "jno 0f\n"
7836 + LOCK_PREFIX "incq %0\n"
7837 + "int $4\n0:\n"
7838 + _ASM_EXTABLE(0b, 0b)
7839 +#endif
7840 +
7841 + : "=m" (v->counter)
7842 + : "m" (v->counter));
7843 +}
7844 +
7845 +/**
7846 + * atomic64_dec_unchecked - decrement atomic64 variable
7847 + * @v: pointer to type atomic64_unchecked_t
7848 + *
7849 + * Atomically decrements @v by 1.
7850 + */
7851 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7852 +{
7853 + asm volatile(LOCK_PREFIX "decq %0\n"
7854 : "=m" (v->counter)
7855 : "m" (v->counter));
7856 }
7857 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7858 {
7859 unsigned char c;
7860
7861 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7862 + asm volatile(LOCK_PREFIX "decq %0\n"
7863 +
7864 +#ifdef CONFIG_PAX_REFCOUNT
7865 + "jno 0f\n"
7866 + LOCK_PREFIX "incq %0\n"
7867 + "int $4\n0:\n"
7868 + _ASM_EXTABLE(0b, 0b)
7869 +#endif
7870 +
7871 + "sete %1\n"
7872 : "=m" (v->counter), "=qm" (c)
7873 : "m" (v->counter) : "memory");
7874 return c != 0;
7875 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7876 {
7877 unsigned char c;
7878
7879 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7880 + asm volatile(LOCK_PREFIX "incq %0\n"
7881 +
7882 +#ifdef CONFIG_PAX_REFCOUNT
7883 + "jno 0f\n"
7884 + LOCK_PREFIX "decq %0\n"
7885 + "int $4\n0:\n"
7886 + _ASM_EXTABLE(0b, 0b)
7887 +#endif
7888 +
7889 + "sete %1\n"
7890 : "=m" (v->counter), "=qm" (c)
7891 : "m" (v->counter) : "memory");
7892 return c != 0;
7893 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7894 {
7895 unsigned char c;
7896
7897 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7898 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7899 +
7900 +#ifdef CONFIG_PAX_REFCOUNT
7901 + "jno 0f\n"
7902 + LOCK_PREFIX "subq %2,%0\n"
7903 + "int $4\n0:\n"
7904 + _ASM_EXTABLE(0b, 0b)
7905 +#endif
7906 +
7907 + "sets %1\n"
7908 : "=m" (v->counter), "=qm" (c)
7909 : "er" (i), "m" (v->counter) : "memory");
7910 return c;
7911 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7912 static inline long atomic64_add_return(long i, atomic64_t *v)
7913 {
7914 long __i = i;
7915 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7916 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7917 +
7918 +#ifdef CONFIG_PAX_REFCOUNT
7919 + "jno 0f\n"
7920 + "movq %0, %1\n"
7921 + "int $4\n0:\n"
7922 + _ASM_EXTABLE(0b, 0b)
7923 +#endif
7924 +
7925 + : "+r" (i), "+m" (v->counter)
7926 + : : "memory");
7927 + return i + __i;
7928 +}
7929 +
7930 +/**
7931 + * atomic64_add_return_unchecked - add and return
7932 + * @i: integer value to add
7933 + * @v: pointer to type atomic64_unchecked_t
7934 + *
7935 + * Atomically adds @i to @v and returns @i + @v
7936 + */
7937 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7938 +{
7939 + long __i = i;
7940 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7941 : "+r" (i), "+m" (v->counter)
7942 : : "memory");
7943 return i + __i;
7944 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7945 }
7946
7947 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7948 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7949 +{
7950 + return atomic64_add_return_unchecked(1, v);
7951 +}
7952 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7953
7954 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7955 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7956 return cmpxchg(&v->counter, old, new);
7957 }
7958
7959 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7960 +{
7961 + return cmpxchg(&v->counter, old, new);
7962 +}
7963 +
7964 static inline long atomic64_xchg(atomic64_t *v, long new)
7965 {
7966 return xchg(&v->counter, new);
7967 }
7968
7969 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7970 +{
7971 + return xchg(&v->counter, new);
7972 +}
7973 +
7974 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7975 {
7976 return cmpxchg(&v->counter, old, new);
7977 }
7978
7979 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7980 +{
7981 + return cmpxchg(&v->counter, old, new);
7982 +}
7983 +
7984 static inline long atomic_xchg(atomic_t *v, int new)
7985 {
7986 return xchg(&v->counter, new);
7987 }
7988
7989 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7990 +{
7991 + return xchg(&v->counter, new);
7992 +}
7993 +
7994 /**
7995 * atomic_add_unless - add unless the number is a given value
7996 * @v: pointer of type atomic_t
7997 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7998 */
7999 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8000 {
8001 - int c, old;
8002 + int c, old, new;
8003 c = atomic_read(v);
8004 for (;;) {
8005 - if (unlikely(c == (u)))
8006 + if (unlikely(c == u))
8007 break;
8008 - old = atomic_cmpxchg((v), c, c + (a));
8009 +
8010 + asm volatile("addl %2,%0\n"
8011 +
8012 +#ifdef CONFIG_PAX_REFCOUNT
8013 + "jno 0f\n"
8014 + "subl %2,%0\n"
8015 + "int $4\n0:\n"
8016 + _ASM_EXTABLE(0b, 0b)
8017 +#endif
8018 +
8019 + : "=r" (new)
8020 + : "0" (c), "ir" (a));
8021 +
8022 + old = atomic_cmpxchg(v, c, new);
8023 if (likely(old == c))
8024 break;
8025 c = old;
8026 }
8027 - return c != (u);
8028 + return c != u;
8029 }
8030
8031 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8032 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8033 */
8034 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8035 {
8036 - long c, old;
8037 + long c, old, new;
8038 c = atomic64_read(v);
8039 for (;;) {
8040 - if (unlikely(c == (u)))
8041 + if (unlikely(c == u))
8042 break;
8043 - old = atomic64_cmpxchg((v), c, c + (a));
8044 +
8045 + asm volatile("addq %2,%0\n"
8046 +
8047 +#ifdef CONFIG_PAX_REFCOUNT
8048 + "jno 0f\n"
8049 + "subq %2,%0\n"
8050 + "int $4\n0:\n"
8051 + _ASM_EXTABLE(0b, 0b)
8052 +#endif
8053 +
8054 + : "=r" (new)
8055 + : "0" (c), "er" (a));
8056 +
8057 + old = atomic64_cmpxchg(v, c, new);
8058 if (likely(old == c))
8059 break;
8060 c = old;
8061 }
8062 - return c != (u);
8063 + return c != u;
8064 }
8065
8066 /**
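
/*
 * Aside - a minimal userspace sketch of the CONFIG_PAX_REFCOUNT idea used
 * throughout the atomic_64.h hunks above.  The patch appends
 * "jno 0f; <undo>; int $4; 0:" to each LOCK'd arithmetic instruction so a
 * signed overflow undoes the operation and traps, while counters that are
 * allowed to wrap move to the separate atomic_unchecked_t API.  The sketch
 * models only that detect/undo/report semantic with the GCC/Clang
 * __builtin_add_overflow builtin; it is illustrative and not part of the
 * patch.
 */
#include <limits.h>
#include <stdio.h>

static int refcount;                            /* stand-in for atomic_t.counter */

static void refcount_add_checked(int i)
{
        int newval;

        if (__builtin_add_overflow(refcount, i, &newval)) {
                /* overflow: leave the counter untouched and report,
                 * mirroring the "undo, then int $4" sequence above */
                fprintf(stderr, "refcount overflow detected, operation undone\n");
                return;
        }
        refcount = newval;
}

int main(void)
{
        refcount = INT_MAX - 1;
        refcount_add_checked(1);                /* ok: reaches INT_MAX */
        refcount_add_checked(1);                /* would wrap: rejected */
        printf("refcount = %d\n", refcount);    /* prints 2147483647 */
        return 0;
}
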
8067 diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8068 --- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8069 +++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8070 @@ -38,7 +38,7 @@
8071 * a mask operation on a byte.
8072 */
8073 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8074 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8075 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8076 #define CONST_MASK(nr) (1 << ((nr) & 7))
8077
8078 /**
8079 diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8080 --- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8081 +++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8082 @@ -11,10 +11,15 @@
8083 #include <asm/pgtable_types.h>
8084
8085 /* Physical address where kernel should be loaded. */
8086 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8087 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8088 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8089 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8090
8091 +#ifndef __ASSEMBLY__
8092 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8093 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8094 +#endif
8095 +
8096 /* Minimum kernel alignment, as a power of two */
8097 #ifdef CONFIG_X86_64
8098 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
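
/*
 * Aside - ____LOAD_PHYSICAL_ADDR above is the usual power-of-two round-up,
 * (start + align - 1) & ~(align - 1).  Worked example with made-up Kconfig
 * values; they are not taken from the patch.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1UL) & ~((a) - 1UL))

int main(void)
{
        unsigned long start = 0x100000UL;       /* e.g. CONFIG_PHYSICAL_START = 1 MiB */
        unsigned long align = 0x1000000UL;      /* e.g. CONFIG_PHYSICAL_ALIGN = 16 MiB */

        /* 0x100000 rounds up to the next 16 MiB boundary, 0x1000000 */
        printf("load address = %#lx\n", ALIGN_UP(start, align));
        return 0;
}
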
8099 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8100 --- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8101 +++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8102 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8103 static inline unsigned long get_page_memtype(struct page *pg)
8104 {
8105 if (!PageUncached(pg) && !PageWC(pg))
8106 - return -1;
8107 + return ~0UL;
8108 else if (!PageUncached(pg) && PageWC(pg))
8109 return _PAGE_CACHE_WC;
8110 else if (PageUncached(pg) && !PageWC(pg))
8111 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8112 SetPageWC(pg);
8113 break;
8114 default:
8115 - case -1:
8116 + case ~0UL:
8117 ClearPageUncached(pg);
8118 ClearPageWC(pg);
8119 break;
8120 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8121 --- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8122 +++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8123 @@ -5,9 +5,10 @@
8124
8125 /* L1 cache line size */
8126 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8127 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8128 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8129
8130 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8131 +#define __read_only __attribute__((__section__(".data.read_only")))
8132
8133 #ifdef CONFIG_X86_VSMP
8134 /* vSMP Internode cacheline shift */
8135 diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8136 --- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8137 +++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8138 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8139 int len, __wsum sum,
8140 int *src_err_ptr, int *dst_err_ptr);
8141
8142 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8143 + int len, __wsum sum,
8144 + int *src_err_ptr, int *dst_err_ptr);
8145 +
8146 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8147 + int len, __wsum sum,
8148 + int *src_err_ptr, int *dst_err_ptr);
8149 +
8150 /*
8151 * Note: when you get a NULL pointer exception here this means someone
8152 * passed in an incorrect kernel address to one of these functions.
8153 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8154 int *err_ptr)
8155 {
8156 might_sleep();
8157 - return csum_partial_copy_generic((__force void *)src, dst,
8158 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8159 len, sum, err_ptr, NULL);
8160 }
8161
8162 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8163 {
8164 might_sleep();
8165 if (access_ok(VERIFY_WRITE, dst, len))
8166 - return csum_partial_copy_generic(src, (__force void *)dst,
8167 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8168 len, sum, NULL, err_ptr);
8169
8170 if (len)
8171 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8172 --- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8173 +++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8174 @@ -31,6 +31,12 @@ struct desc_struct {
8175 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8176 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8177 };
8178 + struct {
8179 + u16 offset_low;
8180 + u16 seg;
8181 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8182 + unsigned offset_high: 16;
8183 + } gate;
8184 };
8185 } __attribute__((packed));
8186
8187 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8188 --- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8189 +++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8190 @@ -4,6 +4,7 @@
8191 #include <asm/desc_defs.h>
8192 #include <asm/ldt.h>
8193 #include <asm/mmu.h>
8194 +#include <asm/pgtable.h>
8195 #include <linux/smp.h>
8196
8197 static inline void fill_ldt(struct desc_struct *desc,
8198 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8199 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8200 desc->type = (info->read_exec_only ^ 1) << 1;
8201 desc->type |= info->contents << 2;
8202 + desc->type |= info->seg_not_present ^ 1;
8203 desc->s = 1;
8204 desc->dpl = 0x3;
8205 desc->p = info->seg_not_present ^ 1;
8206 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8207 }
8208
8209 extern struct desc_ptr idt_descr;
8210 -extern gate_desc idt_table[];
8211 -
8212 -struct gdt_page {
8213 - struct desc_struct gdt[GDT_ENTRIES];
8214 -} __attribute__((aligned(PAGE_SIZE)));
8215 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8216 +extern gate_desc idt_table[256];
8217
8218 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8219 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8220 {
8221 - return per_cpu(gdt_page, cpu).gdt;
8222 + return cpu_gdt_table[cpu];
8223 }
8224
8225 #ifdef CONFIG_X86_64
8226 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8227 unsigned long base, unsigned dpl, unsigned flags,
8228 unsigned short seg)
8229 {
8230 - gate->a = (seg << 16) | (base & 0xffff);
8231 - gate->b = (base & 0xffff0000) |
8232 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8233 + gate->gate.offset_low = base;
8234 + gate->gate.seg = seg;
8235 + gate->gate.reserved = 0;
8236 + gate->gate.type = type;
8237 + gate->gate.s = 0;
8238 + gate->gate.dpl = dpl;
8239 + gate->gate.p = 1;
8240 + gate->gate.offset_high = base >> 16;
8241 }
8242
8243 #endif
8244 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8245 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8246 const gate_desc *gate)
8247 {
8248 + pax_open_kernel();
8249 memcpy(&idt[entry], gate, sizeof(*gate));
8250 + pax_close_kernel();
8251 }
8252
8253 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8254 const void *desc)
8255 {
8256 + pax_open_kernel();
8257 memcpy(&ldt[entry], desc, 8);
8258 + pax_close_kernel();
8259 }
8260
8261 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8262 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8263 size = sizeof(struct desc_struct);
8264 break;
8265 }
8266 +
8267 + pax_open_kernel();
8268 memcpy(&gdt[entry], desc, size);
8269 + pax_close_kernel();
8270 }
8271
8272 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8273 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8274
8275 static inline void native_load_tr_desc(void)
8276 {
8277 + pax_open_kernel();
8278 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8279 + pax_close_kernel();
8280 }
8281
8282 static inline void native_load_gdt(const struct desc_ptr *dtr)
8283 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8284 unsigned int i;
8285 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8286
8287 + pax_open_kernel();
8288 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8289 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8290 + pax_close_kernel();
8291 }
8292
8293 #define _LDT_empty(info) \
8294 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8295 desc->limit = (limit >> 16) & 0xf;
8296 }
8297
8298 -static inline void _set_gate(int gate, unsigned type, void *addr,
8299 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8300 unsigned dpl, unsigned ist, unsigned seg)
8301 {
8302 gate_desc s;
8303 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8304 * Pentium F0 0F bugfix can have resulted in the mapped
8305 * IDT being write-protected.
8306 */
8307 -static inline void set_intr_gate(unsigned int n, void *addr)
8308 +static inline void set_intr_gate(unsigned int n, const void *addr)
8309 {
8310 BUG_ON((unsigned)n > 0xFF);
8311 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8312 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8313 /*
8314 * This routine sets up an interrupt gate at directory privilege level 3.
8315 */
8316 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8317 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8318 {
8319 BUG_ON((unsigned)n > 0xFF);
8320 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8321 }
8322
8323 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8324 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8325 {
8326 BUG_ON((unsigned)n > 0xFF);
8327 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8328 }
8329
8330 -static inline void set_trap_gate(unsigned int n, void *addr)
8331 +static inline void set_trap_gate(unsigned int n, const void *addr)
8332 {
8333 BUG_ON((unsigned)n > 0xFF);
8334 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8335 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8336 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8337 {
8338 BUG_ON((unsigned)n > 0xFF);
8339 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8340 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8341 }
8342
8343 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8344 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8345 {
8346 BUG_ON((unsigned)n > 0xFF);
8347 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8348 }
8349
8350 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8351 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8352 {
8353 BUG_ON((unsigned)n > 0xFF);
8354 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8355 }
8356
8357 +#ifdef CONFIG_X86_32
8358 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8359 +{
8360 + struct desc_struct d;
8361 +
8362 + if (likely(limit))
8363 + limit = (limit - 1UL) >> PAGE_SHIFT;
8364 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8365 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8366 +}
8367 +#endif
8368 +
8369 #endif /* _ASM_X86_DESC_H */
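
/*
 * Aside - the desc_defs.h and desc.h hunks above replace the raw a/b word
 * arithmetic in pack_gate() with named bitfields.  The sketch below packs
 * one 32-bit interrupt gate both ways and checks that the eight bytes agree.
 * It assumes GCC's LSB-first bitfield layout on little-endian x86; the
 * handler address, type, dpl and selector values are arbitrary examples.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gate32 {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
        unsigned offset_high: 16;
} __attribute__((packed));

int main(void)
{
        unsigned long base = 0xc01234abUL;      /* example handler address */
        unsigned type = 0xe, dpl = 0;           /* 32-bit interrupt gate, ring 0 */
        uint16_t seg = 0x60;                    /* example code segment selector */
        uint32_t words[2], a, b;
        struct gate32 g = {0};

        /* old encoding: compute the two descriptor words by hand */
        a = ((uint32_t)seg << 16) | (base & 0xffff);
        b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

        /* new encoding: fill named fields, as the patched pack_gate() does */
        g.offset_low  = base & 0xffff;
        g.seg         = seg;
        g.reserved    = 0;
        g.type        = type;
        g.s           = 0;
        g.dpl         = dpl;
        g.p           = 1;
        g.offset_high = base >> 16;

        memcpy(words, &g, sizeof(words));
        assert(words[0] == a && words[1] == b);
        printf("both encodings give %08x %08x\n", words[1], words[0]);
        return 0;
}
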
8370 diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8371 --- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8372 +++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8373 @@ -6,7 +6,7 @@ struct dev_archdata {
8374 void *acpi_handle;
8375 #endif
8376 #ifdef CONFIG_X86_64
8377 -struct dma_map_ops *dma_ops;
8378 + const struct dma_map_ops *dma_ops;
8379 #endif
8380 #ifdef CONFIG_DMAR
8381 void *iommu; /* hook for IOMMU specific extension */
8382 diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8383 --- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8384 +++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8385 @@ -25,9 +25,9 @@ extern int iommu_merge;
8386 extern struct device x86_dma_fallback_dev;
8387 extern int panic_on_overflow;
8388
8389 -extern struct dma_map_ops *dma_ops;
8390 +extern const struct dma_map_ops *dma_ops;
8391
8392 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8393 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8394 {
8395 #ifdef CONFIG_X86_32
8396 return dma_ops;
8397 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8398 /* Make sure we keep the same behaviour */
8399 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8400 {
8401 - struct dma_map_ops *ops = get_dma_ops(dev);
8402 + const struct dma_map_ops *ops = get_dma_ops(dev);
8403 if (ops->mapping_error)
8404 return ops->mapping_error(dev, dma_addr);
8405
8406 @@ -122,7 +122,7 @@ static inline void *
8407 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8408 gfp_t gfp)
8409 {
8410 - struct dma_map_ops *ops = get_dma_ops(dev);
8411 + const struct dma_map_ops *ops = get_dma_ops(dev);
8412 void *memory;
8413
8414 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8415 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8416 static inline void dma_free_coherent(struct device *dev, size_t size,
8417 void *vaddr, dma_addr_t bus)
8418 {
8419 - struct dma_map_ops *ops = get_dma_ops(dev);
8420 + const struct dma_map_ops *ops = get_dma_ops(dev);
8421
8422 WARN_ON(irqs_disabled()); /* for portability */
8423
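
/*
 * Aside - the device.h and dma-mapping.h hunks above (and the later iommu.h,
 * kvm_host.h, microcode.h and pci_x86.h ones) constify tables of function
 * pointers so they can be placed in a read-only section and can no longer be
 * retargeted by a stray or attacker-controlled write.  Minimal sketch with a
 * made-up ops struct; it is not the kernel's dma_map_ops.
 */
#include <stdio.h>

struct demo_ops {
        int  (*map)(unsigned long addr);
        void (*unmap)(unsigned long addr);
};

static int  demo_map(unsigned long addr)   { printf("map   %#lx\n", addr); return 0; }
static void demo_unmap(unsigned long addr) { printf("unmap %#lx\n", addr); }

/* const: the table itself goes to .rodata */
static const struct demo_ops demo_dma_ops = {
        .map   = demo_map,
        .unmap = demo_unmap,
};

static const struct demo_ops *get_ops(void)
{
        return &demo_dma_ops;
}

int main(void)
{
        const struct demo_ops *ops = get_ops();

        if (ops->map)
                ops->map(0x1000);
        ops->unmap(0x1000);
        return 0;
}
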
8424 diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8425 --- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8426 +++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8427 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8428 #define ISA_END_ADDRESS 0x100000
8429 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8430
8431 -#define BIOS_BEGIN 0x000a0000
8432 +#define BIOS_BEGIN 0x000c0000
8433 #define BIOS_END 0x00100000
8434
8435 #ifdef __KERNEL__
8436 diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8437 --- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8438 +++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8439 @@ -257,7 +257,25 @@ extern int force_personality32;
8440 the loader. We need to make sure that it is out of the way of the program
8441 that it will "exec", and that there is sufficient room for the brk. */
8442
8443 +#ifdef CONFIG_PAX_SEGMEXEC
8444 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8445 +#else
8446 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8447 +#endif
8448 +
8449 +#ifdef CONFIG_PAX_ASLR
8450 +#ifdef CONFIG_X86_32
8451 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8452 +
8453 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8454 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8455 +#else
8456 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8457 +
8458 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8459 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8460 +#endif
8461 +#endif
8462
8463 /* This yields a mask that user programs can use to figure out what
8464 instruction set this CPU supports. This could be done in user space,
8465 @@ -311,8 +329,7 @@ do { \
8466 #define ARCH_DLINFO \
8467 do { \
8468 if (vdso_enabled) \
8469 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8470 - (unsigned long)current->mm->context.vdso); \
8471 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8472 } while (0)
8473
8474 #define AT_SYSINFO 32
8475 @@ -323,7 +340,7 @@ do { \
8476
8477 #endif /* !CONFIG_X86_32 */
8478
8479 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8480 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8481
8482 #define VDSO_ENTRY \
8483 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8484 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8485 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8486 #define compat_arch_setup_additional_pages syscall32_setup_pages
8487
8488 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8489 -#define arch_randomize_brk arch_randomize_brk
8490 -
8491 #endif /* _ASM_X86_ELF_H */
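
/*
 * Aside - PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are counts of
 * random bits that the ASLR code elsewhere in the patch applies at page
 * granularity; that consumer code is not part of this hunk.  Quick check of
 * the span that 16 bits buys on i386 with 4 KiB pages:
 */
#include <stdio.h>

int main(void)
{
        unsigned int delta_bits = 16;           /* PAX_DELTA_MMAP_LEN on i386 */
        unsigned int page_shift = 12;           /* 4 KiB pages */
        unsigned long span = 1UL << (delta_bits + page_shift);

        /* 2^16 pages * 4 KiB = 256 MiB of possible mmap-base offsets */
        printf("randomisation span: %lu MiB\n", span >> 20);
        return 0;
}
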
8492 diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8493 --- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8494 +++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8495 @@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499 -extern void machine_emergency_restart(void);
8500 +extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503 diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8504 --- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8505 +++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8506 @@ -12,16 +12,18 @@
8507 #include <asm/system.h>
8508
8509 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8510 + typecheck(u32 *, uaddr); \
8511 asm volatile("1:\t" insn "\n" \
8512 "2:\t.section .fixup,\"ax\"\n" \
8513 "3:\tmov\t%3, %1\n" \
8514 "\tjmp\t2b\n" \
8515 "\t.previous\n" \
8516 _ASM_EXTABLE(1b, 3b) \
8517 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8518 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8519 : "i" (-EFAULT), "0" (oparg), "1" (0))
8520
8521 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8522 + typecheck(u32 *, uaddr); \
8523 asm volatile("1:\tmovl %2, %0\n" \
8524 "\tmovl\t%0, %3\n" \
8525 "\t" insn "\n" \
8526 @@ -34,10 +36,10 @@
8527 _ASM_EXTABLE(1b, 4b) \
8528 _ASM_EXTABLE(2b, 4b) \
8529 : "=&a" (oldval), "=&r" (ret), \
8530 - "+m" (*uaddr), "=&r" (tem) \
8531 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8532 : "r" (oparg), "i" (-EFAULT), "1" (0))
8533
8534 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8535 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536 {
8537 int op = (encoded_op >> 28) & 7;
8538 int cmp = (encoded_op >> 24) & 15;
8539 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8540
8541 switch (op) {
8542 case FUTEX_OP_SET:
8543 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8544 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8545 break;
8546 case FUTEX_OP_ADD:
8547 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8548 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8549 uaddr, oparg);
8550 break;
8551 case FUTEX_OP_OR:
8552 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8553 return ret;
8554 }
8555
8556 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8557 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8558 int newval)
8559 {
8560
8561 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8562 return -ENOSYS;
8563 #endif
8564
8565 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8566 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8567 return -EFAULT;
8568
8569 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8570 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8571 "2:\t.section .fixup, \"ax\"\n"
8572 "3:\tmov %2, %0\n"
8573 "\tjmp 2b\n"
8574 "\t.previous\n"
8575 _ASM_EXTABLE(1b, 3b)
8576 - : "=a" (oldval), "+m" (*uaddr)
8577 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8578 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8579 : "memory"
8580 );
8581 diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8582 --- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8583 +++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8584 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8585 extern void enable_IO_APIC(void);
8586
8587 /* Statistics */
8588 -extern atomic_t irq_err_count;
8589 -extern atomic_t irq_mis_count;
8590 +extern atomic_unchecked_t irq_err_count;
8591 +extern atomic_unchecked_t irq_mis_count;
8592
8593 /* EISA */
8594 extern void eisa_set_level_irq(unsigned int irq);
8595 diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8596 --- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8597 +++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8598 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8599 {
8600 int err;
8601
8602 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8603 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8604 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8605 +#endif
8606 +
8607 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8608 "2:\n"
8609 ".section .fixup,\"ax\"\n"
8610 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8611 {
8612 int err;
8613
8614 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8615 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8616 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8617 +#endif
8618 +
8619 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8620 "2:\n"
8621 ".section .fixup,\"ax\"\n"
8622 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8623 }
8624
8625 /* We need a safe address that is cheap to find and that is already
8626 - in L1 during context switch. The best choices are unfortunately
8627 - different for UP and SMP */
8628 -#ifdef CONFIG_SMP
8629 -#define safe_address (__per_cpu_offset[0])
8630 -#else
8631 -#define safe_address (kstat_cpu(0).cpustat.user)
8632 -#endif
8633 + in L1 during context switch. */
8634 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8635
8636 /*
8637 * These must be called with preempt disabled
8638 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8639 struct thread_info *me = current_thread_info();
8640 preempt_disable();
8641 if (me->status & TS_USEDFPU)
8642 - __save_init_fpu(me->task);
8643 + __save_init_fpu(current);
8644 else
8645 clts();
8646 }
8647 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8648 --- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8649 +++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8650 @@ -3,6 +3,7 @@
8651
8652 #include <linux/string.h>
8653 #include <linux/compiler.h>
8654 +#include <asm/processor.h>
8655
8656 /*
8657 * This file contains the definitions for the x86 IO instructions
8658 @@ -42,6 +43,17 @@
8659
8660 #ifdef __KERNEL__
8661
8662 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8663 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8664 +{
8665 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8666 +}
8667 +
8668 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8669 +{
8670 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8671 +}
8672 +
8673 #include <asm-generic/iomap.h>
8674
8675 #include <linux/vmalloc.h>
8676 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8677 --- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8678 +++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8679 @@ -140,6 +140,17 @@ __OUTS(l)
8680
8681 #include <linux/vmalloc.h>
8682
8683 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8684 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8685 +{
8686 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8687 +}
8688 +
8689 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8690 +{
8691 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8692 +}
8693 +
8694 #include <asm-generic/iomap.h>
8695
8696 void __memcpy_fromio(void *, unsigned long, unsigned);
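
/*
 * Aside - valid_phys_addr_range(), added identically to io_32.h and io_64.h
 * above, refuses /dev/mem-style accesses whose rounded-up end page reaches
 * 2^x86_phys_bits.  Userspace restatement of the same comparison; phys_bits
 * = 36 is only an example value, the kernel takes it from boot_cpu_data.
 */
#include <stdio.h>

static int valid_phys_addr_range(unsigned long long addr, unsigned long count,
                                 unsigned int phys_bits, unsigned int page_shift)
{
        unsigned long long end_pfn = (addr + count + (1ULL << page_shift) - 1) >> page_shift;

        return end_pfn < (1ULL << (phys_bits - page_shift));
}

int main(void)
{
        unsigned long long limit = 1ULL << 36;  /* 64 GiB with 36 physical bits */

        printf("%d\n", valid_phys_addr_range(limit - 4096, 4096, 36, 12));  /* 0: reaches the limit */
        printf("%d\n", valid_phys_addr_range(limit - 8192, 4096, 36, 12));  /* 1: stays below it */
        return 0;
}
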
8697 diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8698 --- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8699 +++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8700 @@ -3,7 +3,7 @@
8701
8702 extern void pci_iommu_shutdown(void);
8703 extern void no_iommu_init(void);
8704 -extern struct dma_map_ops nommu_dma_ops;
8705 +extern const struct dma_map_ops nommu_dma_ops;
8706 extern int force_iommu, no_iommu;
8707 extern int iommu_detected;
8708 extern int iommu_pass_through;
8709 diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8710 --- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8711 +++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8712 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8713 sti; \
8714 sysexit
8715
8716 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8717 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8718 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8719 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8720 +
8721 #else
8722 #define INTERRUPT_RETURN iret
8723 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8724 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8725 --- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8726 +++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8727 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8728 #define BREAKPOINT_INSTRUCTION 0xcc
8729 #define RELATIVEJUMP_INSTRUCTION 0xe9
8730 #define MAX_INSN_SIZE 16
8731 -#define MAX_STACK_SIZE 64
8732 -#define MIN_STACK_SIZE(ADDR) \
8733 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8734 - THREAD_SIZE - (unsigned long)(ADDR))) \
8735 - ? (MAX_STACK_SIZE) \
8736 - : (((unsigned long)current_thread_info()) + \
8737 - THREAD_SIZE - (unsigned long)(ADDR)))
8738 +#define MAX_STACK_SIZE 64UL
8739 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8740
8741 #define flush_insn_slot(p) do { } while (0)
8742
8743 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8744 --- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8745 +++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8746 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8747 const struct trace_print_flags *exit_reasons_str;
8748 };
8749
8750 -extern struct kvm_x86_ops *kvm_x86_ops;
8751 +extern const struct kvm_x86_ops *kvm_x86_ops;
8752
8753 int kvm_mmu_module_init(void);
8754 void kvm_mmu_module_exit(void);
8755 diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8756 --- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8757 +++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8758 @@ -18,26 +18,58 @@ typedef struct {
8759
8760 static inline void local_inc(local_t *l)
8761 {
8762 - asm volatile(_ASM_INC "%0"
8763 + asm volatile(_ASM_INC "%0\n"
8764 +
8765 +#ifdef CONFIG_PAX_REFCOUNT
8766 + "jno 0f\n"
8767 + _ASM_DEC "%0\n"
8768 + "int $4\n0:\n"
8769 + _ASM_EXTABLE(0b, 0b)
8770 +#endif
8771 +
8772 : "+m" (l->a.counter));
8773 }
8774
8775 static inline void local_dec(local_t *l)
8776 {
8777 - asm volatile(_ASM_DEC "%0"
8778 + asm volatile(_ASM_DEC "%0\n"
8779 +
8780 +#ifdef CONFIG_PAX_REFCOUNT
8781 + "jno 0f\n"
8782 + _ASM_INC "%0\n"
8783 + "int $4\n0:\n"
8784 + _ASM_EXTABLE(0b, 0b)
8785 +#endif
8786 +
8787 : "+m" (l->a.counter));
8788 }
8789
8790 static inline void local_add(long i, local_t *l)
8791 {
8792 - asm volatile(_ASM_ADD "%1,%0"
8793 + asm volatile(_ASM_ADD "%1,%0\n"
8794 +
8795 +#ifdef CONFIG_PAX_REFCOUNT
8796 + "jno 0f\n"
8797 + _ASM_SUB "%1,%0\n"
8798 + "int $4\n0:\n"
8799 + _ASM_EXTABLE(0b, 0b)
8800 +#endif
8801 +
8802 : "+m" (l->a.counter)
8803 : "ir" (i));
8804 }
8805
8806 static inline void local_sub(long i, local_t *l)
8807 {
8808 - asm volatile(_ASM_SUB "%1,%0"
8809 + asm volatile(_ASM_SUB "%1,%0\n"
8810 +
8811 +#ifdef CONFIG_PAX_REFCOUNT
8812 + "jno 0f\n"
8813 + _ASM_ADD "%1,%0\n"
8814 + "int $4\n0:\n"
8815 + _ASM_EXTABLE(0b, 0b)
8816 +#endif
8817 +
8818 : "+m" (l->a.counter)
8819 : "ir" (i));
8820 }
8821 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8822 {
8823 unsigned char c;
8824
8825 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8826 + asm volatile(_ASM_SUB "%2,%0\n"
8827 +
8828 +#ifdef CONFIG_PAX_REFCOUNT
8829 + "jno 0f\n"
8830 + _ASM_ADD "%2,%0\n"
8831 + "int $4\n0:\n"
8832 + _ASM_EXTABLE(0b, 0b)
8833 +#endif
8834 +
8835 + "sete %1\n"
8836 : "+m" (l->a.counter), "=qm" (c)
8837 : "ir" (i) : "memory");
8838 return c;
8839 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8840 {
8841 unsigned char c;
8842
8843 - asm volatile(_ASM_DEC "%0; sete %1"
8844 + asm volatile(_ASM_DEC "%0\n"
8845 +
8846 +#ifdef CONFIG_PAX_REFCOUNT
8847 + "jno 0f\n"
8848 + _ASM_INC "%0\n"
8849 + "int $4\n0:\n"
8850 + _ASM_EXTABLE(0b, 0b)
8851 +#endif
8852 +
8853 + "sete %1\n"
8854 : "+m" (l->a.counter), "=qm" (c)
8855 : : "memory");
8856 return c != 0;
8857 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8858 {
8859 unsigned char c;
8860
8861 - asm volatile(_ASM_INC "%0; sete %1"
8862 + asm volatile(_ASM_INC "%0\n"
8863 +
8864 +#ifdef CONFIG_PAX_REFCOUNT
8865 + "jno 0f\n"
8866 + _ASM_DEC "%0\n"
8867 + "int $4\n0:\n"
8868 + _ASM_EXTABLE(0b, 0b)
8869 +#endif
8870 +
8871 + "sete %1\n"
8872 : "+m" (l->a.counter), "=qm" (c)
8873 : : "memory");
8874 return c != 0;
8875 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8876 {
8877 unsigned char c;
8878
8879 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8880 + asm volatile(_ASM_ADD "%2,%0\n"
8881 +
8882 +#ifdef CONFIG_PAX_REFCOUNT
8883 + "jno 0f\n"
8884 + _ASM_SUB "%2,%0\n"
8885 + "int $4\n0:\n"
8886 + _ASM_EXTABLE(0b, 0b)
8887 +#endif
8888 +
8889 + "sets %1\n"
8890 : "+m" (l->a.counter), "=qm" (c)
8891 : "ir" (i) : "memory");
8892 return c;
8893 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8894 #endif
8895 /* Modern 486+ processor */
8896 __i = i;
8897 - asm volatile(_ASM_XADD "%0, %1;"
8898 + asm volatile(_ASM_XADD "%0, %1\n"
8899 +
8900 +#ifdef CONFIG_PAX_REFCOUNT
8901 + "jno 0f\n"
8902 + _ASM_MOV "%0,%1\n"
8903 + "int $4\n0:\n"
8904 + _ASM_EXTABLE(0b, 0b)
8905 +#endif
8906 +
8907 : "+r" (i), "+m" (l->a.counter)
8908 : : "memory");
8909 return i + __i;
8910 diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
8911 --- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8912 +++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8913 @@ -12,13 +12,13 @@ struct device;
8914 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8915
8916 struct microcode_ops {
8917 - enum ucode_state (*request_microcode_user) (int cpu,
8918 + enum ucode_state (* const request_microcode_user) (int cpu,
8919 const void __user *buf, size_t size);
8920
8921 - enum ucode_state (*request_microcode_fw) (int cpu,
8922 + enum ucode_state (* const request_microcode_fw) (int cpu,
8923 struct device *device);
8924
8925 - void (*microcode_fini_cpu) (int cpu);
8926 + void (* const microcode_fini_cpu) (int cpu);
8927
8928 /*
8929 * The generic 'microcode_core' part guarantees that
8930 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8931 extern struct ucode_cpu_info ucode_cpu_info[];
8932
8933 #ifdef CONFIG_MICROCODE_INTEL
8934 -extern struct microcode_ops * __init init_intel_microcode(void);
8935 +extern const struct microcode_ops * __init init_intel_microcode(void);
8936 #else
8937 -static inline struct microcode_ops * __init init_intel_microcode(void)
8938 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8939 {
8940 return NULL;
8941 }
8942 #endif /* CONFIG_MICROCODE_INTEL */
8943
8944 #ifdef CONFIG_MICROCODE_AMD
8945 -extern struct microcode_ops * __init init_amd_microcode(void);
8946 +extern const struct microcode_ops * __init init_amd_microcode(void);
8947 #else
8948 -static inline struct microcode_ops * __init init_amd_microcode(void)
8949 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8950 {
8951 return NULL;
8952 }
8953 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
8954 --- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8955 +++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8956 @@ -5,4 +5,14 @@
8957
8958 #include <asm-generic/mman.h>
8959
8960 +#ifdef __KERNEL__
8961 +#ifndef __ASSEMBLY__
8962 +#ifdef CONFIG_X86_32
8963 +#define arch_mmap_check i386_mmap_check
8964 +int i386_mmap_check(unsigned long addr, unsigned long len,
8965 + unsigned long flags);
8966 +#endif
8967 +#endif
8968 +#endif
8969 +
8970 #endif /* _ASM_X86_MMAN_H */
8971 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
8972 --- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8973 +++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-17 19:46:53.000000000 -0400
8974 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8975
8976 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8977 {
8978 +
8979 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8980 + unsigned int i;
8981 + pgd_t *pgd;
8982 +
8983 + pax_open_kernel();
8984 + pgd = get_cpu_pgd(smp_processor_id());
8985 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8986 + if (paravirt_enabled())
8987 + set_pgd(pgd+i, native_make_pgd(0));
8988 + else
8989 + pgd[i] = native_make_pgd(0);
8990 + pax_close_kernel();
8991 +#endif
8992 +
8993 #ifdef CONFIG_SMP
8994 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8995 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8996 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8997 struct task_struct *tsk)
8998 {
8999 unsigned cpu = smp_processor_id();
9000 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9001 + int tlbstate = TLBSTATE_OK;
9002 +#endif
9003
9004 if (likely(prev != next)) {
9005 #ifdef CONFIG_SMP
9006 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9007 + tlbstate = percpu_read(cpu_tlbstate.state);
9008 +#endif
9009 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9010 percpu_write(cpu_tlbstate.active_mm, next);
9011 #endif
9012 cpumask_set_cpu(cpu, mm_cpumask(next));
9013
9014 /* Re-load page tables */
9015 +#ifdef CONFIG_PAX_PER_CPU_PGD
9016 + pax_open_kernel();
9017 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9018 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9019 + pax_close_kernel();
9020 + load_cr3(get_cpu_pgd(cpu));
9021 +#else
9022 load_cr3(next->pgd);
9023 +#endif
9024
9025 /* stop flush ipis for the previous mm */
9026 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9027 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9028 */
9029 if (unlikely(prev->context.ldt != next->context.ldt))
9030 load_LDT_nolock(&next->context);
9031 - }
9032 +
9033 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9034 + if (!nx_enabled) {
9035 + smp_mb__before_clear_bit();
9036 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9037 + smp_mb__after_clear_bit();
9038 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9039 + }
9040 +#endif
9041 +
9042 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9043 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9044 + prev->context.user_cs_limit != next->context.user_cs_limit))
9045 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9046 #ifdef CONFIG_SMP
9047 + else if (unlikely(tlbstate != TLBSTATE_OK))
9048 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9049 +#endif
9050 +#endif
9051 +
9052 + }
9053 else {
9054 +
9055 +#ifdef CONFIG_PAX_PER_CPU_PGD
9056 + pax_open_kernel();
9057 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9058 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9059 + pax_close_kernel();
9060 + load_cr3(get_cpu_pgd(cpu));
9061 +#endif
9062 +
9063 +#ifdef CONFIG_SMP
9064 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9065 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9066
9067 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9068 * tlb flush IPI delivery. We must reload CR3
9069 * to make sure to use no freed page tables.
9070 */
9071 +
9072 +#ifndef CONFIG_PAX_PER_CPU_PGD
9073 load_cr3(next->pgd);
9074 +#endif
9075 +
9076 load_LDT_nolock(&next->context);
9077 +
9078 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9079 + if (!nx_enabled)
9080 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9081 +#endif
9082 +
9083 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9084 +#ifdef CONFIG_PAX_PAGEEXEC
9085 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9086 +#endif
9087 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9088 +#endif
9089 +
9090 }
9091 - }
9092 #endif
9093 + }
9094 }
9095
9096 #define activate_mm(prev, next) \
9097 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9098 --- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9099 +++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9100 @@ -9,10 +9,23 @@
9101 * we put the segment information here.
9102 */
9103 typedef struct {
9104 - void *ldt;
9105 + struct desc_struct *ldt;
9106 int size;
9107 struct mutex lock;
9108 - void *vdso;
9109 + unsigned long vdso;
9110 +
9111 +#ifdef CONFIG_X86_32
9112 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9113 + unsigned long user_cs_base;
9114 + unsigned long user_cs_limit;
9115 +
9116 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9117 + cpumask_t cpu_user_cs_mask;
9118 +#endif
9119 +
9120 +#endif
9121 +#endif
9122 +
9123 } mm_context_t;
9124
9125 #ifdef CONFIG_SMP
9126 diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9127 --- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9128 +++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9129 @@ -5,6 +5,7 @@
9130
9131 #ifdef CONFIG_X86_64
9132 /* X86_64 does not define MODULE_PROC_FAMILY */
9133 +#define MODULE_PROC_FAMILY ""
9134 #elif defined CONFIG_M386
9135 #define MODULE_PROC_FAMILY "386 "
9136 #elif defined CONFIG_M486
9137 @@ -59,13 +60,36 @@
9138 #error unknown processor family
9139 #endif
9140
9141 -#ifdef CONFIG_X86_32
9142 -# ifdef CONFIG_4KSTACKS
9143 -# define MODULE_STACKSIZE "4KSTACKS "
9144 -# else
9145 -# define MODULE_STACKSIZE ""
9146 -# endif
9147 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9148 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9149 +#define MODULE_PAX_UDEREF "UDEREF "
9150 +#else
9151 +#define MODULE_PAX_UDEREF ""
9152 +#endif
9153 +
9154 +#ifdef CONFIG_PAX_KERNEXEC
9155 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9156 +#else
9157 +#define MODULE_PAX_KERNEXEC ""
9158 +#endif
9159 +
9160 +#ifdef CONFIG_PAX_REFCOUNT
9161 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9162 +#else
9163 +#define MODULE_PAX_REFCOUNT ""
9164 #endif
9165
9166 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9167 +#define MODULE_STACKSIZE "4KSTACKS "
9168 +#else
9169 +#define MODULE_STACKSIZE ""
9170 +#endif
9171 +
9172 +#ifdef CONFIG_GRKERNSEC
9173 +#define MODULE_GRSEC "GRSECURITY "
9174 +#else
9175 +#define MODULE_GRSEC ""
9176 +#endif
9177 +
9178 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9179 +
9180 #endif /* _ASM_X86_MODULE_H */
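
/*
 * Aside - MODULE_ARCH_VERMAGIC above is assembled purely by preprocessor
 * string-literal concatenation, so a module built without the same hardening
 * options gets a different vermagic string and is refused at load time.
 * Sketch for one assumed configuration (32-bit CONFIG_M586 build with
 * 4KSTACKS, GRKERNSEC, KERNEXEC and UDEREF enabled, REFCOUNT disabled); the
 * fragment names mirror the hunk, the chosen values are examples only.
 */
#include <stdio.h>

#define MODULE_PROC_FAMILY   "586 "
#define MODULE_STACKSIZE     "4KSTACKS "
#define MODULE_GRSEC         "GRSECURITY "
#define MODULE_PAX_KERNEXEC  "KERNEXEC "
#define MODULE_PAX_UDEREF    "UDEREF "
#define MODULE_PAX_REFCOUNT  ""                 /* CONFIG_PAX_REFCOUNT=n */

#define MODULE_ARCH_VERMAGIC \
        MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
        MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
        /* prints "586 4KSTACKS GRSECURITY KERNEXEC UDEREF " */
        printf("%s\n", MODULE_ARCH_VERMAGIC);
        return 0;
}
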
9181 diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9182 --- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9183 +++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9184 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9185
9186 /* duplicated to the one in bootmem.h */
9187 extern unsigned long max_pfn;
9188 -extern unsigned long phys_base;
9189 +extern const unsigned long phys_base;
9190
9191 extern unsigned long __phys_addr(unsigned long);
9192 #define __phys_reloc_hide(x) (x)
9193 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9194 --- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9195 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9196 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9197 pv_mmu_ops.set_fixmap(idx, phys, flags);
9198 }
9199
9200 +#ifdef CONFIG_PAX_KERNEXEC
9201 +static inline unsigned long pax_open_kernel(void)
9202 +{
9203 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9204 +}
9205 +
9206 +static inline unsigned long pax_close_kernel(void)
9207 +{
9208 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9209 +}
9210 +#else
9211 +static inline unsigned long pax_open_kernel(void) { return 0; }
9212 +static inline unsigned long pax_close_kernel(void) { return 0; }
9213 +#endif
9214 +
9215 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9216
9217 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9218 @@ -945,7 +960,7 @@ extern void default_banner(void);
9219
9220 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9221 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9222 -#define PARA_INDIRECT(addr) *%cs:addr
9223 +#define PARA_INDIRECT(addr) *%ss:addr
9224 #endif
9225
9226 #define INTERRUPT_RETURN \
9227 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9228 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9229 CLBR_NONE, \
9230 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9231 +
9232 +#define GET_CR0_INTO_RDI \
9233 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9234 + mov %rax,%rdi
9235 +
9236 +#define SET_RDI_INTO_CR0 \
9237 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9238 +
9239 +#define GET_CR3_INTO_RDI \
9240 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9241 + mov %rax,%rdi
9242 +
9243 +#define SET_RDI_INTO_CR3 \
9244 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9245 +
9246 #endif /* CONFIG_X86_32 */
9247
9248 #endif /* __ASSEMBLY__ */
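
/*
 * Aside - the paravirt.h hunk above only adds pax_open_kernel() and
 * pax_close_kernel() as indirection points; their native implementations
 * live elsewhere in the patch.  As a rough userspace analogy of the pattern
 * the desc.h callers rely on (briefly grant write access to otherwise
 * read-only data, mutate it, revoke it again), here is an mprotect() sketch.
 * It is an analogy only, not how the kernel side is implemented.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);               /* data is now read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* "open": allow the write */
        strcpy(p, "updated");
        mprotect(p, pagesz, PROT_READ);               /* "close": read-only again */

        printf("%s\n", p);
        munmap(p, pagesz);
        return 0;
}
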
9249 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9250 --- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9251 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9252 @@ -78,19 +78,19 @@ struct pv_init_ops {
9253 */
9254 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9255 unsigned long addr, unsigned len);
9256 -};
9257 +} __no_const;
9258
9259
9260 struct pv_lazy_ops {
9261 /* Set deferred update mode, used for batching operations. */
9262 void (*enter)(void);
9263 void (*leave)(void);
9264 -};
9265 +} __no_const;
9266
9267 struct pv_time_ops {
9268 unsigned long long (*sched_clock)(void);
9269 unsigned long (*get_tsc_khz)(void);
9270 -};
9271 +} __no_const;
9272
9273 struct pv_cpu_ops {
9274 /* hooks for various privileged instructions */
9275 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
9276
9277 void (*start_context_switch)(struct task_struct *prev);
9278 void (*end_context_switch)(struct task_struct *next);
9279 -};
9280 +} __no_const;
9281
9282 struct pv_irq_ops {
9283 /*
9284 @@ -217,7 +217,7 @@ struct pv_apic_ops {
9285 unsigned long start_eip,
9286 unsigned long start_esp);
9287 #endif
9288 -};
9289 +} __no_const;
9290
9291 struct pv_mmu_ops {
9292 unsigned long (*read_cr2)(void);
9293 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9294 an mfn. We can tell which is which from the index. */
9295 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9296 phys_addr_t phys, pgprot_t flags);
9297 +
9298 +#ifdef CONFIG_PAX_KERNEXEC
9299 + unsigned long (*pax_open_kernel)(void);
9300 + unsigned long (*pax_close_kernel)(void);
9301 +#endif
9302 +
9303 };
9304
9305 struct raw_spinlock;
9306 @@ -326,7 +332,7 @@ struct pv_lock_ops {
9307 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9308 int (*spin_trylock)(struct raw_spinlock *lock);
9309 void (*spin_unlock)(struct raw_spinlock *lock);
9310 -};
9311 +} __no_const;
9312
9313 /* This contains all the paravirt structures: we get a convenient
9314 * number for each function using the offset which we use to indicate
9315 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9316 --- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9317 +++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9318 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9319 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9320
9321 struct pci_raw_ops {
9322 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9323 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9324 int reg, int len, u32 *val);
9325 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9326 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9327 int reg, int len, u32 val);
9328 };
9329
9330 -extern struct pci_raw_ops *raw_pci_ops;
9331 -extern struct pci_raw_ops *raw_pci_ext_ops;
9332 +extern const struct pci_raw_ops *raw_pci_ops;
9333 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9334
9335 -extern struct pci_raw_ops pci_direct_conf1;
9336 +extern const struct pci_raw_ops pci_direct_conf1;
9337 extern bool port_cf9_safe;
9338
9339 /* arch_initcall level */
9340 diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9341 --- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9342 +++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9343 @@ -78,6 +78,7 @@ do { \
9344 if (0) { \
9345 T__ tmp__; \
9346 tmp__ = (val); \
9347 + (void)tmp__; \
9348 } \
9349 switch (sizeof(var)) { \
9350 case 1: \
9351 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9352 --- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9353 +++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9354 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9355 pmd_t *pmd, pte_t *pte)
9356 {
9357 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9358 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9359 +}
9360 +
9361 +static inline void pmd_populate_user(struct mm_struct *mm,
9362 + pmd_t *pmd, pte_t *pte)
9363 +{
9364 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9365 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9366 }
9367
9368 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9369 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9370 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9371 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9372
9373 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9374 {
9375 + pax_open_kernel();
9376 *pmdp = pmd;
9377 + pax_close_kernel();
9378 }
9379
9380 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9381 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9382 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9383 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9384 @@ -26,9 +26,6 @@
9385 struct mm_struct;
9386 struct vm_area_struct;
9387
9388 -extern pgd_t swapper_pg_dir[1024];
9389 -extern pgd_t trampoline_pg_dir[1024];
9390 -
9391 static inline void pgtable_cache_init(void) { }
9392 static inline void check_pgt_cache(void) { }
9393 void paging_init(void);
9394 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9395 # include <asm/pgtable-2level.h>
9396 #endif
9397
9398 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9399 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9400 +#ifdef CONFIG_X86_PAE
9401 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9402 +#endif
9403 +
9404 #if defined(CONFIG_HIGHPTE)
9405 #define __KM_PTE \
9406 (in_nmi() ? KM_NMI_PTE : \
9407 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9408 /* Clear a kernel PTE and flush it from the TLB */
9409 #define kpte_clear_flush(ptep, vaddr) \
9410 do { \
9411 + pax_open_kernel(); \
9412 pte_clear(&init_mm, (vaddr), (ptep)); \
9413 + pax_close_kernel(); \
9414 __flush_tlb_one((vaddr)); \
9415 } while (0)
9416
9417 @@ -85,6 +90,9 @@ do { \
9418
9419 #endif /* !__ASSEMBLY__ */
9420
9421 +#define HAVE_ARCH_UNMAPPED_AREA
9422 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9423 +
9424 /*
9425 * kern_addr_valid() is (1) for FLATMEM and (0) for
9426 * SPARSEMEM and DISCONTIGMEM
9427 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9428 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9429 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9430 @@ -8,7 +8,7 @@
9431 */
9432 #ifdef CONFIG_X86_PAE
9433 # include <asm/pgtable-3level_types.h>
9434 -# define PMD_SIZE (1UL << PMD_SHIFT)
9435 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9436 # define PMD_MASK (~(PMD_SIZE - 1))
9437 #else
9438 # include <asm/pgtable-2level_types.h>
9439 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9440 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9441 #endif
9442
9443 +#ifdef CONFIG_PAX_KERNEXEC
9444 +#ifndef __ASSEMBLY__
9445 +extern unsigned char MODULES_EXEC_VADDR[];
9446 +extern unsigned char MODULES_EXEC_END[];
9447 +#endif
9448 +#include <asm/boot.h>
9449 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9450 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9451 +#else
9452 +#define ktla_ktva(addr) (addr)
9453 +#define ktva_ktla(addr) (addr)
9454 +#endif
9455 +
9456 #define MODULES_VADDR VMALLOC_START
9457 #define MODULES_END VMALLOC_END
9458 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9459 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9460 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9461 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9462 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9463
9464 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9465 {
9466 + pax_open_kernel();
9467 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9468 + pax_close_kernel();
9469 }
9470
9471 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9472 {
9473 + pax_open_kernel();
9474 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9475 + pax_close_kernel();
9476 }
9477
9478 /*
9479 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9480 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9481 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9482 @@ -16,10 +16,13 @@
9483
9484 extern pud_t level3_kernel_pgt[512];
9485 extern pud_t level3_ident_pgt[512];
9486 +extern pud_t level3_vmalloc_pgt[512];
9487 +extern pud_t level3_vmemmap_pgt[512];
9488 +extern pud_t level2_vmemmap_pgt[512];
9489 extern pmd_t level2_kernel_pgt[512];
9490 extern pmd_t level2_fixmap_pgt[512];
9491 -extern pmd_t level2_ident_pgt[512];
9492 -extern pgd_t init_level4_pgt[];
9493 +extern pmd_t level2_ident_pgt[512*2];
9494 +extern pgd_t init_level4_pgt[512];
9495
9496 #define swapper_pg_dir init_level4_pgt
9497
9498 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9499
9500 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9501 {
9502 + pax_open_kernel();
9503 *pmdp = pmd;
9504 + pax_close_kernel();
9505 }
9506
9507 static inline void native_pmd_clear(pmd_t *pmd)
9508 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9509
9510 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9511 {
9512 + pax_open_kernel();
9513 *pgdp = pgd;
9514 + pax_close_kernel();
9515 }
9516
9517 static inline void native_pgd_clear(pgd_t *pgd)
9518 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9519 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9520 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9521 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9522 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9523 #define MODULES_END _AC(0xffffffffff000000, UL)
9524 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9525 +#define MODULES_EXEC_VADDR MODULES_VADDR
9526 +#define MODULES_EXEC_END MODULES_END
9527 +
9528 +#define ktla_ktva(addr) (addr)
9529 +#define ktva_ktla(addr) (addr)
9530
9531 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9532 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9533 --- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9534 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9535 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9536
9537 #define arch_end_context_switch(prev) do {} while(0)
9538
9539 +#define pax_open_kernel() native_pax_open_kernel()
9540 +#define pax_close_kernel() native_pax_close_kernel()
9541 #endif /* CONFIG_PARAVIRT */
9542
9543 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9544 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9545 +
9546 +#ifdef CONFIG_PAX_KERNEXEC
9547 +static inline unsigned long native_pax_open_kernel(void)
9548 +{
9549 + unsigned long cr0;
9550 +
9551 + preempt_disable();
9552 + barrier();
9553 + cr0 = read_cr0() ^ X86_CR0_WP;
9554 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9555 + write_cr0(cr0);
9556 + return cr0 ^ X86_CR0_WP;
9557 +}
9558 +
9559 +static inline unsigned long native_pax_close_kernel(void)
9560 +{
9561 + unsigned long cr0;
9562 +
9563 + cr0 = read_cr0() ^ X86_CR0_WP;
9564 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9565 + write_cr0(cr0);
9566 + barrier();
9567 + preempt_enable_no_resched();
9568 + return cr0 ^ X86_CR0_WP;
9569 +}
9570 +#else
9571 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9572 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9573 +#endif
9574 +
9575 /*
9576 * The following only work if pte_present() is true.
9577 * Undefined behaviour if not..
9578 */
9579 +static inline int pte_user(pte_t pte)
9580 +{
9581 + return pte_val(pte) & _PAGE_USER;
9582 +}
9583 +
9584 static inline int pte_dirty(pte_t pte)
9585 {
9586 return pte_flags(pte) & _PAGE_DIRTY;
9587 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9588 return pte_clear_flags(pte, _PAGE_RW);
9589 }
9590
9591 +static inline pte_t pte_mkread(pte_t pte)
9592 +{
9593 + return __pte(pte_val(pte) | _PAGE_USER);
9594 +}
9595 +
9596 static inline pte_t pte_mkexec(pte_t pte)
9597 {
9598 - return pte_clear_flags(pte, _PAGE_NX);
9599 +#ifdef CONFIG_X86_PAE
9600 + if (__supported_pte_mask & _PAGE_NX)
9601 + return pte_clear_flags(pte, _PAGE_NX);
9602 + else
9603 +#endif
9604 + return pte_set_flags(pte, _PAGE_USER);
9605 +}
9606 +
9607 +static inline pte_t pte_exprotect(pte_t pte)
9608 +{
9609 +#ifdef CONFIG_X86_PAE
9610 + if (__supported_pte_mask & _PAGE_NX)
9611 + return pte_set_flags(pte, _PAGE_NX);
9612 + else
9613 +#endif
9614 + return pte_clear_flags(pte, _PAGE_USER);
9615 }
9616
9617 static inline pte_t pte_mkdirty(pte_t pte)
9618 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9619 #endif
9620
9621 #ifndef __ASSEMBLY__
9622 +
9623 +#ifdef CONFIG_PAX_PER_CPU_PGD
9624 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9625 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9626 +{
9627 + return cpu_pgd[cpu];
9628 +}
9629 +#endif
9630 +
9631 #include <linux/mm_types.h>
9632
9633 static inline int pte_none(pte_t pte)
9634 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9635
9636 static inline int pgd_bad(pgd_t pgd)
9637 {
9638 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9639 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9640 }
9641
9642 static inline int pgd_none(pgd_t pgd)
9643 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9644 * pgd_offset() returns a (pgd_t *)
9645 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9646 */
9647 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9648 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9649 +
9650 +#ifdef CONFIG_PAX_PER_CPU_PGD
9651 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9652 +#endif
9653 +
9654 /*
9655 * a shortcut which implies the use of the kernel's pgd, instead
9656 * of a process's
9657 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9658 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9659 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9660
9661 +#ifdef CONFIG_X86_32
9662 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9663 +#else
9664 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9665 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9666 +
9667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9668 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9669 +#else
9670 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9671 +#endif
9672 +
9673 +#endif
9674 +
9675 #ifndef __ASSEMBLY__
9676
9677 extern int direct_gbpages;
9678 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9679 * dst and src can be on the same page, but the range must not overlap,
9680 * and must not cross a page boundary.
9681 */
9682 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9683 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9684 {
9685 - memcpy(dst, src, count * sizeof(pgd_t));
9686 + pax_open_kernel();
9687 + while (count--)
9688 + *dst++ = *src++;
9689 + pax_close_kernel();
9690 }
9691
9692 +#ifdef CONFIG_PAX_PER_CPU_PGD
9693 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9694 +#endif
9695 +
9696 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9697 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9698 +#else
9699 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9700 +#endif
9701
9702 #include <asm-generic/pgtable.h>
9703 #endif /* __ASSEMBLY__ */
9704 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9705 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9706 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9707 @@ -16,12 +16,11 @@
9708 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9709 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9710 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9711 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9712 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9713 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9714 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9715 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9716 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9717 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9718 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9719 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9720
9721 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9722 @@ -39,7 +38,6 @@
9723 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9724 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9725 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9726 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9727 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9728 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9729 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9730 @@ -55,8 +53,10 @@
9731
9732 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9733 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9734 -#else
9735 +#elif defined(CONFIG_KMEMCHECK)
9736 #define _PAGE_NX (_AT(pteval_t, 0))
9737 +#else
9738 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9739 #endif
9740
9741 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9742 @@ -93,6 +93,9 @@
9743 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9744 _PAGE_ACCESSED)
9745
9746 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9747 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9748 +
9749 #define __PAGE_KERNEL_EXEC \
9750 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9751 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9752 @@ -103,8 +106,8 @@
9753 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9754 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9755 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9756 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9757 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9758 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9759 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9760 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9761 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9762 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9763 @@ -163,8 +166,8 @@
9764 * bits are combined, this will alow user to access the high address mapped
9765 * VDSO in the presence of CONFIG_COMPAT_VDSO
9766 */
9767 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9768 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9769 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9770 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9771 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9772 #endif
9773
9774 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9775 {
9776 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9777 }
9778 +#endif
9779
9780 +#if PAGETABLE_LEVELS == 3
9781 +#include <asm-generic/pgtable-nopud.h>
9782 +#endif
9783 +
9784 +#if PAGETABLE_LEVELS == 2
9785 +#include <asm-generic/pgtable-nopmd.h>
9786 +#endif
9787 +
9788 +#ifndef __ASSEMBLY__
9789 #if PAGETABLE_LEVELS > 3
9790 typedef struct { pudval_t pud; } pud_t;
9791
9792 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9793 return pud.pud;
9794 }
9795 #else
9796 -#include <asm-generic/pgtable-nopud.h>
9797 -
9798 static inline pudval_t native_pud_val(pud_t pud)
9799 {
9800 return native_pgd_val(pud.pgd);
9801 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9802 return pmd.pmd;
9803 }
9804 #else
9805 -#include <asm-generic/pgtable-nopmd.h>
9806 -
9807 static inline pmdval_t native_pmd_val(pmd_t pmd)
9808 {
9809 return native_pgd_val(pmd.pud.pgd);
9810 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9811
9812 extern pteval_t __supported_pte_mask;
9813 extern void set_nx(void);
9814 +
9815 +#ifdef CONFIG_X86_32
9816 +#ifdef CONFIG_X86_PAE
9817 extern int nx_enabled;
9818 +#else
9819 +#define nx_enabled (0)
9820 +#endif
9821 +#else
9822 +#define nx_enabled (1)
9823 +#endif
9824
9825 #define pgprot_writecombine pgprot_writecombine
9826 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9827 diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
9828 --- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9829 +++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9830 @@ -272,7 +272,7 @@ struct tss_struct {
9831
9832 } ____cacheline_aligned;
9833
9834 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9835 +extern struct tss_struct init_tss[NR_CPUS];
9836
9837 /*
9838 * Save the original ist values for checking stack pointers during debugging
9839 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9840 */
9841 #define TASK_SIZE PAGE_OFFSET
9842 #define TASK_SIZE_MAX TASK_SIZE
9843 +
9844 +#ifdef CONFIG_PAX_SEGMEXEC
9845 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9846 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9847 +#else
9848 #define STACK_TOP TASK_SIZE
9849 -#define STACK_TOP_MAX STACK_TOP
9850 +#endif
9851 +
9852 +#define STACK_TOP_MAX TASK_SIZE
9853
9854 #define INIT_THREAD { \
9855 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9856 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9857 .vm86_info = NULL, \
9858 .sysenter_cs = __KERNEL_CS, \
9859 .io_bitmap_ptr = NULL, \
9860 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9861 */
9862 #define INIT_TSS { \
9863 .x86_tss = { \
9864 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9865 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9866 .ss0 = __KERNEL_DS, \
9867 .ss1 = __KERNEL_CS, \
9868 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9869 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9870 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9871
9872 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9873 -#define KSTK_TOP(info) \
9874 -({ \
9875 - unsigned long *__ptr = (unsigned long *)(info); \
9876 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9877 -})
9878 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9879
9880 /*
9881 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9882 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9883 #define task_pt_regs(task) \
9884 ({ \
9885 struct pt_regs *__regs__; \
9886 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9887 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9888 __regs__ - 1; \
9889 })
9890
9891 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9892 /*
9893 * User space process size. 47bits minus one guard page.
9894 */
9895 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9896 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9897
9898 /* This decides where the kernel will search for a free chunk of vm
9899 * space during mmap's.
9900 */
9901 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9902 - 0xc0000000 : 0xFFFFe000)
9903 + 0xc0000000 : 0xFFFFf000)
9904
9905 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9906 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9907 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9908 #define STACK_TOP_MAX TASK_SIZE_MAX
9909
9910 #define INIT_THREAD { \
9911 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9912 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9913 }
9914
9915 #define INIT_TSS { \
9916 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9917 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9918 }
9919
9920 /*
9921 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9922 */
9923 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9924
9925 +#ifdef CONFIG_PAX_SEGMEXEC
9926 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9927 +#endif
9928 +
9929 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9930
9931 /* Get/set a process' ability to use the timestamp counter instruction */
9932 diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
9933 --- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9934 +++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9935 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9936 }
9937
9938 /*
9939 - * user_mode_vm(regs) determines whether a register set came from user mode.
9940 + * user_mode(regs) determines whether a register set came from user mode.
9941 * This is true if V8086 mode was enabled OR if the register set was from
9942 * protected mode with RPL-3 CS value. This tricky test checks that with
9943 * one comparison. Many places in the kernel can bypass this full check
9944 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9945 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9946 + * be used.
9947 */
9948 -static inline int user_mode(struct pt_regs *regs)
9949 +static inline int user_mode_novm(struct pt_regs *regs)
9950 {
9951 #ifdef CONFIG_X86_32
9952 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9953 #else
9954 - return !!(regs->cs & 3);
9955 + return !!(regs->cs & SEGMENT_RPL_MASK);
9956 #endif
9957 }
9958
9959 -static inline int user_mode_vm(struct pt_regs *regs)
9960 +static inline int user_mode(struct pt_regs *regs)
9961 {
9962 #ifdef CONFIG_X86_32
9963 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9964 USER_RPL;
9965 #else
9966 - return user_mode(regs);
9967 + return user_mode_novm(regs);
9968 #endif
9969 }
9970
9971 diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
9972 --- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9973 +++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
9974 @@ -6,19 +6,19 @@
9975 struct pt_regs;
9976
9977 struct machine_ops {
9978 - void (*restart)(char *cmd);
9979 - void (*halt)(void);
9980 - void (*power_off)(void);
9981 + void (* __noreturn restart)(char *cmd);
9982 + void (* __noreturn halt)(void);
9983 + void (* __noreturn power_off)(void);
9984 void (*shutdown)(void);
9985 void (*crash_shutdown)(struct pt_regs *);
9986 - void (*emergency_restart)(void);
9987 -};
9988 + void (* __noreturn emergency_restart)(void);
9989 +} __no_const;
9990
9991 extern struct machine_ops machine_ops;
9992
9993 void native_machine_crash_shutdown(struct pt_regs *regs);
9994 void native_machine_shutdown(void);
9995 -void machine_real_restart(const unsigned char *code, int length);
9996 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9997
9998 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9999 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10000 diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10001 --- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10002 +++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10003 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10004 {
10005 asm volatile("# beginning down_read\n\t"
10006 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10007 +
10008 +#ifdef CONFIG_PAX_REFCOUNT
10009 + "jno 0f\n"
10010 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10011 + "int $4\n0:\n"
10012 + _ASM_EXTABLE(0b, 0b)
10013 +#endif
10014 +
10015 /* adds 0x00000001, returns the old value */
10016 " jns 1f\n"
10017 " call call_rwsem_down_read_failed\n"
10018 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10019 "1:\n\t"
10020 " mov %1,%2\n\t"
10021 " add %3,%2\n\t"
10022 +
10023 +#ifdef CONFIG_PAX_REFCOUNT
10024 + "jno 0f\n"
10025 + "sub %3,%2\n"
10026 + "int $4\n0:\n"
10027 + _ASM_EXTABLE(0b, 0b)
10028 +#endif
10029 +
10030 " jle 2f\n\t"
10031 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10032 " jnz 1b\n\t"
10033 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10034 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10035 asm volatile("# beginning down_write\n\t"
10036 LOCK_PREFIX " xadd %1,(%2)\n\t"
10037 +
10038 +#ifdef CONFIG_PAX_REFCOUNT
10039 + "jno 0f\n"
10040 + "mov %1,(%2)\n"
10041 + "int $4\n0:\n"
10042 + _ASM_EXTABLE(0b, 0b)
10043 +#endif
10044 +
10045 /* subtract 0x0000ffff, returns the old value */
10046 " test %1,%1\n\t"
10047 /* was the count 0 before? */
10048 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10049 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10050 asm volatile("# beginning __up_read\n\t"
10051 LOCK_PREFIX " xadd %1,(%2)\n\t"
10052 +
10053 +#ifdef CONFIG_PAX_REFCOUNT
10054 + "jno 0f\n"
10055 + "mov %1,(%2)\n"
10056 + "int $4\n0:\n"
10057 + _ASM_EXTABLE(0b, 0b)
10058 +#endif
10059 +
10060 /* subtracts 1, returns the old value */
10061 " jns 1f\n\t"
10062 " call call_rwsem_wake\n"
10063 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10064 rwsem_count_t tmp;
10065 asm volatile("# beginning __up_write\n\t"
10066 LOCK_PREFIX " xadd %1,(%2)\n\t"
10067 +
10068 +#ifdef CONFIG_PAX_REFCOUNT
10069 + "jno 0f\n"
10070 + "mov %1,(%2)\n"
10071 + "int $4\n0:\n"
10072 + _ASM_EXTABLE(0b, 0b)
10073 +#endif
10074 +
10075 /* tries to transition
10076 0xffff0001 -> 0x00000000 */
10077 " jz 1f\n"
10078 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10079 {
10080 asm volatile("# beginning __downgrade_write\n\t"
10081 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10082 +
10083 +#ifdef CONFIG_PAX_REFCOUNT
10084 + "jno 0f\n"
10085 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10086 + "int $4\n0:\n"
10087 + _ASM_EXTABLE(0b, 0b)
10088 +#endif
10089 +
10090 /*
10091 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10092 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10093 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10094 static inline void rwsem_atomic_add(rwsem_count_t delta,
10095 struct rw_semaphore *sem)
10096 {
10097 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10098 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10099 +
10100 +#ifdef CONFIG_PAX_REFCOUNT
10101 + "jno 0f\n"
10102 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10103 + "int $4\n0:\n"
10104 + _ASM_EXTABLE(0b, 0b)
10105 +#endif
10106 +
10107 : "+m" (sem->count)
10108 : "er" (delta));
10109 }
10110 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10111 {
10112 rwsem_count_t tmp = delta;
10113
10114 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10115 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10116 +
10117 +#ifdef CONFIG_PAX_REFCOUNT
10118 + "jno 0f\n"
10119 + "mov %0,%1\n"
10120 + "int $4\n0:\n"
10121 + _ASM_EXTABLE(0b, 0b)
10122 +#endif
10123 +
10124 : "+r" (tmp), "+m" (sem->count)
10125 : : "memory");
10126
10127 diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10128 --- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10129 +++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10130 @@ -62,8 +62,8 @@
10131 * 26 - ESPFIX small SS
10132 * 27 - per-cpu [ offset to per-cpu data area ]
10133 * 28 - stack_canary-20 [ for stack protector ]
10134 - * 29 - unused
10135 - * 30 - unused
10136 + * 29 - PCI BIOS CS
10137 + * 30 - PCI BIOS DS
10138 * 31 - TSS for double fault handler
10139 */
10140 #define GDT_ENTRY_TLS_MIN 6
10141 @@ -77,6 +77,8 @@
10142
10143 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10144
10145 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10146 +
10147 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10148
10149 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10150 @@ -88,7 +90,7 @@
10151 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10152 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10153
10154 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10155 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10156 #ifdef CONFIG_SMP
10157 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10158 #else
10159 @@ -102,6 +104,12 @@
10160 #define __KERNEL_STACK_CANARY 0
10161 #endif
10162
10163 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10164 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10165 +
10166 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10167 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10168 +
10169 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10170
10171 /*
10172 @@ -139,7 +147,7 @@
10173 */
10174
10175 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10176 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10177 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10178
10179
10180 #else
10181 @@ -163,6 +171,8 @@
10182 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10183 #define __USER32_DS __USER_DS
10184
10185 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10186 +
10187 #define GDT_ENTRY_TSS 8 /* needs two entries */
10188 #define GDT_ENTRY_LDT 10 /* needs two entries */
10189 #define GDT_ENTRY_TLS_MIN 12
10190 @@ -183,6 +193,7 @@
10191 #endif
10192
10193 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10194 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10195 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10196 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10197 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10198 diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10199 --- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10200 +++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10201 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10202 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10203 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10204 DECLARE_PER_CPU(u16, cpu_llc_id);
10205 -DECLARE_PER_CPU(int, cpu_number);
10206 +DECLARE_PER_CPU(unsigned int, cpu_number);
10207
10208 static inline struct cpumask *cpu_sibling_mask(int cpu)
10209 {
10210 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10211 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10212
10213 /* Static state in head.S used to set up a CPU */
10214 -extern struct {
10215 - void *sp;
10216 - unsigned short ss;
10217 -} stack_start;
10218 +extern unsigned long stack_start; /* Initial stack pointer address */
10219
10220 struct smp_ops {
10221 void (*smp_prepare_boot_cpu)(void);
10222 @@ -60,7 +57,7 @@ struct smp_ops {
10223
10224 void (*send_call_func_ipi)(const struct cpumask *mask);
10225 void (*send_call_func_single_ipi)(int cpu);
10226 -};
10227 +} __no_const;
10228
10229 /* Globals due to paravirt */
10230 extern void set_cpu_sibling_map(int cpu);
10231 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10232 extern int safe_smp_processor_id(void);
10233
10234 #elif defined(CONFIG_X86_64_SMP)
10235 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10236 -
10237 -#define stack_smp_processor_id() \
10238 -({ \
10239 - struct thread_info *ti; \
10240 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10241 - ti->cpu; \
10242 -})
10243 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10244 +#define stack_smp_processor_id() raw_smp_processor_id()
10245 #define safe_smp_processor_id() smp_processor_id()
10246
10247 #endif
10248 diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10249 --- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10250 +++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10251 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10252 static inline void __raw_read_lock(raw_rwlock_t *rw)
10253 {
10254 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10255 +
10256 +#ifdef CONFIG_PAX_REFCOUNT
10257 + "jno 0f\n"
10258 + LOCK_PREFIX " addl $1,(%0)\n"
10259 + "int $4\n0:\n"
10260 + _ASM_EXTABLE(0b, 0b)
10261 +#endif
10262 +
10263 "jns 1f\n"
10264 "call __read_lock_failed\n\t"
10265 "1:\n"
10266 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10267 static inline void __raw_write_lock(raw_rwlock_t *rw)
10268 {
10269 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10270 +
10271 +#ifdef CONFIG_PAX_REFCOUNT
10272 + "jno 0f\n"
10273 + LOCK_PREFIX " addl %1,(%0)\n"
10274 + "int $4\n0:\n"
10275 + _ASM_EXTABLE(0b, 0b)
10276 +#endif
10277 +
10278 "jz 1f\n"
10279 "call __write_lock_failed\n\t"
10280 "1:\n"
10281 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10282
10283 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10284 {
10285 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10286 + asm volatile(LOCK_PREFIX "incl %0\n"
10287 +
10288 +#ifdef CONFIG_PAX_REFCOUNT
10289 + "jno 0f\n"
10290 + LOCK_PREFIX "decl %0\n"
10291 + "int $4\n0:\n"
10292 + _ASM_EXTABLE(0b, 0b)
10293 +#endif
10294 +
10295 + :"+m" (rw->lock) : : "memory");
10296 }
10297
10298 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10299 {
10300 - asm volatile(LOCK_PREFIX "addl %1, %0"
10301 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10302 +
10303 +#ifdef CONFIG_PAX_REFCOUNT
10304 + "jno 0f\n"
10305 + LOCK_PREFIX "subl %1, %0\n"
10306 + "int $4\n0:\n"
10307 + _ASM_EXTABLE(0b, 0b)
10308 +#endif
10309 +
10310 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10311 }
10312
10313 diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10314 --- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10315 +++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10316 @@ -48,7 +48,7 @@
10317 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10318 */
10319 #define GDT_STACK_CANARY_INIT \
10320 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10321 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10322
10323 /*
10324 * Initialize the stackprotector canary value.
10325 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10326
10327 static inline void load_stack_canary_segment(void)
10328 {
10329 -#ifdef CONFIG_X86_32
10330 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10331 asm volatile ("mov %0, %%gs" : : "r" (0));
10332 #endif
10333 }
10334 diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10335 --- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10336 +++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10337 @@ -132,7 +132,7 @@ do { \
10338 "thread_return:\n\t" \
10339 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10340 __switch_canary \
10341 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10342 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10343 "movq %%rax,%%rdi\n\t" \
10344 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10345 "jnz ret_from_fork\n\t" \
10346 @@ -143,7 +143,7 @@ do { \
10347 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10348 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10349 [_tif_fork] "i" (_TIF_FORK), \
10350 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10351 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10352 [current_task] "m" (per_cpu_var(current_task)) \
10353 __switch_canary_iparam \
10354 : "memory", "cc" __EXTRA_CLOBBER)
10355 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10356 {
10357 unsigned long __limit;
10358 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10359 - return __limit + 1;
10360 + return __limit;
10361 }
10362
10363 static inline void native_clts(void)
10364 @@ -340,12 +340,12 @@ void enable_hlt(void);
10365
10366 void cpu_idle_wait(void);
10367
10368 -extern unsigned long arch_align_stack(unsigned long sp);
10369 +#define arch_align_stack(x) ((x) & ~0xfUL)
10370 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10371
10372 void default_idle(void);
10373
10374 -void stop_this_cpu(void *dummy);
10375 +void stop_this_cpu(void *dummy) __noreturn;
10376
10377 /*
10378 * Force strict CPU ordering.
10379 diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10380 --- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10381 +++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10382 @@ -10,6 +10,7 @@
10383 #include <linux/compiler.h>
10384 #include <asm/page.h>
10385 #include <asm/types.h>
10386 +#include <asm/percpu.h>
10387
10388 /*
10389 * low level task data that entry.S needs immediate access to
10390 @@ -24,7 +25,6 @@ struct exec_domain;
10391 #include <asm/atomic.h>
10392
10393 struct thread_info {
10394 - struct task_struct *task; /* main task structure */
10395 struct exec_domain *exec_domain; /* execution domain */
10396 __u32 flags; /* low level flags */
10397 __u32 status; /* thread synchronous flags */
10398 @@ -34,18 +34,12 @@ struct thread_info {
10399 mm_segment_t addr_limit;
10400 struct restart_block restart_block;
10401 void __user *sysenter_return;
10402 -#ifdef CONFIG_X86_32
10403 - unsigned long previous_esp; /* ESP of the previous stack in
10404 - case of nested (IRQ) stacks
10405 - */
10406 - __u8 supervisor_stack[0];
10407 -#endif
10408 + unsigned long lowest_stack;
10409 int uaccess_err;
10410 };
10411
10412 -#define INIT_THREAD_INFO(tsk) \
10413 +#define INIT_THREAD_INFO \
10414 { \
10415 - .task = &tsk, \
10416 .exec_domain = &default_exec_domain, \
10417 .flags = 0, \
10418 .cpu = 0, \
10419 @@ -56,7 +50,7 @@ struct thread_info {
10420 }, \
10421 }
10422
10423 -#define init_thread_info (init_thread_union.thread_info)
10424 +#define init_thread_info (init_thread_union.stack)
10425 #define init_stack (init_thread_union.stack)
10426
10427 #else /* !__ASSEMBLY__ */
10428 @@ -163,6 +157,23 @@ struct thread_info {
10429 #define alloc_thread_info(tsk) \
10430 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10431
10432 +#ifdef __ASSEMBLY__
10433 +/* how to get the thread information struct from ASM */
10434 +#define GET_THREAD_INFO(reg) \
10435 + mov PER_CPU_VAR(current_tinfo), reg
10436 +
10437 +/* use this one if reg already contains %esp */
10438 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10439 +#else
10440 +/* how to get the thread information struct from C */
10441 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10442 +
10443 +static __always_inline struct thread_info *current_thread_info(void)
10444 +{
10445 + return percpu_read_stable(current_tinfo);
10446 +}
10447 +#endif
10448 +
10449 #ifdef CONFIG_X86_32
10450
10451 #define STACK_WARN (THREAD_SIZE/8)
10452 @@ -173,35 +184,13 @@ struct thread_info {
10453 */
10454 #ifndef __ASSEMBLY__
10455
10456 -
10457 /* how to get the current stack pointer from C */
10458 register unsigned long current_stack_pointer asm("esp") __used;
10459
10460 -/* how to get the thread information struct from C */
10461 -static inline struct thread_info *current_thread_info(void)
10462 -{
10463 - return (struct thread_info *)
10464 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10465 -}
10466 -
10467 -#else /* !__ASSEMBLY__ */
10468 -
10469 -/* how to get the thread information struct from ASM */
10470 -#define GET_THREAD_INFO(reg) \
10471 - movl $-THREAD_SIZE, reg; \
10472 - andl %esp, reg
10473 -
10474 -/* use this one if reg already contains %esp */
10475 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10476 - andl $-THREAD_SIZE, reg
10477 -
10478 #endif
10479
10480 #else /* X86_32 */
10481
10482 -#include <asm/percpu.h>
10483 -#define KERNEL_STACK_OFFSET (5*8)
10484 -
10485 /*
10486 * macros/functions for gaining access to the thread information structure
10487 * preempt_count needs to be 1 initially, until the scheduler is functional.
10488 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10489 #ifndef __ASSEMBLY__
10490 DECLARE_PER_CPU(unsigned long, kernel_stack);
10491
10492 -static inline struct thread_info *current_thread_info(void)
10493 -{
10494 - struct thread_info *ti;
10495 - ti = (void *)(percpu_read_stable(kernel_stack) +
10496 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10497 - return ti;
10498 -}
10499 -
10500 -#else /* !__ASSEMBLY__ */
10501 -
10502 -/* how to get the thread information struct from ASM */
10503 -#define GET_THREAD_INFO(reg) \
10504 - movq PER_CPU_VAR(kernel_stack),reg ; \
10505 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10506 -
10507 +/* how to get the current stack pointer from C */
10508 +register unsigned long current_stack_pointer asm("rsp") __used;
10509 #endif
10510
10511 #endif /* !X86_32 */
10512 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10513 extern void free_thread_info(struct thread_info *ti);
10514 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10515 #define arch_task_cache_init arch_task_cache_init
10516 +
10517 +#define __HAVE_THREAD_FUNCTIONS
10518 +#define task_thread_info(task) (&(task)->tinfo)
10519 +#define task_stack_page(task) ((task)->stack)
10520 +#define setup_thread_stack(p, org) do {} while (0)
10521 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10522 +
10523 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10524 +extern struct task_struct *alloc_task_struct(void);
10525 +extern void free_task_struct(struct task_struct *);
10526 +
10527 #endif
10528 #endif /* _ASM_X86_THREAD_INFO_H */
10529 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10530 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10531 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10532 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10533 static __always_inline unsigned long __must_check
10534 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10535 {
10536 + pax_track_stack();
10537 +
10538 + if ((long)n < 0)
10539 + return n;
10540 +
10541 if (__builtin_constant_p(n)) {
10542 unsigned long ret;
10543
10544 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10545 return ret;
10546 }
10547 }
10548 + if (!__builtin_constant_p(n))
10549 + check_object_size(from, n, true);
10550 return __copy_to_user_ll(to, from, n);
10551 }
10552
10553 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10554 __copy_to_user(void __user *to, const void *from, unsigned long n)
10555 {
10556 might_fault();
10557 +
10558 return __copy_to_user_inatomic(to, from, n);
10559 }
10560
10561 static __always_inline unsigned long
10562 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10563 {
10564 + if ((long)n < 0)
10565 + return n;
10566 +
10567 /* Avoid zeroing the tail if the copy fails..
10568 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10569 * but as the zeroing behaviour is only significant when n is not
10570 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10571 __copy_from_user(void *to, const void __user *from, unsigned long n)
10572 {
10573 might_fault();
10574 +
10575 + pax_track_stack();
10576 +
10577 + if ((long)n < 0)
10578 + return n;
10579 +
10580 if (__builtin_constant_p(n)) {
10581 unsigned long ret;
10582
10583 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10584 return ret;
10585 }
10586 }
10587 + if (!__builtin_constant_p(n))
10588 + check_object_size(to, n, false);
10589 return __copy_from_user_ll(to, from, n);
10590 }
10591
10592 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10593 const void __user *from, unsigned long n)
10594 {
10595 might_fault();
10596 +
10597 + if ((long)n < 0)
10598 + return n;
10599 +
10600 if (__builtin_constant_p(n)) {
10601 unsigned long ret;
10602
10603 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10604 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10605 unsigned long n)
10606 {
10607 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10608 + if ((long)n < 0)
10609 + return n;
10610 +
10611 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10612 +}
10613 +
10614 +/**
10615 + * copy_to_user: - Copy a block of data into user space.
10616 + * @to: Destination address, in user space.
10617 + * @from: Source address, in kernel space.
10618 + * @n: Number of bytes to copy.
10619 + *
10620 + * Context: User context only. This function may sleep.
10621 + *
10622 + * Copy data from kernel space to user space.
10623 + *
10624 + * Returns number of bytes that could not be copied.
10625 + * On success, this will be zero.
10626 + */
10627 +static __always_inline unsigned long __must_check
10628 +copy_to_user(void __user *to, const void *from, unsigned long n)
10629 +{
10630 + if (access_ok(VERIFY_WRITE, to, n))
10631 + n = __copy_to_user(to, from, n);
10632 + return n;
10633 +}
10634 +
10635 +/**
10636 + * copy_from_user: - Copy a block of data from user space.
10637 + * @to: Destination address, in kernel space.
10638 + * @from: Source address, in user space.
10639 + * @n: Number of bytes to copy.
10640 + *
10641 + * Context: User context only. This function may sleep.
10642 + *
10643 + * Copy data from user space to kernel space.
10644 + *
10645 + * Returns number of bytes that could not be copied.
10646 + * On success, this will be zero.
10647 + *
10648 + * If some data could not be copied, this function will pad the copied
10649 + * data to the requested size using zero bytes.
10650 + */
10651 +static __always_inline unsigned long __must_check
10652 +copy_from_user(void *to, const void __user *from, unsigned long n)
10653 +{
10654 + if (access_ok(VERIFY_READ, from, n))
10655 + n = __copy_from_user(to, from, n);
10656 + else if ((long)n > 0) {
10657 + if (!__builtin_constant_p(n))
10658 + check_object_size(to, n, false);
10659 + memset(to, 0, n);
10660 + }
10661 + return n;
10662 }
10663
10664 -unsigned long __must_check copy_to_user(void __user *to,
10665 - const void *from, unsigned long n);
10666 -unsigned long __must_check copy_from_user(void *to,
10667 - const void __user *from,
10668 - unsigned long n);
10669 long __must_check strncpy_from_user(char *dst, const char __user *src,
10670 long count);
10671 long __must_check __strncpy_from_user(char *dst,
10672 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10673 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10674 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10675 @@ -9,6 +9,9 @@
10676 #include <linux/prefetch.h>
10677 #include <linux/lockdep.h>
10678 #include <asm/page.h>
10679 +#include <asm/pgtable.h>
10680 +
10681 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10682
10683 /*
10684 * Copy To/From Userspace
10685 @@ -19,113 +22,203 @@ __must_check unsigned long
10686 copy_user_generic(void *to, const void *from, unsigned len);
10687
10688 __must_check unsigned long
10689 -copy_to_user(void __user *to, const void *from, unsigned len);
10690 -__must_check unsigned long
10691 -copy_from_user(void *to, const void __user *from, unsigned len);
10692 -__must_check unsigned long
10693 copy_in_user(void __user *to, const void __user *from, unsigned len);
10694
10695 static __always_inline __must_check
10696 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10697 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10698 {
10699 - int ret = 0;
10700 + unsigned ret = 0;
10701
10702 might_fault();
10703 - if (!__builtin_constant_p(size))
10704 - return copy_user_generic(dst, (__force void *)src, size);
10705 +
10706 + if ((int)size < 0)
10707 + return size;
10708 +
10709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10710 + if (!__access_ok(VERIFY_READ, src, size))
10711 + return size;
10712 +#endif
10713 +
10714 + if (!__builtin_constant_p(size)) {
10715 + check_object_size(dst, size, false);
10716 +
10717 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10718 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10719 + src += PAX_USER_SHADOW_BASE;
10720 +#endif
10721 +
10722 + return copy_user_generic(dst, (__force const void *)src, size);
10723 + }
10724 switch (size) {
10725 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10726 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10727 ret, "b", "b", "=q", 1);
10728 return ret;
10729 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10730 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10731 ret, "w", "w", "=r", 2);
10732 return ret;
10733 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10734 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10735 ret, "l", "k", "=r", 4);
10736 return ret;
10737 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10738 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10739 ret, "q", "", "=r", 8);
10740 return ret;
10741 case 10:
10742 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10743 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10744 ret, "q", "", "=r", 10);
10745 if (unlikely(ret))
10746 return ret;
10747 __get_user_asm(*(u16 *)(8 + (char *)dst),
10748 - (u16 __user *)(8 + (char __user *)src),
10749 + (const u16 __user *)(8 + (const char __user *)src),
10750 ret, "w", "w", "=r", 2);
10751 return ret;
10752 case 16:
10753 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10754 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10755 ret, "q", "", "=r", 16);
10756 if (unlikely(ret))
10757 return ret;
10758 __get_user_asm(*(u64 *)(8 + (char *)dst),
10759 - (u64 __user *)(8 + (char __user *)src),
10760 + (const u64 __user *)(8 + (const char __user *)src),
10761 ret, "q", "", "=r", 8);
10762 return ret;
10763 default:
10764 - return copy_user_generic(dst, (__force void *)src, size);
10765 +
10766 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10767 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10768 + src += PAX_USER_SHADOW_BASE;
10769 +#endif
10770 +
10771 + return copy_user_generic(dst, (__force const void *)src, size);
10772 }
10773 }
10774
10775 static __always_inline __must_check
10776 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10777 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10778 {
10779 - int ret = 0;
10780 + unsigned ret = 0;
10781
10782 might_fault();
10783 - if (!__builtin_constant_p(size))
10784 +
10785 + pax_track_stack();
10786 +
10787 + if ((int)size < 0)
10788 + return size;
10789 +
10790 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10791 + if (!__access_ok(VERIFY_WRITE, dst, size))
10792 + return size;
10793 +#endif
10794 +
10795 + if (!__builtin_constant_p(size)) {
10796 + check_object_size(src, size, true);
10797 +
10798 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10799 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10800 + dst += PAX_USER_SHADOW_BASE;
10801 +#endif
10802 +
10803 return copy_user_generic((__force void *)dst, src, size);
10804 + }
10805 switch (size) {
10806 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10807 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10808 ret, "b", "b", "iq", 1);
10809 return ret;
10810 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10811 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10812 ret, "w", "w", "ir", 2);
10813 return ret;
10814 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10815 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10816 ret, "l", "k", "ir", 4);
10817 return ret;
10818 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10819 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10820 ret, "q", "", "er", 8);
10821 return ret;
10822 case 10:
10823 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10824 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10825 ret, "q", "", "er", 10);
10826 if (unlikely(ret))
10827 return ret;
10828 asm("":::"memory");
10829 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10830 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10831 ret, "w", "w", "ir", 2);
10832 return ret;
10833 case 16:
10834 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10835 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10836 ret, "q", "", "er", 16);
10837 if (unlikely(ret))
10838 return ret;
10839 asm("":::"memory");
10840 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10841 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10842 ret, "q", "", "er", 8);
10843 return ret;
10844 default:
10845 +
10846 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10847 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10848 + dst += PAX_USER_SHADOW_BASE;
10849 +#endif
10850 +
10851 return copy_user_generic((__force void *)dst, src, size);
10852 }
10853 }
10854
10855 static __always_inline __must_check
10856 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10857 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10858 +{
10859 + if (access_ok(VERIFY_WRITE, to, len))
10860 + len = __copy_to_user(to, from, len);
10861 + return len;
10862 +}
10863 +
10864 +static __always_inline __must_check
10865 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10866 +{
10867 + if ((int)len < 0)
10868 + return len;
10869 +
10870 + if (access_ok(VERIFY_READ, from, len))
10871 + len = __copy_from_user(to, from, len);
10872 + else if ((int)len > 0) {
10873 + if (!__builtin_constant_p(len))
10874 + check_object_size(to, len, false);
10875 + memset(to, 0, len);
10876 + }
10877 + return len;
10878 +}
10879 +
10880 +static __always_inline __must_check
10881 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10882 {
10883 - int ret = 0;
10884 + unsigned ret = 0;
10885
10886 might_fault();
10887 - if (!__builtin_constant_p(size))
10888 +
10889 + pax_track_stack();
10890 +
10891 + if ((int)size < 0)
10892 + return size;
10893 +
10894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10895 + if (!__access_ok(VERIFY_READ, src, size))
10896 + return size;
10897 + if (!__access_ok(VERIFY_WRITE, dst, size))
10898 + return size;
10899 +#endif
10900 +
10901 + if (!__builtin_constant_p(size)) {
10902 +
10903 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10904 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10905 + src += PAX_USER_SHADOW_BASE;
10906 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10907 + dst += PAX_USER_SHADOW_BASE;
10908 +#endif
10909 +
10910 return copy_user_generic((__force void *)dst,
10911 - (__force void *)src, size);
10912 + (__force const void *)src, size);
10913 + }
10914 switch (size) {
10915 case 1: {
10916 u8 tmp;
10917 - __get_user_asm(tmp, (u8 __user *)src,
10918 + __get_user_asm(tmp, (const u8 __user *)src,
10919 ret, "b", "b", "=q", 1);
10920 if (likely(!ret))
10921 __put_user_asm(tmp, (u8 __user *)dst,
10922 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10923 }
10924 case 2: {
10925 u16 tmp;
10926 - __get_user_asm(tmp, (u16 __user *)src,
10927 + __get_user_asm(tmp, (const u16 __user *)src,
10928 ret, "w", "w", "=r", 2);
10929 if (likely(!ret))
10930 __put_user_asm(tmp, (u16 __user *)dst,
10931 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10932
10933 case 4: {
10934 u32 tmp;
10935 - __get_user_asm(tmp, (u32 __user *)src,
10936 + __get_user_asm(tmp, (const u32 __user *)src,
10937 ret, "l", "k", "=r", 4);
10938 if (likely(!ret))
10939 __put_user_asm(tmp, (u32 __user *)dst,
10940 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10941 }
10942 case 8: {
10943 u64 tmp;
10944 - __get_user_asm(tmp, (u64 __user *)src,
10945 + __get_user_asm(tmp, (const u64 __user *)src,
10946 ret, "q", "", "=r", 8);
10947 if (likely(!ret))
10948 __put_user_asm(tmp, (u64 __user *)dst,
10949 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10950 return ret;
10951 }
10952 default:
10953 +
10954 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10955 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10956 + src += PAX_USER_SHADOW_BASE;
10957 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10958 + dst += PAX_USER_SHADOW_BASE;
10959 +#endif
10960 +
10961 return copy_user_generic((__force void *)dst,
10962 - (__force void *)src, size);
10963 + (__force const void *)src, size);
10964 }
10965 }
10966
10967 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10968 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10969 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10970
10971 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10972 - unsigned size);
10973 +static __must_check __always_inline unsigned long
10974 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10975 +{
10976 + pax_track_stack();
10977 +
10978 + if ((int)size < 0)
10979 + return size;
10980
10981 -static __must_check __always_inline int
10982 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10983 + if (!__access_ok(VERIFY_READ, src, size))
10984 + return size;
10985 +
10986 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10987 + src += PAX_USER_SHADOW_BASE;
10988 +#endif
10989 +
10990 + return copy_user_generic(dst, (__force const void *)src, size);
10991 +}
10992 +
10993 +static __must_check __always_inline unsigned long
10994 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10995 {
10996 + if ((int)size < 0)
10997 + return size;
10998 +
10999 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11000 + if (!__access_ok(VERIFY_WRITE, dst, size))
11001 + return size;
11002 +
11003 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11004 + dst += PAX_USER_SHADOW_BASE;
11005 +#endif
11006 +
11007 return copy_user_generic((__force void *)dst, src, size);
11008 }
11009
11010 -extern long __copy_user_nocache(void *dst, const void __user *src,
11011 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11012 unsigned size, int zerorest);
11013
11014 -static inline int
11015 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11016 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11017 {
11018 might_sleep();
11019 +
11020 + if ((int)size < 0)
11021 + return size;
11022 +
11023 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11024 + if (!__access_ok(VERIFY_READ, src, size))
11025 + return size;
11026 +#endif
11027 +
11028 return __copy_user_nocache(dst, src, size, 1);
11029 }
11030
11031 -static inline int
11032 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11033 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11034 unsigned size)
11035 {
11036 + if ((int)size < 0)
11037 + return size;
11038 +
11039 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11040 + if (!__access_ok(VERIFY_READ, src, size))
11041 + return size;
11042 +#endif
11043 +
11044 return __copy_user_nocache(dst, src, size, 0);
11045 }
11046
11047 -unsigned long
11048 +extern unsigned long
11049 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11050
11051 #endif /* _ASM_X86_UACCESS_64_H */
11052 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11053 --- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11054 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11055 @@ -8,12 +8,15 @@
11056 #include <linux/thread_info.h>
11057 #include <linux/prefetch.h>
11058 #include <linux/string.h>
11059 +#include <linux/sched.h>
11060 #include <asm/asm.h>
11061 #include <asm/page.h>
11062
11063 #define VERIFY_READ 0
11064 #define VERIFY_WRITE 1
11065
11066 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11067 +
11068 /*
11069 * The fs value determines whether argument validity checking should be
11070 * performed or not. If get_fs() == USER_DS, checking is performed, with
11071 @@ -29,7 +32,12 @@
11072
11073 #define get_ds() (KERNEL_DS)
11074 #define get_fs() (current_thread_info()->addr_limit)
11075 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11076 +void __set_fs(mm_segment_t x);
11077 +void set_fs(mm_segment_t x);
11078 +#else
11079 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11080 +#endif
11081
11082 #define segment_eq(a, b) ((a).seg == (b).seg)
11083
11084 @@ -77,7 +85,33 @@
11085 * checks that the pointer is in the user space range - after calling
11086 * this function, memory access functions may still return -EFAULT.
11087 */
11088 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11089 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11090 +#define access_ok(type, addr, size) \
11091 +({ \
11092 + long __size = size; \
11093 + unsigned long __addr = (unsigned long)addr; \
11094 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11095 + unsigned long __end_ao = __addr + __size - 1; \
11096 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11097 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11098 + while(__addr_ao <= __end_ao) { \
11099 + char __c_ao; \
11100 + __addr_ao += PAGE_SIZE; \
11101 + if (__size > PAGE_SIZE) \
11102 + cond_resched(); \
11103 + if (__get_user(__c_ao, (char __user *)__addr)) \
11104 + break; \
11105 + if (type != VERIFY_WRITE) { \
11106 + __addr = __addr_ao; \
11107 + continue; \
11108 + } \
11109 + if (__put_user(__c_ao, (char __user *)__addr)) \
11110 + break; \
11111 + __addr = __addr_ao; \
11112 + } \
11113 + } \
11114 + __ret_ao; \
11115 +})
11116
11117 /*
11118 * The exception table consists of pairs of addresses: the first is the
11119 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11120 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11121 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11122
11123 -
11124 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11125 +#define __copyuser_seg "gs;"
11126 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11127 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11128 +#else
11129 +#define __copyuser_seg
11130 +#define __COPYUSER_SET_ES
11131 +#define __COPYUSER_RESTORE_ES
11132 +#endif
11133
11134 #ifdef CONFIG_X86_32
11135 #define __put_user_asm_u64(x, addr, err, errret) \
11136 - asm volatile("1: movl %%eax,0(%2)\n" \
11137 - "2: movl %%edx,4(%2)\n" \
11138 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11139 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11140 "3:\n" \
11141 ".section .fixup,\"ax\"\n" \
11142 "4: movl %3,%0\n" \
11143 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11144 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11145
11146 #define __put_user_asm_ex_u64(x, addr) \
11147 - asm volatile("1: movl %%eax,0(%1)\n" \
11148 - "2: movl %%edx,4(%1)\n" \
11149 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11150 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11151 "3:\n" \
11152 _ASM_EXTABLE(1b, 2b - 1b) \
11153 _ASM_EXTABLE(2b, 3b - 2b) \
11154 @@ -374,7 +416,7 @@ do { \
11155 } while (0)
11156
11157 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11158 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11159 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11160 "2:\n" \
11161 ".section .fixup,\"ax\"\n" \
11162 "3: mov %3,%0\n" \
11163 @@ -382,7 +424,7 @@ do { \
11164 " jmp 2b\n" \
11165 ".previous\n" \
11166 _ASM_EXTABLE(1b, 3b) \
11167 - : "=r" (err), ltype(x) \
11168 + : "=r" (err), ltype (x) \
11169 : "m" (__m(addr)), "i" (errret), "0" (err))
11170
11171 #define __get_user_size_ex(x, ptr, size) \
11172 @@ -407,7 +449,7 @@ do { \
11173 } while (0)
11174
11175 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11176 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11177 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11178 "2:\n" \
11179 _ASM_EXTABLE(1b, 2b - 1b) \
11180 : ltype(x) : "m" (__m(addr)))
11181 @@ -424,13 +466,24 @@ do { \
11182 int __gu_err; \
11183 unsigned long __gu_val; \
11184 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11185 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11186 + (x) = (__typeof__(*(ptr)))__gu_val; \
11187 __gu_err; \
11188 })
11189
11190 /* FIXME: this hack is definitely wrong -AK */
11191 struct __large_struct { unsigned long buf[100]; };
11192 -#define __m(x) (*(struct __large_struct __user *)(x))
11193 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11194 +#define ____m(x) \
11195 +({ \
11196 + unsigned long ____x = (unsigned long)(x); \
11197 + if (____x < PAX_USER_SHADOW_BASE) \
11198 + ____x += PAX_USER_SHADOW_BASE; \
11199 + (void __user *)____x; \
11200 +})
11201 +#else
11202 +#define ____m(x) (x)
11203 +#endif
11204 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11205
11206 /*
11207 * Tell gcc we read from memory instead of writing: this is because
11208 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11209 * aliasing issues.
11210 */
11211 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11212 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11213 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11214 "2:\n" \
11215 ".section .fixup,\"ax\"\n" \
11216 "3: mov %3,%0\n" \
11217 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11218 ".previous\n" \
11219 _ASM_EXTABLE(1b, 3b) \
11220 : "=r"(err) \
11221 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11222 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11223
11224 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11225 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11226 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11227 "2:\n" \
11228 _ASM_EXTABLE(1b, 2b - 1b) \
11229 : : ltype(x), "m" (__m(addr)))
11230 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11231 * On error, the variable @x is set to zero.
11232 */
11233
11234 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11235 +#define __get_user(x, ptr) get_user((x), (ptr))
11236 +#else
11237 #define __get_user(x, ptr) \
11238 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11239 +#endif
11240
11241 /**
11242 * __put_user: - Write a simple value into user space, with less checking.
11243 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11244 * Returns zero on success, or -EFAULT on error.
11245 */
11246
11247 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11248 +#define __put_user(x, ptr) put_user((x), (ptr))
11249 +#else
11250 #define __put_user(x, ptr) \
11251 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11252 +#endif
11253
11254 #define __get_user_unaligned __get_user
11255 #define __put_user_unaligned __put_user
11256 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11257 #define get_user_ex(x, ptr) do { \
11258 unsigned long __gue_val; \
11259 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11260 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11261 + (x) = (__typeof__(*(ptr)))__gue_val; \
11262 } while (0)
11263
11264 #ifdef CONFIG_X86_WP_WORKS_OK
11265 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11266
11267 #define ARCH_HAS_NOCACHE_UACCESS 1
11268
11269 +#define ARCH_HAS_SORT_EXTABLE
11270 #ifdef CONFIG_X86_32
11271 # include "uaccess_32.h"
11272 #else
11273 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11274 --- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11275 +++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11276 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11277 int sysctl_enabled;
11278 struct timezone sys_tz;
11279 struct { /* extract of a clocksource struct */
11280 + char name[8];
11281 cycle_t (*vread)(void);
11282 cycle_t cycle_last;
11283 cycle_t mask;
11284 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11285 --- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11286 +++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11287 @@ -191,6 +191,7 @@ struct vrom_header {
11288 u8 reserved[96]; /* Reserved for headers */
11289 char vmi_init[8]; /* VMI_Init jump point */
11290 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11291 + char rom_data[8048]; /* rest of the option ROM */
11292 } __attribute__((packed));
11293
11294 struct pnp_header {
11295 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11296 --- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11297 +++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11298 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11299 int (*wallclock_updated)(void);
11300 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11301 void (*cancel_alarm)(u32 flags);
11302 -} vmi_timer_ops;
11303 +} __no_const vmi_timer_ops;
11304
11305 /* Prototypes */
11306 extern void __init vmi_time_init(void);
11307 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11308 --- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11309 +++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11310 @@ -15,9 +15,10 @@ enum vsyscall_num {
11311
11312 #ifdef __KERNEL__
11313 #include <linux/seqlock.h>
11314 +#include <linux/getcpu.h>
11315 +#include <linux/time.h>
11316
11317 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11318 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11319
11320 /* Definitions for CONFIG_GENERIC_TIME definitions */
11321 #define __section_vsyscall_gtod_data __attribute__ \
11322 @@ -31,7 +32,6 @@ enum vsyscall_num {
11323 #define VGETCPU_LSL 2
11324
11325 extern int __vgetcpu_mode;
11326 -extern volatile unsigned long __jiffies;
11327
11328 /* kernel space (writeable) */
11329 extern int vgetcpu_mode;
11330 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11331
11332 extern void map_vsyscall(void);
11333
11334 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11335 +extern time_t vtime(time_t *t);
11336 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11337 #endif /* __KERNEL__ */
11338
11339 #endif /* _ASM_X86_VSYSCALL_H */
11340 diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11341 --- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11342 +++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11343 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11344 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11345 void (*find_smp_config)(unsigned int reserve);
11346 void (*get_smp_config)(unsigned int early);
11347 -};
11348 +} __no_const;
11349
11350 /**
11351 * struct x86_init_resources - platform specific resource related ops
11352 @@ -42,7 +42,7 @@ struct x86_init_resources {
11353 void (*probe_roms)(void);
11354 void (*reserve_resources)(void);
11355 char *(*memory_setup)(void);
11356 -};
11357 +} __no_const;
11358
11359 /**
11360 * struct x86_init_irqs - platform specific interrupt setup
11361 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11362 void (*pre_vector_init)(void);
11363 void (*intr_init)(void);
11364 void (*trap_init)(void);
11365 -};
11366 +} __no_const;
11367
11368 /**
11369 * struct x86_init_oem - oem platform specific customizing functions
11370 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11371 struct x86_init_oem {
11372 void (*arch_setup)(void);
11373 void (*banner)(void);
11374 -};
11375 +} __no_const;
11376
11377 /**
11378 * struct x86_init_paging - platform specific paging functions
11379 @@ -75,7 +75,7 @@ struct x86_init_oem {
11380 struct x86_init_paging {
11381 void (*pagetable_setup_start)(pgd_t *base);
11382 void (*pagetable_setup_done)(pgd_t *base);
11383 -};
11384 +} __no_const;
11385
11386 /**
11387 * struct x86_init_timers - platform specific timer setup
11388 @@ -88,7 +88,7 @@ struct x86_init_timers {
11389 void (*setup_percpu_clockev)(void);
11390 void (*tsc_pre_init)(void);
11391 void (*timer_init)(void);
11392 -};
11393 +} __no_const;
11394
11395 /**
11396 * struct x86_init_ops - functions for platform specific setup
11397 @@ -101,7 +101,7 @@ struct x86_init_ops {
11398 struct x86_init_oem oem;
11399 struct x86_init_paging paging;
11400 struct x86_init_timers timers;
11401 -};
11402 +} __no_const;
11403
11404 /**
11405 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11406 @@ -109,7 +109,7 @@ struct x86_init_ops {
11407 */
11408 struct x86_cpuinit_ops {
11409 void (*setup_percpu_clockev)(void);
11410 -};
11411 +} __no_const;
11412
11413 /**
11414 * struct x86_platform_ops - platform specific runtime functions
11415 @@ -121,7 +121,7 @@ struct x86_platform_ops {
11416 unsigned long (*calibrate_tsc)(void);
11417 unsigned long (*get_wallclock)(void);
11418 int (*set_wallclock)(unsigned long nowtime);
11419 -};
11420 +} __no_const;
11421
11422 extern struct x86_init_ops x86_init;
11423 extern struct x86_cpuinit_ops x86_cpuinit;
11424 diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11425 --- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11426 +++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11427 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11428 static inline int xsave_user(struct xsave_struct __user *buf)
11429 {
11430 int err;
11431 +
11432 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11433 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11434 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11435 +#endif
11436 +
11437 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11438 "2:\n"
11439 ".section .fixup,\"ax\"\n"
11440 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11441 u32 lmask = mask;
11442 u32 hmask = mask >> 32;
11443
11444 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11445 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11446 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11447 +#endif
11448 +
11449 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11450 "2:\n"
11451 ".section .fixup,\"ax\"\n"
11452 diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11453 --- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11454 +++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11455 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11456
11457 config X86_32_LAZY_GS
11458 def_bool y
11459 - depends on X86_32 && !CC_STACKPROTECTOR
11460 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11461
11462 config KTIME_SCALAR
11463 def_bool X86_32
11464 @@ -1008,7 +1008,7 @@ choice
11465
11466 config NOHIGHMEM
11467 bool "off"
11468 - depends on !X86_NUMAQ
11469 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11470 ---help---
11471 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11472 However, the address space of 32-bit x86 processors is only 4
11473 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11474
11475 config HIGHMEM4G
11476 bool "4GB"
11477 - depends on !X86_NUMAQ
11478 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11479 ---help---
11480 Select this if you have a 32-bit processor and between 1 and 4
11481 gigabytes of physical RAM.
11482 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11483 hex
11484 default 0xB0000000 if VMSPLIT_3G_OPT
11485 default 0x80000000 if VMSPLIT_2G
11486 - default 0x78000000 if VMSPLIT_2G_OPT
11487 + default 0x70000000 if VMSPLIT_2G_OPT
11488 default 0x40000000 if VMSPLIT_1G
11489 default 0xC0000000
11490 depends on X86_32
11491 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11492
11493 config EFI
11494 bool "EFI runtime service support"
11495 - depends on ACPI
11496 + depends on ACPI && !PAX_KERNEXEC
11497 ---help---
11498 This enables the kernel to use EFI runtime services that are
11499 available (such as the EFI variable services).
11500 @@ -1460,6 +1460,7 @@ config SECCOMP
11501
11502 config CC_STACKPROTECTOR
11503 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11504 + depends on X86_64 || !PAX_MEMORY_UDEREF
11505 ---help---
11506 This option turns on the -fstack-protector GCC feature. This
11507 feature puts, at the beginning of functions, a canary value on
11508 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11509 config PHYSICAL_START
11510 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11511 default "0x1000000"
11512 + range 0x400000 0x40000000
11513 ---help---
11514 This gives the physical address where the kernel is loaded.
11515
11516 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11517 hex
11518 prompt "Alignment value to which kernel should be aligned" if X86_32
11519 default "0x1000000"
11520 + range 0x400000 0x1000000 if PAX_KERNEXEC
11521 range 0x2000 0x1000000
11522 ---help---
11523 This value puts the alignment restrictions on physical address
11524 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11525 Say N if you want to disable CPU hotplug.
11526
11527 config COMPAT_VDSO
11528 - def_bool y
11529 + def_bool n
11530 prompt "Compat VDSO support"
11531 depends on X86_32 || IA32_EMULATION
11532 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11533 ---help---
11534 Map the 32-bit VDSO to the predictable old-style address too.
11535 
11536 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11537 --- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11538 +++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11539 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11540
11541 config X86_F00F_BUG
11542 def_bool y
11543 - depends on M586MMX || M586TSC || M586 || M486 || M386
11544 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11545
11546 config X86_WP_WORKS_OK
11547 def_bool y
11548 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11549
11550 config X86_ALIGNMENT_16
11551 def_bool y
11552 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11553 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11554
11555 config X86_INTEL_USERCOPY
11556 def_bool y
11557 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11558 # generates cmov.
11559 config X86_CMOV
11560 def_bool y
11561 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11562 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11563
11564 config X86_MINIMUM_CPU_FAMILY
11565 int
11566 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11567 --- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11568 +++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11569 @@ -99,7 +99,7 @@ config X86_PTDUMP
11570 config DEBUG_RODATA
11571 bool "Write protect kernel read-only data structures"
11572 default y
11573 - depends on DEBUG_KERNEL
11574 + depends on DEBUG_KERNEL && BROKEN
11575 ---help---
11576 Mark the kernel read-only data as write-protected in the pagetables,
11577 in order to catch accidental (and incorrect) writes to such const
11578 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11579 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11580 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11581 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11582 $(call cc-option, -fno-stack-protector) \
11583 $(call cc-option, -mpreferred-stack-boundary=2)
11584 KBUILD_CFLAGS += $(call cc-option, -m32)
11585 +ifdef CONSTIFY_PLUGIN
11586 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11587 +endif
11588 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11589 GCOV_PROFILE := n
11590
11591 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11592 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11593 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11594 @@ -91,6 +91,9 @@ _start:
11595 /* Do any other stuff... */
11596
11597 #ifndef CONFIG_64BIT
11598 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11599 + call verify_cpu
11600 +
11601 /* This could also be done in C code... */
11602 movl pmode_cr3, %eax
11603 movl %eax, %cr3
11604 @@ -104,7 +107,7 @@ _start:
11605 movl %eax, %ecx
11606 orl %edx, %ecx
11607 jz 1f
11608 - movl $0xc0000080, %ecx
11609 + mov $MSR_EFER, %ecx
11610 wrmsr
11611 1:
11612
11613 @@ -114,6 +117,7 @@ _start:
11614 movl pmode_cr0, %eax
11615 movl %eax, %cr0
11616 jmp pmode_return
11617 +# include "../../verify_cpu.S"
11618 #else
11619 pushw $0
11620 pushw trampoline_segment
11621 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11622 --- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11623 +++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11624 @@ -11,11 +11,12 @@
11625 #include <linux/cpumask.h>
11626 #include <asm/segment.h>
11627 #include <asm/desc.h>
11628 +#include <asm/e820.h>
11629
11630 #include "realmode/wakeup.h"
11631 #include "sleep.h"
11632
11633 -unsigned long acpi_wakeup_address;
11634 +unsigned long acpi_wakeup_address = 0x2000;
11635 unsigned long acpi_realmode_flags;
11636
11637 /* address in low memory of the wakeup routine. */
11638 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11639 #else /* CONFIG_64BIT */
11640 header->trampoline_segment = setup_trampoline() >> 4;
11641 #ifdef CONFIG_SMP
11642 - stack_start.sp = temp_stack + sizeof(temp_stack);
11643 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11644 +
11645 + pax_open_kernel();
11646 early_gdt_descr.address =
11647 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11648 + pax_close_kernel();
11649 +
11650 initial_gs = per_cpu_offset(smp_processor_id());
11651 #endif
11652 initial_code = (unsigned long)wakeup_long64;
11653 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11654 return;
11655 }
11656
11657 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11658 -
11659 - if (!acpi_realmode) {
11660 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11661 - return;
11662 - }
11663 -
11664 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11665 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11666 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11667 }
11668
11669
11670 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11671 --- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11672 +++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11673 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11674 # and restore the stack ... but you need gdt for this to work
11675 movl saved_context_esp, %esp
11676
11677 - movl %cs:saved_magic, %eax
11678 - cmpl $0x12345678, %eax
11679 + cmpl $0x12345678, saved_magic
11680 jne bogus_magic
11681
11682 # jump to place where we left off
11683 - movl saved_eip, %eax
11684 - jmp *%eax
11685 + jmp *(saved_eip)
11686
11687 bogus_magic:
11688 jmp bogus_magic
11689 diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11690 --- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11691 +++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11692 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11693
11694 BUG_ON(p->len > MAX_PATCH_LEN);
11695 /* prep the buffer with the original instructions */
11696 - memcpy(insnbuf, p->instr, p->len);
11697 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11698 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11699 (unsigned long)p->instr, p->len);
11700
11701 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11702 if (smp_alt_once)
11703 free_init_pages("SMP alternatives",
11704 (unsigned long)__smp_locks,
11705 - (unsigned long)__smp_locks_end);
11706 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11707
11708 restart_nmi();
11709 }
11710 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11711 * instructions. And on the local CPU you need to be protected again NMI or MCE
11712 * handlers seeing an inconsistent instruction while you patch.
11713 */
11714 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11715 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11716 size_t len)
11717 {
11718 unsigned long flags;
11719 local_irq_save(flags);
11720 - memcpy(addr, opcode, len);
11721 +
11722 + pax_open_kernel();
11723 + memcpy(ktla_ktva(addr), opcode, len);
11724 sync_core();
11725 + pax_close_kernel();
11726 +
11727 local_irq_restore(flags);
11728 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11729 that causes hangs on some VIA CPUs. */
11730 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11731 */
11732 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11733 {
11734 - unsigned long flags;
11735 - char *vaddr;
11736 + unsigned char *vaddr = ktla_ktva(addr);
11737 struct page *pages[2];
11738 - int i;
11739 + size_t i;
11740
11741 if (!core_kernel_text((unsigned long)addr)) {
11742 - pages[0] = vmalloc_to_page(addr);
11743 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11744 + pages[0] = vmalloc_to_page(vaddr);
11745 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11746 } else {
11747 - pages[0] = virt_to_page(addr);
11748 + pages[0] = virt_to_page(vaddr);
11749 WARN_ON(!PageReserved(pages[0]));
11750 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11751 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11752 }
11753 BUG_ON(!pages[0]);
11754 - local_irq_save(flags);
11755 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11756 - if (pages[1])
11757 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11758 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11759 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11760 - clear_fixmap(FIX_TEXT_POKE0);
11761 - if (pages[1])
11762 - clear_fixmap(FIX_TEXT_POKE1);
11763 - local_flush_tlb();
11764 - sync_core();
11765 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11766 - that causes hangs on some VIA CPUs. */
11767 + text_poke_early(addr, opcode, len);
11768 for (i = 0; i < len; i++)
11769 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11770 - local_irq_restore(flags);
11771 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11772 return addr;
11773 }
11774 diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11775 --- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11776 +++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11777 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11778 }
11779 }
11780
11781 -static struct dma_map_ops amd_iommu_dma_ops = {
11782 +static const struct dma_map_ops amd_iommu_dma_ops = {
11783 .alloc_coherent = alloc_coherent,
11784 .free_coherent = free_coherent,
11785 .map_page = map_page,
11786 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11787 --- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11788 +++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11789 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11790 /*
11791 * Debug level, exported for io_apic.c
11792 */
11793 -unsigned int apic_verbosity;
11794 +int apic_verbosity;
11795
11796 int pic_mode;
11797
11798 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11799 apic_write(APIC_ESR, 0);
11800 v1 = apic_read(APIC_ESR);
11801 ack_APIC_irq();
11802 - atomic_inc(&irq_err_count);
11803 + atomic_inc_unchecked(&irq_err_count);
11804
11805 /*
11806 * Here is what the APIC error bits mean:
11807 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11808 u16 *bios_cpu_apicid;
11809 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11810
11811 + pax_track_stack();
11812 +
11813 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11814 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11815
11816 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
11817 --- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11818 +++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11819 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11820 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11821 GFP_ATOMIC);
11822 if (!ioapic_entries)
11823 - return 0;
11824 + return NULL;
11825
11826 for (apic = 0; apic < nr_ioapics; apic++) {
11827 ioapic_entries[apic] =
11828 @@ -733,7 +733,7 @@ nomem:
11829 kfree(ioapic_entries[apic]);
11830 kfree(ioapic_entries);
11831
11832 - return 0;
11833 + return NULL;
11834 }
11835
11836 /*
11837 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11838 }
11839 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11840
11841 -void lock_vector_lock(void)
11842 +void lock_vector_lock(void) __acquires(vector_lock)
11843 {
11844 /* Used to the online set of cpus does not change
11845 * during assign_irq_vector.
11846 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11847 spin_lock(&vector_lock);
11848 }
11849
11850 -void unlock_vector_lock(void)
11851 +void unlock_vector_lock(void) __releases(vector_lock)
11852 {
11853 spin_unlock(&vector_lock);
11854 }
11855 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11856 ack_APIC_irq();
11857 }
11858
11859 -atomic_t irq_mis_count;
11860 +atomic_unchecked_t irq_mis_count;
11861
11862 static void ack_apic_level(unsigned int irq)
11863 {
11864 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11865
11866 /* Tail end of version 0x11 I/O APIC bug workaround */
11867 if (!(v & (1 << (i & 0x1f)))) {
11868 - atomic_inc(&irq_mis_count);
11869 + atomic_inc_unchecked(&irq_mis_count);
11870 spin_lock(&ioapic_lock);
11871 __mask_and_edge_IO_APIC_irq(cfg);
11872 __unmask_and_level_IO_APIC_irq(cfg);
11873 diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
11874 --- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11875 +++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11876 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11877 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11878 * even though they are called in protected mode.
11879 */
11880 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11881 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11882 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11883
11884 static const char driver_version[] = "1.16ac"; /* no spaces */
11885 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11886 BUG_ON(cpu != 0);
11887 gdt = get_cpu_gdt_table(cpu);
11888 save_desc_40 = gdt[0x40 / 8];
11889 +
11890 + pax_open_kernel();
11891 gdt[0x40 / 8] = bad_bios_desc;
11892 + pax_close_kernel();
11893
11894 apm_irq_save(flags);
11895 APM_DO_SAVE_SEGS;
11896 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11897 &call->esi);
11898 APM_DO_RESTORE_SEGS;
11899 apm_irq_restore(flags);
11900 +
11901 + pax_open_kernel();
11902 gdt[0x40 / 8] = save_desc_40;
11903 + pax_close_kernel();
11904 +
11905 put_cpu();
11906
11907 return call->eax & 0xff;
11908 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11909 BUG_ON(cpu != 0);
11910 gdt = get_cpu_gdt_table(cpu);
11911 save_desc_40 = gdt[0x40 / 8];
11912 +
11913 + pax_open_kernel();
11914 gdt[0x40 / 8] = bad_bios_desc;
11915 + pax_close_kernel();
11916
11917 apm_irq_save(flags);
11918 APM_DO_SAVE_SEGS;
11919 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11920 &call->eax);
11921 APM_DO_RESTORE_SEGS;
11922 apm_irq_restore(flags);
11923 +
11924 + pax_open_kernel();
11925 gdt[0x40 / 8] = save_desc_40;
11926 + pax_close_kernel();
11927 +
11928 put_cpu();
11929 return error;
11930 }
11931 @@ -975,7 +989,7 @@ recalc:
11932
11933 static void apm_power_off(void)
11934 {
11935 - unsigned char po_bios_call[] = {
11936 + const unsigned char po_bios_call[] = {
11937 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11938 0x8e, 0xd0, /* movw ax,ss */
11939 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11940 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11941 * code to that CPU.
11942 */
11943 gdt = get_cpu_gdt_table(0);
11944 +
11945 + pax_open_kernel();
11946 set_desc_base(&gdt[APM_CS >> 3],
11947 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11948 set_desc_base(&gdt[APM_CS_16 >> 3],
11949 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11950 set_desc_base(&gdt[APM_DS >> 3],
11951 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11952 + pax_close_kernel();
11953
11954 proc_create("apm", 0, NULL, &apm_file_ops);
11955
11956 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
11957 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11958 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11959 @@ -51,7 +51,6 @@ void foo(void)
11960 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11961 BLANK();
11962
11963 - OFFSET(TI_task, thread_info, task);
11964 OFFSET(TI_exec_domain, thread_info, exec_domain);
11965 OFFSET(TI_flags, thread_info, flags);
11966 OFFSET(TI_status, thread_info, status);
11967 @@ -60,6 +59,8 @@ void foo(void)
11968 OFFSET(TI_restart_block, thread_info, restart_block);
11969 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11970 OFFSET(TI_cpu, thread_info, cpu);
11971 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11972 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11973 BLANK();
11974
11975 OFFSET(GDS_size, desc_ptr, size);
11976 @@ -99,6 +100,7 @@ void foo(void)
11977
11978 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11979 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11980 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11981 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11982 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11983 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11984 @@ -115,6 +117,11 @@ void foo(void)
11985 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11986 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11987 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11988 +
11989 +#ifdef CONFIG_PAX_KERNEXEC
11990 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11991 +#endif
11992 +
11993 #endif
11994
11995 #ifdef CONFIG_XEN
11996 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
11997 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11998 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11999 @@ -44,6 +44,8 @@ int main(void)
12000 ENTRY(addr_limit);
12001 ENTRY(preempt_count);
12002 ENTRY(status);
12003 + ENTRY(lowest_stack);
12004 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12005 #ifdef CONFIG_IA32_EMULATION
12006 ENTRY(sysenter_return);
12007 #endif
12008 @@ -63,6 +65,18 @@ int main(void)
12009 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12010 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12011 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12012 +
12013 +#ifdef CONFIG_PAX_KERNEXEC
12014 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12015 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12016 +#endif
12017 +
12018 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12019 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12020 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12021 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
12022 +#endif
12023 +
12024 #endif
12025
12026
12027 @@ -115,6 +129,7 @@ int main(void)
12028 ENTRY(cr8);
12029 BLANK();
12030 #undef ENTRY
12031 + DEFINE(TSS_size, sizeof(struct tss_struct));
12032 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12033 BLANK();
12034 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12035 @@ -130,6 +145,7 @@ int main(void)
12036
12037 BLANK();
12038 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12039 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12040 #ifdef CONFIG_XEN
12041 BLANK();
12042 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12043 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12044 --- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12045 +++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12046 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12047 unsigned int size)
12048 {
12049 /* AMD errata T13 (order #21922) */
12050 - if ((c->x86 == 6)) {
12051 + if (c->x86 == 6) {
12052 /* Duron Rev A0 */
12053 if (c->x86_model == 3 && c->x86_mask == 0)
12054 size = 64;
12055 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12056 --- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12057 +++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12058 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12059
12060 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12061
12062 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12063 -#ifdef CONFIG_X86_64
12064 - /*
12065 - * We need valid kernel segments for data and code in long mode too
12066 - * IRET will check the segment types kkeil 2000/10/28
12067 - * Also sysret mandates a special GDT layout
12068 - *
12069 - * TLS descriptors are currently at a different place compared to i386.
12070 - * Hopefully nobody expects them at a fixed place (Wine?)
12071 - */
12072 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12073 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12074 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12075 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12076 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12077 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12078 -#else
12079 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12080 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12081 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12082 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12083 - /*
12084 - * Segments used for calling PnP BIOS have byte granularity.
12085 - * They code segments and data segments have fixed 64k limits,
12086 - * the transfer segment sizes are set at run time.
12087 - */
12088 - /* 32-bit code */
12089 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12090 - /* 16-bit code */
12091 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12092 - /* 16-bit data */
12093 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12094 - /* 16-bit data */
12095 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12096 - /* 16-bit data */
12097 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12098 - /*
12099 - * The APM segments have byte granularity and their bases
12100 - * are set at run time. All have 64k limits.
12101 - */
12102 - /* 32-bit code */
12103 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12104 - /* 16-bit code */
12105 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12106 - /* data */
12107 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12108 -
12109 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12110 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12111 - GDT_STACK_CANARY_INIT
12112 -#endif
12113 -} };
12114 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12115 -
12116 static int __init x86_xsave_setup(char *s)
12117 {
12118 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12119 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12120 {
12121 struct desc_ptr gdt_descr;
12122
12123 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12124 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12125 gdt_descr.size = GDT_SIZE - 1;
12126 load_gdt(&gdt_descr);
12127 /* Reload the per-cpu base */
12128 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12129 /* Filter out anything that depends on CPUID levels we don't have */
12130 filter_cpuid_features(c, true);
12131
12132 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12133 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12134 +#endif
12135 +
12136 /* If the model name is still unset, do table lookup. */
12137 if (!c->x86_model_id[0]) {
12138 const char *p;
12139 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12140 }
12141 __setup("clearcpuid=", setup_disablecpuid);
12142
12143 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12144 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12145 +
12146 #ifdef CONFIG_X86_64
12147 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12148
12149 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12150 EXPORT_PER_CPU_SYMBOL(current_task);
12151
12152 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12153 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12154 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12155 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12156
12157 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12158 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12159 {
12160 memset(regs, 0, sizeof(struct pt_regs));
12161 regs->fs = __KERNEL_PERCPU;
12162 - regs->gs = __KERNEL_STACK_CANARY;
12163 + savesegment(gs, regs->gs);
12164
12165 return regs;
12166 }
12167 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12168 int i;
12169
12170 cpu = stack_smp_processor_id();
12171 - t = &per_cpu(init_tss, cpu);
12172 + t = init_tss + cpu;
12173 orig_ist = &per_cpu(orig_ist, cpu);
12174
12175 #ifdef CONFIG_NUMA
12176 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12177 switch_to_new_gdt(cpu);
12178 loadsegment(fs, 0);
12179
12180 - load_idt((const struct desc_ptr *)&idt_descr);
12181 + load_idt(&idt_descr);
12182
12183 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12184 syscall_init();
12185 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12186 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12187 barrier();
12188
12189 - check_efer();
12190 if (cpu != 0)
12191 enable_x2apic();
12192
12193 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12194 {
12195 int cpu = smp_processor_id();
12196 struct task_struct *curr = current;
12197 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12198 + struct tss_struct *t = init_tss + cpu;
12199 struct thread_struct *thread = &curr->thread;
12200
12201 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12202 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12203 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12204 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12205 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12206 * Update the IDT descriptor and reload the IDT so that
12207 * it uses the read-only mapped virtual address.
12208 */
12209 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12210 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12211 load_idt(&idt_descr);
12212 }
12213 #endif
12214 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12215 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12216 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12217 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12218 return ret;
12219 }
12220
12221 -static struct sysfs_ops sysfs_ops = {
12222 +static const struct sysfs_ops sysfs_ops = {
12223 .show = show,
12224 .store = store,
12225 };
12226 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12227 --- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12228 +++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12229 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12230 CFLAGS_REMOVE_common.o = -pg
12231 endif
12232
12233 -# Make sure load_percpu_segment has no stackprotector
12234 -nostackp := $(call cc-option, -fno-stack-protector)
12235 -CFLAGS_common.o := $(nostackp)
12236 -
12237 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12238 obj-y += proc.o capflags.o powerflags.o common.o
12239 obj-y += vmware.o hypervisor.o sched.o
12240 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12241 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12242 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12243 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12244 return ret;
12245 }
12246
12247 -static struct sysfs_ops threshold_ops = {
12248 +static const struct sysfs_ops threshold_ops = {
12249 .show = show,
12250 .store = store,
12251 };
12252 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12253 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12254 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12255 @@ -43,6 +43,7 @@
12256 #include <asm/ipi.h>
12257 #include <asm/mce.h>
12258 #include <asm/msr.h>
12259 +#include <asm/local.h>
12260
12261 #include "mce-internal.h"
12262
12263 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12264 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12265 m->cs, m->ip);
12266
12267 - if (m->cs == __KERNEL_CS)
12268 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12269 print_symbol("{%s}", m->ip);
12270 pr_cont("\n");
12271 }
12272 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12273
12274 #define PANIC_TIMEOUT 5 /* 5 seconds */
12275
12276 -static atomic_t mce_paniced;
12277 +static atomic_unchecked_t mce_paniced;
12278
12279 static int fake_panic;
12280 -static atomic_t mce_fake_paniced;
12281 +static atomic_unchecked_t mce_fake_paniced;
12282
12283 /* Panic in progress. Enable interrupts and wait for final IPI */
12284 static void wait_for_panic(void)
12285 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12286 /*
12287 * Make sure only one CPU runs in machine check panic
12288 */
12289 - if (atomic_inc_return(&mce_paniced) > 1)
12290 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12291 wait_for_panic();
12292 barrier();
12293
12294 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12295 console_verbose();
12296 } else {
12297 /* Don't log too much for fake panic */
12298 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12299 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12300 return;
12301 }
12302 print_mce_head();
12303 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12304 * might have been modified by someone else.
12305 */
12306 rmb();
12307 - if (atomic_read(&mce_paniced))
12308 + if (atomic_read_unchecked(&mce_paniced))
12309 wait_for_panic();
12310 if (!monarch_timeout)
12311 goto out;
12312 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12313 */
12314
12315 static DEFINE_SPINLOCK(mce_state_lock);
12316 -static int open_count; /* #times opened */
12317 +static local_t open_count; /* #times opened */
12318 static int open_exclu; /* already open exclusive? */
12319
12320 static int mce_open(struct inode *inode, struct file *file)
12321 {
12322 spin_lock(&mce_state_lock);
12323
12324 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12325 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12326 spin_unlock(&mce_state_lock);
12327
12328 return -EBUSY;
12329 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12330
12331 if (file->f_flags & O_EXCL)
12332 open_exclu = 1;
12333 - open_count++;
12334 + local_inc(&open_count);
12335
12336 spin_unlock(&mce_state_lock);
12337
12338 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12339 {
12340 spin_lock(&mce_state_lock);
12341
12342 - open_count--;
12343 + local_dec(&open_count);
12344 open_exclu = 0;
12345
12346 spin_unlock(&mce_state_lock);
12347 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12348 static void mce_reset(void)
12349 {
12350 cpu_missing = 0;
12351 - atomic_set(&mce_fake_paniced, 0);
12352 + atomic_set_unchecked(&mce_fake_paniced, 0);
12353 atomic_set(&mce_executing, 0);
12354 atomic_set(&mce_callin, 0);
12355 atomic_set(&global_nwo, 0);
12356 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12357 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12358 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12359 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12360 static int inject_init(void)
12361 {
12362 printk(KERN_INFO "Machine check injector initialized\n");
12363 - mce_chrdev_ops.write = mce_write;
12364 + pax_open_kernel();
12365 + *(void **)&mce_chrdev_ops.write = mce_write;
12366 + pax_close_kernel();
12367 register_die_notifier(&mce_raise_nb);
12368 return 0;
12369 }
12370 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12371 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12372 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12373 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12374 return 0;
12375 }
12376
12377 -static struct mtrr_ops amd_mtrr_ops = {
12378 +static const struct mtrr_ops amd_mtrr_ops = {
12379 .vendor = X86_VENDOR_AMD,
12380 .set = amd_set_mtrr,
12381 .get = amd_get_mtrr,
12382 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12383 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12384 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12385 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12386 return 0;
12387 }
12388
12389 -static struct mtrr_ops centaur_mtrr_ops = {
12390 +static const struct mtrr_ops centaur_mtrr_ops = {
12391 .vendor = X86_VENDOR_CENTAUR,
12392 .set = centaur_set_mcr,
12393 .get = centaur_get_mcr,
12394 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12395 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12396 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12397 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12398 post_set();
12399 }
12400
12401 -static struct mtrr_ops cyrix_mtrr_ops = {
12402 +static const struct mtrr_ops cyrix_mtrr_ops = {
12403 .vendor = X86_VENDOR_CYRIX,
12404 .set_all = cyrix_set_all,
12405 .set = cyrix_set_arr,
12406 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12407 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12408 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12409 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12410 /*
12411 * Generic structure...
12412 */
12413 -struct mtrr_ops generic_mtrr_ops = {
12414 +const struct mtrr_ops generic_mtrr_ops = {
12415 .use_intel_if = 1,
12416 .set_all = generic_set_all,
12417 .get = generic_get_mtrr,
12418 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12419 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12420 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12421 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12422 u64 size_or_mask, size_and_mask;
12423 static bool mtrr_aps_delayed_init;
12424
12425 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12426 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12427
12428 -struct mtrr_ops *mtrr_if;
12429 +const struct mtrr_ops *mtrr_if;
12430
12431 static void set_mtrr(unsigned int reg, unsigned long base,
12432 unsigned long size, mtrr_type type);
12433
12434 -void set_mtrr_ops(struct mtrr_ops *ops)
12435 +void set_mtrr_ops(const struct mtrr_ops *ops)
12436 {
12437 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12438 mtrr_ops[ops->vendor] = ops;
12439 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12440 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12441 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12442 @@ -12,19 +12,19 @@
12443 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12444
12445 struct mtrr_ops {
12446 - u32 vendor;
12447 - u32 use_intel_if;
12448 - void (*set)(unsigned int reg, unsigned long base,
12449 + const u32 vendor;
12450 + const u32 use_intel_if;
12451 + void (* const set)(unsigned int reg, unsigned long base,
12452 unsigned long size, mtrr_type type);
12453 - void (*set_all)(void);
12454 + void (* const set_all)(void);
12455
12456 - void (*get)(unsigned int reg, unsigned long *base,
12457 + void (* const get)(unsigned int reg, unsigned long *base,
12458 unsigned long *size, mtrr_type *type);
12459 - int (*get_free_region)(unsigned long base, unsigned long size,
12460 + int (* const get_free_region)(unsigned long base, unsigned long size,
12461 int replace_reg);
12462 - int (*validate_add_page)(unsigned long base, unsigned long size,
12463 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12464 unsigned int type);
12465 - int (*have_wrcomb)(void);
12466 + int (* const have_wrcomb)(void);
12467 };
12468
12469 extern int generic_get_free_region(unsigned long base, unsigned long size,
12470 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12471 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12472 unsigned int type);
12473
12474 -extern struct mtrr_ops generic_mtrr_ops;
12475 +extern const struct mtrr_ops generic_mtrr_ops;
12476
12477 extern int positive_have_wrcomb(void);
12478
12479 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12480 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12481 void get_mtrr_state(void);
12482
12483 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12484 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12485
12486 extern u64 size_or_mask, size_and_mask;
12487 -extern struct mtrr_ops *mtrr_if;
12488 +extern const struct mtrr_ops *mtrr_if;
12489
12490 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12491 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12492 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12493 --- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12494 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12495 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12496
12497 /* Interface defining a CPU specific perfctr watchdog */
12498 struct wd_ops {
12499 - int (*reserve)(void);
12500 - void (*unreserve)(void);
12501 - int (*setup)(unsigned nmi_hz);
12502 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12503 - void (*stop)(void);
12504 + int (* const reserve)(void);
12505 + void (* const unreserve)(void);
12506 + int (* const setup)(unsigned nmi_hz);
12507 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12508 + void (* const stop)(void);
12509 unsigned perfctr;
12510 unsigned evntsel;
12511 u64 checkbit;
12512 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12513 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12514 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12515
12516 +/* cannot be const */
12517 static struct wd_ops intel_arch_wd_ops;
12518
12519 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12520 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12521 return 1;
12522 }
12523
12524 +/* cannot be const */
12525 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12526 .reserve = single_msr_reserve,
12527 .unreserve = single_msr_unreserve,
12528 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12529 --- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12530 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12531 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12532 * count to the generic event atomically:
12533 */
12534 again:
12535 - prev_raw_count = atomic64_read(&hwc->prev_count);
12536 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12537 rdmsrl(hwc->event_base + idx, new_raw_count);
12538
12539 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12540 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12541 new_raw_count) != prev_raw_count)
12542 goto again;
12543
12544 @@ -741,7 +741,7 @@ again:
12545 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12546 delta >>= shift;
12547
12548 - atomic64_add(delta, &event->count);
12549 + atomic64_add_unchecked(delta, &event->count);
12550 atomic64_sub(delta, &hwc->period_left);
12551
12552 return new_raw_count;
12553 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12554 * The hw event starts counting from this event offset,
12555 * mark it to be able to extra future deltas:
12556 */
12557 - atomic64_set(&hwc->prev_count, (u64)-left);
12558 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12559
12560 err = checking_wrmsrl(hwc->event_base + idx,
12561 (u64)(-left) & x86_pmu.event_mask);
12562 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12563 break;
12564
12565 callchain_store(entry, frame.return_address);
12566 - fp = frame.next_frame;
12567 + fp = (__force const void __user *)frame.next_frame;
12568 }
12569 }
12570
12571 diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12572 --- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12573 +++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12574 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12575 regs = args->regs;
12576
12577 #ifdef CONFIG_X86_32
12578 - if (!user_mode_vm(regs)) {
12579 + if (!user_mode(regs)) {
12580 crash_fixup_ss_esp(&fixed_regs, regs);
12581 regs = &fixed_regs;
12582 }
12583 diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12584 --- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12585 +++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12586 @@ -11,7 +11,7 @@
12587
12588 #define DOUBLEFAULT_STACKSIZE (1024)
12589 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12590 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12591 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12592
12593 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12594
12595 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12596 unsigned long gdt, tss;
12597
12598 store_gdt(&gdt_desc);
12599 - gdt = gdt_desc.address;
12600 + gdt = (unsigned long)gdt_desc.address;
12601
12602 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12603
12604 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12605 /* 0x2 bit is always set */
12606 .flags = X86_EFLAGS_SF | 0x2,
12607 .sp = STACK_START,
12608 - .es = __USER_DS,
12609 + .es = __KERNEL_DS,
12610 .cs = __KERNEL_CS,
12611 .ss = __KERNEL_DS,
12612 - .ds = __USER_DS,
12613 + .ds = __KERNEL_DS,
12614 .fs = __KERNEL_PERCPU,
12615
12616 .__cr3 = __pa_nodebug(swapper_pg_dir),
12617 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12618 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12619 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12620 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12621 #endif
12622
12623 for (;;) {
12624 - struct thread_info *context;
12625 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12626 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12627
12628 - context = (struct thread_info *)
12629 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12630 - bp = print_context_stack(context, stack, bp, ops,
12631 - data, NULL, &graph);
12632 -
12633 - stack = (unsigned long *)context->previous_esp;
12634 - if (!stack)
12635 + if (stack_start == task_stack_page(task))
12636 break;
12637 + stack = *(unsigned long **)stack_start;
12638 if (ops->stack(data, "IRQ") < 0)
12639 break;
12640 touch_nmi_watchdog();
12641 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12642 * When in-kernel, we also print out the stack and code at the
12643 * time of the fault..
12644 */
12645 - if (!user_mode_vm(regs)) {
12646 + if (!user_mode(regs)) {
12647 unsigned int code_prologue = code_bytes * 43 / 64;
12648 unsigned int code_len = code_bytes;
12649 unsigned char c;
12650 u8 *ip;
12651 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12652
12653 printk(KERN_EMERG "Stack:\n");
12654 show_stack_log_lvl(NULL, regs, &regs->sp,
12655 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12656
12657 printk(KERN_EMERG "Code: ");
12658
12659 - ip = (u8 *)regs->ip - code_prologue;
12660 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12661 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12662 /* try starting at IP */
12663 - ip = (u8 *)regs->ip;
12664 + ip = (u8 *)regs->ip + cs_base;
12665 code_len = code_len - code_prologue + 1;
12666 }
12667 for (i = 0; i < code_len; i++, ip++) {
12668 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12669 printk(" Bad EIP value.");
12670 break;
12671 }
12672 - if (ip == (u8 *)regs->ip)
12673 + if (ip == (u8 *)regs->ip + cs_base)
12674 printk("<%02x> ", c);
12675 else
12676 printk("%02x ", c);
12677 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12678 {
12679 unsigned short ud2;
12680
12681 + ip = ktla_ktva(ip);
12682 if (ip < PAGE_OFFSET)
12683 return 0;
12684 if (probe_kernel_address((unsigned short *)ip, ud2))
12685 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12686 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12687 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12688 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12689 unsigned long *irq_stack_end =
12690 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12691 unsigned used = 0;
12692 - struct thread_info *tinfo;
12693 int graph = 0;
12694 + void *stack_start;
12695
12696 if (!task)
12697 task = current;
12698 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12699 * current stack address. If the stacks consist of nested
12700 * exceptions
12701 */
12702 - tinfo = task_thread_info(task);
12703 for (;;) {
12704 char *id;
12705 unsigned long *estack_end;
12706 +
12707 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12708 &used, &id);
12709
12710 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12711 if (ops->stack(data, id) < 0)
12712 break;
12713
12714 - bp = print_context_stack(tinfo, stack, bp, ops,
12715 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12716 data, estack_end, &graph);
12717 ops->stack(data, "<EOE>");
12718 /*
12719 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12720 if (stack >= irq_stack && stack < irq_stack_end) {
12721 if (ops->stack(data, "IRQ") < 0)
12722 break;
12723 - bp = print_context_stack(tinfo, stack, bp,
12724 + bp = print_context_stack(task, irq_stack, stack, bp,
12725 ops, data, irq_stack_end, &graph);
12726 /*
12727 * We link to the next stack (which would be
12728 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12729 /*
12730 * This handles the process stack:
12731 */
12732 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12733 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12734 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12735 put_cpu();
12736 }
12737 EXPORT_SYMBOL(dump_trace);
12738 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12739 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12740 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12741 @@ -2,6 +2,9 @@
12742 * Copyright (C) 1991, 1992 Linus Torvalds
12743 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12744 */
12745 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12746 +#define __INCLUDED_BY_HIDESYM 1
12747 +#endif
12748 #include <linux/kallsyms.h>
12749 #include <linux/kprobes.h>
12750 #include <linux/uaccess.h>
12751 @@ -28,7 +31,7 @@ static int die_counter;
12752
12753 void printk_address(unsigned long address, int reliable)
12754 {
12755 - printk(" [<%p>] %s%pS\n", (void *) address,
12756 + printk(" [<%p>] %s%pA\n", (void *) address,
12757 reliable ? "" : "? ", (void *) address);
12758 }
12759
12760 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12761 static void
12762 print_ftrace_graph_addr(unsigned long addr, void *data,
12763 const struct stacktrace_ops *ops,
12764 - struct thread_info *tinfo, int *graph)
12765 + struct task_struct *task, int *graph)
12766 {
12767 - struct task_struct *task = tinfo->task;
12768 unsigned long ret_addr;
12769 int index = task->curr_ret_stack;
12770
12771 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12772 static inline void
12773 print_ftrace_graph_addr(unsigned long addr, void *data,
12774 const struct stacktrace_ops *ops,
12775 - struct thread_info *tinfo, int *graph)
12776 + struct task_struct *task, int *graph)
12777 { }
12778 #endif
12779
12780 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12781 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12782 */
12783
12784 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12785 - void *p, unsigned int size, void *end)
12786 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12787 {
12788 - void *t = tinfo;
12789 if (end) {
12790 if (p < end && p >= (end-THREAD_SIZE))
12791 return 1;
12792 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12793 }
12794
12795 unsigned long
12796 -print_context_stack(struct thread_info *tinfo,
12797 +print_context_stack(struct task_struct *task, void *stack_start,
12798 unsigned long *stack, unsigned long bp,
12799 const struct stacktrace_ops *ops, void *data,
12800 unsigned long *end, int *graph)
12801 {
12802 struct stack_frame *frame = (struct stack_frame *)bp;
12803
12804 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12805 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12806 unsigned long addr;
12807
12808 addr = *stack;
12809 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12810 } else {
12811 ops->address(data, addr, 0);
12812 }
12813 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12814 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12815 }
12816 stack++;
12817 }
12818 @@ -180,7 +180,7 @@ void dump_stack(void)
12819 #endif
12820
12821 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12822 - current->pid, current->comm, print_tainted(),
12823 + task_pid_nr(current), current->comm, print_tainted(),
12824 init_utsname()->release,
12825 (int)strcspn(init_utsname()->version, " "),
12826 init_utsname()->version);
12827 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12828 return flags;
12829 }
12830
12831 +extern void gr_handle_kernel_exploit(void);
12832 +
12833 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12834 {
12835 if (regs && kexec_should_crash(current))
12836 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12837 panic("Fatal exception in interrupt");
12838 if (panic_on_oops)
12839 panic("Fatal exception");
12840 - do_exit(signr);
12841 +
12842 + gr_handle_kernel_exploit();
12843 +
12844 + do_group_exit(signr);
12845 }
12846
12847 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12848 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12849 unsigned long flags = oops_begin();
12850 int sig = SIGSEGV;
12851
12852 - if (!user_mode_vm(regs))
12853 + if (!user_mode(regs))
12854 report_bug(regs->ip, regs);
12855
12856 if (__die(str, regs, err))
12857 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
12858 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12859 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12860 @@ -15,7 +15,7 @@
12861 #endif
12862
12863 extern unsigned long
12864 -print_context_stack(struct thread_info *tinfo,
12865 +print_context_stack(struct task_struct *task, void *stack_start,
12866 unsigned long *stack, unsigned long bp,
12867 const struct stacktrace_ops *ops, void *data,
12868 unsigned long *end, int *graph);
12869 diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
12870 --- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12871 +++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12872 @@ -733,7 +733,7 @@ struct early_res {
12873 };
12874 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12875 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12876 - {}
12877 + { 0, 0, {0}, 0 }
12878 };
12879
12880 static int __init find_overlapped_early(u64 start, u64 end)
12881 diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
12882 --- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12883 +++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12884 @@ -7,6 +7,7 @@
12885 #include <linux/pci_regs.h>
12886 #include <linux/pci_ids.h>
12887 #include <linux/errno.h>
12888 +#include <linux/sched.h>
12889 #include <asm/io.h>
12890 #include <asm/processor.h>
12891 #include <asm/fcntl.h>
12892 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12893 int n;
12894 va_list ap;
12895
12896 + pax_track_stack();
12897 +
12898 va_start(ap, fmt);
12899 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12900 early_console->write(early_console, buf, n);
12901 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
12902 --- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12903 +++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12904 @@ -38,70 +38,38 @@
12905 */
12906
12907 static unsigned long efi_rt_eflags;
12908 -static pgd_t efi_bak_pg_dir_pointer[2];
12909 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12910
12911 -void efi_call_phys_prelog(void)
12912 +void __init efi_call_phys_prelog(void)
12913 {
12914 - unsigned long cr4;
12915 - unsigned long temp;
12916 struct desc_ptr gdt_descr;
12917
12918 local_irq_save(efi_rt_eflags);
12919
12920 - /*
12921 - * If I don't have PAE, I should just duplicate two entries in page
12922 - * directory. If I have PAE, I just need to duplicate one entry in
12923 - * page directory.
12924 - */
12925 - cr4 = read_cr4_safe();
12926
12927 - if (cr4 & X86_CR4_PAE) {
12928 - efi_bak_pg_dir_pointer[0].pgd =
12929 - swapper_pg_dir[pgd_index(0)].pgd;
12930 - swapper_pg_dir[0].pgd =
12931 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12932 - } else {
12933 - efi_bak_pg_dir_pointer[0].pgd =
12934 - swapper_pg_dir[pgd_index(0)].pgd;
12935 - efi_bak_pg_dir_pointer[1].pgd =
12936 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12937 - swapper_pg_dir[pgd_index(0)].pgd =
12938 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12939 - temp = PAGE_OFFSET + 0x400000;
12940 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12941 - swapper_pg_dir[pgd_index(temp)].pgd;
12942 - }
12943 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12944 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12945 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12946
12947 /*
12948 * After the lock is released, the original page table is restored.
12949 */
12950 __flush_tlb_all();
12951
12952 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12953 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12954 gdt_descr.size = GDT_SIZE - 1;
12955 load_gdt(&gdt_descr);
12956 }
12957
12958 -void efi_call_phys_epilog(void)
12959 +void __init efi_call_phys_epilog(void)
12960 {
12961 - unsigned long cr4;
12962 struct desc_ptr gdt_descr;
12963
12964 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12965 + gdt_descr.address = get_cpu_gdt_table(0);
12966 gdt_descr.size = GDT_SIZE - 1;
12967 load_gdt(&gdt_descr);
12968
12969 - cr4 = read_cr4_safe();
12970 -
12971 - if (cr4 & X86_CR4_PAE) {
12972 - swapper_pg_dir[pgd_index(0)].pgd =
12973 - efi_bak_pg_dir_pointer[0].pgd;
12974 - } else {
12975 - swapper_pg_dir[pgd_index(0)].pgd =
12976 - efi_bak_pg_dir_pointer[0].pgd;
12977 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12978 - efi_bak_pg_dir_pointer[1].pgd;
12979 - }
12980 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12981
12982 /*
12983 * After the lock is released, the original page table is restored.
12984 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
12985 --- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12986 +++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12987 @@ -6,6 +6,7 @@
12988 */
12989
12990 #include <linux/linkage.h>
12991 +#include <linux/init.h>
12992 #include <asm/page_types.h>
12993
12994 /*
12995 @@ -20,7 +21,7 @@
12996 * service functions will comply with gcc calling convention, too.
12997 */
12998
12999 -.text
13000 +__INIT
13001 ENTRY(efi_call_phys)
13002 /*
13003 * 0. The function can only be called in Linux kernel. So CS has been
13004 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13005 * The mapping of lower virtual memory has been created in prelog and
13006 * epilog.
13007 */
13008 - movl $1f, %edx
13009 - subl $__PAGE_OFFSET, %edx
13010 - jmp *%edx
13011 + jmp 1f-__PAGE_OFFSET
13012 1:
13013
13014 /*
13015 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13016 * parameter 2, ..., param n. To make things easy, we save the return
13017 * address of efi_call_phys in a global variable.
13018 */
13019 - popl %edx
13020 - movl %edx, saved_return_addr
13021 - /* get the function pointer into ECX*/
13022 - popl %ecx
13023 - movl %ecx, efi_rt_function_ptr
13024 - movl $2f, %edx
13025 - subl $__PAGE_OFFSET, %edx
13026 - pushl %edx
13027 + popl (saved_return_addr)
13028 + popl (efi_rt_function_ptr)
13029
13030 /*
13031 * 3. Clear PG bit in %CR0.
13032 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13033 /*
13034 * 5. Call the physical function.
13035 */
13036 - jmp *%ecx
13037 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
13038
13039 -2:
13040 /*
13041 * 6. After EFI runtime service returns, control will return to
13042 * following instruction. We'd better readjust stack pointer first.
13043 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13044 movl %cr0, %edx
13045 orl $0x80000000, %edx
13046 movl %edx, %cr0
13047 - jmp 1f
13048 -1:
13049 +
13050 /*
13051 * 8. Now restore the virtual mode from flat mode by
13052 * adding EIP with PAGE_OFFSET.
13053 */
13054 - movl $1f, %edx
13055 - jmp *%edx
13056 + jmp 1f+__PAGE_OFFSET
13057 1:
13058
13059 /*
13060 * 9. Balance the stack. And because EAX contain the return value,
13061 * we'd better not clobber it.
13062 */
13063 - leal efi_rt_function_ptr, %edx
13064 - movl (%edx), %ecx
13065 - pushl %ecx
13066 + pushl (efi_rt_function_ptr)
13067
13068 /*
13069 - * 10. Push the saved return address onto the stack and return.
13070 + * 10. Return to the saved return address.
13071 */
13072 - leal saved_return_addr, %edx
13073 - movl (%edx), %ecx
13074 - pushl %ecx
13075 - ret
13076 + jmpl *(saved_return_addr)
13077 ENDPROC(efi_call_phys)
13078 .previous
13079
13080 -.data
13081 +__INITDATA
13082 saved_return_addr:
13083 .long 0
13084 efi_rt_function_ptr:
13085 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13086 --- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13087 +++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13088 @@ -185,13 +185,146 @@
13089 /*CFI_REL_OFFSET gs, PT_GS*/
13090 .endm
13091 .macro SET_KERNEL_GS reg
13092 +
13093 +#ifdef CONFIG_CC_STACKPROTECTOR
13094 movl $(__KERNEL_STACK_CANARY), \reg
13095 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13096 + movl $(__USER_DS), \reg
13097 +#else
13098 + xorl \reg, \reg
13099 +#endif
13100 +
13101 movl \reg, %gs
13102 .endm
13103
13104 #endif /* CONFIG_X86_32_LAZY_GS */
13105
13106 -.macro SAVE_ALL
13107 +.macro pax_enter_kernel
13108 +#ifdef CONFIG_PAX_KERNEXEC
13109 + call pax_enter_kernel
13110 +#endif
13111 +.endm
13112 +
13113 +.macro pax_exit_kernel
13114 +#ifdef CONFIG_PAX_KERNEXEC
13115 + call pax_exit_kernel
13116 +#endif
13117 +.endm
13118 +
13119 +#ifdef CONFIG_PAX_KERNEXEC
13120 +ENTRY(pax_enter_kernel)
13121 +#ifdef CONFIG_PARAVIRT
13122 + pushl %eax
13123 + pushl %ecx
13124 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13125 + mov %eax, %esi
13126 +#else
13127 + mov %cr0, %esi
13128 +#endif
13129 + bts $16, %esi
13130 + jnc 1f
13131 + mov %cs, %esi
13132 + cmp $__KERNEL_CS, %esi
13133 + jz 3f
13134 + ljmp $__KERNEL_CS, $3f
13135 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13136 +2:
13137 +#ifdef CONFIG_PARAVIRT
13138 + mov %esi, %eax
13139 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13140 +#else
13141 + mov %esi, %cr0
13142 +#endif
13143 +3:
13144 +#ifdef CONFIG_PARAVIRT
13145 + popl %ecx
13146 + popl %eax
13147 +#endif
13148 + ret
13149 +ENDPROC(pax_enter_kernel)
13150 +
13151 +ENTRY(pax_exit_kernel)
13152 +#ifdef CONFIG_PARAVIRT
13153 + pushl %eax
13154 + pushl %ecx
13155 +#endif
13156 + mov %cs, %esi
13157 + cmp $__KERNEXEC_KERNEL_CS, %esi
13158 + jnz 2f
13159 +#ifdef CONFIG_PARAVIRT
13160 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13161 + mov %eax, %esi
13162 +#else
13163 + mov %cr0, %esi
13164 +#endif
13165 + btr $16, %esi
13166 + ljmp $__KERNEL_CS, $1f
13167 +1:
13168 +#ifdef CONFIG_PARAVIRT
13169 + mov %esi, %eax
13170 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13171 +#else
13172 + mov %esi, %cr0
13173 +#endif
13174 +2:
13175 +#ifdef CONFIG_PARAVIRT
13176 + popl %ecx
13177 + popl %eax
13178 +#endif
13179 + ret
13180 +ENDPROC(pax_exit_kernel)
13181 +#endif
13182 +
13183 +.macro pax_erase_kstack
13184 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13185 + call pax_erase_kstack
13186 +#endif
13187 +.endm
13188 +
13189 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13190 +/*
13191 + * ebp: thread_info
13192 + * ecx, edx: can be clobbered
13193 + */
13194 +ENTRY(pax_erase_kstack)
13195 + pushl %edi
13196 + pushl %eax
13197 +
13198 + mov TI_lowest_stack(%ebp), %edi
13199 + mov $-0xBEEF, %eax
13200 + std
13201 +
13202 +1: mov %edi, %ecx
13203 + and $THREAD_SIZE_asm - 1, %ecx
13204 + shr $2, %ecx
13205 + repne scasl
13206 + jecxz 2f
13207 +
13208 + cmp $2*16, %ecx
13209 + jc 2f
13210 +
13211 + mov $2*16, %ecx
13212 + repe scasl
13213 + jecxz 2f
13214 + jne 1b
13215 +
13216 +2: cld
13217 + mov %esp, %ecx
13218 + sub %edi, %ecx
13219 + shr $2, %ecx
13220 + rep stosl
13221 +
13222 + mov TI_task_thread_sp0(%ebp), %edi
13223 + sub $128, %edi
13224 + mov %edi, TI_lowest_stack(%ebp)
13225 +
13226 + popl %eax
13227 + popl %edi
13228 + ret
13229 +ENDPROC(pax_erase_kstack)
13230 +#endif
13231 +
13232 +.macro __SAVE_ALL _DS
13233 cld
13234 PUSH_GS
13235 pushl %fs
13236 @@ -224,7 +357,7 @@
13237 pushl %ebx
13238 CFI_ADJUST_CFA_OFFSET 4
13239 CFI_REL_OFFSET ebx, 0
13240 - movl $(__USER_DS), %edx
13241 + movl $\_DS, %edx
13242 movl %edx, %ds
13243 movl %edx, %es
13244 movl $(__KERNEL_PERCPU), %edx
13245 @@ -232,6 +365,15 @@
13246 SET_KERNEL_GS %edx
13247 .endm
13248
13249 +.macro SAVE_ALL
13250 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13251 + __SAVE_ALL __KERNEL_DS
13252 + pax_enter_kernel
13253 +#else
13254 + __SAVE_ALL __USER_DS
13255 +#endif
13256 +.endm
13257 +
13258 .macro RESTORE_INT_REGS
13259 popl %ebx
13260 CFI_ADJUST_CFA_OFFSET -4
13261 @@ -352,7 +494,15 @@ check_userspace:
13262 movb PT_CS(%esp), %al
13263 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13264 cmpl $USER_RPL, %eax
13265 +
13266 +#ifdef CONFIG_PAX_KERNEXEC
13267 + jae resume_userspace
13268 +
13269 + PAX_EXIT_KERNEL
13270 + jmp resume_kernel
13271 +#else
13272 jb resume_kernel # not returning to v8086 or userspace
13273 +#endif
13274
13275 ENTRY(resume_userspace)
13276 LOCKDEP_SYS_EXIT
13277 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13278 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13279 # int/exception return?
13280 jne work_pending
13281 - jmp restore_all
13282 + jmp restore_all_pax
13283 END(ret_from_exception)
13284
13285 #ifdef CONFIG_PREEMPT
13286 @@ -414,25 +564,36 @@ sysenter_past_esp:
13287 /*CFI_REL_OFFSET cs, 0*/
13288 /*
13289 * Push current_thread_info()->sysenter_return to the stack.
13290 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13291 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13292 */
13293 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13294 + pushl $0
13295 CFI_ADJUST_CFA_OFFSET 4
13296 CFI_REL_OFFSET eip, 0
13297
13298 pushl %eax
13299 CFI_ADJUST_CFA_OFFSET 4
13300 SAVE_ALL
13301 + GET_THREAD_INFO(%ebp)
13302 + movl TI_sysenter_return(%ebp),%ebp
13303 + movl %ebp,PT_EIP(%esp)
13304 ENABLE_INTERRUPTS(CLBR_NONE)
13305
13306 /*
13307 * Load the potential sixth argument from user stack.
13308 * Careful about security.
13309 */
13310 + movl PT_OLDESP(%esp),%ebp
13311 +
13312 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13313 + mov PT_OLDSS(%esp),%ds
13314 +1: movl %ds:(%ebp),%ebp
13315 + push %ss
13316 + pop %ds
13317 +#else
13318 cmpl $__PAGE_OFFSET-3,%ebp
13319 jae syscall_fault
13320 1: movl (%ebp),%ebp
13321 +#endif
13322 +
13323 movl %ebp,PT_EBP(%esp)
13324 .section __ex_table,"a"
13325 .align 4
13326 @@ -455,12 +616,23 @@ sysenter_do_call:
13327 testl $_TIF_ALLWORK_MASK, %ecx
13328 jne sysexit_audit
13329 sysenter_exit:
13330 +
13331 +#ifdef CONFIG_PAX_RANDKSTACK
13332 + pushl_cfi %eax
13333 + call pax_randomize_kstack
13334 + popl_cfi %eax
13335 +#endif
13336 +
13337 + pax_erase_kstack
13338 +
13339 /* if something modifies registers it must also disable sysexit */
13340 movl PT_EIP(%esp), %edx
13341 movl PT_OLDESP(%esp), %ecx
13342 xorl %ebp,%ebp
13343 TRACE_IRQS_ON
13344 1: mov PT_FS(%esp), %fs
13345 +2: mov PT_DS(%esp), %ds
13346 +3: mov PT_ES(%esp), %es
13347 PTGS_TO_GS
13348 ENABLE_INTERRUPTS_SYSEXIT
13349
13350 @@ -477,6 +649,9 @@ sysenter_audit:
13351 movl %eax,%edx /* 2nd arg: syscall number */
13352 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13353 call audit_syscall_entry
13354 +
13355 + pax_erase_kstack
13356 +
13357 pushl %ebx
13358 CFI_ADJUST_CFA_OFFSET 4
13359 movl PT_EAX(%esp),%eax /* reload syscall number */
13360 @@ -504,11 +679,17 @@ sysexit_audit:
13361
13362 CFI_ENDPROC
13363 .pushsection .fixup,"ax"
13364 -2: movl $0,PT_FS(%esp)
13365 +4: movl $0,PT_FS(%esp)
13366 + jmp 1b
13367 +5: movl $0,PT_DS(%esp)
13368 + jmp 1b
13369 +6: movl $0,PT_ES(%esp)
13370 jmp 1b
13371 .section __ex_table,"a"
13372 .align 4
13373 - .long 1b,2b
13374 + .long 1b,4b
13375 + .long 2b,5b
13376 + .long 3b,6b
13377 .popsection
13378 PTGS_TO_GS_EX
13379 ENDPROC(ia32_sysenter_target)
13380 @@ -538,6 +719,14 @@ syscall_exit:
13381 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13382 jne syscall_exit_work
13383
13384 +restore_all_pax:
13385 +
13386 +#ifdef CONFIG_PAX_RANDKSTACK
13387 + call pax_randomize_kstack
13388 +#endif
13389 +
13390 + pax_erase_kstack
13391 +
13392 restore_all:
13393 TRACE_IRQS_IRET
13394 restore_all_notrace:
13395 @@ -602,7 +791,13 @@ ldt_ss:
13396 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13397 mov %dx, %ax /* eax: new kernel esp */
13398 sub %eax, %edx /* offset (low word is 0) */
13399 - PER_CPU(gdt_page, %ebx)
13400 +#ifdef CONFIG_SMP
13401 + movl PER_CPU_VAR(cpu_number), %ebx
13402 + shll $PAGE_SHIFT_asm, %ebx
13403 + addl $cpu_gdt_table, %ebx
13404 +#else
13405 + movl $cpu_gdt_table, %ebx
13406 +#endif
13407 shr $16, %edx
13408 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13409 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13410 @@ -636,31 +831,25 @@ work_resched:
13411 movl TI_flags(%ebp), %ecx
13412 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13413 # than syscall tracing?
13414 - jz restore_all
13415 + jz restore_all_pax
13416 testb $_TIF_NEED_RESCHED, %cl
13417 jnz work_resched
13418
13419 work_notifysig: # deal with pending signals and
13420 # notify-resume requests
13421 + movl %esp, %eax
13422 #ifdef CONFIG_VM86
13423 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13424 - movl %esp, %eax
13425 - jne work_notifysig_v86 # returning to kernel-space or
13426 + jz 1f # returning to kernel-space or
13427 # vm86-space
13428 - xorl %edx, %edx
13429 - call do_notify_resume
13430 - jmp resume_userspace_sig
13431
13432 - ALIGN
13433 -work_notifysig_v86:
13434 pushl %ecx # save ti_flags for do_notify_resume
13435 CFI_ADJUST_CFA_OFFSET 4
13436 call save_v86_state # %eax contains pt_regs pointer
13437 popl %ecx
13438 CFI_ADJUST_CFA_OFFSET -4
13439 movl %eax, %esp
13440 -#else
13441 - movl %esp, %eax
13442 +1:
13443 #endif
13444 xorl %edx, %edx
13445 call do_notify_resume
13446 @@ -673,6 +862,9 @@ syscall_trace_entry:
13447 movl $-ENOSYS,PT_EAX(%esp)
13448 movl %esp, %eax
13449 call syscall_trace_enter
13450 +
13451 + pax_erase_kstack
13452 +
13453 /* What it returned is what we'll actually use. */
13454 cmpl $(nr_syscalls), %eax
13455 jnae syscall_call
13456 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13457
13458 RING0_INT_FRAME # can't unwind into user space anyway
13459 syscall_fault:
13460 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13461 + push %ss
13462 + pop %ds
13463 +#endif
13464 GET_THREAD_INFO(%ebp)
13465 movl $-EFAULT,PT_EAX(%esp)
13466 jmp resume_userspace
13467 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13468 PTREGSCALL(vm86)
13469 PTREGSCALL(vm86old)
13470
13471 + ALIGN;
13472 +ENTRY(kernel_execve)
13473 + push %ebp
13474 + sub $PT_OLDSS+4,%esp
13475 + push %edi
13476 + push %ecx
13477 + push %eax
13478 + lea 3*4(%esp),%edi
13479 + mov $PT_OLDSS/4+1,%ecx
13480 + xorl %eax,%eax
13481 + rep stosl
13482 + pop %eax
13483 + pop %ecx
13484 + pop %edi
13485 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13486 + mov %eax,PT_EBX(%esp)
13487 + mov %edx,PT_ECX(%esp)
13488 + mov %ecx,PT_EDX(%esp)
13489 + mov %esp,%eax
13490 + call sys_execve
13491 + GET_THREAD_INFO(%ebp)
13492 + test %eax,%eax
13493 + jz syscall_exit
13494 + add $PT_OLDSS+4,%esp
13495 + pop %ebp
13496 + ret
13497 +
13498 .macro FIXUP_ESPFIX_STACK
13499 /*
13500 * Switch back for ESPFIX stack to the normal zerobased stack
13501 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13502 * normal stack and adjusts ESP with the matching offset.
13503 */
13504 /* fixup the stack */
13505 - PER_CPU(gdt_page, %ebx)
13506 +#ifdef CONFIG_SMP
13507 + movl PER_CPU_VAR(cpu_number), %ebx
13508 + shll $PAGE_SHIFT_asm, %ebx
13509 + addl $cpu_gdt_table, %ebx
13510 +#else
13511 + movl $cpu_gdt_table, %ebx
13512 +#endif
13513 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13514 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13515 shl $16, %eax
13516 @@ -1198,7 +1427,6 @@ return_to_handler:
13517 ret
13518 #endif
13519
13520 -.section .rodata,"a"
13521 #include "syscall_table_32.S"
13522
13523 syscall_table_size=(.-sys_call_table)
13524 @@ -1255,9 +1483,12 @@ error_code:
13525 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13526 REG_TO_PTGS %ecx
13527 SET_KERNEL_GS %ecx
13528 - movl $(__USER_DS), %ecx
13529 + movl $(__KERNEL_DS), %ecx
13530 movl %ecx, %ds
13531 movl %ecx, %es
13532 +
13533 + pax_enter_kernel
13534 +
13535 TRACE_IRQS_OFF
13536 movl %esp,%eax # pt_regs pointer
13537 call *%edi
13538 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13539 xorl %edx,%edx # zero error code
13540 movl %esp,%eax # pt_regs pointer
13541 call do_nmi
13542 +
13543 + pax_exit_kernel
13544 +
13545 jmp restore_all_notrace
13546 CFI_ENDPROC
13547
13548 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13549 FIXUP_ESPFIX_STACK # %eax == %esp
13550 xorl %edx,%edx # zero error code
13551 call do_nmi
13552 +
13553 + pax_exit_kernel
13554 +
13555 RESTORE_REGS
13556 lss 12+4(%esp), %esp # back to espfix stack
13557 CFI_ADJUST_CFA_OFFSET -24
13558 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13559 --- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13560 +++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13561 @@ -53,6 +53,7 @@
13562 #include <asm/paravirt.h>
13563 #include <asm/ftrace.h>
13564 #include <asm/percpu.h>
13565 +#include <asm/pgtable.h>
13566
13567 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13568 #include <linux/elf-em.h>
13569 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13570 ENDPROC(native_usergs_sysret64)
13571 #endif /* CONFIG_PARAVIRT */
13572
13573 + .macro ljmpq sel, off
13574 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13575 + .byte 0x48; ljmp *1234f(%rip)
13576 + .pushsection .rodata
13577 + .align 16
13578 + 1234: .quad \off; .word \sel
13579 + .popsection
13580 +#else
13581 + pushq $\sel
13582 + pushq $\off
13583 + lretq
13584 +#endif
13585 + .endm
13586 +
13587 + .macro pax_enter_kernel
13588 +#ifdef CONFIG_PAX_KERNEXEC
13589 + call pax_enter_kernel
13590 +#endif
13591 + .endm
13592 +
13593 + .macro pax_exit_kernel
13594 +#ifdef CONFIG_PAX_KERNEXEC
13595 + call pax_exit_kernel
13596 +#endif
13597 + .endm
13598 +
13599 +#ifdef CONFIG_PAX_KERNEXEC
13600 +ENTRY(pax_enter_kernel)
13601 + pushq %rdi
13602 +
13603 +#ifdef CONFIG_PARAVIRT
13604 + PV_SAVE_REGS(CLBR_RDI)
13605 +#endif
13606 +
13607 + GET_CR0_INTO_RDI
13608 + bts $16,%rdi
13609 + jnc 1f
13610 + mov %cs,%edi
13611 + cmp $__KERNEL_CS,%edi
13612 + jz 3f
13613 + ljmpq __KERNEL_CS,3f
13614 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13615 +2: SET_RDI_INTO_CR0
13616 +3:
13617 +
13618 +#ifdef CONFIG_PARAVIRT
13619 + PV_RESTORE_REGS(CLBR_RDI)
13620 +#endif
13621 +
13622 + popq %rdi
13623 + retq
13624 +ENDPROC(pax_enter_kernel)
13625 +
13626 +ENTRY(pax_exit_kernel)
13627 + pushq %rdi
13628 +
13629 +#ifdef CONFIG_PARAVIRT
13630 + PV_SAVE_REGS(CLBR_RDI)
13631 +#endif
13632 +
13633 + mov %cs,%rdi
13634 + cmp $__KERNEXEC_KERNEL_CS,%edi
13635 + jnz 2f
13636 + GET_CR0_INTO_RDI
13637 + btr $16,%rdi
13638 + ljmpq __KERNEL_CS,1f
13639 +1: SET_RDI_INTO_CR0
13640 +2:
13641 +
13642 +#ifdef CONFIG_PARAVIRT
13643 + PV_RESTORE_REGS(CLBR_RDI);
13644 +#endif
13645 +
13646 + popq %rdi
13647 + retq
13648 +ENDPROC(pax_exit_kernel)
13649 +#endif
13650 +
13651 + .macro pax_enter_kernel_user
13652 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13653 + call pax_enter_kernel_user
13654 +#endif
13655 + .endm
13656 +
13657 + .macro pax_exit_kernel_user
13658 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13659 + call pax_exit_kernel_user
13660 +#endif
13661 +#ifdef CONFIG_PAX_RANDKSTACK
13662 + push %rax
13663 + call pax_randomize_kstack
13664 + pop %rax
13665 +#endif
13666 + pax_erase_kstack
13667 + .endm
13668 +
13669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13670 +ENTRY(pax_enter_kernel_user)
13671 + pushq %rdi
13672 + pushq %rbx
13673 +
13674 +#ifdef CONFIG_PARAVIRT
13675 + PV_SAVE_REGS(CLBR_RDI)
13676 +#endif
13677 +
13678 + GET_CR3_INTO_RDI
13679 + mov %rdi,%rbx
13680 + add $__START_KERNEL_map,%rbx
13681 + sub phys_base(%rip),%rbx
13682 +
13683 +#ifdef CONFIG_PARAVIRT
13684 + pushq %rdi
13685 + cmpl $0, pv_info+PARAVIRT_enabled
13686 + jz 1f
13687 + i = 0
13688 + .rept USER_PGD_PTRS
13689 + mov i*8(%rbx),%rsi
13690 + mov $0,%sil
13691 + lea i*8(%rbx),%rdi
13692 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13693 + i = i + 1
13694 + .endr
13695 + jmp 2f
13696 +1:
13697 +#endif
13698 +
13699 + i = 0
13700 + .rept USER_PGD_PTRS
13701 + movb $0,i*8(%rbx)
13702 + i = i + 1
13703 + .endr
13704 +
13705 +#ifdef CONFIG_PARAVIRT
13706 +2: popq %rdi
13707 +#endif
13708 + SET_RDI_INTO_CR3
13709 +
13710 +#ifdef CONFIG_PAX_KERNEXEC
13711 + GET_CR0_INTO_RDI
13712 + bts $16,%rdi
13713 + SET_RDI_INTO_CR0
13714 +#endif
13715 +
13716 +#ifdef CONFIG_PARAVIRT
13717 + PV_RESTORE_REGS(CLBR_RDI)
13718 +#endif
13719 +
13720 + popq %rbx
13721 + popq %rdi
13722 + retq
13723 +ENDPROC(pax_enter_kernel_user)
13724 +
13725 +ENTRY(pax_exit_kernel_user)
13726 + push %rdi
13727 +
13728 +#ifdef CONFIG_PARAVIRT
13729 + pushq %rbx
13730 + PV_SAVE_REGS(CLBR_RDI)
13731 +#endif
13732 +
13733 +#ifdef CONFIG_PAX_KERNEXEC
13734 + GET_CR0_INTO_RDI
13735 + btr $16,%rdi
13736 + SET_RDI_INTO_CR0
13737 +#endif
13738 +
13739 + GET_CR3_INTO_RDI
13740 + add $__START_KERNEL_map,%rdi
13741 + sub phys_base(%rip),%rdi
13742 +
13743 +#ifdef CONFIG_PARAVIRT
13744 + cmpl $0, pv_info+PARAVIRT_enabled
13745 + jz 1f
13746 + mov %rdi,%rbx
13747 + i = 0
13748 + .rept USER_PGD_PTRS
13749 + mov i*8(%rbx),%rsi
13750 + mov $0x67,%sil
13751 + lea i*8(%rbx),%rdi
13752 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13753 + i = i + 1
13754 + .endr
13755 + jmp 2f
13756 +1:
13757 +#endif
13758 +
13759 + i = 0
13760 + .rept USER_PGD_PTRS
13761 + movb $0x67,i*8(%rdi)
13762 + i = i + 1
13763 + .endr
13764 +
13765 +#ifdef CONFIG_PARAVIRT
13766 +2: PV_RESTORE_REGS(CLBR_RDI)
13767 + popq %rbx
13768 +#endif
13769 +
13770 + popq %rdi
13771 + retq
13772 +ENDPROC(pax_exit_kernel_user)
13773 +#endif
13774 +
13775 +.macro pax_erase_kstack
13776 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13777 + call pax_erase_kstack
13778 +#endif
13779 +.endm
13780 +
13781 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13782 +/*
13783 + * r10: thread_info
13784 + * rcx, rdx: can be clobbered
13785 + */
13786 +ENTRY(pax_erase_kstack)
13787 + pushq %rdi
13788 + pushq %rax
13789 +
13790 + GET_THREAD_INFO(%r10)
13791 + mov TI_lowest_stack(%r10), %rdi
13792 + mov $-0xBEEF, %rax
13793 + std
13794 +
13795 +1: mov %edi, %ecx
13796 + and $THREAD_SIZE_asm - 1, %ecx
13797 + shr $3, %ecx
13798 + repne scasq
13799 + jecxz 2f
13800 +
13801 + cmp $2*8, %ecx
13802 + jc 2f
13803 +
13804 + mov $2*8, %ecx
13805 + repe scasq
13806 + jecxz 2f
13807 + jne 1b
13808 +
13809 +2: cld
13810 + mov %esp, %ecx
13811 + sub %edi, %ecx
13812 + shr $3, %ecx
13813 + rep stosq
13814 +
13815 + mov TI_task_thread_sp0(%r10), %rdi
13816 + sub $256, %rdi
13817 + mov %rdi, TI_lowest_stack(%r10)
13818 +
13819 + popq %rax
13820 + popq %rdi
13821 + ret
13822 +ENDPROC(pax_erase_kstack)
13823 +#endif
13824
13825 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13826 #ifdef CONFIG_TRACE_IRQFLAGS
13827 @@ -317,7 +569,7 @@ ENTRY(save_args)
13828 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13829 movq_cfi rbp, 8 /* push %rbp */
13830 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13831 - testl $3, CS(%rdi)
13832 + testb $3, CS(%rdi)
13833 je 1f
13834 SWAPGS
13835 /*
13836 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13837
13838 RESTORE_REST
13839
13840 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13841 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13842 je int_ret_from_sys_call
13843
13844 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13845 @@ -455,7 +707,7 @@ END(ret_from_fork)
13846 ENTRY(system_call)
13847 CFI_STARTPROC simple
13848 CFI_SIGNAL_FRAME
13849 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13850 + CFI_DEF_CFA rsp,0
13851 CFI_REGISTER rip,rcx
13852 /*CFI_REGISTER rflags,r11*/
13853 SWAPGS_UNSAFE_STACK
13854 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13855
13856 movq %rsp,PER_CPU_VAR(old_rsp)
13857 movq PER_CPU_VAR(kernel_stack),%rsp
13858 + pax_enter_kernel_user
13859 /*
13860 * No need to follow this irqs off/on section - it's straight
13861 * and short:
13862 */
13863 ENABLE_INTERRUPTS(CLBR_NONE)
13864 - SAVE_ARGS 8,1
13865 + SAVE_ARGS 8*6,1
13866 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13867 movq %rcx,RIP-ARGOFFSET(%rsp)
13868 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13869 @@ -502,6 +755,7 @@ sysret_check:
13870 andl %edi,%edx
13871 jnz sysret_careful
13872 CFI_REMEMBER_STATE
13873 + pax_exit_kernel_user
13874 /*
13875 * sysretq will re-enable interrupts:
13876 */
13877 @@ -562,6 +816,9 @@ auditsys:
13878 movq %rax,%rsi /* 2nd arg: syscall number */
13879 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13880 call audit_syscall_entry
13881 +
13882 + pax_erase_kstack
13883 +
13884 LOAD_ARGS 0 /* reload call-clobbered registers */
13885 jmp system_call_fastpath
13886
13887 @@ -592,6 +849,9 @@ tracesys:
13888 FIXUP_TOP_OF_STACK %rdi
13889 movq %rsp,%rdi
13890 call syscall_trace_enter
13891 +
13892 + pax_erase_kstack
13893 +
13894 /*
13895 * Reload arg registers from stack in case ptrace changed them.
13896 * We don't reload %rax because syscall_trace_enter() returned
13897 @@ -613,7 +873,7 @@ tracesys:
13898 GLOBAL(int_ret_from_sys_call)
13899 DISABLE_INTERRUPTS(CLBR_NONE)
13900 TRACE_IRQS_OFF
13901 - testl $3,CS-ARGOFFSET(%rsp)
13902 + testb $3,CS-ARGOFFSET(%rsp)
13903 je retint_restore_args
13904 movl $_TIF_ALLWORK_MASK,%edi
13905 /* edi: mask to check */
13906 @@ -800,6 +1060,16 @@ END(interrupt)
13907 CFI_ADJUST_CFA_OFFSET 10*8
13908 call save_args
13909 PARTIAL_FRAME 0
13910 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13911 + testb $3, CS(%rdi)
13912 + jnz 1f
13913 + pax_enter_kernel
13914 + jmp 2f
13915 +1: pax_enter_kernel_user
13916 +2:
13917 +#else
13918 + pax_enter_kernel
13919 +#endif
13920 call \func
13921 .endm
13922
13923 @@ -822,7 +1092,7 @@ ret_from_intr:
13924 CFI_ADJUST_CFA_OFFSET -8
13925 exit_intr:
13926 GET_THREAD_INFO(%rcx)
13927 - testl $3,CS-ARGOFFSET(%rsp)
13928 + testb $3,CS-ARGOFFSET(%rsp)
13929 je retint_kernel
13930
13931 /* Interrupt came from user space */
13932 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13933 * The iretq could re-enable interrupts:
13934 */
13935 DISABLE_INTERRUPTS(CLBR_ANY)
13936 + pax_exit_kernel_user
13937 TRACE_IRQS_IRETQ
13938 SWAPGS
13939 jmp restore_args
13940
13941 retint_restore_args: /* return to kernel space */
13942 DISABLE_INTERRUPTS(CLBR_ANY)
13943 + pax_exit_kernel
13944 /*
13945 * The iretq could re-enable interrupts:
13946 */
13947 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13948 CFI_ADJUST_CFA_OFFSET 15*8
13949 call error_entry
13950 DEFAULT_FRAME 0
13951 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13952 + testb $3, CS(%rsp)
13953 + jnz 1f
13954 + pax_enter_kernel
13955 + jmp 2f
13956 +1: pax_enter_kernel_user
13957 +2:
13958 +#else
13959 + pax_enter_kernel
13960 +#endif
13961 movq %rsp,%rdi /* pt_regs pointer */
13962 xorl %esi,%esi /* no error code */
13963 call \do_sym
13964 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13965 subq $15*8, %rsp
13966 call save_paranoid
13967 TRACE_IRQS_OFF
13968 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13969 + testb $3, CS(%rsp)
13970 + jnz 1f
13971 + pax_enter_kernel
13972 + jmp 2f
13973 +1: pax_enter_kernel_user
13974 +2:
13975 +#else
13976 + pax_enter_kernel
13977 +#endif
13978 movq %rsp,%rdi /* pt_regs pointer */
13979 xorl %esi,%esi /* no error code */
13980 call \do_sym
13981 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13982 subq $15*8, %rsp
13983 call save_paranoid
13984 TRACE_IRQS_OFF
13985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13986 + testb $3, CS(%rsp)
13987 + jnz 1f
13988 + pax_enter_kernel
13989 + jmp 2f
13990 +1: pax_enter_kernel_user
13991 +2:
13992 +#else
13993 + pax_enter_kernel
13994 +#endif
13995 movq %rsp,%rdi /* pt_regs pointer */
13996 xorl %esi,%esi /* no error code */
13997 - PER_CPU(init_tss, %rbp)
13998 +#ifdef CONFIG_SMP
13999 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14000 + lea init_tss(%rbp), %rbp
14001 +#else
14002 + lea init_tss(%rip), %rbp
14003 +#endif
14004 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14005 call \do_sym
14006 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14007 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
14008 CFI_ADJUST_CFA_OFFSET 15*8
14009 call error_entry
14010 DEFAULT_FRAME 0
14011 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14012 + testb $3, CS(%rsp)
14013 + jnz 1f
14014 + pax_enter_kernel
14015 + jmp 2f
14016 +1: pax_enter_kernel_user
14017 +2:
14018 +#else
14019 + pax_enter_kernel
14020 +#endif
14021 movq %rsp,%rdi /* pt_regs pointer */
14022 movq ORIG_RAX(%rsp),%rsi /* get error code */
14023 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14024 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
14025 call save_paranoid
14026 DEFAULT_FRAME 0
14027 TRACE_IRQS_OFF
14028 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14029 + testb $3, CS(%rsp)
14030 + jnz 1f
14031 + pax_enter_kernel
14032 + jmp 2f
14033 +1: pax_enter_kernel_user
14034 +2:
14035 +#else
14036 + pax_enter_kernel
14037 +#endif
14038 movq %rsp,%rdi /* pt_regs pointer */
14039 movq ORIG_RAX(%rsp),%rsi /* get error code */
14040 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14041 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14042 TRACE_IRQS_OFF
14043 testl %ebx,%ebx /* swapgs needed? */
14044 jnz paranoid_restore
14045 - testl $3,CS(%rsp)
14046 + testb $3,CS(%rsp)
14047 jnz paranoid_userspace
14048 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14049 + pax_exit_kernel
14050 + TRACE_IRQS_IRETQ 0
14051 + SWAPGS_UNSAFE_STACK
14052 + RESTORE_ALL 8
14053 + jmp irq_return
14054 +#endif
14055 paranoid_swapgs:
14056 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14057 + pax_exit_kernel_user
14058 +#else
14059 + pax_exit_kernel
14060 +#endif
14061 TRACE_IRQS_IRETQ 0
14062 SWAPGS_UNSAFE_STACK
14063 RESTORE_ALL 8
14064 jmp irq_return
14065 paranoid_restore:
14066 + pax_exit_kernel
14067 TRACE_IRQS_IRETQ 0
14068 RESTORE_ALL 8
14069 jmp irq_return
14070 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14071 movq_cfi r14, R14+8
14072 movq_cfi r15, R15+8
14073 xorl %ebx,%ebx
14074 - testl $3,CS+8(%rsp)
14075 + testb $3,CS+8(%rsp)
14076 je error_kernelspace
14077 error_swapgs:
14078 SWAPGS
14079 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
14080 CFI_ADJUST_CFA_OFFSET 15*8
14081 call save_paranoid
14082 DEFAULT_FRAME 0
14083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14084 + testb $3, CS(%rsp)
14085 + jnz 1f
14086 + pax_enter_kernel
14087 + jmp 2f
14088 +1: pax_enter_kernel_user
14089 +2:
14090 +#else
14091 + pax_enter_kernel
14092 +#endif
14093 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14094 movq %rsp,%rdi
14095 movq $-1,%rsi
14096 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
14097 DISABLE_INTERRUPTS(CLBR_NONE)
14098 testl %ebx,%ebx /* swapgs needed? */
14099 jnz nmi_restore
14100 - testl $3,CS(%rsp)
14101 + testb $3,CS(%rsp)
14102 jnz nmi_userspace
14103 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14104 + pax_exit_kernel
14105 + SWAPGS_UNSAFE_STACK
14106 + RESTORE_ALL 8
14107 + jmp irq_return
14108 +#endif
14109 nmi_swapgs:
14110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14111 + pax_exit_kernel_user
14112 +#else
14113 + pax_exit_kernel
14114 +#endif
14115 SWAPGS_UNSAFE_STACK
14116 + RESTORE_ALL 8
14117 + jmp irq_return
14118 nmi_restore:
14119 + pax_exit_kernel
14120 RESTORE_ALL 8
14121 jmp irq_return
14122 nmi_userspace:
14123 diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14124 --- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14125 +++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14126 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14127 static void *mod_code_newcode; /* holds the text to write to the IP */
14128
14129 static unsigned nmi_wait_count;
14130 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14131 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14132
14133 int ftrace_arch_read_dyn_info(char *buf, int size)
14134 {
14135 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14136
14137 r = snprintf(buf, size, "%u %u",
14138 nmi_wait_count,
14139 - atomic_read(&nmi_update_count));
14140 + atomic_read_unchecked(&nmi_update_count));
14141 return r;
14142 }
14143
14144 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14145 {
14146 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14147 smp_rmb();
14148 + pax_open_kernel();
14149 ftrace_mod_code();
14150 - atomic_inc(&nmi_update_count);
14151 + pax_close_kernel();
14152 + atomic_inc_unchecked(&nmi_update_count);
14153 }
14154 /* Must have previous changes seen before executions */
14155 smp_mb();
14156 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14157
14158
14159
14160 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14161 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14162
14163 static unsigned char *ftrace_nop_replace(void)
14164 {
14165 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14166 {
14167 unsigned char replaced[MCOUNT_INSN_SIZE];
14168
14169 + ip = ktla_ktva(ip);
14170 +
14171 /*
14172 * Note: Due to modules and __init, code can
14173 * disappear and change, we need to protect against faulting
14174 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14175 unsigned char old[MCOUNT_INSN_SIZE], *new;
14176 int ret;
14177
14178 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14179 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14180 new = ftrace_call_replace(ip, (unsigned long)func);
14181 ret = ftrace_modify_code(ip, old, new);
14182
14183 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14184 switch (faulted) {
14185 case 0:
14186 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14187 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14188 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14189 break;
14190 case 1:
14191 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14192 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14193 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14194 break;
14195 case 2:
14196 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14197 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14198 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14199 break;
14200 }
14201
14202 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14203 {
14204 unsigned char code[MCOUNT_INSN_SIZE];
14205
14206 + ip = ktla_ktva(ip);
14207 +
14208 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14209 return -EFAULT;
14210
14211 diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14212 --- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14213 +++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14214 @@ -16,6 +16,7 @@
14215 #include <asm/apic.h>
14216 #include <asm/io_apic.h>
14217 #include <asm/bios_ebda.h>
14218 +#include <asm/boot.h>
14219
14220 static void __init i386_default_early_setup(void)
14221 {
14222 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14223 {
14224 reserve_trampoline_memory();
14225
14226 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14227 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14228
14229 #ifdef CONFIG_BLK_DEV_INITRD
14230 /* Reserve INITRD */
14231 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14232 --- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14233 +++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14234 @@ -19,10 +19,17 @@
14235 #include <asm/setup.h>
14236 #include <asm/processor-flags.h>
14237 #include <asm/percpu.h>
14238 +#include <asm/msr-index.h>
14239
14240 /* Physical address */
14241 #define pa(X) ((X) - __PAGE_OFFSET)
14242
14243 +#ifdef CONFIG_PAX_KERNEXEC
14244 +#define ta(X) (X)
14245 +#else
14246 +#define ta(X) ((X) - __PAGE_OFFSET)
14247 +#endif
14248 +
14249 /*
14250 * References to members of the new_cpu_data structure.
14251 */
14252 @@ -52,11 +59,7 @@
14253 * and small than max_low_pfn, otherwise will waste some page table entries
14254 */
14255
14256 -#if PTRS_PER_PMD > 1
14257 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14258 -#else
14259 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14260 -#endif
14261 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14262
14263 /* Enough space to fit pagetables for the low memory linear map */
14264 MAPPING_BEYOND_END = \
14265 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14266 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14267
14268 /*
14269 + * Real beginning of normal "text" segment
14270 + */
14271 +ENTRY(stext)
14272 +ENTRY(_stext)
14273 +
14274 +/*
14275 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14276 * %esi points to the real-mode code as a 32-bit pointer.
14277 * CS and DS must be 4 GB flat segments, but we don't depend on
14278 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14279 * can.
14280 */
14281 __HEAD
14282 +
14283 +#ifdef CONFIG_PAX_KERNEXEC
14284 + jmp startup_32
14285 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14286 +.fill PAGE_SIZE-5,1,0xcc
14287 +#endif
14288 +
14289 ENTRY(startup_32)
14290 + movl pa(stack_start),%ecx
14291 +
14292 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14293 us to not reload segments */
14294 testb $(1<<6), BP_loadflags(%esi)
14295 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14296 movl %eax,%es
14297 movl %eax,%fs
14298 movl %eax,%gs
14299 + movl %eax,%ss
14300 2:
14301 + leal -__PAGE_OFFSET(%ecx),%esp
14302 +
14303 +#ifdef CONFIG_SMP
14304 + movl $pa(cpu_gdt_table),%edi
14305 + movl $__per_cpu_load,%eax
14306 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14307 + rorl $16,%eax
14308 + movb %al,__KERNEL_PERCPU + 4(%edi)
14309 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14310 + movl $__per_cpu_end - 1,%eax
14311 + subl $__per_cpu_start,%eax
14312 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14313 +#endif
14314 +
14315 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14316 + movl $NR_CPUS,%ecx
14317 + movl $pa(cpu_gdt_table),%edi
14318 +1:
14319 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14320 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14321 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14322 + addl $PAGE_SIZE_asm,%edi
14323 + loop 1b
14324 +#endif
14325 +
14326 +#ifdef CONFIG_PAX_KERNEXEC
14327 + movl $pa(boot_gdt),%edi
14328 + movl $__LOAD_PHYSICAL_ADDR,%eax
14329 + movw %ax,__BOOT_CS + 2(%edi)
14330 + rorl $16,%eax
14331 + movb %al,__BOOT_CS + 4(%edi)
14332 + movb %ah,__BOOT_CS + 7(%edi)
14333 + rorl $16,%eax
14334 +
14335 + ljmp $(__BOOT_CS),$1f
14336 +1:
14337 +
14338 + movl $NR_CPUS,%ecx
14339 + movl $pa(cpu_gdt_table),%edi
14340 + addl $__PAGE_OFFSET,%eax
14341 +1:
14342 + movw %ax,__KERNEL_CS + 2(%edi)
14343 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14344 + rorl $16,%eax
14345 + movb %al,__KERNEL_CS + 4(%edi)
14346 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14347 + movb %ah,__KERNEL_CS + 7(%edi)
14348 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14349 + rorl $16,%eax
14350 + addl $PAGE_SIZE_asm,%edi
14351 + loop 1b
14352 +#endif
14353
14354 /*
14355 * Clear BSS first so that there are no surprises...
14356 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14357 cmpl $num_subarch_entries, %eax
14358 jae bad_subarch
14359
14360 - movl pa(subarch_entries)(,%eax,4), %eax
14361 - subl $__PAGE_OFFSET, %eax
14362 - jmp *%eax
14363 + jmp *pa(subarch_entries)(,%eax,4)
14364
14365 bad_subarch:
14366 WEAK(lguest_entry)
14367 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14368 __INITDATA
14369
14370 subarch_entries:
14371 - .long default_entry /* normal x86/PC */
14372 - .long lguest_entry /* lguest hypervisor */
14373 - .long xen_entry /* Xen hypervisor */
14374 - .long default_entry /* Moorestown MID */
14375 + .long ta(default_entry) /* normal x86/PC */
14376 + .long ta(lguest_entry) /* lguest hypervisor */
14377 + .long ta(xen_entry) /* Xen hypervisor */
14378 + .long ta(default_entry) /* Moorestown MID */
14379 num_subarch_entries = (. - subarch_entries) / 4
14380 .previous
14381 #endif /* CONFIG_PARAVIRT */
14382 @@ -218,8 +287,11 @@ default_entry:
14383 movl %eax, pa(max_pfn_mapped)
14384
14385 /* Do early initialization of the fixmap area */
14386 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14387 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14388 +#ifdef CONFIG_COMPAT_VDSO
14389 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14390 +#else
14391 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14392 +#endif
14393 #else /* Not PAE */
14394
14395 page_pde_offset = (__PAGE_OFFSET >> 20);
14396 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14397 movl %eax, pa(max_pfn_mapped)
14398
14399 /* Do early initialization of the fixmap area */
14400 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14401 - movl %eax,pa(swapper_pg_dir+0xffc)
14402 +#ifdef CONFIG_COMPAT_VDSO
14403 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14404 +#else
14405 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14406 +#endif
14407 #endif
14408 jmp 3f
14409 /*
14410 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14411 movl %eax,%es
14412 movl %eax,%fs
14413 movl %eax,%gs
14414 + movl pa(stack_start),%ecx
14415 + movl %eax,%ss
14416 + leal -__PAGE_OFFSET(%ecx),%esp
14417 #endif /* CONFIG_SMP */
14418 3:
14419
14420 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14421 orl %edx,%eax
14422 movl %eax,%cr4
14423
14424 +#ifdef CONFIG_X86_PAE
14425 btl $5, %eax # check if PAE is enabled
14426 jnc 6f
14427
14428 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14429 cpuid
14430 cmpl $0x80000000, %eax
14431 jbe 6f
14432 +
14433 + /* Clear bogus XD_DISABLE bits */
14434 + call verify_cpu
14435 +
14436 mov $0x80000001, %eax
14437 cpuid
14438 /* Execute Disable bit supported? */
14439 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14440 jnc 6f
14441
14442 /* Setup EFER (Extended Feature Enable Register) */
14443 - movl $0xc0000080, %ecx
14444 + movl $MSR_EFER, %ecx
14445 rdmsr
14446
14447 btsl $11, %eax
14448 /* Make changes effective */
14449 wrmsr
14450
14451 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14452 + movl $1,pa(nx_enabled)
14453 +#endif
14454 +
14455 6:
14456
14457 /*
14458 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14459 movl %eax,%cr0 /* ..and set paging (PG) bit */
14460 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14461 1:
14462 - /* Set up the stack pointer */
14463 - lss stack_start,%esp
14464 + /* Shift the stack pointer to a virtual address */
14465 + addl $__PAGE_OFFSET, %esp
14466
14467 /*
14468 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14469 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14470
14471 #ifdef CONFIG_SMP
14472 cmpb $0, ready
14473 - jz 1f /* Initial CPU cleans BSS */
14474 - jmp checkCPUtype
14475 -1:
14476 + jnz checkCPUtype
14477 #endif /* CONFIG_SMP */
14478
14479 /*
14480 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14481 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14482 movl %eax,%ss # after changing gdt.
14483
14484 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14485 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14486 movl %eax,%ds
14487 movl %eax,%es
14488
14489 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14490 */
14491 cmpb $0,ready
14492 jne 1f
14493 - movl $per_cpu__gdt_page,%eax
14494 + movl $cpu_gdt_table,%eax
14495 movl $per_cpu__stack_canary,%ecx
14496 +#ifdef CONFIG_SMP
14497 + addl $__per_cpu_load,%ecx
14498 +#endif
14499 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14500 shrl $16, %ecx
14501 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14502 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14503 1:
14504 -#endif
14505 movl $(__KERNEL_STACK_CANARY),%eax
14506 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14507 + movl $(__USER_DS),%eax
14508 +#else
14509 + xorl %eax,%eax
14510 +#endif
14511 movl %eax,%gs
14512
14513 xorl %eax,%eax # Clear LDT
14514 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14515
14516 cld # gcc2 wants the direction flag cleared at all times
14517 pushl $0 # fake return address for unwinder
14518 -#ifdef CONFIG_SMP
14519 - movb ready, %cl
14520 movb $1, ready
14521 - cmpb $0,%cl # the first CPU calls start_kernel
14522 - je 1f
14523 - movl (stack_start), %esp
14524 -1:
14525 -#endif /* CONFIG_SMP */
14526 jmp *(initial_code)
14527
14528 /*
14529 @@ -546,22 +631,22 @@ early_page_fault:
14530 jmp early_fault
14531
14532 early_fault:
14533 - cld
14534 #ifdef CONFIG_PRINTK
14535 + cmpl $1,%ss:early_recursion_flag
14536 + je hlt_loop
14537 + incl %ss:early_recursion_flag
14538 + cld
14539 pusha
14540 movl $(__KERNEL_DS),%eax
14541 movl %eax,%ds
14542 movl %eax,%es
14543 - cmpl $2,early_recursion_flag
14544 - je hlt_loop
14545 - incl early_recursion_flag
14546 movl %cr2,%eax
14547 pushl %eax
14548 pushl %edx /* trapno */
14549 pushl $fault_msg
14550 call printk
14551 +; call dump_stack
14552 #endif
14553 - call dump_stack
14554 hlt_loop:
14555 hlt
14556 jmp hlt_loop
14557 @@ -569,8 +654,11 @@ hlt_loop:
14558 /* This is the default interrupt "handler" :-) */
14559 ALIGN
14560 ignore_int:
14561 - cld
14562 #ifdef CONFIG_PRINTK
14563 + cmpl $2,%ss:early_recursion_flag
14564 + je hlt_loop
14565 + incl %ss:early_recursion_flag
14566 + cld
14567 pushl %eax
14568 pushl %ecx
14569 pushl %edx
14570 @@ -579,9 +667,6 @@ ignore_int:
14571 movl $(__KERNEL_DS),%eax
14572 movl %eax,%ds
14573 movl %eax,%es
14574 - cmpl $2,early_recursion_flag
14575 - je hlt_loop
14576 - incl early_recursion_flag
14577 pushl 16(%esp)
14578 pushl 24(%esp)
14579 pushl 32(%esp)
14580 @@ -600,6 +685,8 @@ ignore_int:
14581 #endif
14582 iret
14583
14584 +#include "verify_cpu.S"
14585 +
14586 __REFDATA
14587 .align 4
14588 ENTRY(initial_code)
14589 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14590 /*
14591 * BSS section
14592 */
14593 -__PAGE_ALIGNED_BSS
14594 - .align PAGE_SIZE_asm
14595 #ifdef CONFIG_X86_PAE
14596 +.section .swapper_pg_pmd,"a",@progbits
14597 swapper_pg_pmd:
14598 .fill 1024*KPMDS,4,0
14599 #else
14600 +.section .swapper_pg_dir,"a",@progbits
14601 ENTRY(swapper_pg_dir)
14602 .fill 1024,4,0
14603 #endif
14604 +.section .swapper_pg_fixmap,"a",@progbits
14605 swapper_pg_fixmap:
14606 .fill 1024,4,0
14607 #ifdef CONFIG_X86_TRAMPOLINE
14608 +.section .trampoline_pg_dir,"a",@progbits
14609 ENTRY(trampoline_pg_dir)
14610 +#ifdef CONFIG_X86_PAE
14611 + .fill 4,8,0
14612 +#else
14613 .fill 1024,4,0
14614 #endif
14615 +#endif
14616 +
14617 +.section .empty_zero_page,"a",@progbits
14618 ENTRY(empty_zero_page)
14619 .fill 4096,1,0
14620
14621 /*
14622 + * The IDT has to be page-aligned to simplify the Pentium
14623 + * F0 0F bug workaround.. We have a special link segment
14624 + * for this.
14625 + */
14626 +.section .idt,"a",@progbits
14627 +ENTRY(idt_table)
14628 + .fill 256,8,0
14629 +
14630 +/*
14631 * This starts the data section.
14632 */
14633 #ifdef CONFIG_X86_PAE
14634 -__PAGE_ALIGNED_DATA
14635 - /* Page-aligned for the benefit of paravirt? */
14636 - .align PAGE_SIZE_asm
14637 +.section .swapper_pg_dir,"a",@progbits
14638 +
14639 ENTRY(swapper_pg_dir)
14640 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14641 # if KPMDS == 3
14642 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14643 # error "Kernel PMDs should be 1, 2 or 3"
14644 # endif
14645 .align PAGE_SIZE_asm /* needs to be page-sized too */
14646 +
14647 +#ifdef CONFIG_PAX_PER_CPU_PGD
14648 +ENTRY(cpu_pgd)
14649 + .rept NR_CPUS
14650 + .fill 4,8,0
14651 + .endr
14652 +#endif
14653 +
14654 #endif
14655
14656 .data
14657 +.balign 4
14658 ENTRY(stack_start)
14659 - .long init_thread_union+THREAD_SIZE
14660 - .long __BOOT_DS
14661 + .long init_thread_union+THREAD_SIZE-8
14662
14663 ready: .byte 0
14664
14665 +.section .rodata,"a",@progbits
14666 early_recursion_flag:
14667 .long 0
14668
14669 @@ -697,7 +809,7 @@ fault_msg:
14670 .word 0 # 32 bit align gdt_desc.address
14671 boot_gdt_descr:
14672 .word __BOOT_DS+7
14673 - .long boot_gdt - __PAGE_OFFSET
14674 + .long pa(boot_gdt)
14675
14676 .word 0 # 32-bit align idt_desc.address
14677 idt_descr:
14678 @@ -708,7 +820,7 @@ idt_descr:
14679 .word 0 # 32 bit align gdt_desc.address
14680 ENTRY(early_gdt_descr)
14681 .word GDT_ENTRIES*8-1
14682 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14683 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14684
14685 /*
14686 * The boot_gdt must mirror the equivalent in setup.S and is
14687 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14688 .align L1_CACHE_BYTES
14689 ENTRY(boot_gdt)
14690 .fill GDT_ENTRY_BOOT_CS,8,0
14691 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14692 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14693 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14694 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14695 +
14696 + .align PAGE_SIZE_asm
14697 +ENTRY(cpu_gdt_table)
14698 + .rept NR_CPUS
14699 + .quad 0x0000000000000000 /* NULL descriptor */
14700 + .quad 0x0000000000000000 /* 0x0b reserved */
14701 + .quad 0x0000000000000000 /* 0x13 reserved */
14702 + .quad 0x0000000000000000 /* 0x1b reserved */
14703 +
14704 +#ifdef CONFIG_PAX_KERNEXEC
14705 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14706 +#else
14707 + .quad 0x0000000000000000 /* 0x20 unused */
14708 +#endif
14709 +
14710 + .quad 0x0000000000000000 /* 0x28 unused */
14711 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14712 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14713 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14714 + .quad 0x0000000000000000 /* 0x4b reserved */
14715 + .quad 0x0000000000000000 /* 0x53 reserved */
14716 + .quad 0x0000000000000000 /* 0x5b reserved */
14717 +
14718 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14719 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14720 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14721 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14722 +
14723 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14724 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14725 +
14726 + /*
14727 + * Segments used for calling PnP BIOS have byte granularity.
14728 + * The code segments and data segments have fixed 64k limits,
14729 + * the transfer segment sizes are set at run time.
14730 + */
14731 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14732 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14733 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14734 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14735 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14736 +
14737 + /*
14738 + * The APM segments have byte granularity and their bases
14739 + * are set at run time. All have 64k limits.
14740 + */
14741 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14742 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14743 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14744 +
14745 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14746 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14747 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14748 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14749 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14750 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14751 +
14752 + /* Be sure this is zeroed to avoid false validations in Xen */
14753 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14754 + .endr
14755 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14756 --- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14757 +++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14758 @@ -19,6 +19,7 @@
14759 #include <asm/cache.h>
14760 #include <asm/processor-flags.h>
14761 #include <asm/percpu.h>
14762 +#include <asm/cpufeature.h>
14763
14764 #ifdef CONFIG_PARAVIRT
14765 #include <asm/asm-offsets.h>
14766 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14767 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14768 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14769 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14770 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14771 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14772 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14773 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14774
14775 .text
14776 __HEAD
14777 @@ -85,35 +90,22 @@ startup_64:
14778 */
14779 addq %rbp, init_level4_pgt + 0(%rip)
14780 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14781 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14782 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14783 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14784
14785 addq %rbp, level3_ident_pgt + 0(%rip)
14786 +#ifndef CONFIG_XEN
14787 + addq %rbp, level3_ident_pgt + 8(%rip)
14788 +#endif
14789
14790 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14791 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14792 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14793
14794 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14795 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14796 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14797
14798 - /* Add an Identity mapping if I am above 1G */
14799 - leaq _text(%rip), %rdi
14800 - andq $PMD_PAGE_MASK, %rdi
14801 -
14802 - movq %rdi, %rax
14803 - shrq $PUD_SHIFT, %rax
14804 - andq $(PTRS_PER_PUD - 1), %rax
14805 - jz ident_complete
14806 -
14807 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14808 - leaq level3_ident_pgt(%rip), %rbx
14809 - movq %rdx, 0(%rbx, %rax, 8)
14810 -
14811 - movq %rdi, %rax
14812 - shrq $PMD_SHIFT, %rax
14813 - andq $(PTRS_PER_PMD - 1), %rax
14814 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14815 - leaq level2_spare_pgt(%rip), %rbx
14816 - movq %rdx, 0(%rbx, %rax, 8)
14817 -ident_complete:
14818 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14819 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14820
14821 /*
14822 * Fixup the kernel text+data virtual addresses. Note that
14823 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14824 * after the boot processor executes this code.
14825 */
14826
14827 - /* Enable PAE mode and PGE */
14828 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14829 + /* Enable PAE mode and PSE/PGE */
14830 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14831 movq %rax, %cr4
14832
14833 /* Setup early boot stage 4 level pagetables. */
14834 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14835 movl $MSR_EFER, %ecx
14836 rdmsr
14837 btsl $_EFER_SCE, %eax /* Enable System Call */
14838 - btl $20,%edi /* No Execute supported? */
14839 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14840 jnc 1f
14841 btsl $_EFER_NX, %eax
14842 + leaq init_level4_pgt(%rip), %rdi
14843 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14844 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14845 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14846 1: wrmsr /* Make changes effective */
14847
14848 /* Setup cr0 */
14849 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14850 .quad x86_64_start_kernel
14851 ENTRY(initial_gs)
14852 .quad INIT_PER_CPU_VAR(irq_stack_union)
14853 - __FINITDATA
14854
14855 ENTRY(stack_start)
14856 .quad init_thread_union+THREAD_SIZE-8
14857 .word 0
14858 + __FINITDATA
14859
14860 bad_address:
14861 jmp bad_address
14862
14863 - .section ".init.text","ax"
14864 + __INIT
14865 #ifdef CONFIG_EARLY_PRINTK
14866 .globl early_idt_handlers
14867 early_idt_handlers:
14868 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14869 #endif /* EARLY_PRINTK */
14870 1: hlt
14871 jmp 1b
14872 + .previous
14873
14874 #ifdef CONFIG_EARLY_PRINTK
14875 + __INITDATA
14876 early_recursion_flag:
14877 .long 0
14878 + .previous
14879
14880 + .section .rodata,"a",@progbits
14881 early_idt_msg:
14882 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14883 early_idt_ripmsg:
14884 .asciz "RIP %s\n"
14885 -#endif /* CONFIG_EARLY_PRINTK */
14886 .previous
14887 +#endif /* CONFIG_EARLY_PRINTK */
14888
14889 + .section .rodata,"a",@progbits
14890 #define NEXT_PAGE(name) \
14891 .balign PAGE_SIZE; \
14892 ENTRY(name)
14893 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14894 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14895 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14896 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14897 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14898 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14899 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14900 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14901 .org init_level4_pgt + L4_START_KERNEL*8, 0
14902 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14903 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14904
14905 +#ifdef CONFIG_PAX_PER_CPU_PGD
14906 +NEXT_PAGE(cpu_pgd)
14907 + .rept NR_CPUS
14908 + .fill 512,8,0
14909 + .endr
14910 +#endif
14911 +
14912 NEXT_PAGE(level3_ident_pgt)
14913 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14914 +#ifdef CONFIG_XEN
14915 .fill 511,8,0
14916 +#else
14917 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14918 + .fill 510,8,0
14919 +#endif
14920 +
14921 +NEXT_PAGE(level3_vmalloc_pgt)
14922 + .fill 512,8,0
14923 +
14924 +NEXT_PAGE(level3_vmemmap_pgt)
14925 + .fill L3_VMEMMAP_START,8,0
14926 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14927
14928 NEXT_PAGE(level3_kernel_pgt)
14929 .fill L3_START_KERNEL,8,0
14930 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14931 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14932 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14933
14934 +NEXT_PAGE(level2_vmemmap_pgt)
14935 + .fill 512,8,0
14936 +
14937 NEXT_PAGE(level2_fixmap_pgt)
14938 - .fill 506,8,0
14939 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14940 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14941 - .fill 5,8,0
14942 + .fill 507,8,0
14943 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14944 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14945 + .fill 4,8,0
14946
14947 -NEXT_PAGE(level1_fixmap_pgt)
14948 +NEXT_PAGE(level1_vsyscall_pgt)
14949 .fill 512,8,0
14950
14951 -NEXT_PAGE(level2_ident_pgt)
14952 - /* Since I easily can, map the first 1G.
14953 + /* Since I easily can, map the first 2G.
14954 * Don't set NX because code runs from these pages.
14955 */
14956 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14957 +NEXT_PAGE(level2_ident_pgt)
14958 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14959
14960 NEXT_PAGE(level2_kernel_pgt)
14961 /*
14962 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14963 * If you want to increase this then increase MODULES_VADDR
14964 * too.)
14965 */
14966 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14967 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14968 -
14969 -NEXT_PAGE(level2_spare_pgt)
14970 - .fill 512, 8, 0
14971 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14972
14973 #undef PMDS
14974 #undef NEXT_PAGE
14975
14976 - .data
14977 + .align PAGE_SIZE
14978 +ENTRY(cpu_gdt_table)
14979 + .rept NR_CPUS
14980 + .quad 0x0000000000000000 /* NULL descriptor */
14981 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14982 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14983 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14984 + .quad 0x00cffb000000ffff /* __USER32_CS */
14985 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14986 + .quad 0x00affb000000ffff /* __USER_CS */
14987 +
14988 +#ifdef CONFIG_PAX_KERNEXEC
14989 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14990 +#else
14991 + .quad 0x0 /* unused */
14992 +#endif
14993 +
14994 + .quad 0,0 /* TSS */
14995 + .quad 0,0 /* LDT */
14996 + .quad 0,0,0 /* three TLS descriptors */
14997 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14998 + /* asm/segment.h:GDT_ENTRIES must match this */
14999 +
15000 + /* zero the remaining page */
15001 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15002 + .endr
15003 +
15004 .align 16
15005 .globl early_gdt_descr
15006 early_gdt_descr:
15007 .word GDT_ENTRIES*8-1
15008 early_gdt_descr_base:
15009 - .quad INIT_PER_CPU_VAR(gdt_page)
15010 + .quad cpu_gdt_table
15011
15012 ENTRY(phys_base)
15013 /* This must match the first entry in level2_kernel_pgt */
15014 .quad 0x0000000000000000
15015
15016 #include "../../x86/xen/xen-head.S"
15017 -
15018 - .section .bss, "aw", @nobits
15019 +
15020 + .section .rodata,"a",@progbits
15021 .align L1_CACHE_BYTES
15022 ENTRY(idt_table)
15023 - .skip IDT_ENTRIES * 16
15024 + .fill 512,8,0
15025
15026 __PAGE_ALIGNED_BSS
15027 .align PAGE_SIZE
15028 diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15029 --- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15030 +++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15031 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15032 EXPORT_SYMBOL(cmpxchg8b_emu);
15033 #endif
15034
15035 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15036 +
15037 /* Networking helper routines. */
15038 EXPORT_SYMBOL(csum_partial_copy_generic);
15039 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15040 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15041
15042 EXPORT_SYMBOL(__get_user_1);
15043 EXPORT_SYMBOL(__get_user_2);
15044 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15045
15046 EXPORT_SYMBOL(csum_partial);
15047 EXPORT_SYMBOL(empty_zero_page);
15048 +
15049 +#ifdef CONFIG_PAX_KERNEXEC
15050 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15051 +#endif
15052 diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15053 --- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15054 +++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15055 @@ -208,7 +208,7 @@ spurious_8259A_irq:
15056 "spurious 8259A interrupt: IRQ%d.\n", irq);
15057 spurious_irq_mask |= irqmask;
15058 }
15059 - atomic_inc(&irq_err_count);
15060 + atomic_inc_unchecked(&irq_err_count);
15061 /*
15062 * Theoretically we do not have to handle this IRQ,
15063 * but in Linux this does not cause problems and is
15064 diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15065 --- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15066 +++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15067 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15068 * way process stacks are handled. This is done by having a special
15069 * "init_task" linker map entry..
15070 */
15071 -union thread_union init_thread_union __init_task_data =
15072 - { INIT_THREAD_INFO(init_task) };
15073 +union thread_union init_thread_union __init_task_data;
15074
15075 /*
15076 * Initial task structure.
15077 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15078 * section. Since TSS's are completely CPU-local, we want them
15079 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15080 */
15081 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15082 -
15083 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15084 +EXPORT_SYMBOL(init_tss);
15085 diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15086 --- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15087 +++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15088 @@ -6,6 +6,7 @@
15089 #include <linux/sched.h>
15090 #include <linux/kernel.h>
15091 #include <linux/capability.h>
15092 +#include <linux/security.h>
15093 #include <linux/errno.h>
15094 #include <linux/types.h>
15095 #include <linux/ioport.h>
15096 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15097
15098 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15099 return -EINVAL;
15100 +#ifdef CONFIG_GRKERNSEC_IO
15101 + if (turn_on && grsec_disable_privio) {
15102 + gr_handle_ioperm();
15103 + return -EPERM;
15104 + }
15105 +#endif
15106 if (turn_on && !capable(CAP_SYS_RAWIO))
15107 return -EPERM;
15108
15109 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15110 * because the ->io_bitmap_max value must match the bitmap
15111 * contents:
15112 */
15113 - tss = &per_cpu(init_tss, get_cpu());
15114 + tss = init_tss + get_cpu();
15115
15116 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15117
15118 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15119 return -EINVAL;
15120 /* Trying to gain more privileges? */
15121 if (level > old) {
15122 +#ifdef CONFIG_GRKERNSEC_IO
15123 + if (grsec_disable_privio) {
15124 + gr_handle_iopl();
15125 + return -EPERM;
15126 + }
15127 +#endif
15128 if (!capable(CAP_SYS_RAWIO))
15129 return -EPERM;
15130 }
15131 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15132 --- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15133 +++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15134 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15135 __asm__ __volatile__("andl %%esp,%0" :
15136 "=r" (sp) : "0" (THREAD_SIZE - 1));
15137
15138 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15139 + return sp < STACK_WARN;
15140 }
15141
15142 static void print_stack_overflow(void)
15143 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15144 * per-CPU IRQ handling contexts (thread information and stack)
15145 */
15146 union irq_ctx {
15147 - struct thread_info tinfo;
15148 - u32 stack[THREAD_SIZE/sizeof(u32)];
15149 -} __attribute__((aligned(PAGE_SIZE)));
15150 + unsigned long previous_esp;
15151 + u32 stack[THREAD_SIZE/sizeof(u32)];
15152 +} __attribute__((aligned(THREAD_SIZE)));
15153
15154 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15155 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15156 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15157 static inline int
15158 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15159 {
15160 - union irq_ctx *curctx, *irqctx;
15161 + union irq_ctx *irqctx;
15162 u32 *isp, arg1, arg2;
15163
15164 - curctx = (union irq_ctx *) current_thread_info();
15165 irqctx = __get_cpu_var(hardirq_ctx);
15166
15167 /*
15168 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15169 * handler) we can't do that and just have to keep using the
15170 * current stack (which is the irq stack already after all)
15171 */
15172 - if (unlikely(curctx == irqctx))
15173 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15174 return 0;
15175
15176 /* build the stack frame on the IRQ stack */
15177 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15178 - irqctx->tinfo.task = curctx->tinfo.task;
15179 - irqctx->tinfo.previous_esp = current_stack_pointer;
15180 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15181 + irqctx->previous_esp = current_stack_pointer;
15182
15183 - /*
15184 - * Copy the softirq bits in preempt_count so that the
15185 - * softirq checks work in the hardirq context.
15186 - */
15187 - irqctx->tinfo.preempt_count =
15188 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15189 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15190 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15191 + __set_fs(MAKE_MM_SEG(0));
15192 +#endif
15193
15194 if (unlikely(overflow))
15195 call_on_stack(print_stack_overflow, isp);
15196 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15197 : "0" (irq), "1" (desc), "2" (isp),
15198 "D" (desc->handle_irq)
15199 : "memory", "cc", "ecx");
15200 +
15201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15202 + __set_fs(current_thread_info()->addr_limit);
15203 +#endif
15204 +
15205 return 1;
15206 }
15207
15208 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15209 */
15210 void __cpuinit irq_ctx_init(int cpu)
15211 {
15212 - union irq_ctx *irqctx;
15213 -
15214 if (per_cpu(hardirq_ctx, cpu))
15215 return;
15216
15217 - irqctx = &per_cpu(hardirq_stack, cpu);
15218 - irqctx->tinfo.task = NULL;
15219 - irqctx->tinfo.exec_domain = NULL;
15220 - irqctx->tinfo.cpu = cpu;
15221 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15222 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15223 -
15224 - per_cpu(hardirq_ctx, cpu) = irqctx;
15225 -
15226 - irqctx = &per_cpu(softirq_stack, cpu);
15227 - irqctx->tinfo.task = NULL;
15228 - irqctx->tinfo.exec_domain = NULL;
15229 - irqctx->tinfo.cpu = cpu;
15230 - irqctx->tinfo.preempt_count = 0;
15231 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15232 -
15233 - per_cpu(softirq_ctx, cpu) = irqctx;
15234 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15235 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15236
15237 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15238 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15239 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15240 asmlinkage void do_softirq(void)
15241 {
15242 unsigned long flags;
15243 - struct thread_info *curctx;
15244 union irq_ctx *irqctx;
15245 u32 *isp;
15246
15247 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15248 local_irq_save(flags);
15249
15250 if (local_softirq_pending()) {
15251 - curctx = current_thread_info();
15252 irqctx = __get_cpu_var(softirq_ctx);
15253 - irqctx->tinfo.task = curctx->task;
15254 - irqctx->tinfo.previous_esp = current_stack_pointer;
15255 + irqctx->previous_esp = current_stack_pointer;
15256
15257 /* build the stack frame on the softirq stack */
15258 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15259 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15260 +
15261 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15262 + __set_fs(MAKE_MM_SEG(0));
15263 +#endif
15264
15265 call_on_stack(__do_softirq, isp);
15266 +
15267 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15268 + __set_fs(current_thread_info()->addr_limit);
15269 +#endif
15270 +
15271 /*
15272 * Shouldnt happen, we returned above if in_interrupt():
15273 */
15274 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15275 --- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15276 +++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15277 @@ -15,7 +15,7 @@
15278 #include <asm/mce.h>
15279 #include <asm/hw_irq.h>
15280
15281 -atomic_t irq_err_count;
15282 +atomic_unchecked_t irq_err_count;
15283
15284 /* Function pointer for generic interrupt vector handling */
15285 void (*generic_interrupt_extension)(void) = NULL;
15286 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15287 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15288 seq_printf(p, " Machine check polls\n");
15289 #endif
15290 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15291 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15292 #if defined(CONFIG_X86_IO_APIC)
15293 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15294 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15295 #endif
15296 return 0;
15297 }
15298 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15299
15300 u64 arch_irq_stat(void)
15301 {
15302 - u64 sum = atomic_read(&irq_err_count);
15303 + u64 sum = atomic_read_unchecked(&irq_err_count);
15304
15305 #ifdef CONFIG_X86_IO_APIC
15306 - sum += atomic_read(&irq_mis_count);
15307 + sum += atomic_read_unchecked(&irq_mis_count);
15308 #endif
15309 return sum;
15310 }
15311 diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15312 --- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15313 +++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15314 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15315
15316 /* clear the trace bit */
15317 linux_regs->flags &= ~X86_EFLAGS_TF;
15318 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15319 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15320
15321 /* set the trace bit if we're stepping */
15322 if (remcomInBuffer[0] == 's') {
15323 linux_regs->flags |= X86_EFLAGS_TF;
15324 kgdb_single_step = 1;
15325 - atomic_set(&kgdb_cpu_doing_single_step,
15326 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15327 raw_smp_processor_id());
15328 }
15329
15330 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15331 break;
15332
15333 case DIE_DEBUG:
15334 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15335 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15336 raw_smp_processor_id()) {
15337 if (user_mode(regs))
15338 return single_step_cont(regs, args);
15339 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15340 return instruction_pointer(regs);
15341 }
15342
15343 -struct kgdb_arch arch_kgdb_ops = {
15344 +const struct kgdb_arch arch_kgdb_ops = {
15345 /* Breakpoint instruction: */
15346 .gdb_bpt_instr = { 0xcc },
15347 .flags = KGDB_HW_BREAKPOINT,
15348 diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15349 --- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15350 +++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15351 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15352 char op;
15353 s32 raddr;
15354 } __attribute__((packed)) * jop;
15355 - jop = (struct __arch_jmp_op *)from;
15356 +
15357 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15358 +
15359 + pax_open_kernel();
15360 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15361 jop->op = RELATIVEJUMP_INSTRUCTION;
15362 + pax_close_kernel();
15363 }
15364
15365 /*
15366 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15367 kprobe_opcode_t opcode;
15368 kprobe_opcode_t *orig_opcodes = opcodes;
15369
15370 - if (search_exception_tables((unsigned long)opcodes))
15371 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15372 return 0; /* Page fault may occur on this address. */
15373
15374 retry:
15375 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15376 disp = (u8 *) p->addr + *((s32 *) insn) -
15377 (u8 *) p->ainsn.insn;
15378 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15379 + pax_open_kernel();
15380 *(s32 *)insn = (s32) disp;
15381 + pax_close_kernel();
15382 }
15383 }
15384 #endif
15385 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15386
15387 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15388 {
15389 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15390 + pax_open_kernel();
15391 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15392 + pax_close_kernel();
15393
15394 fix_riprel(p);
15395
15396 - if (can_boost(p->addr))
15397 + if (can_boost(ktla_ktva(p->addr)))
15398 p->ainsn.boostable = 0;
15399 else
15400 p->ainsn.boostable = -1;
15401
15402 - p->opcode = *p->addr;
15403 + p->opcode = *(ktla_ktva(p->addr));
15404 }
15405
15406 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15407 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15408 if (p->opcode == BREAKPOINT_INSTRUCTION)
15409 regs->ip = (unsigned long)p->addr;
15410 else
15411 - regs->ip = (unsigned long)p->ainsn.insn;
15412 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15413 }
15414
15415 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15416 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15417 if (p->ainsn.boostable == 1 && !p->post_handler) {
15418 /* Boost up -- we can execute copied instructions directly */
15419 reset_current_kprobe();
15420 - regs->ip = (unsigned long)p->ainsn.insn;
15421 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15422 preempt_enable_no_resched();
15423 return;
15424 }
15425 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15426 struct kprobe_ctlblk *kcb;
15427
15428 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15429 - if (*addr != BREAKPOINT_INSTRUCTION) {
15430 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15431 /*
15432 * The breakpoint instruction was removed right
15433 * after we hit it. Another cpu has removed
15434 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15435 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15436 {
15437 unsigned long *tos = stack_addr(regs);
15438 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15439 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15440 unsigned long orig_ip = (unsigned long)p->addr;
15441 kprobe_opcode_t *insn = p->ainsn.insn;
15442
15443 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15444 struct die_args *args = data;
15445 int ret = NOTIFY_DONE;
15446
15447 - if (args->regs && user_mode_vm(args->regs))
15448 + if (args->regs && user_mode(args->regs))
15449 return ret;
15450
15451 switch (val) {
15452 diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15453 --- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15454 +++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15455 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15456 if (reload) {
15457 #ifdef CONFIG_SMP
15458 preempt_disable();
15459 - load_LDT(pc);
15460 + load_LDT_nolock(pc);
15461 if (!cpumask_equal(mm_cpumask(current->mm),
15462 cpumask_of(smp_processor_id())))
15463 smp_call_function(flush_ldt, current->mm, 1);
15464 preempt_enable();
15465 #else
15466 - load_LDT(pc);
15467 + load_LDT_nolock(pc);
15468 #endif
15469 }
15470 if (oldsize) {
15471 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15472 return err;
15473
15474 for (i = 0; i < old->size; i++)
15475 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15476 + write_ldt_entry(new->ldt, i, old->ldt + i);
15477 return 0;
15478 }
15479
15480 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15481 retval = copy_ldt(&mm->context, &old_mm->context);
15482 mutex_unlock(&old_mm->context.lock);
15483 }
15484 +
15485 + if (tsk == current) {
15486 + mm->context.vdso = 0;
15487 +
15488 +#ifdef CONFIG_X86_32
15489 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15490 + mm->context.user_cs_base = 0UL;
15491 + mm->context.user_cs_limit = ~0UL;
15492 +
15493 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15494 + cpus_clear(mm->context.cpu_user_cs_mask);
15495 +#endif
15496 +
15497 +#endif
15498 +#endif
15499 +
15500 + }
15501 +
15502 return retval;
15503 }
15504
15505 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15506 }
15507 }
15508
15509 +#ifdef CONFIG_PAX_SEGMEXEC
15510 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15511 + error = -EINVAL;
15512 + goto out_unlock;
15513 + }
15514 +#endif
15515 +
15516 fill_ldt(&ldt, &ldt_info);
15517 if (oldmode)
15518 ldt.avl = 0;
15519 diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15520 --- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15521 +++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15522 @@ -26,7 +26,7 @@
15523 #include <asm/system.h>
15524 #include <asm/cacheflush.h>
15525
15526 -static void set_idt(void *newidt, __u16 limit)
15527 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15528 {
15529 struct desc_ptr curidt;
15530
15531 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15532 }
15533
15534
15535 -static void set_gdt(void *newgdt, __u16 limit)
15536 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15537 {
15538 struct desc_ptr curgdt;
15539
15540 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15541 }
15542
15543 control_page = page_address(image->control_code_page);
15544 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15545 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15546
15547 relocate_kernel_ptr = control_page;
15548 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15549 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15550 --- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15551 +++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15552 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15553 uci->mc = NULL;
15554 }
15555
15556 -static struct microcode_ops microcode_amd_ops = {
15557 +static const struct microcode_ops microcode_amd_ops = {
15558 .request_microcode_user = request_microcode_user,
15559 .request_microcode_fw = request_microcode_fw,
15560 .collect_cpu_info = collect_cpu_info_amd,
15561 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15562 .microcode_fini_cpu = microcode_fini_cpu_amd,
15563 };
15564
15565 -struct microcode_ops * __init init_amd_microcode(void)
15566 +const struct microcode_ops * __init init_amd_microcode(void)
15567 {
15568 return &microcode_amd_ops;
15569 }
15570 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15571 --- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15572 +++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15573 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15574
15575 #define MICROCODE_VERSION "2.00"
15576
15577 -static struct microcode_ops *microcode_ops;
15578 +static const struct microcode_ops *microcode_ops;
15579
15580 /*
15581 * Synchronization.
15582 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15583 --- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15584 +++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15585 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15586
15587 static int get_ucode_user(void *to, const void *from, size_t n)
15588 {
15589 - return copy_from_user(to, from, n);
15590 + return copy_from_user(to, (__force const void __user *)from, n);
15591 }
15592
15593 static enum ucode_state
15594 request_microcode_user(int cpu, const void __user *buf, size_t size)
15595 {
15596 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15597 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15598 }
15599
15600 static void microcode_fini_cpu(int cpu)
15601 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15602 uci->mc = NULL;
15603 }
15604
15605 -static struct microcode_ops microcode_intel_ops = {
15606 +static const struct microcode_ops microcode_intel_ops = {
15607 .request_microcode_user = request_microcode_user,
15608 .request_microcode_fw = request_microcode_fw,
15609 .collect_cpu_info = collect_cpu_info,
15610 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15611 .microcode_fini_cpu = microcode_fini_cpu,
15612 };
15613
15614 -struct microcode_ops * __init init_intel_microcode(void)
15615 +const struct microcode_ops * __init init_intel_microcode(void)
15616 {
15617 return &microcode_intel_ops;
15618 }
15619 diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15620 --- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15621 +++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15622 @@ -34,7 +34,7 @@
15623 #define DEBUGP(fmt...)
15624 #endif
15625
15626 -void *module_alloc(unsigned long size)
15627 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15628 {
15629 struct vm_struct *area;
15630
15631 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15632 if (!area)
15633 return NULL;
15634
15635 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15636 - PAGE_KERNEL_EXEC);
15637 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15638 +}
15639 +
15640 +void *module_alloc(unsigned long size)
15641 +{
15642 +
15643 +#ifdef CONFIG_PAX_KERNEXEC
15644 + return __module_alloc(size, PAGE_KERNEL);
15645 +#else
15646 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15647 +#endif
15648 +
15649 }
15650
15651 /* Free memory returned from module_alloc */
15652 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15653 vfree(module_region);
15654 }
15655
15656 +#ifdef CONFIG_PAX_KERNEXEC
15657 +#ifdef CONFIG_X86_32
15658 +void *module_alloc_exec(unsigned long size)
15659 +{
15660 + struct vm_struct *area;
15661 +
15662 + if (size == 0)
15663 + return NULL;
15664 +
15665 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15666 + return area ? area->addr : NULL;
15667 +}
15668 +EXPORT_SYMBOL(module_alloc_exec);
15669 +
15670 +void module_free_exec(struct module *mod, void *module_region)
15671 +{
15672 + vunmap(module_region);
15673 +}
15674 +EXPORT_SYMBOL(module_free_exec);
15675 +#else
15676 +void module_free_exec(struct module *mod, void *module_region)
15677 +{
15678 + module_free(mod, module_region);
15679 +}
15680 +EXPORT_SYMBOL(module_free_exec);
15681 +
15682 +void *module_alloc_exec(unsigned long size)
15683 +{
15684 + return __module_alloc(size, PAGE_KERNEL_RX);
15685 +}
15686 +EXPORT_SYMBOL(module_alloc_exec);
15687 +#endif
15688 +#endif
15689 +
15690 /* We don't need anything special. */
15691 int module_frob_arch_sections(Elf_Ehdr *hdr,
15692 Elf_Shdr *sechdrs,
15693 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15694 unsigned int i;
15695 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15696 Elf32_Sym *sym;
15697 - uint32_t *location;
15698 + uint32_t *plocation, location;
15699
15700 DEBUGP("Applying relocate section %u to %u\n", relsec,
15701 sechdrs[relsec].sh_info);
15702 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15703 /* This is where to make the change */
15704 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15705 - + rel[i].r_offset;
15706 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15707 + location = (uint32_t)plocation;
15708 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15709 + plocation = ktla_ktva((void *)plocation);
15710 /* This is the symbol it is referring to. Note that all
15711 undefined symbols have been resolved. */
15712 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15713 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15714 switch (ELF32_R_TYPE(rel[i].r_info)) {
15715 case R_386_32:
15716 /* We add the value into the location given */
15717 - *location += sym->st_value;
15718 + pax_open_kernel();
15719 + *plocation += sym->st_value;
15720 + pax_close_kernel();
15721 break;
15722 case R_386_PC32:
15723 /* Add the value, subtract its postition */
15724 - *location += sym->st_value - (uint32_t)location;
15725 + pax_open_kernel();
15726 + *plocation += sym->st_value - location;
15727 + pax_close_kernel();
15728 break;
15729 default:
15730 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15731 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15732 case R_X86_64_NONE:
15733 break;
15734 case R_X86_64_64:
15735 + pax_open_kernel();
15736 *(u64 *)loc = val;
15737 + pax_close_kernel();
15738 break;
15739 case R_X86_64_32:
15740 + pax_open_kernel();
15741 *(u32 *)loc = val;
15742 + pax_close_kernel();
15743 if (val != *(u32 *)loc)
15744 goto overflow;
15745 break;
15746 case R_X86_64_32S:
15747 + pax_open_kernel();
15748 *(s32 *)loc = val;
15749 + pax_close_kernel();
15750 if ((s64)val != *(s32 *)loc)
15751 goto overflow;
15752 break;
15753 case R_X86_64_PC32:
15754 val -= (u64)loc;
15755 + pax_open_kernel();
15756 *(u32 *)loc = val;
15757 + pax_close_kernel();
15758 +
15759 #if 0
15760 if ((s64)val != *(s32 *)loc)
15761 goto overflow;
15762 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15763 --- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15764 +++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15765 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15766 {
15767 return x;
15768 }
15769 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15770 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15771 +#endif
15772
15773 void __init default_banner(void)
15774 {
15775 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15776 * corresponding structure. */
15777 static void *get_call_destination(u8 type)
15778 {
15779 - struct paravirt_patch_template tmpl = {
15780 + const struct paravirt_patch_template tmpl = {
15781 .pv_init_ops = pv_init_ops,
15782 .pv_time_ops = pv_time_ops,
15783 .pv_cpu_ops = pv_cpu_ops,
15784 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15785 .pv_lock_ops = pv_lock_ops,
15786 #endif
15787 };
15788 +
15789 + pax_track_stack();
15790 return *((void **)&tmpl + type);
15791 }
15792
15793 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15794 if (opfunc == NULL)
15795 /* If there's no function, patch it with a ud2a (BUG) */
15796 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15797 - else if (opfunc == _paravirt_nop)
15798 + else if (opfunc == (void *)_paravirt_nop)
15799 /* If the operation is a nop, then nop the callsite */
15800 ret = paravirt_patch_nop();
15801
15802 /* identity functions just return their single argument */
15803 - else if (opfunc == _paravirt_ident_32)
15804 + else if (opfunc == (void *)_paravirt_ident_32)
15805 ret = paravirt_patch_ident_32(insnbuf, len);
15806 - else if (opfunc == _paravirt_ident_64)
15807 + else if (opfunc == (void *)_paravirt_ident_64)
15808 + ret = paravirt_patch_ident_64(insnbuf, len);
15809 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15810 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15811 ret = paravirt_patch_ident_64(insnbuf, len);
15812 +#endif
15813
15814 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15815 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15816 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15817 if (insn_len > len || start == NULL)
15818 insn_len = len;
15819 else
15820 - memcpy(insnbuf, start, insn_len);
15821 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15822
15823 return insn_len;
15824 }
15825 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15826 preempt_enable();
15827 }
15828
15829 -struct pv_info pv_info = {
15830 +struct pv_info pv_info __read_only = {
15831 .name = "bare hardware",
15832 .paravirt_enabled = 0,
15833 .kernel_rpl = 0,
15834 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15835 };
15836
15837 -struct pv_init_ops pv_init_ops = {
15838 +struct pv_init_ops pv_init_ops __read_only = {
15839 .patch = native_patch,
15840 };
15841
15842 -struct pv_time_ops pv_time_ops = {
15843 +struct pv_time_ops pv_time_ops __read_only = {
15844 .sched_clock = native_sched_clock,
15845 };
15846
15847 -struct pv_irq_ops pv_irq_ops = {
15848 +struct pv_irq_ops pv_irq_ops __read_only = {
15849 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15850 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15851 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15852 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15853 #endif
15854 };
15855
15856 -struct pv_cpu_ops pv_cpu_ops = {
15857 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15858 .cpuid = native_cpuid,
15859 .get_debugreg = native_get_debugreg,
15860 .set_debugreg = native_set_debugreg,
15861 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15862 .end_context_switch = paravirt_nop,
15863 };
15864
15865 -struct pv_apic_ops pv_apic_ops = {
15866 +struct pv_apic_ops pv_apic_ops __read_only = {
15867 #ifdef CONFIG_X86_LOCAL_APIC
15868 .startup_ipi_hook = paravirt_nop,
15869 #endif
15870 };
15871
15872 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15873 +#ifdef CONFIG_X86_32
15874 +#ifdef CONFIG_X86_PAE
15875 +/* 64-bit pagetable entries */
15876 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15877 +#else
15878 /* 32-bit pagetable entries */
15879 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15880 +#endif
15881 #else
15882 /* 64-bit pagetable entries */
15883 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15884 #endif
15885
15886 -struct pv_mmu_ops pv_mmu_ops = {
15887 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15888
15889 .read_cr2 = native_read_cr2,
15890 .write_cr2 = native_write_cr2,
15891 @@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15892 },
15893
15894 .set_fixmap = native_set_fixmap,
15895 +
15896 +#ifdef CONFIG_PAX_KERNEXEC
15897 + .pax_open_kernel = native_pax_open_kernel,
15898 + .pax_close_kernel = native_pax_close_kernel,
15899 +#endif
15900 +
15901 };
15902
15903 EXPORT_SYMBOL_GPL(pv_time_ops);
15904 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
15905 --- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15906 +++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15907 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15908 __raw_spin_lock(lock);
15909 }
15910
15911 -struct pv_lock_ops pv_lock_ops = {
15912 +struct pv_lock_ops pv_lock_ops __read_only = {
15913 #ifdef CONFIG_SMP
15914 .spin_is_locked = __ticket_spin_is_locked,
15915 .spin_is_contended = __ticket_spin_is_contended,
15916 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
15917 --- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15918 +++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15919 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15920 free_pages((unsigned long)vaddr, get_order(size));
15921 }
15922
15923 -static struct dma_map_ops calgary_dma_ops = {
15924 +static const struct dma_map_ops calgary_dma_ops = {
15925 .alloc_coherent = calgary_alloc_coherent,
15926 .free_coherent = calgary_free_coherent,
15927 .map_sg = calgary_map_sg,
15928 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
15929 --- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15930 +++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15931 @@ -14,7 +14,7 @@
15932
15933 static int forbid_dac __read_mostly;
15934
15935 -struct dma_map_ops *dma_ops;
15936 +const struct dma_map_ops *dma_ops;
15937 EXPORT_SYMBOL(dma_ops);
15938
15939 static int iommu_sac_force __read_mostly;
15940 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15941
15942 int dma_supported(struct device *dev, u64 mask)
15943 {
15944 - struct dma_map_ops *ops = get_dma_ops(dev);
15945 + const struct dma_map_ops *ops = get_dma_ops(dev);
15946
15947 #ifdef CONFIG_PCI
15948 if (mask > 0xffffffff && forbid_dac > 0) {
15949 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
15950 --- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15951 +++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15952 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15953 return -1;
15954 }
15955
15956 -static struct dma_map_ops gart_dma_ops = {
15957 +static const struct dma_map_ops gart_dma_ops = {
15958 .map_sg = gart_map_sg,
15959 .unmap_sg = gart_unmap_sg,
15960 .map_page = gart_map_page,
15961 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
15962 --- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15963 +++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15964 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15965 flush_write_buffers();
15966 }
15967
15968 -struct dma_map_ops nommu_dma_ops = {
15969 +const struct dma_map_ops nommu_dma_ops = {
15970 .alloc_coherent = dma_generic_alloc_coherent,
15971 .free_coherent = nommu_free_coherent,
15972 .map_sg = nommu_map_sg,
15973 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
15974 --- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15975 +++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15976 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15977 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15978 }
15979
15980 -static struct dma_map_ops swiotlb_dma_ops = {
15981 +static const struct dma_map_ops swiotlb_dma_ops = {
15982 .mapping_error = swiotlb_dma_mapping_error,
15983 .alloc_coherent = x86_swiotlb_alloc_coherent,
15984 .free_coherent = swiotlb_free_coherent,
15985 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
15986 --- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15987 +++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15988 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15989 unsigned long thread_saved_pc(struct task_struct *tsk)
15990 {
15991 return ((unsigned long *)tsk->thread.sp)[3];
15992 +//XXX return tsk->thread.eip;
15993 }
15994
15995 #ifndef CONFIG_SMP
15996 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15997 unsigned short ss, gs;
15998 const char *board;
15999
16000 - if (user_mode_vm(regs)) {
16001 + if (user_mode(regs)) {
16002 sp = regs->sp;
16003 ss = regs->ss & 0xffff;
16004 - gs = get_user_gs(regs);
16005 } else {
16006 sp = (unsigned long) (&regs->sp);
16007 savesegment(ss, ss);
16008 - savesegment(gs, gs);
16009 }
16010 + gs = get_user_gs(regs);
16011
16012 printk("\n");
16013
16014 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16015 regs.bx = (unsigned long) fn;
16016 regs.dx = (unsigned long) arg;
16017
16018 - regs.ds = __USER_DS;
16019 - regs.es = __USER_DS;
16020 + regs.ds = __KERNEL_DS;
16021 + regs.es = __KERNEL_DS;
16022 regs.fs = __KERNEL_PERCPU;
16023 - regs.gs = __KERNEL_STACK_CANARY;
16024 + savesegment(gs, regs.gs);
16025 regs.orig_ax = -1;
16026 regs.ip = (unsigned long) kernel_thread_helper;
16027 regs.cs = __KERNEL_CS | get_kernel_rpl();
16028 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16029 struct task_struct *tsk;
16030 int err;
16031
16032 - childregs = task_pt_regs(p);
16033 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16034 *childregs = *regs;
16035 childregs->ax = 0;
16036 childregs->sp = sp;
16037
16038 p->thread.sp = (unsigned long) childregs;
16039 p->thread.sp0 = (unsigned long) (childregs+1);
16040 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16041
16042 p->thread.ip = (unsigned long) ret_from_fork;
16043
16044 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16045 struct thread_struct *prev = &prev_p->thread,
16046 *next = &next_p->thread;
16047 int cpu = smp_processor_id();
16048 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16049 + struct tss_struct *tss = init_tss + cpu;
16050 bool preload_fpu;
16051
16052 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16053 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16054 */
16055 lazy_save_gs(prev->gs);
16056
16057 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16058 + __set_fs(task_thread_info(next_p)->addr_limit);
16059 +#endif
16060 +
16061 /*
16062 * Load the per-thread Thread-Local Storage descriptor.
16063 */
16064 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16065 */
16066 arch_end_context_switch(next_p);
16067
16068 + percpu_write(current_task, next_p);
16069 + percpu_write(current_tinfo, &next_p->tinfo);
16070 +
16071 if (preload_fpu)
16072 __math_state_restore();
16073
16074 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16075 if (prev->gs | next->gs)
16076 lazy_load_gs(next->gs);
16077
16078 - percpu_write(current_task, next_p);
16079 -
16080 return prev_p;
16081 }
16082
16083 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16084 } while (count++ < 16);
16085 return 0;
16086 }
16087 -
16088 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16089 --- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16090 +++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16091 @@ -91,7 +91,7 @@ static void __exit_idle(void)
16092 void exit_idle(void)
16093 {
16094 /* idle loop has pid 0 */
16095 - if (current->pid)
16096 + if (task_pid_nr(current))
16097 return;
16098 __exit_idle();
16099 }
16100 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16101 if (!board)
16102 board = "";
16103 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16104 - current->pid, current->comm, print_tainted(),
16105 + task_pid_nr(current), current->comm, print_tainted(),
16106 init_utsname()->release,
16107 (int)strcspn(init_utsname()->version, " "),
16108 init_utsname()->version, board);
16109 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16110 struct pt_regs *childregs;
16111 struct task_struct *me = current;
16112
16113 - childregs = ((struct pt_regs *)
16114 - (THREAD_SIZE + task_stack_page(p))) - 1;
16115 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16116 *childregs = *regs;
16117
16118 childregs->ax = 0;
16119 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16120 p->thread.sp = (unsigned long) childregs;
16121 p->thread.sp0 = (unsigned long) (childregs+1);
16122 p->thread.usersp = me->thread.usersp;
16123 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16124
16125 set_tsk_thread_flag(p, TIF_FORK);
16126
16127 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16128 struct thread_struct *prev = &prev_p->thread;
16129 struct thread_struct *next = &next_p->thread;
16130 int cpu = smp_processor_id();
16131 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16132 + struct tss_struct *tss = init_tss + cpu;
16133 unsigned fsindex, gsindex;
16134 bool preload_fpu;
16135
16136 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16137 prev->usersp = percpu_read(old_rsp);
16138 percpu_write(old_rsp, next->usersp);
16139 percpu_write(current_task, next_p);
16140 + percpu_write(current_tinfo, &next_p->tinfo);
16141
16142 - percpu_write(kernel_stack,
16143 - (unsigned long)task_stack_page(next_p) +
16144 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16145 + percpu_write(kernel_stack, next->sp0);
16146
16147 /*
16148 * Now maybe reload the debug registers and handle I/O bitmaps
16149 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16150 if (!p || p == current || p->state == TASK_RUNNING)
16151 return 0;
16152 stack = (unsigned long)task_stack_page(p);
16153 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16154 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16155 return 0;
16156 fp = *(u64 *)(p->thread.sp);
16157 do {
16158 - if (fp < (unsigned long)stack ||
16159 - fp >= (unsigned long)stack+THREAD_SIZE)
16160 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16161 return 0;
16162 ip = *(u64 *)(fp+8);
16163 if (!in_sched_functions(ip))
16164 diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16165 --- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16166 +++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16167 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16168
16169 void free_thread_info(struct thread_info *ti)
16170 {
16171 - free_thread_xstate(ti->task);
16172 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16173 }
16174
16175 +static struct kmem_cache *task_struct_cachep;
16176 +
16177 void arch_task_cache_init(void)
16178 {
16179 - task_xstate_cachep =
16180 - kmem_cache_create("task_xstate", xstate_size,
16181 + /* create a slab on which task_structs can be allocated */
16182 + task_struct_cachep =
16183 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16184 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16185 +
16186 + task_xstate_cachep =
16187 + kmem_cache_create("task_xstate", xstate_size,
16188 __alignof__(union thread_xstate),
16189 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16190 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16191 +}
16192 +
16193 +struct task_struct *alloc_task_struct(void)
16194 +{
16195 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16196 +}
16197 +
16198 +void free_task_struct(struct task_struct *task)
16199 +{
16200 + free_thread_xstate(task);
16201 + kmem_cache_free(task_struct_cachep, task);
16202 }
16203
16204 /*
16205 @@ -73,7 +90,7 @@ void exit_thread(void)
16206 unsigned long *bp = t->io_bitmap_ptr;
16207
16208 if (bp) {
16209 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16210 + struct tss_struct *tss = init_tss + get_cpu();
16211
16212 t->io_bitmap_ptr = NULL;
16213 clear_thread_flag(TIF_IO_BITMAP);
16214 @@ -93,6 +110,9 @@ void flush_thread(void)
16215
16216 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16217
16218 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16219 + loadsegment(gs, 0);
16220 +#endif
16221 tsk->thread.debugreg0 = 0;
16222 tsk->thread.debugreg1 = 0;
16223 tsk->thread.debugreg2 = 0;
16224 @@ -307,7 +327,7 @@ void default_idle(void)
16225 EXPORT_SYMBOL(default_idle);
16226 #endif
16227
16228 -void stop_this_cpu(void *dummy)
16229 +__noreturn void stop_this_cpu(void *dummy)
16230 {
16231 local_irq_disable();
16232 /*
16233 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16234 }
16235 early_param("idle", idle_setup);
16236
16237 -unsigned long arch_align_stack(unsigned long sp)
16238 +#ifdef CONFIG_PAX_RANDKSTACK
16239 +asmlinkage void pax_randomize_kstack(void)
16240 {
16241 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16242 - sp -= get_random_int() % 8192;
16243 - return sp & ~0xf;
16244 -}
16245 + struct thread_struct *thread = &current->thread;
16246 + unsigned long time;
16247
16248 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16249 -{
16250 - unsigned long range_end = mm->brk + 0x02000000;
16251 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16252 + if (!randomize_va_space)
16253 + return;
16254 +
16255 + rdtscl(time);
16256 +
16257 + /* P4 seems to return a 0 LSB, ignore it */
16258 +#ifdef CONFIG_MPENTIUM4
16259 + time &= 0x3EUL;
16260 + time <<= 2;
16261 +#elif defined(CONFIG_X86_64)
16262 + time &= 0xFUL;
16263 + time <<= 4;
16264 +#else
16265 + time &= 0x1FUL;
16266 + time <<= 3;
16267 +#endif
16268 +
16269 + thread->sp0 ^= time;
16270 + load_sp0(init_tss + smp_processor_id(), thread);
16271 +
16272 +#ifdef CONFIG_X86_64
16273 + percpu_write(kernel_stack, thread->sp0);
16274 +#endif
16275 }
16276 +#endif
16277
16278 diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16279 --- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16280 +++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16281 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16282 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16283 {
16284 int ret;
16285 - unsigned long __user *datap = (unsigned long __user *)data;
16286 + unsigned long __user *datap = (__force unsigned long __user *)data;
16287
16288 switch (request) {
16289 /* read the word at location addr in the USER area. */
16290 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16291 if (addr < 0)
16292 return -EIO;
16293 ret = do_get_thread_area(child, addr,
16294 - (struct user_desc __user *) data);
16295 + (__force struct user_desc __user *) data);
16296 break;
16297
16298 case PTRACE_SET_THREAD_AREA:
16299 if (addr < 0)
16300 return -EIO;
16301 ret = do_set_thread_area(child, addr,
16302 - (struct user_desc __user *) data, 0);
16303 + (__force struct user_desc __user *) data, 0);
16304 break;
16305 #endif
16306
16307 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16308 #ifdef CONFIG_X86_PTRACE_BTS
16309 case PTRACE_BTS_CONFIG:
16310 ret = ptrace_bts_config
16311 - (child, data, (struct ptrace_bts_config __user *)addr);
16312 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16313 break;
16314
16315 case PTRACE_BTS_STATUS:
16316 ret = ptrace_bts_status
16317 - (child, data, (struct ptrace_bts_config __user *)addr);
16318 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16319 break;
16320
16321 case PTRACE_BTS_SIZE:
16322 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16323
16324 case PTRACE_BTS_GET:
16325 ret = ptrace_bts_read_record
16326 - (child, data, (struct bts_struct __user *) addr);
16327 + (child, data, (__force struct bts_struct __user *) addr);
16328 break;
16329
16330 case PTRACE_BTS_CLEAR:
16331 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16332
16333 case PTRACE_BTS_DRAIN:
16334 ret = ptrace_bts_drain
16335 - (child, data, (struct bts_struct __user *) addr);
16336 + (child, data, (__force struct bts_struct __user *) addr);
16337 break;
16338 #endif /* CONFIG_X86_PTRACE_BTS */
16339
16340 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16341 info.si_code = si_code;
16342
16343 /* User-mode ip? */
16344 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16345 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16346
16347 /* Send us the fake SIGTRAP */
16348 force_sig_info(SIGTRAP, &info, tsk);
16349 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16350 * We must return the syscall number to actually look up in the table.
16351 * This can be -1L to skip running any syscall at all.
16352 */
16353 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16354 +long syscall_trace_enter(struct pt_regs *regs)
16355 {
16356 long ret = 0;
16357
16358 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16359 return ret ?: regs->orig_ax;
16360 }
16361
16362 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16363 +void syscall_trace_leave(struct pt_regs *regs)
16364 {
16365 if (unlikely(current->audit_context))
16366 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16367 diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16368 --- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16369 +++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16370 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16371 EXPORT_SYMBOL(pm_power_off);
16372
16373 static const struct desc_ptr no_idt = {};
16374 -static int reboot_mode;
16375 +static unsigned short reboot_mode;
16376 enum reboot_type reboot_type = BOOT_KBD;
16377 int reboot_force;
16378
16379 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16380 controller to pulse the CPU reset line, which is more thorough, but
16381 doesn't work with at least one type of 486 motherboard. It is easy
16382 to stop this code working; hence the copious comments. */
16383 -static const unsigned long long
16384 -real_mode_gdt_entries [3] =
16385 +static struct desc_struct
16386 +real_mode_gdt_entries [3] __read_only =
16387 {
16388 - 0x0000000000000000ULL, /* Null descriptor */
16389 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16390 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16391 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16392 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16393 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16394 };
16395
16396 static const struct desc_ptr
16397 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16398 * specified by the code and length parameters.
16399 * We assume that length will aways be less that 100!
16400 */
16401 -void machine_real_restart(const unsigned char *code, int length)
16402 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16403 {
16404 local_irq_disable();
16405
16406 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16407 /* Remap the kernel at virtual address zero, as well as offset zero
16408 from the kernel segment. This assumes the kernel segment starts at
16409 virtual address PAGE_OFFSET. */
16410 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16411 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16412 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16413 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16414
16415 /*
16416 * Use `swapper_pg_dir' as our page directory.
16417 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16418 boot)". This seems like a fairly standard thing that gets set by
16419 REBOOT.COM programs, and the previous reset routine did this
16420 too. */
16421 - *((unsigned short *)0x472) = reboot_mode;
16422 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16423
16424 /* For the switch to real mode, copy some code to low memory. It has
16425 to be in the first 64k because it is running in 16-bit mode, and it
16426 has to have the same physical and virtual address, because it turns
16427 off paging. Copy it near the end of the first page, out of the way
16428 of BIOS variables. */
16429 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16430 - real_mode_switch, sizeof (real_mode_switch));
16431 - memcpy((void *)(0x1000 - 100), code, length);
16432 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16433 + memcpy(__va(0x1000 - 100), code, length);
16434
16435 /* Set up the IDT for real mode. */
16436 load_idt(&real_mode_idt);
16437 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16438 __asm__ __volatile__ ("ljmp $0x0008,%0"
16439 :
16440 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16441 + do { } while (1);
16442 }
16443 #ifdef CONFIG_APM_MODULE
16444 EXPORT_SYMBOL(machine_real_restart);
16445 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16446 {
16447 }
16448
16449 -static void native_machine_emergency_restart(void)
16450 +__noreturn static void native_machine_emergency_restart(void)
16451 {
16452 int i;
16453
16454 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16455 #endif
16456 }
16457
16458 -static void __machine_emergency_restart(int emergency)
16459 +static __noreturn void __machine_emergency_restart(int emergency)
16460 {
16461 reboot_emergency = emergency;
16462 machine_ops.emergency_restart();
16463 }
16464
16465 -static void native_machine_restart(char *__unused)
16466 +static __noreturn void native_machine_restart(char *__unused)
16467 {
16468 printk("machine restart\n");
16469
16470 @@ -674,7 +674,7 @@ static void native_machine_restart(char
16471 __machine_emergency_restart(0);
16472 }
16473
16474 -static void native_machine_halt(void)
16475 +static __noreturn void native_machine_halt(void)
16476 {
16477 /* stop other cpus and apics */
16478 machine_shutdown();
16479 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
16480 stop_this_cpu(NULL);
16481 }
16482
16483 -static void native_machine_power_off(void)
16484 +__noreturn static void native_machine_power_off(void)
16485 {
16486 if (pm_power_off) {
16487 if (!reboot_force)
16488 @@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16489 }
16490 /* a fallback in case there is no PM info available */
16491 tboot_shutdown(TB_SHUTDOWN_HALT);
16492 + do { } while (1);
16493 }
16494
16495 struct machine_ops machine_ops = {
16496 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16497 --- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16498 +++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16499 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16500
16501 if (!boot_params.hdr.root_flags)
16502 root_mountflags &= ~MS_RDONLY;
16503 - init_mm.start_code = (unsigned long) _text;
16504 - init_mm.end_code = (unsigned long) _etext;
16505 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16506 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16507 init_mm.end_data = (unsigned long) _edata;
16508 init_mm.brk = _brk_end;
16509
16510 - code_resource.start = virt_to_phys(_text);
16511 - code_resource.end = virt_to_phys(_etext)-1;
16512 - data_resource.start = virt_to_phys(_etext);
16513 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16514 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16515 + data_resource.start = virt_to_phys(_sdata);
16516 data_resource.end = virt_to_phys(_edata)-1;
16517 bss_resource.start = virt_to_phys(&__bss_start);
16518 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16519 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16520 --- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16521 +++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16522 @@ -25,19 +25,17 @@
16523 # define DBG(x...)
16524 #endif
16525
16526 -DEFINE_PER_CPU(int, cpu_number);
16527 +#ifdef CONFIG_SMP
16528 +DEFINE_PER_CPU(unsigned int, cpu_number);
16529 EXPORT_PER_CPU_SYMBOL(cpu_number);
16530 +#endif
16531
16532 -#ifdef CONFIG_X86_64
16533 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16534 -#else
16535 -#define BOOT_PERCPU_OFFSET 0
16536 -#endif
16537
16538 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16539 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16540
16541 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16542 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16543 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16544 };
16545 EXPORT_SYMBOL(__per_cpu_offset);
16546 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16547 {
16548 #ifdef CONFIG_X86_32
16549 struct desc_struct gdt;
16550 + unsigned long base = per_cpu_offset(cpu);
16551
16552 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16553 - 0x2 | DESCTYPE_S, 0x8);
16554 - gdt.s = 1;
16555 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16556 + 0x83 | DESCTYPE_S, 0xC);
16557 write_gdt_entry(get_cpu_gdt_table(cpu),
16558 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16559 #endif
16560 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16561 /* alrighty, percpu areas up and running */
16562 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16563 for_each_possible_cpu(cpu) {
16564 +#ifdef CONFIG_CC_STACKPROTECTOR
16565 +#ifdef CONFIG_X86_32
16566 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16567 +#endif
16568 +#endif
16569 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16570 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16571 per_cpu(cpu_number, cpu) = cpu;
16572 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16573 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16574 #endif
16575 #endif
16576 +#ifdef CONFIG_CC_STACKPROTECTOR
16577 +#ifdef CONFIG_X86_32
16578 + if (!cpu)
16579 + per_cpu(stack_canary.canary, cpu) = canary;
16580 +#endif
16581 +#endif
16582 /*
16583 * Up to this point, the boot CPU has been using .data.init
16584 * area. Reload any changed state for the boot CPU.
16585 diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16586 --- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16587 +++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16588 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16589 * Align the stack pointer according to the i386 ABI,
16590 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16591 */
16592 - sp = ((sp + 4) & -16ul) - 4;
16593 + sp = ((sp - 12) & -16ul) - 4;
16594 #else /* !CONFIG_X86_32 */
16595 sp = round_down(sp, 16) - 8;
16596 #endif
16597 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16598 * Return an always-bogus address instead so we will die with SIGSEGV.
16599 */
16600 if (onsigstack && !likely(on_sig_stack(sp)))
16601 - return (void __user *)-1L;
16602 + return (__force void __user *)-1L;
16603
16604 /* save i387 state */
16605 if (used_math() && save_i387_xstate(*fpstate) < 0)
16606 - return (void __user *)-1L;
16607 + return (__force void __user *)-1L;
16608
16609 return (void __user *)sp;
16610 }
16611 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16612 }
16613
16614 if (current->mm->context.vdso)
16615 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16616 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16617 else
16618 - restorer = &frame->retcode;
16619 + restorer = (void __user *)&frame->retcode;
16620 if (ka->sa.sa_flags & SA_RESTORER)
16621 restorer = ka->sa.sa_restorer;
16622
16623 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16624 * reasons and because gdb uses it as a signature to notice
16625 * signal handler stack frames.
16626 */
16627 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16628 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16629
16630 if (err)
16631 return -EFAULT;
16632 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16633 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16634
16635 /* Set up to return from userspace. */
16636 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16637 + if (current->mm->context.vdso)
16638 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16639 + else
16640 + restorer = (void __user *)&frame->retcode;
16641 if (ka->sa.sa_flags & SA_RESTORER)
16642 restorer = ka->sa.sa_restorer;
16643 put_user_ex(restorer, &frame->pretcode);
16644 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16645 * reasons and because gdb uses it as a signature to notice
16646 * signal handler stack frames.
16647 */
16648 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16649 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16650 } put_user_catch(err);
16651
16652 if (err)
16653 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16654 int signr;
16655 sigset_t *oldset;
16656
16657 + pax_track_stack();
16658 +
16659 /*
16660 * We want the common case to go fast, which is why we may in certain
16661 * cases get here from kernel mode. Just return without doing anything
16662 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16663 * X86_32: vm86 regs switched out by assembly code before reaching
16664 * here, so testing against kernel CS suffices.
16665 */
16666 - if (!user_mode(regs))
16667 + if (!user_mode_novm(regs))
16668 return;
16669
16670 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16671 diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16672 --- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16673 +++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16674 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16675 */
16676 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16677
16678 -void cpu_hotplug_driver_lock()
16679 +void cpu_hotplug_driver_lock(void)
16680 {
16681 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16682 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16683 }
16684
16685 -void cpu_hotplug_driver_unlock()
16686 +void cpu_hotplug_driver_unlock(void)
16687 {
16688 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16689 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16690 }
16691
16692 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16693 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16694 * target processor state.
16695 */
16696 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16697 - (unsigned long)stack_start.sp);
16698 + stack_start);
16699
16700 /*
16701 * Run STARTUP IPI loop.
16702 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16703 set_idle_for_cpu(cpu, c_idle.idle);
16704 do_rest:
16705 per_cpu(current_task, cpu) = c_idle.idle;
16706 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16707 #ifdef CONFIG_X86_32
16708 /* Stack for startup_32 can be just as for start_secondary onwards */
16709 irq_ctx_init(cpu);
16710 @@ -750,13 +751,15 @@ do_rest:
16711 #else
16712 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16713 initial_gs = per_cpu_offset(cpu);
16714 - per_cpu(kernel_stack, cpu) =
16715 - (unsigned long)task_stack_page(c_idle.idle) -
16716 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16717 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16718 #endif
16719 +
16720 + pax_open_kernel();
16721 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16722 + pax_close_kernel();
16723 +
16724 initial_code = (unsigned long)start_secondary;
16725 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16726 + stack_start = c_idle.idle->thread.sp;
16727
16728 /* start_ip had better be page-aligned! */
16729 start_ip = setup_trampoline();
16730 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16731
16732 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16733
16734 +#ifdef CONFIG_PAX_PER_CPU_PGD
16735 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16736 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16737 + KERNEL_PGD_PTRS);
16738 +#endif
16739 +
16740 err = do_boot_cpu(apicid, cpu);
16741
16742 if (err) {
16743 diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16744 --- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16745 +++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16746 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16747 struct desc_struct *desc;
16748 unsigned long base;
16749
16750 - seg &= ~7UL;
16751 + seg >>= 3;
16752
16753 mutex_lock(&child->mm->context.lock);
16754 - if (unlikely((seg >> 3) >= child->mm->context.size))
16755 + if (unlikely(seg >= child->mm->context.size))
16756 addr = -1L; /* bogus selector, access would fault */
16757 else {
16758 desc = child->mm->context.ldt + seg;
16759 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16760 addr += base;
16761 }
16762 mutex_unlock(&child->mm->context.lock);
16763 - }
16764 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16765 + addr = ktla_ktva(addr);
16766
16767 return addr;
16768 }
16769 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16770 unsigned char opcode[15];
16771 unsigned long addr = convert_ip_to_linear(child, regs);
16772
16773 + if (addr == -EINVAL)
16774 + return 0;
16775 +
16776 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16777 for (i = 0; i < copied; i++) {
16778 switch (opcode[i]) {
16779 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16780
16781 #ifdef CONFIG_X86_64
16782 case 0x40 ... 0x4f:
16783 - if (regs->cs != __USER_CS)
16784 + if ((regs->cs & 0xffff) != __USER_CS)
16785 /* 32-bit mode: register increment */
16786 return 0;
16787 /* 64-bit mode: REX prefix */
16788 diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
16789 --- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16790 +++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16791 @@ -1,3 +1,4 @@
16792 +.section .rodata,"a",@progbits
16793 ENTRY(sys_call_table)
16794 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16795 .long sys_exit
16796 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
16797 --- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16798 +++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16799 @@ -24,6 +24,21 @@
16800
16801 #include <asm/syscalls.h>
16802
16803 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16804 +{
16805 + unsigned long pax_task_size = TASK_SIZE;
16806 +
16807 +#ifdef CONFIG_PAX_SEGMEXEC
16808 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16809 + pax_task_size = SEGMEXEC_TASK_SIZE;
16810 +#endif
16811 +
16812 + if (len > pax_task_size || addr > pax_task_size - len)
16813 + return -EINVAL;
16814 +
16815 + return 0;
16816 +}
16817 +
16818 /*
16819 * Perform the select(nd, in, out, ex, tv) and mmap() system
16820 * calls. Linux/i386 didn't use to be able to handle more than
16821 @@ -58,6 +73,212 @@ out:
16822 return err;
16823 }
16824
16825 +unsigned long
16826 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16827 + unsigned long len, unsigned long pgoff, unsigned long flags)
16828 +{
16829 + struct mm_struct *mm = current->mm;
16830 + struct vm_area_struct *vma;
16831 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16832 +
16833 +#ifdef CONFIG_PAX_SEGMEXEC
16834 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16835 + pax_task_size = SEGMEXEC_TASK_SIZE;
16836 +#endif
16837 +
16838 + pax_task_size -= PAGE_SIZE;
16839 +
16840 + if (len > pax_task_size)
16841 + return -ENOMEM;
16842 +
16843 + if (flags & MAP_FIXED)
16844 + return addr;
16845 +
16846 +#ifdef CONFIG_PAX_RANDMMAP
16847 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16848 +#endif
16849 +
16850 + if (addr) {
16851 + addr = PAGE_ALIGN(addr);
16852 + if (pax_task_size - len >= addr) {
16853 + vma = find_vma(mm, addr);
16854 + if (check_heap_stack_gap(vma, addr, len))
16855 + return addr;
16856 + }
16857 + }
16858 + if (len > mm->cached_hole_size) {
16859 + start_addr = addr = mm->free_area_cache;
16860 + } else {
16861 + start_addr = addr = mm->mmap_base;
16862 + mm->cached_hole_size = 0;
16863 + }
16864 +
16865 +#ifdef CONFIG_PAX_PAGEEXEC
16866 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16867 + start_addr = 0x00110000UL;
16868 +
16869 +#ifdef CONFIG_PAX_RANDMMAP
16870 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16871 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16872 +#endif
16873 +
16874 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16875 + start_addr = addr = mm->mmap_base;
16876 + else
16877 + addr = start_addr;
16878 + }
16879 +#endif
16880 +
16881 +full_search:
16882 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16883 + /* At this point: (!vma || addr < vma->vm_end). */
16884 + if (pax_task_size - len < addr) {
16885 + /*
16886 + * Start a new search - just in case we missed
16887 + * some holes.
16888 + */
16889 + if (start_addr != mm->mmap_base) {
16890 + start_addr = addr = mm->mmap_base;
16891 + mm->cached_hole_size = 0;
16892 + goto full_search;
16893 + }
16894 + return -ENOMEM;
16895 + }
16896 + if (check_heap_stack_gap(vma, addr, len))
16897 + break;
16898 + if (addr + mm->cached_hole_size < vma->vm_start)
16899 + mm->cached_hole_size = vma->vm_start - addr;
16900 + addr = vma->vm_end;
16901 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16902 + start_addr = addr = mm->mmap_base;
16903 + mm->cached_hole_size = 0;
16904 + goto full_search;
16905 + }
16906 + }
16907 +
16908 + /*
16909 + * Remember the place where we stopped the search:
16910 + */
16911 + mm->free_area_cache = addr + len;
16912 + return addr;
16913 +}
16914 +
16915 +unsigned long
16916 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16917 + const unsigned long len, const unsigned long pgoff,
16918 + const unsigned long flags)
16919 +{
16920 + struct vm_area_struct *vma;
16921 + struct mm_struct *mm = current->mm;
16922 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16923 +
16924 +#ifdef CONFIG_PAX_SEGMEXEC
16925 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16926 + pax_task_size = SEGMEXEC_TASK_SIZE;
16927 +#endif
16928 +
16929 + pax_task_size -= PAGE_SIZE;
16930 +
16931 + /* requested length too big for entire address space */
16932 + if (len > pax_task_size)
16933 + return -ENOMEM;
16934 +
16935 + if (flags & MAP_FIXED)
16936 + return addr;
16937 +
16938 +#ifdef CONFIG_PAX_PAGEEXEC
16939 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16940 + goto bottomup;
16941 +#endif
16942 +
16943 +#ifdef CONFIG_PAX_RANDMMAP
16944 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16945 +#endif
16946 +
16947 + /* requesting a specific address */
16948 + if (addr) {
16949 + addr = PAGE_ALIGN(addr);
16950 + if (pax_task_size - len >= addr) {
16951 + vma = find_vma(mm, addr);
16952 + if (check_heap_stack_gap(vma, addr, len))
16953 + return addr;
16954 + }
16955 + }
16956 +
16957 + /* check if free_area_cache is useful for us */
16958 + if (len <= mm->cached_hole_size) {
16959 + mm->cached_hole_size = 0;
16960 + mm->free_area_cache = mm->mmap_base;
16961 + }
16962 +
16963 + /* either no address requested or can't fit in requested address hole */
16964 + addr = mm->free_area_cache;
16965 +
16966 + /* make sure it can fit in the remaining address space */
16967 + if (addr > len) {
16968 + vma = find_vma(mm, addr-len);
16969 + if (check_heap_stack_gap(vma, addr - len, len))
16970 + /* remember the address as a hint for next time */
16971 + return (mm->free_area_cache = addr-len);
16972 + }
16973 +
16974 + if (mm->mmap_base < len)
16975 + goto bottomup;
16976 +
16977 + addr = mm->mmap_base-len;
16978 +
16979 + do {
16980 + /*
16981 + * Lookup failure means no vma is above this address,
16982 + * else if new region fits below vma->vm_start,
16983 + * return with success:
16984 + */
16985 + vma = find_vma(mm, addr);
16986 + if (check_heap_stack_gap(vma, addr, len))
16987 + /* remember the address as a hint for next time */
16988 + return (mm->free_area_cache = addr);
16989 +
16990 + /* remember the largest hole we saw so far */
16991 + if (addr + mm->cached_hole_size < vma->vm_start)
16992 + mm->cached_hole_size = vma->vm_start - addr;
16993 +
16994 + /* try just below the current vma->vm_start */
16995 + addr = skip_heap_stack_gap(vma, len);
16996 + } while (!IS_ERR_VALUE(addr));
16997 +
16998 +bottomup:
16999 + /*
17000 + * A failed mmap() very likely causes application failure,
17001 + * so fall back to the bottom-up function here. This scenario
17002 + * can happen with large stack limits and large mmap()
17003 + * allocations.
17004 + */
17005 +
17006 +#ifdef CONFIG_PAX_SEGMEXEC
17007 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17008 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17009 + else
17010 +#endif
17011 +
17012 + mm->mmap_base = TASK_UNMAPPED_BASE;
17013 +
17014 +#ifdef CONFIG_PAX_RANDMMAP
17015 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17016 + mm->mmap_base += mm->delta_mmap;
17017 +#endif
17018 +
17019 + mm->free_area_cache = mm->mmap_base;
17020 + mm->cached_hole_size = ~0UL;
17021 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17022 + /*
17023 + * Restore the topdown base:
17024 + */
17025 + mm->mmap_base = base;
17026 + mm->free_area_cache = base;
17027 + mm->cached_hole_size = ~0UL;
17028 +
17029 + return addr;
17030 +}
17031
17032 struct sel_arg_struct {
17033 unsigned long n;
17034 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17035 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17036 case SEMTIMEDOP:
17037 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17038 - (const struct timespec __user *)fifth);
17039 + (__force const struct timespec __user *)fifth);
17040
17041 case SEMGET:
17042 return sys_semget(first, second, third);
17043 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17044 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17045 if (ret)
17046 return ret;
17047 - return put_user(raddr, (ulong __user *) third);
17048 + return put_user(raddr, (__force ulong __user *) third);
17049 }
17050 case 1: /* iBCS2 emulator entry point */
17051 if (!segment_eq(get_fs(), get_ds()))
17052 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17053
17054 return error;
17055 }
17056 -
17057 -
17058 -/*
17059 - * Do a system call from kernel instead of calling sys_execve so we
17060 - * end up with proper pt_regs.
17061 - */
17062 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17063 -{
17064 - long __res;
17065 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17066 - : "=a" (__res)
17067 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17068 - return __res;
17069 -}
17070 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17071 --- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17072 +++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17073 @@ -32,8 +32,8 @@ out:
17074 return error;
17075 }
17076
17077 -static void find_start_end(unsigned long flags, unsigned long *begin,
17078 - unsigned long *end)
17079 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17080 + unsigned long *begin, unsigned long *end)
17081 {
17082 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17083 unsigned long new_begin;
17084 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17085 *begin = new_begin;
17086 }
17087 } else {
17088 - *begin = TASK_UNMAPPED_BASE;
17089 + *begin = mm->mmap_base;
17090 *end = TASK_SIZE;
17091 }
17092 }
17093 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17094 if (flags & MAP_FIXED)
17095 return addr;
17096
17097 - find_start_end(flags, &begin, &end);
17098 + find_start_end(mm, flags, &begin, &end);
17099
17100 if (len > end)
17101 return -ENOMEM;
17102
17103 +#ifdef CONFIG_PAX_RANDMMAP
17104 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17105 +#endif
17106 +
17107 if (addr) {
17108 addr = PAGE_ALIGN(addr);
17109 vma = find_vma(mm, addr);
17110 - if (end - len >= addr &&
17111 - (!vma || addr + len <= vma->vm_start))
17112 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17113 return addr;
17114 }
17115 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17116 @@ -106,7 +109,7 @@ full_search:
17117 }
17118 return -ENOMEM;
17119 }
17120 - if (!vma || addr + len <= vma->vm_start) {
17121 + if (check_heap_stack_gap(vma, addr, len)) {
17122 /*
17123 * Remember the place where we stopped the search:
17124 */
17125 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17126 {
17127 struct vm_area_struct *vma;
17128 struct mm_struct *mm = current->mm;
17129 - unsigned long addr = addr0;
17130 + unsigned long base = mm->mmap_base, addr = addr0;
17131
17132 /* requested length too big for entire address space */
17133 if (len > TASK_SIZE)
17134 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17135 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17136 goto bottomup;
17137
17138 +#ifdef CONFIG_PAX_RANDMMAP
17139 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17140 +#endif
17141 +
17142 /* requesting a specific address */
17143 if (addr) {
17144 addr = PAGE_ALIGN(addr);
17145 - vma = find_vma(mm, addr);
17146 - if (TASK_SIZE - len >= addr &&
17147 - (!vma || addr + len <= vma->vm_start))
17148 - return addr;
17149 + if (TASK_SIZE - len >= addr) {
17150 + vma = find_vma(mm, addr);
17151 + if (check_heap_stack_gap(vma, addr, len))
17152 + return addr;
17153 + }
17154 }
17155
17156 /* check if free_area_cache is useful for us */
17157 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17158 /* make sure it can fit in the remaining address space */
17159 if (addr > len) {
17160 vma = find_vma(mm, addr-len);
17161 - if (!vma || addr <= vma->vm_start)
17162 + if (check_heap_stack_gap(vma, addr - len, len))
17163 /* remember the address as a hint for next time */
17164 return mm->free_area_cache = addr-len;
17165 }
17166 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17167 * return with success:
17168 */
17169 vma = find_vma(mm, addr);
17170 - if (!vma || addr+len <= vma->vm_start)
17171 + if (check_heap_stack_gap(vma, addr, len))
17172 /* remember the address as a hint for next time */
17173 return mm->free_area_cache = addr;
17174
17175 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17176 mm->cached_hole_size = vma->vm_start - addr;
17177
17178 /* try just below the current vma->vm_start */
17179 - addr = vma->vm_start-len;
17180 - } while (len < vma->vm_start);
17181 + addr = skip_heap_stack_gap(vma, len);
17182 + } while (!IS_ERR_VALUE(addr));
17183
17184 bottomup:
17185 /*
17186 @@ -198,13 +206,21 @@ bottomup:
17187 * can happen with large stack limits and large mmap()
17188 * allocations.
17189 */
17190 + mm->mmap_base = TASK_UNMAPPED_BASE;
17191 +
17192 +#ifdef CONFIG_PAX_RANDMMAP
17193 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17194 + mm->mmap_base += mm->delta_mmap;
17195 +#endif
17196 +
17197 + mm->free_area_cache = mm->mmap_base;
17198 mm->cached_hole_size = ~0UL;
17199 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17200 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17201 /*
17202 * Restore the topdown base:
17203 */
17204 - mm->free_area_cache = mm->mmap_base;
17205 + mm->mmap_base = base;
17206 + mm->free_area_cache = base;
17207 mm->cached_hole_size = ~0UL;
17208
17209 return addr;
17210 diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17211 --- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17212 +++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17213 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17214
17215 void tboot_shutdown(u32 shutdown_type)
17216 {
17217 - void (*shutdown)(void);
17218 + void (* __noreturn shutdown)(void);
17219
17220 if (!tboot_enabled())
17221 return;
17222 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17223
17224 switch_to_tboot_pt();
17225
17226 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17227 + shutdown = (void *)tboot->shutdown_entry;
17228 shutdown();
17229
17230 /* should not reach here */
17231 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17232 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17233 }
17234
17235 -static atomic_t ap_wfs_count;
17236 +static atomic_unchecked_t ap_wfs_count;
17237
17238 static int tboot_wait_for_aps(int num_aps)
17239 {
17240 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17241 {
17242 switch (action) {
17243 case CPU_DYING:
17244 - atomic_inc(&ap_wfs_count);
17245 + atomic_inc_unchecked(&ap_wfs_count);
17246 if (num_online_cpus() == 1)
17247 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17248 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17249 return NOTIFY_BAD;
17250 break;
17251 }
17252 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17253
17254 tboot_create_trampoline();
17255
17256 - atomic_set(&ap_wfs_count, 0);
17257 + atomic_set_unchecked(&ap_wfs_count, 0);
17258 register_hotcpu_notifier(&tboot_cpu_notifier);
17259 return 0;
17260 }
17261 diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17262 --- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17263 +++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17264 @@ -26,17 +26,13 @@
17265 int timer_ack;
17266 #endif
17267
17268 -#ifdef CONFIG_X86_64
17269 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17270 -#endif
17271 -
17272 unsigned long profile_pc(struct pt_regs *regs)
17273 {
17274 unsigned long pc = instruction_pointer(regs);
17275
17276 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17277 + if (!user_mode(regs) && in_lock_functions(pc)) {
17278 #ifdef CONFIG_FRAME_POINTER
17279 - return *(unsigned long *)(regs->bp + sizeof(long));
17280 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17281 #else
17282 unsigned long *sp =
17283 (unsigned long *)kernel_stack_pointer(regs);
17284 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17285 * or above a saved flags. Eflags has bits 22-31 zero,
17286 * kernel addresses don't.
17287 */
17288 +
17289 +#ifdef CONFIG_PAX_KERNEXEC
17290 + return ktla_ktva(sp[0]);
17291 +#else
17292 if (sp[0] >> 22)
17293 return sp[0];
17294 if (sp[1] >> 22)
17295 return sp[1];
17296 #endif
17297 +
17298 +#endif
17299 }
17300 return pc;
17301 }
17302 diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17303 --- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17304 +++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17305 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17306 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17307 return -EINVAL;
17308
17309 +#ifdef CONFIG_PAX_SEGMEXEC
17310 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17311 + return -EINVAL;
17312 +#endif
17313 +
17314 set_tls_desc(p, idx, &info, 1);
17315
17316 return 0;
17317 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17318 --- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17319 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17320 @@ -32,6 +32,12 @@
17321 #include <asm/segment.h>
17322 #include <asm/page_types.h>
17323
17324 +#ifdef CONFIG_PAX_KERNEXEC
17325 +#define ta(X) (X)
17326 +#else
17327 +#define ta(X) ((X) - __PAGE_OFFSET)
17328 +#endif
17329 +
17330 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17331 __CPUINITRODATA
17332 .code16
17333 @@ -60,7 +66,7 @@ r_base = .
17334 inc %ax # protected mode (PE) bit
17335 lmsw %ax # into protected mode
17336 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17337 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17338 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17339
17340 # These need to be in the same 64K segment as the above;
17341 # hence we don't use the boot_gdt_descr defined in head.S
17342 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17343 --- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17344 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17345 @@ -91,7 +91,7 @@ startup_32:
17346 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17347 movl %eax, %ds
17348
17349 - movl $X86_CR4_PAE, %eax
17350 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17351 movl %eax, %cr4 # Enable PAE mode
17352
17353 # Setup trampoline 4 level pagetables
17354 @@ -127,7 +127,7 @@ startup_64:
17355 no_longmode:
17356 hlt
17357 jmp no_longmode
17358 -#include "verify_cpu_64.S"
17359 +#include "verify_cpu.S"
17360
17361 # Careful these need to be in the same 64K segment as the above;
17362 tidt:
17363 @@ -138,7 +138,7 @@ tidt:
17364 # so the kernel can live anywhere
17365 .balign 4
17366 tgdt:
17367 - .short tgdt_end - tgdt # gdt limit
17368 + .short tgdt_end - tgdt - 1 # gdt limit
17369 .long tgdt - r_base
17370 .short 0
17371 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17372 diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17373 --- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17374 +++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17375 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17376
17377 /* Do we ignore FPU interrupts ? */
17378 char ignore_fpu_irq;
17379 -
17380 -/*
17381 - * The IDT has to be page-aligned to simplify the Pentium
17382 - * F0 0F bug workaround.
17383 - */
17384 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17385 #endif
17386
17387 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17388 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17389 static inline void
17390 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17391 {
17392 - if (!user_mode_vm(regs))
17393 + if (!user_mode(regs))
17394 die(str, regs, err);
17395 }
17396 #endif
17397
17398 static void __kprobes
17399 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17400 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17401 long error_code, siginfo_t *info)
17402 {
17403 struct task_struct *tsk = current;
17404
17405 #ifdef CONFIG_X86_32
17406 - if (regs->flags & X86_VM_MASK) {
17407 + if (v8086_mode(regs)) {
17408 /*
17409 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17410 * On nmi (interrupt 2), do_trap should not be called.
17411 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17412 }
17413 #endif
17414
17415 - if (!user_mode(regs))
17416 + if (!user_mode_novm(regs))
17417 goto kernel_trap;
17418
17419 #ifdef CONFIG_X86_32
17420 @@ -158,7 +152,7 @@ trap_signal:
17421 printk_ratelimit()) {
17422 printk(KERN_INFO
17423 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17424 - tsk->comm, tsk->pid, str,
17425 + tsk->comm, task_pid_nr(tsk), str,
17426 regs->ip, regs->sp, error_code);
17427 print_vma_addr(" in ", regs->ip);
17428 printk("\n");
17429 @@ -175,8 +169,20 @@ kernel_trap:
17430 if (!fixup_exception(regs)) {
17431 tsk->thread.error_code = error_code;
17432 tsk->thread.trap_no = trapnr;
17433 +
17434 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17435 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17436 + str = "PAX: suspicious stack segment fault";
17437 +#endif
17438 +
17439 die(str, regs, error_code);
17440 }
17441 +
17442 +#ifdef CONFIG_PAX_REFCOUNT
17443 + if (trapnr == 4)
17444 + pax_report_refcount_overflow(regs);
17445 +#endif
17446 +
17447 return;
17448
17449 #ifdef CONFIG_X86_32
17450 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17451 conditional_sti(regs);
17452
17453 #ifdef CONFIG_X86_32
17454 - if (regs->flags & X86_VM_MASK)
17455 + if (v8086_mode(regs))
17456 goto gp_in_vm86;
17457 #endif
17458
17459 tsk = current;
17460 - if (!user_mode(regs))
17461 + if (!user_mode_novm(regs))
17462 goto gp_in_kernel;
17463
17464 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17465 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17466 + struct mm_struct *mm = tsk->mm;
17467 + unsigned long limit;
17468 +
17469 + down_write(&mm->mmap_sem);
17470 + limit = mm->context.user_cs_limit;
17471 + if (limit < TASK_SIZE) {
17472 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17473 + up_write(&mm->mmap_sem);
17474 + return;
17475 + }
17476 + up_write(&mm->mmap_sem);
17477 + }
17478 +#endif
17479 +
17480 tsk->thread.error_code = error_code;
17481 tsk->thread.trap_no = 13;
17482
17483 @@ -305,6 +327,13 @@ gp_in_kernel:
17484 if (notify_die(DIE_GPF, "general protection fault", regs,
17485 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17486 return;
17487 +
17488 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17489 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17490 + die("PAX: suspicious general protection fault", regs, error_code);
17491 + else
17492 +#endif
17493 +
17494 die("general protection fault", regs, error_code);
17495 }
17496
17497 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17498 dotraplinkage notrace __kprobes void
17499 do_nmi(struct pt_regs *regs, long error_code)
17500 {
17501 +
17502 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17503 + if (!user_mode(regs)) {
17504 + unsigned long cs = regs->cs & 0xFFFF;
17505 + unsigned long ip = ktva_ktla(regs->ip);
17506 +
17507 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17508 + regs->ip = ip;
17509 + }
17510 +#endif
17511 +
17512 nmi_enter();
17513
17514 inc_irq_stat(__nmi_count);
17515 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17516 }
17517
17518 #ifdef CONFIG_X86_32
17519 - if (regs->flags & X86_VM_MASK)
17520 + if (v8086_mode(regs))
17521 goto debug_vm86;
17522 #endif
17523
17524 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17525 * kernel space (but re-enable TF when returning to user mode).
17526 */
17527 if (condition & DR_STEP) {
17528 - if (!user_mode(regs))
17529 + if (!user_mode_novm(regs))
17530 goto clear_TF_reenable;
17531 }
17532
17533 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17534 * Handle strange cache flush from user space exception
17535 * in all other cases. This is undocumented behaviour.
17536 */
17537 - if (regs->flags & X86_VM_MASK) {
17538 + if (v8086_mode(regs)) {
17539 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17540 return;
17541 }
17542 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17543 void __math_state_restore(void)
17544 {
17545 struct thread_info *thread = current_thread_info();
17546 - struct task_struct *tsk = thread->task;
17547 + struct task_struct *tsk = current;
17548
17549 /*
17550 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17551 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17552 */
17553 asmlinkage void math_state_restore(void)
17554 {
17555 - struct thread_info *thread = current_thread_info();
17556 - struct task_struct *tsk = thread->task;
17557 + struct task_struct *tsk = current;
17558
17559 if (!tsk_used_math(tsk)) {
17560 local_irq_enable();
17561 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17562 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17563 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17564 @@ -1,105 +0,0 @@
17565 -/*
17566 - *
17567 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17568 - * code has been borrowed from boot/setup.S and was introduced by
17569 - * Andi Kleen.
17570 - *
17571 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17572 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17573 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17574 - *
17575 - * This source code is licensed under the GNU General Public License,
17576 - * Version 2. See the file COPYING for more details.
17577 - *
17578 - * This is a common code for verification whether CPU supports
17579 - * long mode and SSE or not. It is not called directly instead this
17580 - * file is included at various places and compiled in that context.
17581 - * Following are the current usage.
17582 - *
17583 - * This file is included by both 16bit and 32bit code.
17584 - *
17585 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17586 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17587 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17588 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17589 - *
17590 - * verify_cpu, returns the status of cpu check in register %eax.
17591 - * 0: Success 1: Failure
17592 - *
17593 - * The caller needs to check for the error code and take the action
17594 - * appropriately. Either display a message or halt.
17595 - */
17596 -
17597 -#include <asm/cpufeature.h>
17598 -
17599 -verify_cpu:
17600 - pushfl # Save caller passed flags
17601 - pushl $0 # Kill any dangerous flags
17602 - popfl
17603 -
17604 - pushfl # standard way to check for cpuid
17605 - popl %eax
17606 - movl %eax,%ebx
17607 - xorl $0x200000,%eax
17608 - pushl %eax
17609 - popfl
17610 - pushfl
17611 - popl %eax
17612 - cmpl %eax,%ebx
17613 - jz verify_cpu_no_longmode # cpu has no cpuid
17614 -
17615 - movl $0x0,%eax # See if cpuid 1 is implemented
17616 - cpuid
17617 - cmpl $0x1,%eax
17618 - jb verify_cpu_no_longmode # no cpuid 1
17619 -
17620 - xor %di,%di
17621 - cmpl $0x68747541,%ebx # AuthenticAMD
17622 - jnz verify_cpu_noamd
17623 - cmpl $0x69746e65,%edx
17624 - jnz verify_cpu_noamd
17625 - cmpl $0x444d4163,%ecx
17626 - jnz verify_cpu_noamd
17627 - mov $1,%di # cpu is from AMD
17628 -
17629 -verify_cpu_noamd:
17630 - movl $0x1,%eax # Does the cpu have what it takes
17631 - cpuid
17632 - andl $REQUIRED_MASK0,%edx
17633 - xorl $REQUIRED_MASK0,%edx
17634 - jnz verify_cpu_no_longmode
17635 -
17636 - movl $0x80000000,%eax # See if extended cpuid is implemented
17637 - cpuid
17638 - cmpl $0x80000001,%eax
17639 - jb verify_cpu_no_longmode # no extended cpuid
17640 -
17641 - movl $0x80000001,%eax # Does the cpu have what it takes
17642 - cpuid
17643 - andl $REQUIRED_MASK1,%edx
17644 - xorl $REQUIRED_MASK1,%edx
17645 - jnz verify_cpu_no_longmode
17646 -
17647 -verify_cpu_sse_test:
17648 - movl $1,%eax
17649 - cpuid
17650 - andl $SSE_MASK,%edx
17651 - cmpl $SSE_MASK,%edx
17652 - je verify_cpu_sse_ok
17653 - test %di,%di
17654 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17655 - movl $0xc0010015,%ecx # HWCR
17656 - rdmsr
17657 - btr $15,%eax # enable SSE
17658 - wrmsr
17659 - xor %di,%di # don't loop
17660 - jmp verify_cpu_sse_test # try again
17661 -
17662 -verify_cpu_no_longmode:
17663 - popfl # Restore caller passed flags
17664 - movl $1,%eax
17665 - ret
17666 -verify_cpu_sse_ok:
17667 - popfl # Restore caller passed flags
17668 - xorl %eax, %eax
17669 - ret
17670 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17671 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17672 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17673 @@ -0,0 +1,140 @@
17674 +/*
17675 + *
17676 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17677 + * code has been borrowed from boot/setup.S and was introduced by
17678 + * Andi Kleen.
17679 + *
17680 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17681 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17682 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17683 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17684 + *
17685 + * This source code is licensed under the GNU General Public License,
17686 + * Version 2. See the file COPYING for more details.
17687 + *
17688 + * This is a common code for verification whether CPU supports
17689 + * long mode and SSE or not. It is not called directly instead this
17690 + * file is included at various places and compiled in that context.
17691 + * This file is expected to run in 32bit code. Currently:
17692 + *
17693 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17694 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17695 + * arch/x86/kernel/head_32.S: processor startup
17696 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17697 + *
17698 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17699 + * 0: Success 1: Failure
17700 + *
17701 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17702 + *
17703 + * The caller needs to check for the error code and take the action
17704 + * appropriately. Either display a message or halt.
17705 + */
17706 +
17707 +#include <asm/cpufeature.h>
17708 +#include <asm/msr-index.h>
17709 +
17710 +verify_cpu:
17711 + pushfl # Save caller passed flags
17712 + pushl $0 # Kill any dangerous flags
17713 + popfl
17714 +
17715 + pushfl # standard way to check for cpuid
17716 + popl %eax
17717 + movl %eax,%ebx
17718 + xorl $0x200000,%eax
17719 + pushl %eax
17720 + popfl
17721 + pushfl
17722 + popl %eax
17723 + cmpl %eax,%ebx
17724 + jz verify_cpu_no_longmode # cpu has no cpuid
17725 +
17726 + movl $0x0,%eax # See if cpuid 1 is implemented
17727 + cpuid
17728 + cmpl $0x1,%eax
17729 + jb verify_cpu_no_longmode # no cpuid 1
17730 +
17731 + xor %di,%di
17732 + cmpl $0x68747541,%ebx # AuthenticAMD
17733 + jnz verify_cpu_noamd
17734 + cmpl $0x69746e65,%edx
17735 + jnz verify_cpu_noamd
17736 + cmpl $0x444d4163,%ecx
17737 + jnz verify_cpu_noamd
17738 + mov $1,%di # cpu is from AMD
17739 + jmp verify_cpu_check
17740 +
17741 +verify_cpu_noamd:
17742 + cmpl $0x756e6547,%ebx # GenuineIntel?
17743 + jnz verify_cpu_check
17744 + cmpl $0x49656e69,%edx
17745 + jnz verify_cpu_check
17746 + cmpl $0x6c65746e,%ecx
17747 + jnz verify_cpu_check
17748 +
17749 + # only call IA32_MISC_ENABLE when:
17750 + # family > 6 || (family == 6 && model >= 0xd)
17751 + movl $0x1, %eax # check CPU family and model
17752 + cpuid
17753 + movl %eax, %ecx
17754 +
17755 + andl $0x0ff00f00, %eax # mask family and extended family
17756 + shrl $8, %eax
17757 + cmpl $6, %eax
17758 + ja verify_cpu_clear_xd # family > 6, ok
17759 + jb verify_cpu_check # family < 6, skip
17760 +
17761 + andl $0x000f00f0, %ecx # mask model and extended model
17762 + shrl $4, %ecx
17763 + cmpl $0xd, %ecx
17764 + jb verify_cpu_check # family == 6, model < 0xd, skip
17765 +
17766 +verify_cpu_clear_xd:
17767 + movl $MSR_IA32_MISC_ENABLE, %ecx
17768 + rdmsr
17769 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17770 + jnc verify_cpu_check # only write MSR if bit was changed
17771 + wrmsr
17772 +
17773 +verify_cpu_check:
17774 + movl $0x1,%eax # Does the cpu have what it takes
17775 + cpuid
17776 + andl $REQUIRED_MASK0,%edx
17777 + xorl $REQUIRED_MASK0,%edx
17778 + jnz verify_cpu_no_longmode
17779 +
17780 + movl $0x80000000,%eax # See if extended cpuid is implemented
17781 + cpuid
17782 + cmpl $0x80000001,%eax
17783 + jb verify_cpu_no_longmode # no extended cpuid
17784 +
17785 + movl $0x80000001,%eax # Does the cpu have what it takes
17786 + cpuid
17787 + andl $REQUIRED_MASK1,%edx
17788 + xorl $REQUIRED_MASK1,%edx
17789 + jnz verify_cpu_no_longmode
17790 +
17791 +verify_cpu_sse_test:
17792 + movl $1,%eax
17793 + cpuid
17794 + andl $SSE_MASK,%edx
17795 + cmpl $SSE_MASK,%edx
17796 + je verify_cpu_sse_ok
17797 + test %di,%di
17798 + jz verify_cpu_no_longmode # only try to force SSE on AMD
17799 + movl $MSR_K7_HWCR,%ecx
17800 + rdmsr
17801 + btr $15,%eax # enable SSE
17802 + wrmsr
17803 + xor %di,%di # don't loop
17804 + jmp verify_cpu_sse_test # try again
17805 +
17806 +verify_cpu_no_longmode:
17807 + popfl # Restore caller passed flags
17808 + movl $1,%eax
17809 + ret
17810 +verify_cpu_sse_ok:
17811 + popfl # Restore caller passed flags
17812 + xorl %eax, %eax
17813 + ret
17814 diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
17815 --- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17816 +++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17817 @@ -41,6 +41,7 @@
17818 #include <linux/ptrace.h>
17819 #include <linux/audit.h>
17820 #include <linux/stddef.h>
17821 +#include <linux/grsecurity.h>
17822
17823 #include <asm/uaccess.h>
17824 #include <asm/io.h>
17825 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17826 do_exit(SIGSEGV);
17827 }
17828
17829 - tss = &per_cpu(init_tss, get_cpu());
17830 + tss = init_tss + get_cpu();
17831 current->thread.sp0 = current->thread.saved_sp0;
17832 current->thread.sysenter_cs = __KERNEL_CS;
17833 load_sp0(tss, &current->thread);
17834 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17835 struct task_struct *tsk;
17836 int tmp, ret = -EPERM;
17837
17838 +#ifdef CONFIG_GRKERNSEC_VM86
17839 + if (!capable(CAP_SYS_RAWIO)) {
17840 + gr_handle_vm86();
17841 + goto out;
17842 + }
17843 +#endif
17844 +
17845 tsk = current;
17846 if (tsk->thread.saved_sp0)
17847 goto out;
17848 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17849 int tmp, ret;
17850 struct vm86plus_struct __user *v86;
17851
17852 +#ifdef CONFIG_GRKERNSEC_VM86
17853 + if (!capable(CAP_SYS_RAWIO)) {
17854 + gr_handle_vm86();
17855 + ret = -EPERM;
17856 + goto out;
17857 + }
17858 +#endif
17859 +
17860 tsk = current;
17861 switch (regs->bx) {
17862 case VM86_REQUEST_IRQ:
17863 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17864 tsk->thread.saved_fs = info->regs32->fs;
17865 tsk->thread.saved_gs = get_user_gs(info->regs32);
17866
17867 - tss = &per_cpu(init_tss, get_cpu());
17868 + tss = init_tss + get_cpu();
17869 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17870 if (cpu_has_sep)
17871 tsk->thread.sysenter_cs = 0;
17872 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17873 goto cannot_handle;
17874 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17875 goto cannot_handle;
17876 - intr_ptr = (unsigned long __user *) (i << 2);
17877 + intr_ptr = (__force unsigned long __user *) (i << 2);
17878 if (get_user(segoffs, intr_ptr))
17879 goto cannot_handle;
17880 if ((segoffs >> 16) == BIOSSEG)
17881 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
17882 --- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17883 +++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
17884 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17885 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17886
17887 #define call_vrom_func(rom,func) \
17888 - (((VROMFUNC *)(rom->func))())
17889 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17890
17891 #define call_vrom_long_func(rom,func,arg) \
17892 - (((VROMLONGFUNC *)(rom->func)) (arg))
17893 +({\
17894 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17895 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17896 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17897 + __reloc;\
17898 +})
17899
17900 -static struct vrom_header *vmi_rom;
17901 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17902 static int disable_pge;
17903 static int disable_pse;
17904 static int disable_sep;
17905 @@ -76,10 +81,10 @@ static struct {
17906 void (*set_initial_ap_state)(int, int);
17907 void (*halt)(void);
17908 void (*set_lazy_mode)(int mode);
17909 -} vmi_ops;
17910 +} __no_const vmi_ops __read_only;
17911
17912 /* Cached VMI operations */
17913 -struct vmi_timer_ops vmi_timer_ops;
17914 +struct vmi_timer_ops vmi_timer_ops __read_only;
17915
17916 /*
17917 * VMI patching routines.
17918 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17919 static inline void patch_offset(void *insnbuf,
17920 unsigned long ip, unsigned long dest)
17921 {
17922 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17923 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17924 }
17925
17926 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17927 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17928 {
17929 u64 reloc;
17930 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17931 +
17932 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17933 switch(rel->type) {
17934 case VMI_RELOCATION_CALL_REL:
17935 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17936
17937 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17938 {
17939 - const pte_t pte = { .pte = 0 };
17940 + const pte_t pte = __pte(0ULL);
17941 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17942 }
17943
17944 static void vmi_pmd_clear(pmd_t *pmd)
17945 {
17946 - const pte_t pte = { .pte = 0 };
17947 + const pte_t pte = __pte(0ULL);
17948 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17949 }
17950 #endif
17951 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17952 ap.ss = __KERNEL_DS;
17953 ap.esp = (unsigned long) start_esp;
17954
17955 - ap.ds = __USER_DS;
17956 - ap.es = __USER_DS;
17957 + ap.ds = __KERNEL_DS;
17958 + ap.es = __KERNEL_DS;
17959 ap.fs = __KERNEL_PERCPU;
17960 - ap.gs = __KERNEL_STACK_CANARY;
17961 + savesegment(gs, ap.gs);
17962
17963 ap.eflags = 0;
17964
17965 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17966 paravirt_leave_lazy_mmu();
17967 }
17968
17969 +#ifdef CONFIG_PAX_KERNEXEC
17970 +static unsigned long vmi_pax_open_kernel(void)
17971 +{
17972 + return 0;
17973 +}
17974 +
17975 +static unsigned long vmi_pax_close_kernel(void)
17976 +{
17977 + return 0;
17978 +}
17979 +#endif
17980 +
17981 static inline int __init check_vmi_rom(struct vrom_header *rom)
17982 {
17983 struct pci_header *pci;
17984 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17985 return 0;
17986 if (rom->vrom_signature != VMI_SIGNATURE)
17987 return 0;
17988 + if (rom->rom_length * 512 > sizeof(*rom)) {
17989 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17990 + return 0;
17991 + }
17992 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17993 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17994 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17995 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17996 struct vrom_header *romstart;
17997 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17998 if (check_vmi_rom(romstart)) {
17999 - vmi_rom = romstart;
18000 + vmi_rom = *romstart;
18001 return 1;
18002 }
18003 }
18004 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18005
18006 para_fill(pv_irq_ops.safe_halt, Halt);
18007
18008 +#ifdef CONFIG_PAX_KERNEXEC
18009 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18010 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18011 +#endif
18012 +
18013 /*
18014 * Alternative instruction rewriting doesn't happen soon enough
18015 * to convert VMI_IRET to a call instead of a jump; so we have
18016 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18017
18018 void __init vmi_init(void)
18019 {
18020 - if (!vmi_rom)
18021 + if (!vmi_rom.rom_signature)
18022 probe_vmi_rom();
18023 else
18024 - check_vmi_rom(vmi_rom);
18025 + check_vmi_rom(&vmi_rom);
18026
18027 /* In case probing for or validating the ROM failed, basil */
18028 - if (!vmi_rom)
18029 + if (!vmi_rom.rom_signature)
18030 return;
18031
18032 - reserve_top_address(-vmi_rom->virtual_top);
18033 + reserve_top_address(-vmi_rom.virtual_top);
18034
18035 #ifdef CONFIG_X86_IO_APIC
18036 /* This is virtual hardware; timer routing is wired correctly */
18037 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
18038 {
18039 unsigned long flags;
18040
18041 - if (!vmi_rom)
18042 + if (!vmi_rom.rom_signature)
18043 return;
18044
18045 local_irq_save(flags);
18046 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18047 --- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18048 +++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18049 @@ -26,6 +26,13 @@
18050 #include <asm/page_types.h>
18051 #include <asm/cache.h>
18052 #include <asm/boot.h>
18053 +#include <asm/segment.h>
18054 +
18055 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18056 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18057 +#else
18058 +#define __KERNEL_TEXT_OFFSET 0
18059 +#endif
18060
18061 #undef i386 /* in case the preprocessor is a 32bit one */
18062
18063 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18064 #ifdef CONFIG_X86_32
18065 OUTPUT_ARCH(i386)
18066 ENTRY(phys_startup_32)
18067 -jiffies = jiffies_64;
18068 #else
18069 OUTPUT_ARCH(i386:x86-64)
18070 ENTRY(phys_startup_64)
18071 -jiffies_64 = jiffies;
18072 #endif
18073
18074 PHDRS {
18075 text PT_LOAD FLAGS(5); /* R_E */
18076 - data PT_LOAD FLAGS(7); /* RWE */
18077 +#ifdef CONFIG_X86_32
18078 + module PT_LOAD FLAGS(5); /* R_E */
18079 +#endif
18080 +#ifdef CONFIG_XEN
18081 + rodata PT_LOAD FLAGS(5); /* R_E */
18082 +#else
18083 + rodata PT_LOAD FLAGS(4); /* R__ */
18084 +#endif
18085 + data PT_LOAD FLAGS(6); /* RW_ */
18086 #ifdef CONFIG_X86_64
18087 user PT_LOAD FLAGS(5); /* R_E */
18088 +#endif
18089 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18090 #ifdef CONFIG_SMP
18091 percpu PT_LOAD FLAGS(6); /* RW_ */
18092 #endif
18093 + text.init PT_LOAD FLAGS(5); /* R_E */
18094 + text.exit PT_LOAD FLAGS(5); /* R_E */
18095 init PT_LOAD FLAGS(7); /* RWE */
18096 -#endif
18097 note PT_NOTE FLAGS(0); /* ___ */
18098 }
18099
18100 SECTIONS
18101 {
18102 #ifdef CONFIG_X86_32
18103 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18104 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18105 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18106 #else
18107 - . = __START_KERNEL;
18108 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18109 + . = __START_KERNEL;
18110 #endif
18111
18112 /* Text and read-only data */
18113 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18114 - _text = .;
18115 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18116 /* bootstrapping code */
18117 +#ifdef CONFIG_X86_32
18118 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18119 +#else
18120 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18121 +#endif
18122 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18123 + _text = .;
18124 HEAD_TEXT
18125 #ifdef CONFIG_X86_32
18126 . = ALIGN(PAGE_SIZE);
18127 @@ -82,28 +102,71 @@ SECTIONS
18128 IRQENTRY_TEXT
18129 *(.fixup)
18130 *(.gnu.warning)
18131 - /* End of text section */
18132 - _etext = .;
18133 } :text = 0x9090
18134
18135 - NOTES :text :note
18136 + . += __KERNEL_TEXT_OFFSET;
18137 +
18138 +#ifdef CONFIG_X86_32
18139 + . = ALIGN(PAGE_SIZE);
18140 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18141 + *(.vmi.rom)
18142 + } :module
18143 +
18144 + . = ALIGN(PAGE_SIZE);
18145 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18146 +
18147 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18148 + MODULES_EXEC_VADDR = .;
18149 + BYTE(0)
18150 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18151 + . = ALIGN(HPAGE_SIZE);
18152 + MODULES_EXEC_END = . - 1;
18153 +#endif
18154 +
18155 + } :module
18156 +#endif
18157
18158 - EXCEPTION_TABLE(16) :text = 0x9090
18159 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18160 + /* End of text section */
18161 + _etext = . - __KERNEL_TEXT_OFFSET;
18162 + }
18163 +
18164 +#ifdef CONFIG_X86_32
18165 + . = ALIGN(PAGE_SIZE);
18166 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18167 + *(.idt)
18168 + . = ALIGN(PAGE_SIZE);
18169 + *(.empty_zero_page)
18170 + *(.swapper_pg_fixmap)
18171 + *(.swapper_pg_pmd)
18172 + *(.swapper_pg_dir)
18173 + *(.trampoline_pg_dir)
18174 + } :rodata
18175 +#endif
18176 +
18177 + . = ALIGN(PAGE_SIZE);
18178 + NOTES :rodata :note
18179 +
18180 + EXCEPTION_TABLE(16) :rodata
18181
18182 RO_DATA(PAGE_SIZE)
18183
18184 /* Data */
18185 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18186 +
18187 +#ifdef CONFIG_PAX_KERNEXEC
18188 + . = ALIGN(HPAGE_SIZE);
18189 +#else
18190 + . = ALIGN(PAGE_SIZE);
18191 +#endif
18192 +
18193 /* Start of data section */
18194 _sdata = .;
18195
18196 /* init_task */
18197 INIT_TASK_DATA(THREAD_SIZE)
18198
18199 -#ifdef CONFIG_X86_32
18200 - /* 32 bit has nosave before _edata */
18201 NOSAVE_DATA
18202 -#endif
18203
18204 PAGE_ALIGNED_DATA(PAGE_SIZE)
18205
18206 @@ -112,6 +175,8 @@ SECTIONS
18207 DATA_DATA
18208 CONSTRUCTORS
18209
18210 + jiffies = jiffies_64;
18211 +
18212 /* rarely changed data like cpu maps */
18213 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18214
18215 @@ -166,12 +231,6 @@ SECTIONS
18216 }
18217 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18218
18219 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18220 - .jiffies : AT(VLOAD(.jiffies)) {
18221 - *(.jiffies)
18222 - }
18223 - jiffies = VVIRT(.jiffies);
18224 -
18225 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18226 *(.vsyscall_3)
18227 }
18228 @@ -187,12 +246,19 @@ SECTIONS
18229 #endif /* CONFIG_X86_64 */
18230
18231 /* Init code and data - will be freed after init */
18232 - . = ALIGN(PAGE_SIZE);
18233 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18234 + BYTE(0)
18235 +
18236 +#ifdef CONFIG_PAX_KERNEXEC
18237 + . = ALIGN(HPAGE_SIZE);
18238 +#else
18239 + . = ALIGN(PAGE_SIZE);
18240 +#endif
18241 +
18242 __init_begin = .; /* paired with __init_end */
18243 - }
18244 + } :init.begin
18245
18246 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18247 +#ifdef CONFIG_SMP
18248 /*
18249 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18250 * output PHDR, so the next output section - .init.text - should
18251 @@ -201,12 +267,27 @@ SECTIONS
18252 PERCPU_VADDR(0, :percpu)
18253 #endif
18254
18255 - INIT_TEXT_SECTION(PAGE_SIZE)
18256 -#ifdef CONFIG_X86_64
18257 - :init
18258 -#endif
18259 + . = ALIGN(PAGE_SIZE);
18260 + init_begin = .;
18261 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18262 + VMLINUX_SYMBOL(_sinittext) = .;
18263 + INIT_TEXT
18264 + VMLINUX_SYMBOL(_einittext) = .;
18265 + . = ALIGN(PAGE_SIZE);
18266 + } :text.init
18267
18268 - INIT_DATA_SECTION(16)
18269 + /*
18270 + * .exit.text is discard at runtime, not link time, to deal with
18271 + * references from .altinstructions and .eh_frame
18272 + */
18273 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18274 + EXIT_TEXT
18275 + . = ALIGN(16);
18276 + } :text.exit
18277 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18278 +
18279 + . = ALIGN(PAGE_SIZE);
18280 + INIT_DATA_SECTION(16) :init
18281
18282 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18283 __x86_cpu_dev_start = .;
18284 @@ -232,19 +313,11 @@ SECTIONS
18285 *(.altinstr_replacement)
18286 }
18287
18288 - /*
18289 - * .exit.text is discard at runtime, not link time, to deal with
18290 - * references from .altinstructions and .eh_frame
18291 - */
18292 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18293 - EXIT_TEXT
18294 - }
18295 -
18296 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18297 EXIT_DATA
18298 }
18299
18300 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18301 +#ifndef CONFIG_SMP
18302 PERCPU(PAGE_SIZE)
18303 #endif
18304
18305 @@ -267,12 +340,6 @@ SECTIONS
18306 . = ALIGN(PAGE_SIZE);
18307 }
18308
18309 -#ifdef CONFIG_X86_64
18310 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18311 - NOSAVE_DATA
18312 - }
18313 -#endif
18314 -
18315 /* BSS */
18316 . = ALIGN(PAGE_SIZE);
18317 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18318 @@ -288,6 +355,7 @@ SECTIONS
18319 __brk_base = .;
18320 . += 64 * 1024; /* 64k alignment slop space */
18321 *(.brk_reservation) /* areas brk users have reserved */
18322 + . = ALIGN(HPAGE_SIZE);
18323 __brk_limit = .;
18324 }
18325
18326 @@ -316,13 +384,12 @@ SECTIONS
18327 * for the boot processor.
18328 */
18329 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18330 -INIT_PER_CPU(gdt_page);
18331 INIT_PER_CPU(irq_stack_union);
18332
18333 /*
18334 * Build-time check on the image size:
18335 */
18336 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18337 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18338 "kernel image bigger than KERNEL_IMAGE_SIZE");
18339
18340 #ifdef CONFIG_SMP
18341 diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18342 --- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18343 +++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18344 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18345
18346 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18347 /* copy vsyscall data */
18348 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18349 vsyscall_gtod_data.clock.vread = clock->vread;
18350 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18351 vsyscall_gtod_data.clock.mask = clock->mask;
18352 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18353 We do this here because otherwise user space would do it on
18354 its own in a likely inferior way (no access to jiffies).
18355 If you don't like it pass NULL. */
18356 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18357 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18358 p = tcache->blob[1];
18359 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18360 /* Load per CPU data from RDTSCP */
18361 diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18362 --- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18363 +++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18364 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18365
18366 EXPORT_SYMBOL(copy_user_generic);
18367 EXPORT_SYMBOL(__copy_user_nocache);
18368 -EXPORT_SYMBOL(copy_from_user);
18369 -EXPORT_SYMBOL(copy_to_user);
18370 EXPORT_SYMBOL(__copy_from_user_inatomic);
18371
18372 EXPORT_SYMBOL(copy_page);
18373 diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18374 --- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18375 +++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18376 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18377 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18378 return -1;
18379
18380 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18381 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18382 fx_sw_user->extended_size -
18383 FP_XSTATE_MAGIC2_SIZE));
18384 /*
18385 @@ -196,7 +196,7 @@ fx_only:
18386 * the other extended state.
18387 */
18388 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18389 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18390 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18391 }
18392
18393 /*
18394 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18395 if (task_thread_info(tsk)->status & TS_XSAVE)
18396 err = restore_user_xstate(buf);
18397 else
18398 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18399 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18400 buf);
18401 if (unlikely(err)) {
18402 /*
18403 diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18404 --- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18405 +++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18406 @@ -81,8 +81,8 @@
18407 #define Src2CL (1<<29)
18408 #define Src2ImmByte (2<<29)
18409 #define Src2One (3<<29)
18410 -#define Src2Imm16 (4<<29)
18411 -#define Src2Mask (7<<29)
18412 +#define Src2Imm16 (4U<<29)
18413 +#define Src2Mask (7U<<29)
18414
18415 enum {
18416 Group1_80, Group1_81, Group1_82, Group1_83,
18417 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18418
18419 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18420 do { \
18421 + unsigned long _tmp; \
18422 __asm__ __volatile__ ( \
18423 _PRE_EFLAGS("0", "4", "2") \
18424 _op _suffix " %"_x"3,%1; " \
18425 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18426 /* Raw emulation: instruction has two explicit operands. */
18427 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18428 do { \
18429 - unsigned long _tmp; \
18430 - \
18431 switch ((_dst).bytes) { \
18432 case 2: \
18433 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18434 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18435
18436 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18437 do { \
18438 - unsigned long _tmp; \
18439 switch ((_dst).bytes) { \
18440 case 1: \
18441 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18442 diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18443 --- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18444 +++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18445 @@ -52,7 +52,7 @@
18446 #define APIC_BUS_CYCLE_NS 1
18447
18448 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18449 -#define apic_debug(fmt, arg...)
18450 +#define apic_debug(fmt, arg...) do {} while (0)
18451
18452 #define APIC_LVT_NUM 6
18453 /* 14 is the version for Xeon and Pentium 8.4.8*/
18454 diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18455 --- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18456 +++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18457 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18458 int level = PT_PAGE_TABLE_LEVEL;
18459 unsigned long mmu_seq;
18460
18461 + pax_track_stack();
18462 +
18463 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18464 kvm_mmu_audit(vcpu, "pre page fault");
18465
18466 diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18467 --- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18468 +++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18469 @@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18470 int cpu = raw_smp_processor_id();
18471
18472 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18473 +
18474 + pax_open_kernel();
18475 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18476 + pax_close_kernel();
18477 +
18478 load_TR_desc();
18479 }
18480
18481 @@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18482 return true;
18483 }
18484
18485 -static struct kvm_x86_ops svm_x86_ops = {
18486 +static const struct kvm_x86_ops svm_x86_ops = {
18487 .cpu_has_kvm_support = has_svm,
18488 .disabled_by_bios = is_disabled,
18489 .hardware_setup = svm_hardware_setup,
18490 diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18491 --- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18492 +++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18493 @@ -570,7 +570,11 @@ static void reload_tss(void)
18494
18495 kvm_get_gdt(&gdt);
18496 descs = (void *)gdt.base;
18497 +
18498 + pax_open_kernel();
18499 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18500 + pax_close_kernel();
18501 +
18502 load_TR_desc();
18503 }
18504
18505 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18506 if (!cpu_has_vmx_flexpriority())
18507 flexpriority_enabled = 0;
18508
18509 - if (!cpu_has_vmx_tpr_shadow())
18510 - kvm_x86_ops->update_cr8_intercept = NULL;
18511 + if (!cpu_has_vmx_tpr_shadow()) {
18512 + pax_open_kernel();
18513 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18514 + pax_close_kernel();
18515 + }
18516
18517 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18518 kvm_disable_largepages();
18519 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18520 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18521
18522 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18523 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18524 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18525 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18526 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18527 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18528 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18529 "jmp .Lkvm_vmx_return \n\t"
18530 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18531 ".Lkvm_vmx_return: "
18532 +
18533 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18534 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18535 + ".Lkvm_vmx_return2: "
18536 +#endif
18537 +
18538 /* Save guest registers, load host registers, keep flags */
18539 "xchg %0, (%%"R"sp) \n\t"
18540 "mov %%"R"ax, %c[rax](%0) \n\t"
18541 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18542 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18543 #endif
18544 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18545 +
18546 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18547 + ,[cs]"i"(__KERNEL_CS)
18548 +#endif
18549 +
18550 : "cc", "memory"
18551 - , R"bx", R"di", R"si"
18552 + , R"ax", R"bx", R"di", R"si"
18553 #ifdef CONFIG_X86_64
18554 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18555 #endif
18556 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18557 if (vmx->rmode.irq.pending)
18558 fixup_rmode_irq(vmx);
18559
18560 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18561 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18562 +
18563 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18564 + loadsegment(fs, __KERNEL_PERCPU);
18565 +#endif
18566 +
18567 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18568 + __set_fs(current_thread_info()->addr_limit);
18569 +#endif
18570 +
18571 vmx->launched = 1;
18572
18573 vmx_complete_interrupts(vmx);
18574 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18575 return false;
18576 }
18577
18578 -static struct kvm_x86_ops vmx_x86_ops = {
18579 +static const struct kvm_x86_ops vmx_x86_ops = {
18580 .cpu_has_kvm_support = cpu_has_kvm_support,
18581 .disabled_by_bios = vmx_disabled_by_bios,
18582 .hardware_setup = hardware_setup,
18583 diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18584 --- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18585 +++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18586 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18587 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18588 struct kvm_cpuid_entry2 __user *entries);
18589
18590 -struct kvm_x86_ops *kvm_x86_ops;
18591 +const struct kvm_x86_ops *kvm_x86_ops;
18592 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18593
18594 int ignore_msrs = 0;
18595 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18596 struct kvm_cpuid2 *cpuid,
18597 struct kvm_cpuid_entry2 __user *entries)
18598 {
18599 - int r;
18600 + int r, i;
18601
18602 r = -E2BIG;
18603 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18604 goto out;
18605 r = -EFAULT;
18606 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18607 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18608 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18609 goto out;
18610 + for (i = 0; i < cpuid->nent; ++i) {
18611 + struct kvm_cpuid_entry2 cpuid_entry;
18612 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18613 + goto out;
18614 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18615 + }
18616 vcpu->arch.cpuid_nent = cpuid->nent;
18617 kvm_apic_set_version(vcpu);
18618 return 0;
18619 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18620 struct kvm_cpuid2 *cpuid,
18621 struct kvm_cpuid_entry2 __user *entries)
18622 {
18623 - int r;
18624 + int r, i;
18625
18626 vcpu_load(vcpu);
18627 r = -E2BIG;
18628 if (cpuid->nent < vcpu->arch.cpuid_nent)
18629 goto out;
18630 r = -EFAULT;
18631 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18632 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18633 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18634 goto out;
18635 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18636 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18637 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18638 + goto out;
18639 + }
18640 return 0;
18641
18642 out:
18643 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18644 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18645 struct kvm_interrupt *irq)
18646 {
18647 - if (irq->irq < 0 || irq->irq >= 256)
18648 + if (irq->irq >= 256)
18649 return -EINVAL;
18650 if (irqchip_in_kernel(vcpu->kvm))
18651 return -ENXIO;
18652 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18653 .notifier_call = kvmclock_cpufreq_notifier
18654 };
18655
18656 -int kvm_arch_init(void *opaque)
18657 +int kvm_arch_init(const void *opaque)
18658 {
18659 int r, cpu;
18660 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18661 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18662
18663 if (kvm_x86_ops) {
18664 printk(KERN_ERR "kvm: already loaded the other module\n");
18665 diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18666 --- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18667 +++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18668 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18669 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18670 * Launcher to reboot us.
18671 */
18672 -static void lguest_restart(char *reason)
18673 +static __noreturn void lguest_restart(char *reason)
18674 {
18675 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18676 + BUG();
18677 }
18678
18679 /*G:050
18680 diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18681 --- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18682 +++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18683 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18684 }
18685 EXPORT_SYMBOL(atomic64_cmpxchg);
18686
18687 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18688 +{
18689 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18690 +}
18691 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18692 +
18693 /**
18694 * atomic64_xchg - xchg atomic64 variable
18695 * @ptr: pointer to type atomic64_t
18696 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18697 EXPORT_SYMBOL(atomic64_xchg);
18698
18699 /**
18700 + * atomic64_xchg_unchecked - xchg atomic64 variable
18701 + * @ptr: pointer to type atomic64_unchecked_t
18702 + * @new_val: value to assign
18703 + *
18704 + * Atomically xchgs the value of @ptr to @new_val and returns
18705 + * the old value.
18706 + */
18707 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18708 +{
18709 + /*
18710 + * Try first with a (possibly incorrect) assumption about
18711 + * what we have there. We'll do two loops most likely,
18712 + * but we'll get an ownership MESI transaction straight away
18713 + * instead of a read transaction followed by a
18714 + * flush-for-ownership transaction:
18715 + */
18716 + u64 old_val, real_val = 0;
18717 +
18718 + do {
18719 + old_val = real_val;
18720 +
18721 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18722 +
18723 + } while (real_val != old_val);
18724 +
18725 + return old_val;
18726 +}
18727 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18728 +
18729 +/**
18730 * atomic64_set - set atomic64 variable
18731 * @ptr: pointer to type atomic64_t
18732 * @new_val: value to assign
18733 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18734 EXPORT_SYMBOL(atomic64_set);
18735
18736 /**
18737 -EXPORT_SYMBOL(atomic64_read);
18738 + * atomic64_unchecked_set - set atomic64 variable
18739 + * @ptr: pointer to type atomic64_unchecked_t
18740 + * @new_val: value to assign
18741 + *
18742 + * Atomically sets the value of @ptr to @new_val.
18743 + */
18744 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18745 +{
18746 + atomic64_xchg_unchecked(ptr, new_val);
18747 +}
18748 +EXPORT_SYMBOL(atomic64_set_unchecked);
18749 +
18750 +/**
18751 * atomic64_add_return - add and return
18752 * @delta: integer value to add
18753 * @ptr: pointer to type atomic64_t
18754 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18755 }
18756 EXPORT_SYMBOL(atomic64_add_return);
18757
18758 +/**
18759 + * atomic64_add_return_unchecked - add and return
18760 + * @delta: integer value to add
18761 + * @ptr: pointer to type atomic64_unchecked_t
18762 + *
18763 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18764 + */
18765 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18766 +{
18767 + /*
18768 + * Try first with a (possibly incorrect) assumption about
18769 + * what we have there. We'll do two loops most likely,
18770 + * but we'll get an ownership MESI transaction straight away
18771 + * instead of a read transaction followed by a
18772 + * flush-for-ownership transaction:
18773 + */
18774 + u64 old_val, new_val, real_val = 0;
18775 +
18776 + do {
18777 + old_val = real_val;
18778 + new_val = old_val + delta;
18779 +
18780 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18781 +
18782 + } while (real_val != old_val);
18783 +
18784 + return new_val;
18785 +}
18786 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18787 +
18788 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18789 {
18790 return atomic64_add_return(-delta, ptr);
18791 }
18792 EXPORT_SYMBOL(atomic64_sub_return);
18793
18794 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18795 +{
18796 + return atomic64_add_return_unchecked(-delta, ptr);
18797 +}
18798 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18799 +
18800 u64 atomic64_inc_return(atomic64_t *ptr)
18801 {
18802 return atomic64_add_return(1, ptr);
18803 }
18804 EXPORT_SYMBOL(atomic64_inc_return);
18805
18806 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18807 +{
18808 + return atomic64_add_return_unchecked(1, ptr);
18809 +}
18810 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18811 +
18812 u64 atomic64_dec_return(atomic64_t *ptr)
18813 {
18814 return atomic64_sub_return(1, ptr);
18815 }
18816 EXPORT_SYMBOL(atomic64_dec_return);
18817
18818 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18819 +{
18820 + return atomic64_sub_return_unchecked(1, ptr);
18821 +}
18822 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18823 +
18824 /**
18825 * atomic64_add - add integer to atomic64 variable
18826 * @delta: integer value to add
18827 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18828 EXPORT_SYMBOL(atomic64_add);
18829
18830 /**
18831 + * atomic64_add_unchecked - add integer to atomic64 variable
18832 + * @delta: integer value to add
18833 + * @ptr: pointer to type atomic64_unchecked_t
18834 + *
18835 + * Atomically adds @delta to @ptr.
18836 + */
18837 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18838 +{
18839 + atomic64_add_return_unchecked(delta, ptr);
18840 +}
18841 +EXPORT_SYMBOL(atomic64_add_unchecked);
18842 +
18843 +/**
18844 * atomic64_sub - subtract the atomic64 variable
18845 * @delta: integer value to subtract
18846 * @ptr: pointer to type atomic64_t
18847 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18848 EXPORT_SYMBOL(atomic64_sub);
18849
18850 /**
18851 + * atomic64_sub_unchecked - subtract the atomic64 variable
18852 + * @delta: integer value to subtract
18853 + * @ptr: pointer to type atomic64_unchecked_t
18854 + *
18855 + * Atomically subtracts @delta from @ptr.
18856 + */
18857 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18858 +{
18859 + atomic64_add_unchecked(-delta, ptr);
18860 +}
18861 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18862 +
18863 +/**
18864 * atomic64_sub_and_test - subtract value from variable and test result
18865 * @delta: integer value to subtract
18866 * @ptr: pointer to type atomic64_t
18867 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18868 EXPORT_SYMBOL(atomic64_inc);
18869
18870 /**
18871 + * atomic64_inc_unchecked - increment atomic64 variable
18872 + * @ptr: pointer to type atomic64_unchecked_t
18873 + *
18874 + * Atomically increments @ptr by 1.
18875 + */
18876 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18877 +{
18878 + atomic64_add_unchecked(1, ptr);
18879 +}
18880 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18881 +
18882 +/**
18883 * atomic64_dec - decrement atomic64 variable
18884 * @ptr: pointer to type atomic64_t
18885 *
18886 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18887 EXPORT_SYMBOL(atomic64_dec);
18888
18889 /**
18890 + * atomic64_dec_unchecked - decrement atomic64 variable
18891 + * @ptr: pointer to type atomic64_unchecked_t
18892 + *
18893 + * Atomically decrements @ptr by 1.
18894 + */
18895 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18896 +{
18897 + atomic64_sub_unchecked(1, ptr);
18898 +}
18899 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18900 +
18901 +/**
18902 * atomic64_dec_and_test - decrement and test
18903 * @ptr: pointer to type atomic64_t
18904 *
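The cmpxchg-based retry loop added above (and its comment about starting from a guessed value so the first access is an ownership MESI transaction) is the generic lock-free read-modify-write pattern. Below is a minimal user-space sketch of that same pattern; cmpxchg64() is a stand-in for the kernel's atomic64_cmpxchg_unchecked(), emulated here with a GCC builtin.

#include <stdint.h>

/* Stand-in for atomic64_cmpxchg_unchecked(): atomically replace *p with
 * 'new' if it currently holds 'old', and return the value observed. */
static uint64_t cmpxchg64(volatile uint64_t *p, uint64_t old, uint64_t new)
{
	return __sync_val_compare_and_swap(p, old, new);
}

static uint64_t add_return_sketch(uint64_t delta, volatile uint64_t *p)
{
	/* Seed with a guess (0) instead of a plain load: the first cmpxchg
	 * then requests the cache line for ownership straight away, and its
	 * return value feeds the next attempt. */
	uint64_t old = 0, new, observed;

	for (;;) {
		new = old + delta;
		observed = cmpxchg64(p, old, new);
		if (observed == old)
			return new;	/* our update was installed */
		old = observed;		/* lost the race: retry from the value actually there */
	}
}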
18905 diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
18906 --- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18907 +++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18908 @@ -28,7 +28,8 @@
18909 #include <linux/linkage.h>
18910 #include <asm/dwarf2.h>
18911 #include <asm/errno.h>
18912 -
18913 +#include <asm/segment.h>
18914 +
18915 /*
18916 * computes a partial checksum, e.g. for TCP/UDP fragments
18917 */
18918 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18919
18920 #define ARGBASE 16
18921 #define FP 12
18922 -
18923 -ENTRY(csum_partial_copy_generic)
18924 +
18925 +ENTRY(csum_partial_copy_generic_to_user)
18926 CFI_STARTPROC
18927 +
18928 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18929 + pushl %gs
18930 + CFI_ADJUST_CFA_OFFSET 4
18931 + popl %es
18932 + CFI_ADJUST_CFA_OFFSET -4
18933 + jmp csum_partial_copy_generic
18934 +#endif
18935 +
18936 +ENTRY(csum_partial_copy_generic_from_user)
18937 +
18938 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18939 + pushl %gs
18940 + CFI_ADJUST_CFA_OFFSET 4
18941 + popl %ds
18942 + CFI_ADJUST_CFA_OFFSET -4
18943 +#endif
18944 +
18945 +ENTRY(csum_partial_copy_generic)
18946 subl $4,%esp
18947 CFI_ADJUST_CFA_OFFSET 4
18948 pushl %edi
18949 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18950 jmp 4f
18951 SRC(1: movw (%esi), %bx )
18952 addl $2, %esi
18953 -DST( movw %bx, (%edi) )
18954 +DST( movw %bx, %es:(%edi) )
18955 addl $2, %edi
18956 addw %bx, %ax
18957 adcl $0, %eax
18958 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18959 SRC(1: movl (%esi), %ebx )
18960 SRC( movl 4(%esi), %edx )
18961 adcl %ebx, %eax
18962 -DST( movl %ebx, (%edi) )
18963 +DST( movl %ebx, %es:(%edi) )
18964 adcl %edx, %eax
18965 -DST( movl %edx, 4(%edi) )
18966 +DST( movl %edx, %es:4(%edi) )
18967
18968 SRC( movl 8(%esi), %ebx )
18969 SRC( movl 12(%esi), %edx )
18970 adcl %ebx, %eax
18971 -DST( movl %ebx, 8(%edi) )
18972 +DST( movl %ebx, %es:8(%edi) )
18973 adcl %edx, %eax
18974 -DST( movl %edx, 12(%edi) )
18975 +DST( movl %edx, %es:12(%edi) )
18976
18977 SRC( movl 16(%esi), %ebx )
18978 SRC( movl 20(%esi), %edx )
18979 adcl %ebx, %eax
18980 -DST( movl %ebx, 16(%edi) )
18981 +DST( movl %ebx, %es:16(%edi) )
18982 adcl %edx, %eax
18983 -DST( movl %edx, 20(%edi) )
18984 +DST( movl %edx, %es:20(%edi) )
18985
18986 SRC( movl 24(%esi), %ebx )
18987 SRC( movl 28(%esi), %edx )
18988 adcl %ebx, %eax
18989 -DST( movl %ebx, 24(%edi) )
18990 +DST( movl %ebx, %es:24(%edi) )
18991 adcl %edx, %eax
18992 -DST( movl %edx, 28(%edi) )
18993 +DST( movl %edx, %es:28(%edi) )
18994
18995 lea 32(%esi), %esi
18996 lea 32(%edi), %edi
18997 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18998 shrl $2, %edx # This clears CF
18999 SRC(3: movl (%esi), %ebx )
19000 adcl %ebx, %eax
19001 -DST( movl %ebx, (%edi) )
19002 +DST( movl %ebx, %es:(%edi) )
19003 lea 4(%esi), %esi
19004 lea 4(%edi), %edi
19005 dec %edx
19006 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19007 jb 5f
19008 SRC( movw (%esi), %cx )
19009 leal 2(%esi), %esi
19010 -DST( movw %cx, (%edi) )
19011 +DST( movw %cx, %es:(%edi) )
19012 leal 2(%edi), %edi
19013 je 6f
19014 shll $16,%ecx
19015 SRC(5: movb (%esi), %cl )
19016 -DST( movb %cl, (%edi) )
19017 +DST( movb %cl, %es:(%edi) )
19018 6: addl %ecx, %eax
19019 adcl $0, %eax
19020 7:
19021 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19022
19023 6001:
19024 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19025 - movl $-EFAULT, (%ebx)
19026 + movl $-EFAULT, %ss:(%ebx)
19027
19028 # zero the complete destination - computing the rest
19029 # is too much work
19030 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19031
19032 6002:
19033 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19034 - movl $-EFAULT,(%ebx)
19035 + movl $-EFAULT,%ss:(%ebx)
19036 jmp 5000b
19037
19038 .previous
19039
19040 + pushl %ss
19041 + CFI_ADJUST_CFA_OFFSET 4
19042 + popl %ds
19043 + CFI_ADJUST_CFA_OFFSET -4
19044 + pushl %ss
19045 + CFI_ADJUST_CFA_OFFSET 4
19046 + popl %es
19047 + CFI_ADJUST_CFA_OFFSET -4
19048 popl %ebx
19049 CFI_ADJUST_CFA_OFFSET -4
19050 CFI_RESTORE ebx
19051 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19052 CFI_ADJUST_CFA_OFFSET -4
19053 ret
19054 CFI_ENDPROC
19055 -ENDPROC(csum_partial_copy_generic)
19056 +ENDPROC(csum_partial_copy_generic_to_user)
19057
19058 #else
19059
19060 /* Version for PentiumII/PPro */
19061
19062 #define ROUND1(x) \
19063 + nop; nop; nop; \
19064 SRC(movl x(%esi), %ebx ) ; \
19065 addl %ebx, %eax ; \
19066 - DST(movl %ebx, x(%edi) ) ;
19067 + DST(movl %ebx, %es:x(%edi)) ;
19068
19069 #define ROUND(x) \
19070 + nop; nop; nop; \
19071 SRC(movl x(%esi), %ebx ) ; \
19072 adcl %ebx, %eax ; \
19073 - DST(movl %ebx, x(%edi) ) ;
19074 + DST(movl %ebx, %es:x(%edi)) ;
19075
19076 #define ARGBASE 12
19077 -
19078 -ENTRY(csum_partial_copy_generic)
19079 +
19080 +ENTRY(csum_partial_copy_generic_to_user)
19081 CFI_STARTPROC
19082 +
19083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19084 + pushl %gs
19085 + CFI_ADJUST_CFA_OFFSET 4
19086 + popl %es
19087 + CFI_ADJUST_CFA_OFFSET -4
19088 + jmp csum_partial_copy_generic
19089 +#endif
19090 +
19091 +ENTRY(csum_partial_copy_generic_from_user)
19092 +
19093 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19094 + pushl %gs
19095 + CFI_ADJUST_CFA_OFFSET 4
19096 + popl %ds
19097 + CFI_ADJUST_CFA_OFFSET -4
19098 +#endif
19099 +
19100 +ENTRY(csum_partial_copy_generic)
19101 pushl %ebx
19102 CFI_ADJUST_CFA_OFFSET 4
19103 CFI_REL_OFFSET ebx, 0
19104 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19105 subl %ebx, %edi
19106 lea -1(%esi),%edx
19107 andl $-32,%edx
19108 - lea 3f(%ebx,%ebx), %ebx
19109 + lea 3f(%ebx,%ebx,2), %ebx
19110 testl %esi, %esi
19111 jmp *%ebx
19112 1: addl $64,%esi
19113 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19114 jb 5f
19115 SRC( movw (%esi), %dx )
19116 leal 2(%esi), %esi
19117 -DST( movw %dx, (%edi) )
19118 +DST( movw %dx, %es:(%edi) )
19119 leal 2(%edi), %edi
19120 je 6f
19121 shll $16,%edx
19122 5:
19123 SRC( movb (%esi), %dl )
19124 -DST( movb %dl, (%edi) )
19125 +DST( movb %dl, %es:(%edi) )
19126 6: addl %edx, %eax
19127 adcl $0, %eax
19128 7:
19129 .section .fixup, "ax"
19130 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19131 - movl $-EFAULT, (%ebx)
19132 + movl $-EFAULT, %ss:(%ebx)
19133 # zero the complete destination (computing the rest is too much work)
19134 movl ARGBASE+8(%esp),%edi # dst
19135 movl ARGBASE+12(%esp),%ecx # len
19136 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19137 rep; stosb
19138 jmp 7b
19139 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19140 - movl $-EFAULT, (%ebx)
19141 + movl $-EFAULT, %ss:(%ebx)
19142 jmp 7b
19143 .previous
19144
19145 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19146 + pushl %ss
19147 + CFI_ADJUST_CFA_OFFSET 4
19148 + popl %ds
19149 + CFI_ADJUST_CFA_OFFSET -4
19150 + pushl %ss
19151 + CFI_ADJUST_CFA_OFFSET 4
19152 + popl %es
19153 + CFI_ADJUST_CFA_OFFSET -4
19154 +#endif
19155 +
19156 popl %esi
19157 CFI_ADJUST_CFA_OFFSET -4
19158 CFI_RESTORE esi
19159 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19160 CFI_RESTORE ebx
19161 ret
19162 CFI_ENDPROC
19163 -ENDPROC(csum_partial_copy_generic)
19164 +ENDPROC(csum_partial_copy_generic_to_user)
19165
19166 #undef ROUND
19167 #undef ROUND1
19168 diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19169 --- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19170 +++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19171 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
19172
19173 #include <asm/cpufeature.h>
19174
19175 - .section .altinstr_replacement,"ax"
19176 + .section .altinstr_replacement,"a"
19177 1: .byte 0xeb /* jmp <disp8> */
19178 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19179 2:
19180 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19181 --- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19182 +++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19183 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
19184
19185 #include <asm/cpufeature.h>
19186
19187 - .section .altinstr_replacement,"ax"
19188 + .section .altinstr_replacement,"a"
19189 1: .byte 0xeb /* jmp <disp8> */
19190 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19191 2:
19192 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19193 --- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19194 +++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19195 @@ -15,13 +15,14 @@
19196 #include <asm/asm-offsets.h>
19197 #include <asm/thread_info.h>
19198 #include <asm/cpufeature.h>
19199 +#include <asm/pgtable.h>
19200
19201 .macro ALTERNATIVE_JUMP feature,orig,alt
19202 0:
19203 .byte 0xe9 /* 32bit jump */
19204 .long \orig-1f /* by default jump to orig */
19205 1:
19206 - .section .altinstr_replacement,"ax"
19207 + .section .altinstr_replacement,"a"
19208 2: .byte 0xe9 /* near jump with 32bit immediate */
19209 .long \alt-1b /* offset */ /* or alternatively to alt */
19210 .previous
19211 @@ -64,49 +65,19 @@
19212 #endif
19213 .endm
19214
19215 -/* Standard copy_to_user with segment limit checking */
19216 -ENTRY(copy_to_user)
19217 - CFI_STARTPROC
19218 - GET_THREAD_INFO(%rax)
19219 - movq %rdi,%rcx
19220 - addq %rdx,%rcx
19221 - jc bad_to_user
19222 - cmpq TI_addr_limit(%rax),%rcx
19223 - ja bad_to_user
19224 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19225 - CFI_ENDPROC
19226 -ENDPROC(copy_to_user)
19227 -
19228 -/* Standard copy_from_user with segment limit checking */
19229 -ENTRY(copy_from_user)
19230 - CFI_STARTPROC
19231 - GET_THREAD_INFO(%rax)
19232 - movq %rsi,%rcx
19233 - addq %rdx,%rcx
19234 - jc bad_from_user
19235 - cmpq TI_addr_limit(%rax),%rcx
19236 - ja bad_from_user
19237 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19238 - CFI_ENDPROC
19239 -ENDPROC(copy_from_user)
19240 -
19241 ENTRY(copy_user_generic)
19242 CFI_STARTPROC
19243 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19244 CFI_ENDPROC
19245 ENDPROC(copy_user_generic)
19246
19247 -ENTRY(__copy_from_user_inatomic)
19248 - CFI_STARTPROC
19249 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19250 - CFI_ENDPROC
19251 -ENDPROC(__copy_from_user_inatomic)
19252 -
19253 .section .fixup,"ax"
19254 /* must zero dest */
19255 ENTRY(bad_from_user)
19256 bad_from_user:
19257 CFI_STARTPROC
19258 + testl %edx,%edx
19259 + js bad_to_user
19260 movl %edx,%ecx
19261 xorl %eax,%eax
19262 rep
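The two ENTRY stubs deleted above performed the user-range validation in assembly before dispatching to copy_user_generic: add the length to the pointer, fail on carry (wrap-around), and fail if the end of the range exceeds TI_addr_limit. The new testl %edx,%edx / js bad_to_user guard additionally rejects a length with its sign bit set before it is reused as a rep count. A C rendering of that overflow-aware check, purely for illustration (range_ok() and the addr_limit argument are not kernel API):

#include <stdint.h>
#include <stdbool.h>

static bool range_ok(uint64_t addr, uint64_t len, uint64_t addr_limit)
{
	uint64_t end = addr + len;

	if (end < addr)			/* addq/jc: the addition wrapped */
		return false;
	return end <= addr_limit;	/* cmpq/ja: stay at or below the task's limit */
}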
19263 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19264 --- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19265 +++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19266 @@ -14,6 +14,7 @@
19267 #include <asm/current.h>
19268 #include <asm/asm-offsets.h>
19269 #include <asm/thread_info.h>
19270 +#include <asm/pgtable.h>
19271
19272 .macro ALIGN_DESTINATION
19273 #ifdef FIX_ALIGNMENT
19274 @@ -50,6 +51,15 @@
19275 */
19276 ENTRY(__copy_user_nocache)
19277 CFI_STARTPROC
19278 +
19279 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19280 + mov $PAX_USER_SHADOW_BASE,%rcx
19281 + cmp %rcx,%rsi
19282 + jae 1f
19283 + add %rcx,%rsi
19284 +1:
19285 +#endif
19286 +
19287 cmpl $8,%edx
19288 	jb 20f /* less than 8 bytes, go to byte copy loop */
19289 ALIGN_DESTINATION
19290 diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19291 --- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19292 +++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19293 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19294 len -= 2;
19295 }
19296 }
19297 +
19298 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19299 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19300 + src += PAX_USER_SHADOW_BASE;
19301 +#endif
19302 +
19303 isum = csum_partial_copy_generic((__force const void *)src,
19304 dst, len, isum, errp, NULL);
19305 if (unlikely(*errp))
19306 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19307 }
19308
19309 *errp = 0;
19310 +
19311 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19312 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19313 + dst += PAX_USER_SHADOW_BASE;
19314 +#endif
19315 +
19316 return csum_partial_copy_generic(src, (void __force *)dst,
19317 len, isum, NULL, errp);
19318 }
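Both hunks above apply the same UDEREF idiom in C: a userland pointer that still lies below PAX_USER_SHADOW_BASE is shifted up into the shadow mapping before the unchecked copy routine dereferences it. A stand-alone sketch of that re-basing step follows; the real constant comes from asm/pgtable.h elsewhere in this patch, so the value below is only a placeholder.

#include <stdint.h>

#define SHADOW_BASE_SKETCH	0x1000000000000UL	/* placeholder, not the real PAX_USER_SHADOW_BASE */

static const void *uderef_rebase(const void *user_ptr)
{
	uint64_t addr = (uint64_t)(uintptr_t)user_ptr;

	if (addr < SHADOW_BASE_SKETCH)
		addr += SHADOW_BASE_SKETCH;	/* lift the pointer into the shadow area */
	return (const void *)(uintptr_t)addr;
}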
19319 diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19320 --- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19321 +++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19322 @@ -33,14 +33,35 @@
19323 #include <asm/asm-offsets.h>
19324 #include <asm/thread_info.h>
19325 #include <asm/asm.h>
19326 +#include <asm/segment.h>
19327 +#include <asm/pgtable.h>
19328 +
19329 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19330 +#define __copyuser_seg gs;
19331 +#else
19332 +#define __copyuser_seg
19333 +#endif
19334
19335 .text
19336 ENTRY(__get_user_1)
19337 CFI_STARTPROC
19338 +
19339 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19340 GET_THREAD_INFO(%_ASM_DX)
19341 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19342 jae bad_get_user
19343 -1: movzb (%_ASM_AX),%edx
19344 +
19345 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19346 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19347 + cmp %_ASM_DX,%_ASM_AX
19348 + jae 1234f
19349 + add %_ASM_DX,%_ASM_AX
19350 +1234:
19351 +#endif
19352 +
19353 +#endif
19354 +
19355 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19356 xor %eax,%eax
19357 ret
19358 CFI_ENDPROC
19359 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19360 ENTRY(__get_user_2)
19361 CFI_STARTPROC
19362 add $1,%_ASM_AX
19363 +
19364 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19365 jc bad_get_user
19366 GET_THREAD_INFO(%_ASM_DX)
19367 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19368 jae bad_get_user
19369 -2: movzwl -1(%_ASM_AX),%edx
19370 +
19371 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19372 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19373 + cmp %_ASM_DX,%_ASM_AX
19374 + jae 1234f
19375 + add %_ASM_DX,%_ASM_AX
19376 +1234:
19377 +#endif
19378 +
19379 +#endif
19380 +
19381 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19382 xor %eax,%eax
19383 ret
19384 CFI_ENDPROC
19385 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19386 ENTRY(__get_user_4)
19387 CFI_STARTPROC
19388 add $3,%_ASM_AX
19389 +
19390 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19391 jc bad_get_user
19392 GET_THREAD_INFO(%_ASM_DX)
19393 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19394 jae bad_get_user
19395 -3: mov -3(%_ASM_AX),%edx
19396 +
19397 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19398 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19399 + cmp %_ASM_DX,%_ASM_AX
19400 + jae 1234f
19401 + add %_ASM_DX,%_ASM_AX
19402 +1234:
19403 +#endif
19404 +
19405 +#endif
19406 +
19407 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19408 xor %eax,%eax
19409 ret
19410 CFI_ENDPROC
19411 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19412 GET_THREAD_INFO(%_ASM_DX)
19413 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19414 jae bad_get_user
19415 +
19416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19417 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19418 + cmp %_ASM_DX,%_ASM_AX
19419 + jae 1234f
19420 + add %_ASM_DX,%_ASM_AX
19421 +1234:
19422 +#endif
19423 +
19424 4: movq -7(%_ASM_AX),%_ASM_DX
19425 xor %eax,%eax
19426 ret
19427 diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19428 --- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19429 +++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19430 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19431 * It is also a lot simpler. Use this when possible:
19432 */
19433
19434 - .section .altinstr_replacement, "ax"
19435 + .section .altinstr_replacement, "a"
19436 1: .byte 0xeb /* jmp <disp8> */
19437 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19438 2:
19439 diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19440 --- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19441 +++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19442 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19443
19444 #include <asm/cpufeature.h>
19445
19446 - .section .altinstr_replacement,"ax"
19447 + .section .altinstr_replacement,"a"
19448 1: .byte 0xeb /* jmp <disp8> */
19449 .byte (memset_c - memset) - (2f - 1b) /* offset */
19450 2:
19451 diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19452 --- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19453 +++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19454 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19455 {
19456 void *p;
19457 int i;
19458 + unsigned long cr0;
19459
19460 if (unlikely(in_interrupt()))
19461 return __memcpy(to, from, len);
19462 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19463 kernel_fpu_begin();
19464
19465 __asm__ __volatile__ (
19466 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19467 - " prefetch 64(%0)\n"
19468 - " prefetch 128(%0)\n"
19469 - " prefetch 192(%0)\n"
19470 - " prefetch 256(%0)\n"
19471 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19472 + " prefetch 64(%1)\n"
19473 + " prefetch 128(%1)\n"
19474 + " prefetch 192(%1)\n"
19475 + " prefetch 256(%1)\n"
19476 "2: \n"
19477 ".section .fixup, \"ax\"\n"
19478 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19479 + "3: \n"
19480 +
19481 +#ifdef CONFIG_PAX_KERNEXEC
19482 + " movl %%cr0, %0\n"
19483 + " movl %0, %%eax\n"
19484 + " andl $0xFFFEFFFF, %%eax\n"
19485 + " movl %%eax, %%cr0\n"
19486 +#endif
19487 +
19488 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19489 +
19490 +#ifdef CONFIG_PAX_KERNEXEC
19491 + " movl %0, %%cr0\n"
19492 +#endif
19493 +
19494 " jmp 2b\n"
19495 ".previous\n"
19496 _ASM_EXTABLE(1b, 3b)
19497 - : : "r" (from));
19498 + : "=&r" (cr0) : "r" (from) : "ax");
19499
19500 for ( ; i > 5; i--) {
19501 __asm__ __volatile__ (
19502 - "1: prefetch 320(%0)\n"
19503 - "2: movq (%0), %%mm0\n"
19504 - " movq 8(%0), %%mm1\n"
19505 - " movq 16(%0), %%mm2\n"
19506 - " movq 24(%0), %%mm3\n"
19507 - " movq %%mm0, (%1)\n"
19508 - " movq %%mm1, 8(%1)\n"
19509 - " movq %%mm2, 16(%1)\n"
19510 - " movq %%mm3, 24(%1)\n"
19511 - " movq 32(%0), %%mm0\n"
19512 - " movq 40(%0), %%mm1\n"
19513 - " movq 48(%0), %%mm2\n"
19514 - " movq 56(%0), %%mm3\n"
19515 - " movq %%mm0, 32(%1)\n"
19516 - " movq %%mm1, 40(%1)\n"
19517 - " movq %%mm2, 48(%1)\n"
19518 - " movq %%mm3, 56(%1)\n"
19519 + "1: prefetch 320(%1)\n"
19520 + "2: movq (%1), %%mm0\n"
19521 + " movq 8(%1), %%mm1\n"
19522 + " movq 16(%1), %%mm2\n"
19523 + " movq 24(%1), %%mm3\n"
19524 + " movq %%mm0, (%2)\n"
19525 + " movq %%mm1, 8(%2)\n"
19526 + " movq %%mm2, 16(%2)\n"
19527 + " movq %%mm3, 24(%2)\n"
19528 + " movq 32(%1), %%mm0\n"
19529 + " movq 40(%1), %%mm1\n"
19530 + " movq 48(%1), %%mm2\n"
19531 + " movq 56(%1), %%mm3\n"
19532 + " movq %%mm0, 32(%2)\n"
19533 + " movq %%mm1, 40(%2)\n"
19534 + " movq %%mm2, 48(%2)\n"
19535 + " movq %%mm3, 56(%2)\n"
19536 ".section .fixup, \"ax\"\n"
19537 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19538 + "3:\n"
19539 +
19540 +#ifdef CONFIG_PAX_KERNEXEC
19541 + " movl %%cr0, %0\n"
19542 + " movl %0, %%eax\n"
19543 + " andl $0xFFFEFFFF, %%eax\n"
19544 + " movl %%eax, %%cr0\n"
19545 +#endif
19546 +
19547 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19548 +
19549 +#ifdef CONFIG_PAX_KERNEXEC
19550 + " movl %0, %%cr0\n"
19551 +#endif
19552 +
19553 " jmp 2b\n"
19554 ".previous\n"
19555 _ASM_EXTABLE(1b, 3b)
19556 - : : "r" (from), "r" (to) : "memory");
19557 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19558
19559 from += 64;
19560 to += 64;
19561 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19562 static void fast_copy_page(void *to, void *from)
19563 {
19564 int i;
19565 + unsigned long cr0;
19566
19567 kernel_fpu_begin();
19568
19569 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19570 * but that is for later. -AV
19571 */
19572 __asm__ __volatile__(
19573 - "1: prefetch (%0)\n"
19574 - " prefetch 64(%0)\n"
19575 - " prefetch 128(%0)\n"
19576 - " prefetch 192(%0)\n"
19577 - " prefetch 256(%0)\n"
19578 + "1: prefetch (%1)\n"
19579 + " prefetch 64(%1)\n"
19580 + " prefetch 128(%1)\n"
19581 + " prefetch 192(%1)\n"
19582 + " prefetch 256(%1)\n"
19583 "2: \n"
19584 ".section .fixup, \"ax\"\n"
19585 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19586 + "3: \n"
19587 +
19588 +#ifdef CONFIG_PAX_KERNEXEC
19589 + " movl %%cr0, %0\n"
19590 + " movl %0, %%eax\n"
19591 + " andl $0xFFFEFFFF, %%eax\n"
19592 + " movl %%eax, %%cr0\n"
19593 +#endif
19594 +
19595 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19596 +
19597 +#ifdef CONFIG_PAX_KERNEXEC
19598 + " movl %0, %%cr0\n"
19599 +#endif
19600 +
19601 " jmp 2b\n"
19602 ".previous\n"
19603 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19604 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19605
19606 for (i = 0; i < (4096-320)/64; i++) {
19607 __asm__ __volatile__ (
19608 - "1: prefetch 320(%0)\n"
19609 - "2: movq (%0), %%mm0\n"
19610 - " movntq %%mm0, (%1)\n"
19611 - " movq 8(%0), %%mm1\n"
19612 - " movntq %%mm1, 8(%1)\n"
19613 - " movq 16(%0), %%mm2\n"
19614 - " movntq %%mm2, 16(%1)\n"
19615 - " movq 24(%0), %%mm3\n"
19616 - " movntq %%mm3, 24(%1)\n"
19617 - " movq 32(%0), %%mm4\n"
19618 - " movntq %%mm4, 32(%1)\n"
19619 - " movq 40(%0), %%mm5\n"
19620 - " movntq %%mm5, 40(%1)\n"
19621 - " movq 48(%0), %%mm6\n"
19622 - " movntq %%mm6, 48(%1)\n"
19623 - " movq 56(%0), %%mm7\n"
19624 - " movntq %%mm7, 56(%1)\n"
19625 + "1: prefetch 320(%1)\n"
19626 + "2: movq (%1), %%mm0\n"
19627 + " movntq %%mm0, (%2)\n"
19628 + " movq 8(%1), %%mm1\n"
19629 + " movntq %%mm1, 8(%2)\n"
19630 + " movq 16(%1), %%mm2\n"
19631 + " movntq %%mm2, 16(%2)\n"
19632 + " movq 24(%1), %%mm3\n"
19633 + " movntq %%mm3, 24(%2)\n"
19634 + " movq 32(%1), %%mm4\n"
19635 + " movntq %%mm4, 32(%2)\n"
19636 + " movq 40(%1), %%mm5\n"
19637 + " movntq %%mm5, 40(%2)\n"
19638 + " movq 48(%1), %%mm6\n"
19639 + " movntq %%mm6, 48(%2)\n"
19640 + " movq 56(%1), %%mm7\n"
19641 + " movntq %%mm7, 56(%2)\n"
19642 ".section .fixup, \"ax\"\n"
19643 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19644 + "3:\n"
19645 +
19646 +#ifdef CONFIG_PAX_KERNEXEC
19647 + " movl %%cr0, %0\n"
19648 + " movl %0, %%eax\n"
19649 + " andl $0xFFFEFFFF, %%eax\n"
19650 + " movl %%eax, %%cr0\n"
19651 +#endif
19652 +
19653 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19654 +
19655 +#ifdef CONFIG_PAX_KERNEXEC
19656 + " movl %0, %%cr0\n"
19657 +#endif
19658 +
19659 " jmp 2b\n"
19660 ".previous\n"
19661 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19662 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19663
19664 from += 64;
19665 to += 64;
19666 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19667 static void fast_copy_page(void *to, void *from)
19668 {
19669 int i;
19670 + unsigned long cr0;
19671
19672 kernel_fpu_begin();
19673
19674 __asm__ __volatile__ (
19675 - "1: prefetch (%0)\n"
19676 - " prefetch 64(%0)\n"
19677 - " prefetch 128(%0)\n"
19678 - " prefetch 192(%0)\n"
19679 - " prefetch 256(%0)\n"
19680 + "1: prefetch (%1)\n"
19681 + " prefetch 64(%1)\n"
19682 + " prefetch 128(%1)\n"
19683 + " prefetch 192(%1)\n"
19684 + " prefetch 256(%1)\n"
19685 "2: \n"
19686 ".section .fixup, \"ax\"\n"
19687 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19688 + "3: \n"
19689 +
19690 +#ifdef CONFIG_PAX_KERNEXEC
19691 + " movl %%cr0, %0\n"
19692 + " movl %0, %%eax\n"
19693 + " andl $0xFFFEFFFF, %%eax\n"
19694 + " movl %%eax, %%cr0\n"
19695 +#endif
19696 +
19697 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19698 +
19699 +#ifdef CONFIG_PAX_KERNEXEC
19700 + " movl %0, %%cr0\n"
19701 +#endif
19702 +
19703 " jmp 2b\n"
19704 ".previous\n"
19705 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19706 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19707
19708 for (i = 0; i < 4096/64; i++) {
19709 __asm__ __volatile__ (
19710 - "1: prefetch 320(%0)\n"
19711 - "2: movq (%0), %%mm0\n"
19712 - " movq 8(%0), %%mm1\n"
19713 - " movq 16(%0), %%mm2\n"
19714 - " movq 24(%0), %%mm3\n"
19715 - " movq %%mm0, (%1)\n"
19716 - " movq %%mm1, 8(%1)\n"
19717 - " movq %%mm2, 16(%1)\n"
19718 - " movq %%mm3, 24(%1)\n"
19719 - " movq 32(%0), %%mm0\n"
19720 - " movq 40(%0), %%mm1\n"
19721 - " movq 48(%0), %%mm2\n"
19722 - " movq 56(%0), %%mm3\n"
19723 - " movq %%mm0, 32(%1)\n"
19724 - " movq %%mm1, 40(%1)\n"
19725 - " movq %%mm2, 48(%1)\n"
19726 - " movq %%mm3, 56(%1)\n"
19727 + "1: prefetch 320(%1)\n"
19728 + "2: movq (%1), %%mm0\n"
19729 + " movq 8(%1), %%mm1\n"
19730 + " movq 16(%1), %%mm2\n"
19731 + " movq 24(%1), %%mm3\n"
19732 + " movq %%mm0, (%2)\n"
19733 + " movq %%mm1, 8(%2)\n"
19734 + " movq %%mm2, 16(%2)\n"
19735 + " movq %%mm3, 24(%2)\n"
19736 + " movq 32(%1), %%mm0\n"
19737 + " movq 40(%1), %%mm1\n"
19738 + " movq 48(%1), %%mm2\n"
19739 + " movq 56(%1), %%mm3\n"
19740 + " movq %%mm0, 32(%2)\n"
19741 + " movq %%mm1, 40(%2)\n"
19742 + " movq %%mm2, 48(%2)\n"
19743 + " movq %%mm3, 56(%2)\n"
19744 ".section .fixup, \"ax\"\n"
19745 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19746 + "3:\n"
19747 +
19748 +#ifdef CONFIG_PAX_KERNEXEC
19749 + " movl %%cr0, %0\n"
19750 + " movl %0, %%eax\n"
19751 + " andl $0xFFFEFFFF, %%eax\n"
19752 + " movl %%eax, %%cr0\n"
19753 +#endif
19754 +
19755 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19756 +
19757 +#ifdef CONFIG_PAX_KERNEXEC
19758 + " movl %0, %%cr0\n"
19759 +#endif
19760 +
19761 " jmp 2b\n"
19762 ".previous\n"
19763 _ASM_EXTABLE(1b, 3b)
19764 - : : "r" (from), "r" (to) : "memory");
19765 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19766
19767 from += 64;
19768 to += 64;
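Every CONFIG_PAX_KERNEXEC block added above wraps the self-patching movw store (which rewrites the prefetch instruction sitting in read-only kernel text) in the same CR0 dance: save CR0, clear the write-protect bit (that is what the 0xFFFEFFFF mask does to bit 16), perform the store, restore CR0. A compact sketch of that open/close pairing; read_cr0()/write_cr0() are the kernel's CR0 accessors (their header location varies by kernel version) and the helper names are illustrative only.

#define CR0_WP_SKETCH	(1UL << 16)	/* the bit cleared by andl $0xFFFEFFFF */

/* read_cr0()/write_cr0() come from the x86 kernel headers. */
static inline unsigned long kernexec_open_sketch(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~CR0_WP_SKETCH);	/* allow stores to read-only text */
	return cr0;				/* caller hands this back to the close helper */
}

static inline void kernexec_close_sketch(unsigned long cr0)
{
	write_cr0(cr0);				/* restore CR0, re-enabling write protection */
}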
19769 diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19770 --- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19771 +++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19772 @@ -15,7 +15,8 @@
19773 #include <asm/thread_info.h>
19774 #include <asm/errno.h>
19775 #include <asm/asm.h>
19776 -
19777 +#include <asm/segment.h>
19778 +#include <asm/pgtable.h>
19779
19780 /*
19781 * __put_user_X
19782 @@ -29,52 +30,119 @@
19783 * as they get called from within inline assembly.
19784 */
19785
19786 -#define ENTER CFI_STARTPROC ; \
19787 - GET_THREAD_INFO(%_ASM_BX)
19788 +#define ENTER CFI_STARTPROC
19789 #define EXIT ret ; \
19790 CFI_ENDPROC
19791
19792 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19793 +#define _DEST %_ASM_CX,%_ASM_BX
19794 +#else
19795 +#define _DEST %_ASM_CX
19796 +#endif
19797 +
19798 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19799 +#define __copyuser_seg gs;
19800 +#else
19801 +#define __copyuser_seg
19802 +#endif
19803 +
19804 .text
19805 ENTRY(__put_user_1)
19806 ENTER
19807 +
19808 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19809 + GET_THREAD_INFO(%_ASM_BX)
19810 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19811 jae bad_put_user
19812 -1: movb %al,(%_ASM_CX)
19813 +
19814 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19815 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19816 + cmp %_ASM_BX,%_ASM_CX
19817 + jb 1234f
19818 + xor %ebx,%ebx
19819 +1234:
19820 +#endif
19821 +
19822 +#endif
19823 +
19824 +1: __copyuser_seg movb %al,(_DEST)
19825 xor %eax,%eax
19826 EXIT
19827 ENDPROC(__put_user_1)
19828
19829 ENTRY(__put_user_2)
19830 ENTER
19831 +
19832 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19833 + GET_THREAD_INFO(%_ASM_BX)
19834 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19835 sub $1,%_ASM_BX
19836 cmp %_ASM_BX,%_ASM_CX
19837 jae bad_put_user
19838 -2: movw %ax,(%_ASM_CX)
19839 +
19840 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19841 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19842 + cmp %_ASM_BX,%_ASM_CX
19843 + jb 1234f
19844 + xor %ebx,%ebx
19845 +1234:
19846 +#endif
19847 +
19848 +#endif
19849 +
19850 +2: __copyuser_seg movw %ax,(_DEST)
19851 xor %eax,%eax
19852 EXIT
19853 ENDPROC(__put_user_2)
19854
19855 ENTRY(__put_user_4)
19856 ENTER
19857 +
19858 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19859 + GET_THREAD_INFO(%_ASM_BX)
19860 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19861 sub $3,%_ASM_BX
19862 cmp %_ASM_BX,%_ASM_CX
19863 jae bad_put_user
19864 -3: movl %eax,(%_ASM_CX)
19865 +
19866 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19867 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19868 + cmp %_ASM_BX,%_ASM_CX
19869 + jb 1234f
19870 + xor %ebx,%ebx
19871 +1234:
19872 +#endif
19873 +
19874 +#endif
19875 +
19876 +3: __copyuser_seg movl %eax,(_DEST)
19877 xor %eax,%eax
19878 EXIT
19879 ENDPROC(__put_user_4)
19880
19881 ENTRY(__put_user_8)
19882 ENTER
19883 +
19884 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19885 + GET_THREAD_INFO(%_ASM_BX)
19886 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19887 sub $7,%_ASM_BX
19888 cmp %_ASM_BX,%_ASM_CX
19889 jae bad_put_user
19890 -4: mov %_ASM_AX,(%_ASM_CX)
19891 +
19892 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19893 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19894 + cmp %_ASM_BX,%_ASM_CX
19895 + jb 1234f
19896 + xor %ebx,%ebx
19897 +1234:
19898 +#endif
19899 +
19900 +#endif
19901 +
19902 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19903 #ifdef CONFIG_X86_32
19904 -5: movl %edx,4(%_ASM_CX)
19905 +5: __copyuser_seg movl %edx,4(_DEST)
19906 #endif
19907 xor %eax,%eax
19908 EXIT
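The 64-bit UDEREF path above uses a different flavour of the shadow-base trick than the pointer re-basing seen earlier: the user pointer stays untouched in %_ASM_CX and an index register (%_ASM_BX) carries either the shadow offset or zero, so the store through _DEST, i.e. (%_ASM_CX,%_ASM_BX), lands in the right mapping either way. The same logic in C, again with an illustrative placeholder for the base constant:

#include <stdint.h>

#define SHADOW_BASE_SKETCH	0x1000000000000UL	/* placeholder for PAX_USER_SHADOW_BASE */

static void put_user_u8_sketch(uint8_t val, uint8_t *uaddr)
{
	uint64_t index = SHADOW_BASE_SKETCH;		/* mov $PAX_USER_SHADOW_BASE,%_ASM_BX */

	if ((uintptr_t)uaddr >= SHADOW_BASE_SKETCH)
		index = 0;				/* already above the base: xor %ebx,%ebx */

	*(uaddr + index) = val;				/* movb %al,(%_ASM_CX,%_ASM_BX) */
}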
19909 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
19910 --- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19911 +++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19912 @@ -43,7 +43,7 @@ do { \
19913 __asm__ __volatile__( \
19914 " testl %1,%1\n" \
19915 " jz 2f\n" \
19916 - "0: lodsb\n" \
19917 + "0: "__copyuser_seg"lodsb\n" \
19918 " stosb\n" \
19919 " testb %%al,%%al\n" \
19920 " jz 1f\n" \
19921 @@ -128,10 +128,12 @@ do { \
19922 int __d0; \
19923 might_fault(); \
19924 __asm__ __volatile__( \
19925 + __COPYUSER_SET_ES \
19926 "0: rep; stosl\n" \
19927 " movl %2,%0\n" \
19928 "1: rep; stosb\n" \
19929 "2:\n" \
19930 + __COPYUSER_RESTORE_ES \
19931 ".section .fixup,\"ax\"\n" \
19932 "3: lea 0(%2,%0,4),%0\n" \
19933 " jmp 2b\n" \
19934 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19935 might_fault();
19936
19937 __asm__ __volatile__(
19938 + __COPYUSER_SET_ES
19939 " testl %0, %0\n"
19940 " jz 3f\n"
19941 " andl %0,%%ecx\n"
19942 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19943 " subl %%ecx,%0\n"
19944 " addl %0,%%eax\n"
19945 "1:\n"
19946 + __COPYUSER_RESTORE_ES
19947 ".section .fixup,\"ax\"\n"
19948 "2: xorl %%eax,%%eax\n"
19949 " jmp 1b\n"
19950 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19951
19952 #ifdef CONFIG_X86_INTEL_USERCOPY
19953 static unsigned long
19954 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19955 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19956 {
19957 int d0, d1;
19958 __asm__ __volatile__(
19959 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19960 " .align 2,0x90\n"
19961 "3: movl 0(%4), %%eax\n"
19962 "4: movl 4(%4), %%edx\n"
19963 - "5: movl %%eax, 0(%3)\n"
19964 - "6: movl %%edx, 4(%3)\n"
19965 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19966 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19967 "7: movl 8(%4), %%eax\n"
19968 "8: movl 12(%4),%%edx\n"
19969 - "9: movl %%eax, 8(%3)\n"
19970 - "10: movl %%edx, 12(%3)\n"
19971 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19972 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19973 "11: movl 16(%4), %%eax\n"
19974 "12: movl 20(%4), %%edx\n"
19975 - "13: movl %%eax, 16(%3)\n"
19976 - "14: movl %%edx, 20(%3)\n"
19977 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19978 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19979 "15: movl 24(%4), %%eax\n"
19980 "16: movl 28(%4), %%edx\n"
19981 - "17: movl %%eax, 24(%3)\n"
19982 - "18: movl %%edx, 28(%3)\n"
19983 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19984 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19985 "19: movl 32(%4), %%eax\n"
19986 "20: movl 36(%4), %%edx\n"
19987 - "21: movl %%eax, 32(%3)\n"
19988 - "22: movl %%edx, 36(%3)\n"
19989 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19990 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19991 "23: movl 40(%4), %%eax\n"
19992 "24: movl 44(%4), %%edx\n"
19993 - "25: movl %%eax, 40(%3)\n"
19994 - "26: movl %%edx, 44(%3)\n"
19995 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19996 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19997 "27: movl 48(%4), %%eax\n"
19998 "28: movl 52(%4), %%edx\n"
19999 - "29: movl %%eax, 48(%3)\n"
20000 - "30: movl %%edx, 52(%3)\n"
20001 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20002 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20003 "31: movl 56(%4), %%eax\n"
20004 "32: movl 60(%4), %%edx\n"
20005 - "33: movl %%eax, 56(%3)\n"
20006 - "34: movl %%edx, 60(%3)\n"
20007 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20008 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20009 " addl $-64, %0\n"
20010 " addl $64, %4\n"
20011 " addl $64, %3\n"
20012 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20013 " shrl $2, %0\n"
20014 " andl $3, %%eax\n"
20015 " cld\n"
20016 + __COPYUSER_SET_ES
20017 "99: rep; movsl\n"
20018 "36: movl %%eax, %0\n"
20019 "37: rep; movsb\n"
20020 "100:\n"
20021 + __COPYUSER_RESTORE_ES
20022 + ".section .fixup,\"ax\"\n"
20023 + "101: lea 0(%%eax,%0,4),%0\n"
20024 + " jmp 100b\n"
20025 + ".previous\n"
20026 + ".section __ex_table,\"a\"\n"
20027 + " .align 4\n"
20028 + " .long 1b,100b\n"
20029 + " .long 2b,100b\n"
20030 + " .long 3b,100b\n"
20031 + " .long 4b,100b\n"
20032 + " .long 5b,100b\n"
20033 + " .long 6b,100b\n"
20034 + " .long 7b,100b\n"
20035 + " .long 8b,100b\n"
20036 + " .long 9b,100b\n"
20037 + " .long 10b,100b\n"
20038 + " .long 11b,100b\n"
20039 + " .long 12b,100b\n"
20040 + " .long 13b,100b\n"
20041 + " .long 14b,100b\n"
20042 + " .long 15b,100b\n"
20043 + " .long 16b,100b\n"
20044 + " .long 17b,100b\n"
20045 + " .long 18b,100b\n"
20046 + " .long 19b,100b\n"
20047 + " .long 20b,100b\n"
20048 + " .long 21b,100b\n"
20049 + " .long 22b,100b\n"
20050 + " .long 23b,100b\n"
20051 + " .long 24b,100b\n"
20052 + " .long 25b,100b\n"
20053 + " .long 26b,100b\n"
20054 + " .long 27b,100b\n"
20055 + " .long 28b,100b\n"
20056 + " .long 29b,100b\n"
20057 + " .long 30b,100b\n"
20058 + " .long 31b,100b\n"
20059 + " .long 32b,100b\n"
20060 + " .long 33b,100b\n"
20061 + " .long 34b,100b\n"
20062 + " .long 35b,100b\n"
20063 + " .long 36b,100b\n"
20064 + " .long 37b,100b\n"
20065 + " .long 99b,101b\n"
20066 + ".previous"
20067 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20068 + : "1"(to), "2"(from), "0"(size)
20069 + : "eax", "edx", "memory");
20070 + return size;
20071 +}
20072 +
20073 +static unsigned long
20074 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20075 +{
20076 + int d0, d1;
20077 + __asm__ __volatile__(
20078 + " .align 2,0x90\n"
20079 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20080 + " cmpl $67, %0\n"
20081 + " jbe 3f\n"
20082 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20083 + " .align 2,0x90\n"
20084 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20085 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20086 + "5: movl %%eax, 0(%3)\n"
20087 + "6: movl %%edx, 4(%3)\n"
20088 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20089 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20090 + "9: movl %%eax, 8(%3)\n"
20091 + "10: movl %%edx, 12(%3)\n"
20092 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20093 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20094 + "13: movl %%eax, 16(%3)\n"
20095 + "14: movl %%edx, 20(%3)\n"
20096 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20097 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20098 + "17: movl %%eax, 24(%3)\n"
20099 + "18: movl %%edx, 28(%3)\n"
20100 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20101 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20102 + "21: movl %%eax, 32(%3)\n"
20103 + "22: movl %%edx, 36(%3)\n"
20104 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20105 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20106 + "25: movl %%eax, 40(%3)\n"
20107 + "26: movl %%edx, 44(%3)\n"
20108 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20109 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20110 + "29: movl %%eax, 48(%3)\n"
20111 + "30: movl %%edx, 52(%3)\n"
20112 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20113 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20114 + "33: movl %%eax, 56(%3)\n"
20115 + "34: movl %%edx, 60(%3)\n"
20116 + " addl $-64, %0\n"
20117 + " addl $64, %4\n"
20118 + " addl $64, %3\n"
20119 + " cmpl $63, %0\n"
20120 + " ja 1b\n"
20121 + "35: movl %0, %%eax\n"
20122 + " shrl $2, %0\n"
20123 + " andl $3, %%eax\n"
20124 + " cld\n"
20125 + "99: rep; "__copyuser_seg" movsl\n"
20126 + "36: movl %%eax, %0\n"
20127 + "37: rep; "__copyuser_seg" movsb\n"
20128 + "100:\n"
20129 ".section .fixup,\"ax\"\n"
20130 "101: lea 0(%%eax,%0,4),%0\n"
20131 " jmp 100b\n"
20132 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20133 int d0, d1;
20134 __asm__ __volatile__(
20135 " .align 2,0x90\n"
20136 - "0: movl 32(%4), %%eax\n"
20137 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20138 " cmpl $67, %0\n"
20139 " jbe 2f\n"
20140 - "1: movl 64(%4), %%eax\n"
20141 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20142 " .align 2,0x90\n"
20143 - "2: movl 0(%4), %%eax\n"
20144 - "21: movl 4(%4), %%edx\n"
20145 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20146 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20147 " movl %%eax, 0(%3)\n"
20148 " movl %%edx, 4(%3)\n"
20149 - "3: movl 8(%4), %%eax\n"
20150 - "31: movl 12(%4),%%edx\n"
20151 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20152 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20153 " movl %%eax, 8(%3)\n"
20154 " movl %%edx, 12(%3)\n"
20155 - "4: movl 16(%4), %%eax\n"
20156 - "41: movl 20(%4), %%edx\n"
20157 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20158 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20159 " movl %%eax, 16(%3)\n"
20160 " movl %%edx, 20(%3)\n"
20161 - "10: movl 24(%4), %%eax\n"
20162 - "51: movl 28(%4), %%edx\n"
20163 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20164 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20165 " movl %%eax, 24(%3)\n"
20166 " movl %%edx, 28(%3)\n"
20167 - "11: movl 32(%4), %%eax\n"
20168 - "61: movl 36(%4), %%edx\n"
20169 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20170 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20171 " movl %%eax, 32(%3)\n"
20172 " movl %%edx, 36(%3)\n"
20173 - "12: movl 40(%4), %%eax\n"
20174 - "71: movl 44(%4), %%edx\n"
20175 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20176 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20177 " movl %%eax, 40(%3)\n"
20178 " movl %%edx, 44(%3)\n"
20179 - "13: movl 48(%4), %%eax\n"
20180 - "81: movl 52(%4), %%edx\n"
20181 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20182 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20183 " movl %%eax, 48(%3)\n"
20184 " movl %%edx, 52(%3)\n"
20185 - "14: movl 56(%4), %%eax\n"
20186 - "91: movl 60(%4), %%edx\n"
20187 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20188 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20189 " movl %%eax, 56(%3)\n"
20190 " movl %%edx, 60(%3)\n"
20191 " addl $-64, %0\n"
20192 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20193 " shrl $2, %0\n"
20194 " andl $3, %%eax\n"
20195 " cld\n"
20196 - "6: rep; movsl\n"
20197 + "6: rep; "__copyuser_seg" movsl\n"
20198 " movl %%eax,%0\n"
20199 - "7: rep; movsb\n"
20200 + "7: rep; "__copyuser_seg" movsb\n"
20201 "8:\n"
20202 ".section .fixup,\"ax\"\n"
20203 "9: lea 0(%%eax,%0,4),%0\n"
20204 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20205
20206 __asm__ __volatile__(
20207 " .align 2,0x90\n"
20208 - "0: movl 32(%4), %%eax\n"
20209 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20210 " cmpl $67, %0\n"
20211 " jbe 2f\n"
20212 - "1: movl 64(%4), %%eax\n"
20213 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20214 " .align 2,0x90\n"
20215 - "2: movl 0(%4), %%eax\n"
20216 - "21: movl 4(%4), %%edx\n"
20217 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20218 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20219 " movnti %%eax, 0(%3)\n"
20220 " movnti %%edx, 4(%3)\n"
20221 - "3: movl 8(%4), %%eax\n"
20222 - "31: movl 12(%4),%%edx\n"
20223 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20224 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20225 " movnti %%eax, 8(%3)\n"
20226 " movnti %%edx, 12(%3)\n"
20227 - "4: movl 16(%4), %%eax\n"
20228 - "41: movl 20(%4), %%edx\n"
20229 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20230 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20231 " movnti %%eax, 16(%3)\n"
20232 " movnti %%edx, 20(%3)\n"
20233 - "10: movl 24(%4), %%eax\n"
20234 - "51: movl 28(%4), %%edx\n"
20235 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20236 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20237 " movnti %%eax, 24(%3)\n"
20238 " movnti %%edx, 28(%3)\n"
20239 - "11: movl 32(%4), %%eax\n"
20240 - "61: movl 36(%4), %%edx\n"
20241 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20242 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20243 " movnti %%eax, 32(%3)\n"
20244 " movnti %%edx, 36(%3)\n"
20245 - "12: movl 40(%4), %%eax\n"
20246 - "71: movl 44(%4), %%edx\n"
20247 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20248 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20249 " movnti %%eax, 40(%3)\n"
20250 " movnti %%edx, 44(%3)\n"
20251 - "13: movl 48(%4), %%eax\n"
20252 - "81: movl 52(%4), %%edx\n"
20253 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20254 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20255 " movnti %%eax, 48(%3)\n"
20256 " movnti %%edx, 52(%3)\n"
20257 - "14: movl 56(%4), %%eax\n"
20258 - "91: movl 60(%4), %%edx\n"
20259 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20260 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20261 " movnti %%eax, 56(%3)\n"
20262 " movnti %%edx, 60(%3)\n"
20263 " addl $-64, %0\n"
20264 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20265 " shrl $2, %0\n"
20266 " andl $3, %%eax\n"
20267 " cld\n"
20268 - "6: rep; movsl\n"
20269 + "6: rep; "__copyuser_seg" movsl\n"
20270 " movl %%eax,%0\n"
20271 - "7: rep; movsb\n"
20272 + "7: rep; "__copyuser_seg" movsb\n"
20273 "8:\n"
20274 ".section .fixup,\"ax\"\n"
20275 "9: lea 0(%%eax,%0,4),%0\n"
20276 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20277
20278 __asm__ __volatile__(
20279 " .align 2,0x90\n"
20280 - "0: movl 32(%4), %%eax\n"
20281 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20282 " cmpl $67, %0\n"
20283 " jbe 2f\n"
20284 - "1: movl 64(%4), %%eax\n"
20285 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20286 " .align 2,0x90\n"
20287 - "2: movl 0(%4), %%eax\n"
20288 - "21: movl 4(%4), %%edx\n"
20289 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20290 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20291 " movnti %%eax, 0(%3)\n"
20292 " movnti %%edx, 4(%3)\n"
20293 - "3: movl 8(%4), %%eax\n"
20294 - "31: movl 12(%4),%%edx\n"
20295 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20296 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20297 " movnti %%eax, 8(%3)\n"
20298 " movnti %%edx, 12(%3)\n"
20299 - "4: movl 16(%4), %%eax\n"
20300 - "41: movl 20(%4), %%edx\n"
20301 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20302 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20303 " movnti %%eax, 16(%3)\n"
20304 " movnti %%edx, 20(%3)\n"
20305 - "10: movl 24(%4), %%eax\n"
20306 - "51: movl 28(%4), %%edx\n"
20307 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20308 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20309 " movnti %%eax, 24(%3)\n"
20310 " movnti %%edx, 28(%3)\n"
20311 - "11: movl 32(%4), %%eax\n"
20312 - "61: movl 36(%4), %%edx\n"
20313 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20314 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20315 " movnti %%eax, 32(%3)\n"
20316 " movnti %%edx, 36(%3)\n"
20317 - "12: movl 40(%4), %%eax\n"
20318 - "71: movl 44(%4), %%edx\n"
20319 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20320 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20321 " movnti %%eax, 40(%3)\n"
20322 " movnti %%edx, 44(%3)\n"
20323 - "13: movl 48(%4), %%eax\n"
20324 - "81: movl 52(%4), %%edx\n"
20325 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20326 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20327 " movnti %%eax, 48(%3)\n"
20328 " movnti %%edx, 52(%3)\n"
20329 - "14: movl 56(%4), %%eax\n"
20330 - "91: movl 60(%4), %%edx\n"
20331 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20332 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20333 " movnti %%eax, 56(%3)\n"
20334 " movnti %%edx, 60(%3)\n"
20335 " addl $-64, %0\n"
20336 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20337 " shrl $2, %0\n"
20338 " andl $3, %%eax\n"
20339 " cld\n"
20340 - "6: rep; movsl\n"
20341 + "6: rep; "__copyuser_seg" movsl\n"
20342 " movl %%eax,%0\n"
20343 - "7: rep; movsb\n"
20344 + "7: rep; "__copyuser_seg" movsb\n"
20345 "8:\n"
20346 ".section .fixup,\"ax\"\n"
20347 "9: lea 0(%%eax,%0,4),%0\n"
20348 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20349 */
20350 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20351 unsigned long size);
20352 -unsigned long __copy_user_intel(void __user *to, const void *from,
20353 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20354 + unsigned long size);
20355 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20356 unsigned long size);
20357 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20358 const void __user *from, unsigned long size);
20359 #endif /* CONFIG_X86_INTEL_USERCOPY */
20360
20361 /* Generic arbitrary sized copy. */
20362 -#define __copy_user(to, from, size) \
20363 +#define __copy_user(to, from, size, prefix, set, restore) \
20364 do { \
20365 int __d0, __d1, __d2; \
20366 __asm__ __volatile__( \
20367 + set \
20368 " cmp $7,%0\n" \
20369 " jbe 1f\n" \
20370 " movl %1,%0\n" \
20371 " negl %0\n" \
20372 " andl $7,%0\n" \
20373 " subl %0,%3\n" \
20374 - "4: rep; movsb\n" \
20375 + "4: rep; "prefix"movsb\n" \
20376 " movl %3,%0\n" \
20377 " shrl $2,%0\n" \
20378 " andl $3,%3\n" \
20379 " .align 2,0x90\n" \
20380 - "0: rep; movsl\n" \
20381 + "0: rep; "prefix"movsl\n" \
20382 " movl %3,%0\n" \
20383 - "1: rep; movsb\n" \
20384 + "1: rep; "prefix"movsb\n" \
20385 "2:\n" \
20386 + restore \
20387 ".section .fixup,\"ax\"\n" \
20388 "5: addl %3,%0\n" \
20389 " jmp 2b\n" \
20390 @@ -682,14 +799,14 @@ do { \
20391 " negl %0\n" \
20392 " andl $7,%0\n" \
20393 " subl %0,%3\n" \
20394 - "4: rep; movsb\n" \
20395 + "4: rep; "__copyuser_seg"movsb\n" \
20396 " movl %3,%0\n" \
20397 " shrl $2,%0\n" \
20398 " andl $3,%3\n" \
20399 " .align 2,0x90\n" \
20400 - "0: rep; movsl\n" \
20401 + "0: rep; "__copyuser_seg"movsl\n" \
20402 " movl %3,%0\n" \
20403 - "1: rep; movsb\n" \
20404 + "1: rep; "__copyuser_seg"movsb\n" \
20405 "2:\n" \
20406 ".section .fixup,\"ax\"\n" \
20407 "5: addl %3,%0\n" \
20408 @@ -775,9 +892,9 @@ survive:
20409 }
20410 #endif
20411 if (movsl_is_ok(to, from, n))
20412 - __copy_user(to, from, n);
20413 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20414 else
20415 - n = __copy_user_intel(to, from, n);
20416 + n = __generic_copy_to_user_intel(to, from, n);
20417 return n;
20418 }
20419 EXPORT_SYMBOL(__copy_to_user_ll);
20420 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20421 unsigned long n)
20422 {
20423 if (movsl_is_ok(to, from, n))
20424 - __copy_user(to, from, n);
20425 + __copy_user(to, from, n, __copyuser_seg, "", "");
20426 else
20427 - n = __copy_user_intel((void __user *)to,
20428 - (const void *)from, n);
20429 + n = __generic_copy_from_user_intel(to, from, n);
20430 return n;
20431 }
20432 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20433 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20434 if (n > 64 && cpu_has_xmm2)
20435 n = __copy_user_intel_nocache(to, from, n);
20436 else
20437 - __copy_user(to, from, n);
20438 + __copy_user(to, from, n, __copyuser_seg, "", "");
20439 #else
20440 - __copy_user(to, from, n);
20441 + __copy_user(to, from, n, __copyuser_seg, "", "");
20442 #endif
20443 return n;
20444 }
20445 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20446
20447 -/**
20448 - * copy_to_user: - Copy a block of data into user space.
20449 - * @to: Destination address, in user space.
20450 - * @from: Source address, in kernel space.
20451 - * @n: Number of bytes to copy.
20452 - *
20453 - * Context: User context only. This function may sleep.
20454 - *
20455 - * Copy data from kernel space to user space.
20456 - *
20457 - * Returns number of bytes that could not be copied.
20458 - * On success, this will be zero.
20459 - */
20460 -unsigned long
20461 -copy_to_user(void __user *to, const void *from, unsigned long n)
20462 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20463 +void __set_fs(mm_segment_t x)
20464 {
20465 - if (access_ok(VERIFY_WRITE, to, n))
20466 - n = __copy_to_user(to, from, n);
20467 - return n;
20468 + switch (x.seg) {
20469 + case 0:
20470 + loadsegment(gs, 0);
20471 + break;
20472 + case TASK_SIZE_MAX:
20473 + loadsegment(gs, __USER_DS);
20474 + break;
20475 + case -1UL:
20476 + loadsegment(gs, __KERNEL_DS);
20477 + break;
20478 + default:
20479 + BUG();
20480 + }
20481 + return;
20482 }
20483 -EXPORT_SYMBOL(copy_to_user);
20484 +EXPORT_SYMBOL(__set_fs);
20485
20486 -/**
20487 - * copy_from_user: - Copy a block of data from user space.
20488 - * @to: Destination address, in kernel space.
20489 - * @from: Source address, in user space.
20490 - * @n: Number of bytes to copy.
20491 - *
20492 - * Context: User context only. This function may sleep.
20493 - *
20494 - * Copy data from user space to kernel space.
20495 - *
20496 - * Returns number of bytes that could not be copied.
20497 - * On success, this will be zero.
20498 - *
20499 - * If some data could not be copied, this function will pad the copied
20500 - * data to the requested size using zero bytes.
20501 - */
20502 -unsigned long
20503 -copy_from_user(void *to, const void __user *from, unsigned long n)
20504 +void set_fs(mm_segment_t x)
20505 {
20506 - if (access_ok(VERIFY_READ, from, n))
20507 - n = __copy_from_user(to, from, n);
20508 - else
20509 - memset(to, 0, n);
20510 - return n;
20511 + current_thread_info()->addr_limit = x;
20512 + __set_fs(x);
20513 }
20514 -EXPORT_SYMBOL(copy_from_user);
20515 +EXPORT_SYMBOL(set_fs);
20516 +#endif
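With copy_to_user()/copy_from_user() dropped from this file, the UDEREF build gains a set_fs()/__set_fs() pair whose job is to keep the %gs userland segment in step with addr_limit. Callers keep using the familiar addr_limit dance; the only behavioural change is the segment reload hidden inside set_fs(). A usage sketch follows; the surrounding helper is illustrative and not part of this patch.

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t kernel_read_sketch(struct file *file, void *buf, size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);			/* addr_limit = -1UL; under UDEREF %gs becomes __KERNEL_DS */
	ret = vfs_read(file, (char __user *)buf, len, pos);
	set_fs(old_fs);				/* restore the previous limit (and segment) */

	return ret;
}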
20517 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20518 --- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20519 +++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20520 @@ -42,6 +42,12 @@ long
20521 __strncpy_from_user(char *dst, const char __user *src, long count)
20522 {
20523 long res;
20524 +
20525 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20526 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20527 + src += PAX_USER_SHADOW_BASE;
20528 +#endif
20529 +
20530 __do_strncpy_from_user(dst, src, count, res);
20531 return res;
20532 }
20533 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20534 {
20535 long __d0;
20536 might_fault();
20537 +
20538 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20539 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20540 + addr += PAX_USER_SHADOW_BASE;
20541 +#endif
20542 +
20543 /* no memory constraint because it doesn't change any memory gcc knows
20544 about */
20545 asm volatile(
20546 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20547
20548 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20549 {
20550 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20551 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20552 +
20553 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20554 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20555 + to += PAX_USER_SHADOW_BASE;
20556 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20557 + from += PAX_USER_SHADOW_BASE;
20558 +#endif
20559 +
20560 return copy_user_generic((__force void *)to, (__force void *)from, len);
20561 - }
20562 - return len;
20563 + }
20564 + return len;
20565 }
20566 EXPORT_SYMBOL(copy_in_user);
20567
20568 diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20569 --- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20570 +++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20571 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20572 else
20573 BITS := 64
20574 UTS_MACHINE := x86_64
20575 + biarch := $(call cc-option,-m64)
20576 CHECKFLAGS += -D__x86_64__ -m64
20577
20578 KBUILD_AFLAGS += -m64
20579 @@ -189,3 +190,12 @@ define archhelp
20580 echo ' FDARGS="..." arguments for the booted kernel'
20581 echo ' FDINITRD=file initrd for the booted kernel'
20582 endef
20583 +
20584 +define OLD_LD
20585 +
20586 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20587 +*** Please upgrade your binutils to 2.18 or newer
20588 +endef
20589 +
20590 +archprepare:
20591 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20592 diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20593 --- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20594 +++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20595 @@ -1,14 +1,71 @@
20596 #include <linux/module.h>
20597 #include <linux/spinlock.h>
20598 +#include <linux/sort.h>
20599 #include <asm/uaccess.h>
20600 +#include <asm/pgtable.h>
20601
20602 +/*
20603 + * The exception table needs to be sorted so that the binary
20604 + * search that we use to find entries in it works properly.
20605 + * This is used both for the kernel exception table and for
20606 + * the exception tables of modules that get loaded.
20607 + */
20608 +static int cmp_ex(const void *a, const void *b)
20609 +{
20610 + const struct exception_table_entry *x = a, *y = b;
20611 +
20612 + /* avoid overflow */
20613 + if (x->insn > y->insn)
20614 + return 1;
20615 + if (x->insn < y->insn)
20616 + return -1;
20617 + return 0;
20618 +}
20619 +
20620 +static void swap_ex(void *a, void *b, int size)
20621 +{
20622 + struct exception_table_entry t, *x = a, *y = b;
20623 +
20624 + t = *x;
20625 +
20626 + pax_open_kernel();
20627 + *x = *y;
20628 + *y = t;
20629 + pax_close_kernel();
20630 +}
20631 +
20632 +void sort_extable(struct exception_table_entry *start,
20633 + struct exception_table_entry *finish)
20634 +{
20635 + sort(start, finish - start, sizeof(struct exception_table_entry),
20636 + cmp_ex, swap_ex);
20637 +}
20638 +
20639 +#ifdef CONFIG_MODULES
20640 +/*
20641 + * If the exception table is sorted, any referring to the module init
20642 + * will be at the beginning or the end.
20643 + */
20644 +void trim_init_extable(struct module *m)
20645 +{
20646 + /*trim the beginning*/
20647 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20648 + m->extable++;
20649 + m->num_exentries--;
20650 + }
20651 + /*trim the end*/
20652 + while (m->num_exentries &&
20653 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20654 + m->num_exentries--;
20655 +}
20656 +#endif /* CONFIG_MODULES */
20657
20658 int fixup_exception(struct pt_regs *regs)
20659 {
20660 const struct exception_table_entry *fixup;
20661
20662 #ifdef CONFIG_PNPBIOS
20663 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20664 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20665 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20666 extern u32 pnp_bios_is_utter_crap;
20667 pnp_bios_is_utter_crap = 1;
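For reference, the sort_extable()/cmp_ex() code added above exists so the exception table can be binary-searched for the faulting instruction address. A stand-alone sketch of the same sort-then-search pattern over a toy table; field names mirror struct exception_table_entry, the addresses are made up.

#include <stdio.h>
#include <stdlib.h>

struct exception_table_entry {
        unsigned long insn;     /* faulting instruction address */
        unsigned long fixup;    /* address to resume at */
};

/* Same shape as cmp_ex() above: compare instead of subtracting, so large
 * unsigned addresses cannot overflow into the wrong sign. */
static int cmp_ex(const void *a, const void *b)
{
        const struct exception_table_entry *x = a, *y = b;

        if (x->insn > y->insn)
                return 1;
        if (x->insn < y->insn)
                return -1;
        return 0;
}

static int cmp_key(const void *key, const void *elem)
{
        unsigned long ip = *(const unsigned long *)key;
        const struct exception_table_entry *e = elem;

        if (ip > e->insn)
                return 1;
        if (ip < e->insn)
                return -1;
        return 0;
}

int main(void)
{
        struct exception_table_entry table[] = {
                { 0x4020, 0x5020 }, { 0x4000, 0x5000 }, { 0x4010, 0x5010 },
        };
        unsigned long ip = 0x4010;
        struct exception_table_entry *hit;

        qsort(table, 3, sizeof(table[0]), cmp_ex);
        hit = bsearch(&ip, table, 3, sizeof(table[0]), cmp_key);
        printf("fixup for %#lx: %#lx\n", ip, hit ? hit->fixup : 0UL);
        return 0;
}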
20668 diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20669 --- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20670 +++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20671 @@ -11,10 +11,19 @@
20672 #include <linux/kprobes.h> /* __kprobes, ... */
20673 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20674 #include <linux/perf_event.h> /* perf_sw_event */
20675 +#include <linux/unistd.h>
20676 +#include <linux/compiler.h>
20677
20678 #include <asm/traps.h> /* dotraplinkage, ... */
20679 #include <asm/pgalloc.h> /* pgd_*(), ... */
20680 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20681 +#include <asm/vsyscall.h>
20682 +#include <asm/tlbflush.h>
20683 +
20684 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20685 +#include <asm/stacktrace.h>
20686 +#include "../kernel/dumpstack.h"
20687 +#endif
20688
20689 /*
20690 * Page fault error code bits:
20691 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20692 int ret = 0;
20693
20694 /* kprobe_running() needs smp_processor_id() */
20695 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20696 + if (kprobes_built_in() && !user_mode(regs)) {
20697 preempt_disable();
20698 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20699 ret = 1;
20700 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20701 return !instr_lo || (instr_lo>>1) == 1;
20702 case 0x00:
20703 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20704 - if (probe_kernel_address(instr, opcode))
20705 + if (user_mode(regs)) {
20706 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20707 + return 0;
20708 + } else if (probe_kernel_address(instr, opcode))
20709 return 0;
20710
20711 *prefetch = (instr_lo == 0xF) &&
20712 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20713 while (instr < max_instr) {
20714 unsigned char opcode;
20715
20716 - if (probe_kernel_address(instr, opcode))
20717 + if (user_mode(regs)) {
20718 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20719 + break;
20720 + } else if (probe_kernel_address(instr, opcode))
20721 break;
20722
20723 instr++;
20724 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20725 force_sig_info(si_signo, &info, tsk);
20726 }
20727
20728 +#ifdef CONFIG_PAX_EMUTRAMP
20729 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20730 +#endif
20731 +
20732 +#ifdef CONFIG_PAX_PAGEEXEC
20733 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20734 +{
20735 + pgd_t *pgd;
20736 + pud_t *pud;
20737 + pmd_t *pmd;
20738 +
20739 + pgd = pgd_offset(mm, address);
20740 + if (!pgd_present(*pgd))
20741 + return NULL;
20742 + pud = pud_offset(pgd, address);
20743 + if (!pud_present(*pud))
20744 + return NULL;
20745 + pmd = pmd_offset(pud, address);
20746 + if (!pmd_present(*pmd))
20747 + return NULL;
20748 + return pmd;
20749 +}
20750 +#endif
20751 +
20752 DEFINE_SPINLOCK(pgd_lock);
20753 LIST_HEAD(pgd_list);
20754
20755 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20756 address += PMD_SIZE) {
20757
20758 unsigned long flags;
20759 +
20760 +#ifdef CONFIG_PAX_PER_CPU_PGD
20761 + unsigned long cpu;
20762 +#else
20763 struct page *page;
20764 +#endif
20765
20766 spin_lock_irqsave(&pgd_lock, flags);
20767 +
20768 +#ifdef CONFIG_PAX_PER_CPU_PGD
20769 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20770 + pgd_t *pgd = get_cpu_pgd(cpu);
20771 +#else
20772 list_for_each_entry(page, &pgd_list, lru) {
20773 - if (!vmalloc_sync_one(page_address(page), address))
20774 + pgd_t *pgd = page_address(page);
20775 +#endif
20776 +
20777 + if (!vmalloc_sync_one(pgd, address))
20778 break;
20779 }
20780 spin_unlock_irqrestore(&pgd_lock, flags);
20781 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20782 * an interrupt in the middle of a task switch..
20783 */
20784 pgd_paddr = read_cr3();
20785 +
20786 +#ifdef CONFIG_PAX_PER_CPU_PGD
20787 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20788 +#endif
20789 +
20790 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20791 if (!pmd_k)
20792 return -1;
20793 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20794
20795 const pgd_t *pgd_ref = pgd_offset_k(address);
20796 unsigned long flags;
20797 +
20798 +#ifdef CONFIG_PAX_PER_CPU_PGD
20799 + unsigned long cpu;
20800 +#else
20801 struct page *page;
20802 +#endif
20803
20804 if (pgd_none(*pgd_ref))
20805 continue;
20806
20807 spin_lock_irqsave(&pgd_lock, flags);
20808 +
20809 +#ifdef CONFIG_PAX_PER_CPU_PGD
20810 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20811 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20812 +#else
20813 list_for_each_entry(page, &pgd_list, lru) {
20814 pgd_t *pgd;
20815 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20816 +#endif
20817 +
20818 if (pgd_none(*pgd))
20819 set_pgd(pgd, *pgd_ref);
20820 else
20821 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20822 * happen within a race in page table update. In the later
20823 * case just flush:
20824 */
20825 +
20826 +#ifdef CONFIG_PAX_PER_CPU_PGD
20827 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20828 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20829 +#else
20830 pgd = pgd_offset(current->active_mm, address);
20831 +#endif
20832 +
20833 pgd_ref = pgd_offset_k(address);
20834 if (pgd_none(*pgd_ref))
20835 return -1;
20836 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20837 static int is_errata100(struct pt_regs *regs, unsigned long address)
20838 {
20839 #ifdef CONFIG_X86_64
20840 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20841 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20842 return 1;
20843 #endif
20844 return 0;
20845 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20846 }
20847
20848 static const char nx_warning[] = KERN_CRIT
20849 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20850 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20851
20852 static void
20853 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20854 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20855 if (!oops_may_print())
20856 return;
20857
20858 - if (error_code & PF_INSTR) {
20859 + if (nx_enabled && (error_code & PF_INSTR)) {
20860 unsigned int level;
20861
20862 pte_t *pte = lookup_address(address, &level);
20863
20864 if (pte && pte_present(*pte) && !pte_exec(*pte))
20865 - printk(nx_warning, current_uid());
20866 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20867 }
20868
20869 +#ifdef CONFIG_PAX_KERNEXEC
20870 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20871 + if (current->signal->curr_ip)
20872 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20873 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20874 + else
20875 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20876 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20877 + }
20878 +#endif
20879 +
20880 printk(KERN_ALERT "BUG: unable to handle kernel ");
20881 if (address < PAGE_SIZE)
20882 printk(KERN_CONT "NULL pointer dereference");
20883 @@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
20884 unsigned long address, int si_code)
20885 {
20886 struct task_struct *tsk = current;
20887 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20888 + struct mm_struct *mm = tsk->mm;
20889 +#endif
20890 +
20891 +#ifdef CONFIG_X86_64
20892 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20893 + if (regs->ip == (unsigned long)vgettimeofday) {
20894 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20895 + return;
20896 + } else if (regs->ip == (unsigned long)vtime) {
20897 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20898 + return;
20899 + } else if (regs->ip == (unsigned long)vgetcpu) {
20900 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20901 + return;
20902 + }
20903 + }
20904 +#endif
20905 +
20906 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20907 + if (mm && (error_code & PF_USER)) {
20908 + unsigned long ip = regs->ip;
20909 +
20910 + if (v8086_mode(regs))
20911 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20912 +
20913 + /*
20914 + * It's possible to have interrupts off here:
20915 + */
20916 + local_irq_enable();
20917 +
20918 +#ifdef CONFIG_PAX_PAGEEXEC
20919 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20920 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20921 +
20922 +#ifdef CONFIG_PAX_EMUTRAMP
20923 + switch (pax_handle_fetch_fault(regs)) {
20924 + case 2:
20925 + return;
20926 + }
20927 +#endif
20928 +
20929 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20930 + do_group_exit(SIGKILL);
20931 + }
20932 +#endif
20933 +
20934 +#ifdef CONFIG_PAX_SEGMEXEC
20935 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20936 +
20937 +#ifdef CONFIG_PAX_EMUTRAMP
20938 + switch (pax_handle_fetch_fault(regs)) {
20939 + case 2:
20940 + return;
20941 + }
20942 +#endif
20943 +
20944 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20945 + do_group_exit(SIGKILL);
20946 + }
20947 +#endif
20948 +
20949 + }
20950 +#endif
20951
20952 /* User mode accesses just cause a SIGSEGV */
20953 if (error_code & PF_USER) {
20954 @@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
20955 return 1;
20956 }
20957
20958 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20959 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20960 +{
20961 + pte_t *pte;
20962 + pmd_t *pmd;
20963 + spinlock_t *ptl;
20964 + unsigned char pte_mask;
20965 +
20966 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20967 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20968 + return 0;
20969 +
20970 + /* PaX: it's our fault, let's handle it if we can */
20971 +
20972 + /* PaX: take a look at read faults before acquiring any locks */
20973 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20974 + /* instruction fetch attempt from a protected page in user mode */
20975 + up_read(&mm->mmap_sem);
20976 +
20977 +#ifdef CONFIG_PAX_EMUTRAMP
20978 + switch (pax_handle_fetch_fault(regs)) {
20979 + case 2:
20980 + return 1;
20981 + }
20982 +#endif
20983 +
20984 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20985 + do_group_exit(SIGKILL);
20986 + }
20987 +
20988 + pmd = pax_get_pmd(mm, address);
20989 + if (unlikely(!pmd))
20990 + return 0;
20991 +
20992 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20993 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20994 + pte_unmap_unlock(pte, ptl);
20995 + return 0;
20996 + }
20997 +
20998 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20999 + /* write attempt to a protected page in user mode */
21000 + pte_unmap_unlock(pte, ptl);
21001 + return 0;
21002 + }
21003 +
21004 +#ifdef CONFIG_SMP
21005 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21006 +#else
21007 + if (likely(address > get_limit(regs->cs)))
21008 +#endif
21009 + {
21010 + set_pte(pte, pte_mkread(*pte));
21011 + __flush_tlb_one(address);
21012 + pte_unmap_unlock(pte, ptl);
21013 + up_read(&mm->mmap_sem);
21014 + return 1;
21015 + }
21016 +
21017 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21018 +
21019 + /*
21020 + * PaX: fill DTLB with user rights and retry
21021 + */
21022 + __asm__ __volatile__ (
21023 + "orb %2,(%1)\n"
21024 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21025 +/*
21026 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21027 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21028 + * page fault when examined during a TLB load attempt. this is true not only
21029 + * for PTEs holding a non-present entry but also present entries that will
21030 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21031 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21032 + * for our target pages since their PTEs are simply not in the TLBs at all.
21033 +
21034 + * the best thing in omitting it is that we gain around 15-20% speed in the
21035 + * fast path of the page fault handler and can get rid of tracing since we
21036 + * can no longer flush unintended entries.
21037 + */
21038 + "invlpg (%0)\n"
21039 +#endif
21040 + __copyuser_seg"testb $0,(%0)\n"
21041 + "xorb %3,(%1)\n"
21042 + :
21043 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21044 + : "memory", "cc");
21045 + pte_unmap_unlock(pte, ptl);
21046 + up_read(&mm->mmap_sem);
21047 + return 1;
21048 +}
21049 +#endif
21050 +
21051 /*
21052 * Handle a spurious fault caused by a stale TLB entry.
21053 *
21054 @@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21055 static inline int
21056 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21057 {
21058 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21059 + return 1;
21060 +
21061 if (write) {
21062 /* write, present and write, not present: */
21063 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21064 @@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21065 {
21066 struct vm_area_struct *vma;
21067 struct task_struct *tsk;
21068 - unsigned long address;
21069 struct mm_struct *mm;
21070 int write;
21071 int fault;
21072
21073 + /* Get the faulting address: */
21074 + unsigned long address = read_cr2();
21075 +
21076 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21077 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21078 + if (!search_exception_tables(regs->ip)) {
21079 + bad_area_nosemaphore(regs, error_code, address);
21080 + return;
21081 + }
21082 + if (address < PAX_USER_SHADOW_BASE) {
21083 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21084 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21085 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21086 + } else
21087 + address -= PAX_USER_SHADOW_BASE;
21088 + }
21089 +#endif
21090 +
21091 tsk = current;
21092 mm = tsk->mm;
21093
21094 - /* Get the faulting address: */
21095 - address = read_cr2();
21096 -
21097 /*
21098 * Detect and handle instructions that would cause a page fault for
21099 * both a tracked kernel page and a userspace page.
21100 @@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21101 * User-mode registers count as a user access even for any
21102 * potential system fault or CPU buglet:
21103 */
21104 - if (user_mode_vm(regs)) {
21105 + if (user_mode(regs)) {
21106 local_irq_enable();
21107 error_code |= PF_USER;
21108 } else {
21109 @@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21110 might_sleep();
21111 }
21112
21113 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21114 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21115 + return;
21116 +#endif
21117 +
21118 vma = find_vma(mm, address);
21119 if (unlikely(!vma)) {
21120 bad_area(regs, error_code, address);
21121 @@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21122 bad_area(regs, error_code, address);
21123 return;
21124 }
21125 - if (error_code & PF_USER) {
21126 - /*
21127 - * Accessing the stack below %sp is always a bug.
21128 - * The large cushion allows instructions like enter
21129 - * and pusha to work. ("enter $65535, $31" pushes
21130 - * 32 pointers and then decrements %sp by 65535.)
21131 - */
21132 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21133 - bad_area(regs, error_code, address);
21134 - return;
21135 - }
21136 + /*
21137 + * Accessing the stack below %sp is always a bug.
21138 + * The large cushion allows instructions like enter
21139 + * and pusha to work. ("enter $65535, $31" pushes
21140 + * 32 pointers and then decrements %sp by 65535.)
21141 + */
21142 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21143 + bad_area(regs, error_code, address);
21144 + return;
21145 }
21146 +
21147 +#ifdef CONFIG_PAX_SEGMEXEC
21148 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21149 + bad_area(regs, error_code, address);
21150 + return;
21151 + }
21152 +#endif
21153 +
21154 if (unlikely(expand_stack(vma, address))) {
21155 bad_area(regs, error_code, address);
21156 return;
21157 @@ -1146,3 +1418,199 @@ good_area:
21158
21159 up_read(&mm->mmap_sem);
21160 }
21161 +
21162 +#ifdef CONFIG_PAX_EMUTRAMP
21163 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21164 +{
21165 + int err;
21166 +
21167 + do { /* PaX: gcc trampoline emulation #1 */
21168 + unsigned char mov1, mov2;
21169 + unsigned short jmp;
21170 + unsigned int addr1, addr2;
21171 +
21172 +#ifdef CONFIG_X86_64
21173 + if ((regs->ip + 11) >> 32)
21174 + break;
21175 +#endif
21176 +
21177 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21178 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21179 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21180 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21181 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21182 +
21183 + if (err)
21184 + break;
21185 +
21186 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21187 + regs->cx = addr1;
21188 + regs->ax = addr2;
21189 + regs->ip = addr2;
21190 + return 2;
21191 + }
21192 + } while (0);
21193 +
21194 + do { /* PaX: gcc trampoline emulation #2 */
21195 + unsigned char mov, jmp;
21196 + unsigned int addr1, addr2;
21197 +
21198 +#ifdef CONFIG_X86_64
21199 + if ((regs->ip + 9) >> 32)
21200 + break;
21201 +#endif
21202 +
21203 + err = get_user(mov, (unsigned char __user *)regs->ip);
21204 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21205 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21206 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21207 +
21208 + if (err)
21209 + break;
21210 +
21211 + if (mov == 0xB9 && jmp == 0xE9) {
21212 + regs->cx = addr1;
21213 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21214 + return 2;
21215 + }
21216 + } while (0);
21217 +
21218 + return 1; /* PaX in action */
21219 +}
21220 +
21221 +#ifdef CONFIG_X86_64
21222 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21223 +{
21224 + int err;
21225 +
21226 + do { /* PaX: gcc trampoline emulation #1 */
21227 + unsigned short mov1, mov2, jmp1;
21228 + unsigned char jmp2;
21229 + unsigned int addr1;
21230 + unsigned long addr2;
21231 +
21232 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21233 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21234 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21235 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21236 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21237 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21238 +
21239 + if (err)
21240 + break;
21241 +
21242 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21243 + regs->r11 = addr1;
21244 + regs->r10 = addr2;
21245 + regs->ip = addr1;
21246 + return 2;
21247 + }
21248 + } while (0);
21249 +
21250 + do { /* PaX: gcc trampoline emulation #2 */
21251 + unsigned short mov1, mov2, jmp1;
21252 + unsigned char jmp2;
21253 + unsigned long addr1, addr2;
21254 +
21255 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21256 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21257 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21258 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21259 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21260 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21261 +
21262 + if (err)
21263 + break;
21264 +
21265 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21266 + regs->r11 = addr1;
21267 + regs->r10 = addr2;
21268 + regs->ip = addr1;
21269 + return 2;
21270 + }
21271 + } while (0);
21272 +
21273 + return 1; /* PaX in action */
21274 +}
21275 +#endif
21276 +
21277 +/*
21278 + * PaX: decide what to do with offenders (regs->ip = fault address)
21279 + *
21280 + * returns 1 when task should be killed
21281 + * 2 when gcc trampoline was detected
21282 + */
21283 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21284 +{
21285 + if (v8086_mode(regs))
21286 + return 1;
21287 +
21288 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21289 + return 1;
21290 +
21291 +#ifdef CONFIG_X86_32
21292 + return pax_handle_fetch_fault_32(regs);
21293 +#else
21294 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21295 + return pax_handle_fetch_fault_32(regs);
21296 + else
21297 + return pax_handle_fetch_fault_64(regs);
21298 +#endif
21299 +}
21300 +#endif
21301 +
21302 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21303 +void pax_report_insns(void *pc, void *sp)
21304 +{
21305 + long i;
21306 +
21307 + printk(KERN_ERR "PAX: bytes at PC: ");
21308 + for (i = 0; i < 20; i++) {
21309 + unsigned char c;
21310 + if (get_user(c, (__force unsigned char __user *)pc+i))
21311 + printk(KERN_CONT "?? ");
21312 + else
21313 + printk(KERN_CONT "%02x ", c);
21314 + }
21315 + printk("\n");
21316 +
21317 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21318 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21319 + unsigned long c;
21320 + if (get_user(c, (__force unsigned long __user *)sp+i))
21321 +#ifdef CONFIG_X86_32
21322 + printk(KERN_CONT "???????? ");
21323 +#else
21324 + printk(KERN_CONT "???????????????? ");
21325 +#endif
21326 + else
21327 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21328 + }
21329 + printk("\n");
21330 +}
21331 +#endif
21332 +
21333 +/**
21334 + * probe_kernel_write(): safely attempt to write to a location
21335 + * @dst: address to write to
21336 + * @src: pointer to the data that shall be written
21337 + * @size: size of the data chunk
21338 + *
21339 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21340 + * happens, handle that and return -EFAULT.
21341 + */
21342 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21343 +{
21344 + long ret;
21345 + mm_segment_t old_fs = get_fs();
21346 +
21347 + set_fs(KERNEL_DS);
21348 + pagefault_disable();
21349 + pax_open_kernel();
21350 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21351 + pax_close_kernel();
21352 + pagefault_enable();
21353 + set_fs(old_fs);
21354 +
21355 + return ret ? -EFAULT : 0;
21356 +}
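For reference, pax_handle_fetch_fault_32() above recognizes the first gcc trampoline variant by its byte pattern: mov $addr1,%ecx (0xB9), mov $addr2,%eax (0xB8), jmp *%eax (0xFF 0xE0). A user-space sketch that decodes the same 12-byte pattern from a buffer on a little-endian host, with the register updates reduced to printed values.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Decode the i386 gcc-trampoline pattern emulated above:
 *   b9 <addr1>   mov $addr1,%ecx   (static chain)
 *   b8 <addr2>   mov $addr2,%eax   (target function)
 *   ff e0        jmp *%eax
 * Returns 1 and fills addr1/addr2 on a match, 0 otherwise. */
static int decode_trampoline(const uint8_t *ip, uint32_t *addr1, uint32_t *addr2)
{
        uint16_t jmp;

        if (ip[0] != 0xB9 || ip[5] != 0xB8)
                return 0;
        memcpy(&jmp, ip + 10, sizeof(jmp));
        if (jmp != 0xE0FF)              /* bytes ff e0, read little-endian */
                return 0;
        memcpy(addr1, ip + 1, sizeof(*addr1));
        memcpy(addr2, ip + 6, sizeof(*addr2));
        return 1;
}

int main(void)
{
        /* mov $0x11223344,%ecx; mov $0x08048000,%eax; jmp *%eax */
        const uint8_t tramp[12] = {
                0xB9, 0x44, 0x33, 0x22, 0x11,
                0xB8, 0x00, 0x80, 0x04, 0x08,
                0xFF, 0xE0,
        };
        uint32_t a1, a2;

        if (decode_trampoline(tramp, &a1, &a2))
                printf("ecx <- %#x, eip/eax <- %#x\n", a1, a2);
        return 0;
}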
21357 diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21358 --- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21359 +++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21360 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21361 addr = start;
21362 len = (unsigned long) nr_pages << PAGE_SHIFT;
21363 end = start + len;
21364 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21365 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21366 (void __user *)start, len)))
21367 return 0;
21368
21369 diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21370 --- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21371 +++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21372 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21373 idx = type + KM_TYPE_NR*smp_processor_id();
21374 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21375 BUG_ON(!pte_none(*(kmap_pte-idx)));
21376 +
21377 + pax_open_kernel();
21378 set_pte(kmap_pte-idx, mk_pte(page, prot));
21379 + pax_close_kernel();
21380
21381 return (void *)vaddr;
21382 }
21383 diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21384 --- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21385 +++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21386 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21387 struct hstate *h = hstate_file(file);
21388 struct mm_struct *mm = current->mm;
21389 struct vm_area_struct *vma;
21390 - unsigned long start_addr;
21391 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21392 +
21393 +#ifdef CONFIG_PAX_SEGMEXEC
21394 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21395 + pax_task_size = SEGMEXEC_TASK_SIZE;
21396 +#endif
21397 +
21398 + pax_task_size -= PAGE_SIZE;
21399
21400 if (len > mm->cached_hole_size) {
21401 - start_addr = mm->free_area_cache;
21402 + start_addr = mm->free_area_cache;
21403 } else {
21404 - start_addr = TASK_UNMAPPED_BASE;
21405 - mm->cached_hole_size = 0;
21406 + start_addr = mm->mmap_base;
21407 + mm->cached_hole_size = 0;
21408 }
21409
21410 full_search:
21411 @@ -281,26 +288,27 @@ full_search:
21412
21413 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21414 /* At this point: (!vma || addr < vma->vm_end). */
21415 - if (TASK_SIZE - len < addr) {
21416 + if (pax_task_size - len < addr) {
21417 /*
21418 * Start a new search - just in case we missed
21419 * some holes.
21420 */
21421 - if (start_addr != TASK_UNMAPPED_BASE) {
21422 - start_addr = TASK_UNMAPPED_BASE;
21423 + if (start_addr != mm->mmap_base) {
21424 + start_addr = mm->mmap_base;
21425 mm->cached_hole_size = 0;
21426 goto full_search;
21427 }
21428 return -ENOMEM;
21429 }
21430 - if (!vma || addr + len <= vma->vm_start) {
21431 - mm->free_area_cache = addr + len;
21432 - return addr;
21433 - }
21434 + if (check_heap_stack_gap(vma, addr, len))
21435 + break;
21436 if (addr + mm->cached_hole_size < vma->vm_start)
21437 mm->cached_hole_size = vma->vm_start - addr;
21438 addr = ALIGN(vma->vm_end, huge_page_size(h));
21439 }
21440 +
21441 + mm->free_area_cache = addr + len;
21442 + return addr;
21443 }
21444
21445 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21446 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21447 {
21448 struct hstate *h = hstate_file(file);
21449 struct mm_struct *mm = current->mm;
21450 - struct vm_area_struct *vma, *prev_vma;
21451 - unsigned long base = mm->mmap_base, addr = addr0;
21452 + struct vm_area_struct *vma;
21453 + unsigned long base = mm->mmap_base, addr;
21454 unsigned long largest_hole = mm->cached_hole_size;
21455 - int first_time = 1;
21456
21457 /* don't allow allocations above current base */
21458 if (mm->free_area_cache > base)
21459 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21460 largest_hole = 0;
21461 mm->free_area_cache = base;
21462 }
21463 -try_again:
21464 +
21465 /* make sure it can fit in the remaining address space */
21466 if (mm->free_area_cache < len)
21467 goto fail;
21468
21469 /* either no address requested or cant fit in requested address hole */
21470 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21471 + addr = (mm->free_area_cache - len);
21472 do {
21473 + addr &= huge_page_mask(h);
21474 + vma = find_vma(mm, addr);
21475 /*
21476 * Lookup failure means no vma is above this address,
21477 * i.e. return with success:
21478 - */
21479 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21480 - return addr;
21481 -
21482 - /*
21483 * new region fits between prev_vma->vm_end and
21484 * vma->vm_start, use it:
21485 */
21486 - if (addr + len <= vma->vm_start &&
21487 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21488 + if (check_heap_stack_gap(vma, addr, len)) {
21489 /* remember the address as a hint for next time */
21490 - mm->cached_hole_size = largest_hole;
21491 - return (mm->free_area_cache = addr);
21492 - } else {
21493 - /* pull free_area_cache down to the first hole */
21494 - if (mm->free_area_cache == vma->vm_end) {
21495 - mm->free_area_cache = vma->vm_start;
21496 - mm->cached_hole_size = largest_hole;
21497 - }
21498 + mm->cached_hole_size = largest_hole;
21499 + return (mm->free_area_cache = addr);
21500 + }
21501 + /* pull free_area_cache down to the first hole */
21502 + if (mm->free_area_cache == vma->vm_end) {
21503 + mm->free_area_cache = vma->vm_start;
21504 + mm->cached_hole_size = largest_hole;
21505 }
21506
21507 /* remember the largest hole we saw so far */
21508 if (addr + largest_hole < vma->vm_start)
21509 - largest_hole = vma->vm_start - addr;
21510 + largest_hole = vma->vm_start - addr;
21511
21512 /* try just below the current vma->vm_start */
21513 - addr = (vma->vm_start - len) & huge_page_mask(h);
21514 - } while (len <= vma->vm_start);
21515 + addr = skip_heap_stack_gap(vma, len);
21516 + } while (!IS_ERR_VALUE(addr));
21517
21518 fail:
21519 /*
21520 - * if hint left us with no space for the requested
21521 - * mapping then try again:
21522 - */
21523 - if (first_time) {
21524 - mm->free_area_cache = base;
21525 - largest_hole = 0;
21526 - first_time = 0;
21527 - goto try_again;
21528 - }
21529 - /*
21530 * A failed mmap() very likely causes application failure,
21531 * so fall back to the bottom-up function here. This scenario
21532 * can happen with large stack limits and large mmap()
21533 * allocations.
21534 */
21535 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21536 +
21537 +#ifdef CONFIG_PAX_SEGMEXEC
21538 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21539 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21540 + else
21541 +#endif
21542 +
21543 + mm->mmap_base = TASK_UNMAPPED_BASE;
21544 +
21545 +#ifdef CONFIG_PAX_RANDMMAP
21546 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21547 + mm->mmap_base += mm->delta_mmap;
21548 +#endif
21549 +
21550 + mm->free_area_cache = mm->mmap_base;
21551 mm->cached_hole_size = ~0UL;
21552 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21553 len, pgoff, flags);
21554 @@ -387,6 +393,7 @@ fail:
21555 /*
21556 * Restore the topdown base:
21557 */
21558 + mm->mmap_base = base;
21559 mm->free_area_cache = base;
21560 mm->cached_hole_size = ~0UL;
21561
21562 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21563 struct hstate *h = hstate_file(file);
21564 struct mm_struct *mm = current->mm;
21565 struct vm_area_struct *vma;
21566 + unsigned long pax_task_size = TASK_SIZE;
21567
21568 if (len & ~huge_page_mask(h))
21569 return -EINVAL;
21570 - if (len > TASK_SIZE)
21571 +
21572 +#ifdef CONFIG_PAX_SEGMEXEC
21573 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21574 + pax_task_size = SEGMEXEC_TASK_SIZE;
21575 +#endif
21576 +
21577 + pax_task_size -= PAGE_SIZE;
21578 +
21579 + if (len > pax_task_size)
21580 return -ENOMEM;
21581
21582 if (flags & MAP_FIXED) {
21583 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21584 if (addr) {
21585 addr = ALIGN(addr, huge_page_size(h));
21586 vma = find_vma(mm, addr);
21587 - if (TASK_SIZE - len >= addr &&
21588 - (!vma || addr + len <= vma->vm_start))
21589 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21590 return addr;
21591 }
21592 if (mm->get_unmapped_area == arch_get_unmapped_area)
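For reference, the rewritten hugetlb_get_unmapped_area_topdown() above walks mappings downward from free_area_cache and accepts the first gap that check_heap_stack_gap() approves. A simplified stand-alone model of that top-down gap search over sorted occupied ranges; alignment and the stack guard gap that the real check_heap_stack_gap() (defined elsewhere in the patch) enforces are left out.

#include <stdio.h>

struct range { unsigned long start, end; };     /* occupied, sorted ascending */

/* Top-down search: start just below 'base' and slide under each occupied
 * range until a hole of 'len' bytes fits, mirroring the loop shape above. */
static unsigned long topdown_fit(const struct range *r, int n,
                                 unsigned long base, unsigned long len)
{
        unsigned long addr = base - len;
        int i;

        for (i = n - 1; i >= 0; i--) {
                if (addr >= r[i].end)           /* fits above this range */
                        return addr;
                if (r[i].start < len)           /* no room below it */
                        return 0;
                addr = r[i].start - len;        /* try just below it */
        }
        return addr;
}

int main(void)
{
        const struct range occupied[] = {
                { 0x10000000, 0x20000000 },
                { 0x30000000, 0x38000000 },
        };

        printf("placed at %#lx\n",
               topdown_fit(occupied, 2, 0x40000000UL, 0x08000000UL));
        return 0;
}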
21593 diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21594 --- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21595 +++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21596 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21597 }
21598
21599 /*
21600 - * Creates a middle page table and puts a pointer to it in the
21601 - * given global directory entry. This only returns the gd entry
21602 - * in non-PAE compilation mode, since the middle layer is folded.
21603 - */
21604 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21605 -{
21606 - pud_t *pud;
21607 - pmd_t *pmd_table;
21608 -
21609 -#ifdef CONFIG_X86_PAE
21610 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21611 - if (after_bootmem)
21612 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21613 - else
21614 - pmd_table = (pmd_t *)alloc_low_page();
21615 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21616 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21617 - pud = pud_offset(pgd, 0);
21618 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21619 -
21620 - return pmd_table;
21621 - }
21622 -#endif
21623 - pud = pud_offset(pgd, 0);
21624 - pmd_table = pmd_offset(pud, 0);
21625 -
21626 - return pmd_table;
21627 -}
21628 -
21629 -/*
21630 * Create a page table and place a pointer to it in a middle page
21631 * directory entry:
21632 */
21633 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21634 page_table = (pte_t *)alloc_low_page();
21635
21636 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21637 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21638 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21639 +#else
21640 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21641 +#endif
21642 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21643 }
21644
21645 return pte_offset_kernel(pmd, 0);
21646 }
21647
21648 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21649 +{
21650 + pud_t *pud;
21651 + pmd_t *pmd_table;
21652 +
21653 + pud = pud_offset(pgd, 0);
21654 + pmd_table = pmd_offset(pud, 0);
21655 +
21656 + return pmd_table;
21657 +}
21658 +
21659 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21660 {
21661 int pgd_idx = pgd_index(vaddr);
21662 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21663 int pgd_idx, pmd_idx;
21664 unsigned long vaddr;
21665 pgd_t *pgd;
21666 + pud_t *pud;
21667 pmd_t *pmd;
21668 pte_t *pte = NULL;
21669
21670 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21671 pgd = pgd_base + pgd_idx;
21672
21673 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21674 - pmd = one_md_table_init(pgd);
21675 - pmd = pmd + pmd_index(vaddr);
21676 + pud = pud_offset(pgd, vaddr);
21677 + pmd = pmd_offset(pud, vaddr);
21678 +
21679 +#ifdef CONFIG_X86_PAE
21680 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21681 +#endif
21682 +
21683 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21684 pmd++, pmd_idx++) {
21685 pte = page_table_kmap_check(one_page_table_init(pmd),
21686 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21687 }
21688 }
21689
21690 -static inline int is_kernel_text(unsigned long addr)
21691 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21692 {
21693 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21694 - return 1;
21695 - return 0;
21696 + if ((start > ktla_ktva((unsigned long)_etext) ||
21697 + end <= ktla_ktva((unsigned long)_stext)) &&
21698 + (start > ktla_ktva((unsigned long)_einittext) ||
21699 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21700 +
21701 +#ifdef CONFIG_ACPI_SLEEP
21702 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21703 +#endif
21704 +
21705 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21706 + return 0;
21707 + return 1;
21708 }
21709
21710 /*
21711 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21712 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21713 unsigned long start_pfn, end_pfn;
21714 pgd_t *pgd_base = swapper_pg_dir;
21715 - int pgd_idx, pmd_idx, pte_ofs;
21716 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21717 unsigned long pfn;
21718 pgd_t *pgd;
21719 + pud_t *pud;
21720 pmd_t *pmd;
21721 pte_t *pte;
21722 unsigned pages_2m, pages_4k;
21723 @@ -278,8 +279,13 @@ repeat:
21724 pfn = start_pfn;
21725 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21726 pgd = pgd_base + pgd_idx;
21727 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21728 - pmd = one_md_table_init(pgd);
21729 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21730 + pud = pud_offset(pgd, 0);
21731 + pmd = pmd_offset(pud, 0);
21732 +
21733 +#ifdef CONFIG_X86_PAE
21734 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21735 +#endif
21736
21737 if (pfn >= end_pfn)
21738 continue;
21739 @@ -291,14 +297,13 @@ repeat:
21740 #endif
21741 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21742 pmd++, pmd_idx++) {
21743 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21744 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21745
21746 /*
21747 * Map with big pages if possible, otherwise
21748 * create normal page tables:
21749 */
21750 if (use_pse) {
21751 - unsigned int addr2;
21752 pgprot_t prot = PAGE_KERNEL_LARGE;
21753 /*
21754 * first pass will use the same initial
21755 @@ -308,11 +313,7 @@ repeat:
21756 __pgprot(PTE_IDENT_ATTR |
21757 _PAGE_PSE);
21758
21759 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21760 - PAGE_OFFSET + PAGE_SIZE-1;
21761 -
21762 - if (is_kernel_text(addr) ||
21763 - is_kernel_text(addr2))
21764 + if (is_kernel_text(address, address + PMD_SIZE))
21765 prot = PAGE_KERNEL_LARGE_EXEC;
21766
21767 pages_2m++;
21768 @@ -329,7 +330,7 @@ repeat:
21769 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21770 pte += pte_ofs;
21771 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21772 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21773 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21774 pgprot_t prot = PAGE_KERNEL;
21775 /*
21776 * first pass will use the same initial
21777 @@ -337,7 +338,7 @@ repeat:
21778 */
21779 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21780
21781 - if (is_kernel_text(addr))
21782 + if (is_kernel_text(address, address + PAGE_SIZE))
21783 prot = PAGE_KERNEL_EXEC;
21784
21785 pages_4k++;
21786 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21787
21788 pud = pud_offset(pgd, va);
21789 pmd = pmd_offset(pud, va);
21790 - if (!pmd_present(*pmd))
21791 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21792 break;
21793
21794 pte = pte_offset_kernel(pmd, va);
21795 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21796
21797 static void __init pagetable_init(void)
21798 {
21799 - pgd_t *pgd_base = swapper_pg_dir;
21800 -
21801 - permanent_kmaps_init(pgd_base);
21802 + permanent_kmaps_init(swapper_pg_dir);
21803 }
21804
21805 #ifdef CONFIG_ACPI_SLEEP
21806 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21807 * ACPI suspend needs this for resume, because things like the intel-agp
21808 * driver might have split up a kernel 4MB mapping.
21809 */
21810 -char swsusp_pg_dir[PAGE_SIZE]
21811 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21812 __attribute__ ((aligned(PAGE_SIZE)));
21813
21814 static inline void save_pg_dir(void)
21815 {
21816 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21817 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21818 }
21819 #else /* !CONFIG_ACPI_SLEEP */
21820 static inline void save_pg_dir(void)
21821 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21822 flush_tlb_all();
21823 }
21824
21825 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21826 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21827 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21828
21829 /* user-defined highmem size */
21830 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21831 * Initialize the boot-time allocator (with low memory only):
21832 */
21833 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21834 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21835 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21836 PAGE_SIZE);
21837 if (bootmap == -1L)
21838 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21839 @@ -864,6 +863,12 @@ void __init mem_init(void)
21840
21841 pci_iommu_alloc();
21842
21843 +#ifdef CONFIG_PAX_PER_CPU_PGD
21844 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21845 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21846 + KERNEL_PGD_PTRS);
21847 +#endif
21848 +
21849 #ifdef CONFIG_FLATMEM
21850 BUG_ON(!mem_map);
21851 #endif
21852 @@ -881,7 +886,7 @@ void __init mem_init(void)
21853 set_highmem_pages_init();
21854
21855 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21856 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21857 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21858 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21859
21860 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21861 @@ -923,10 +928,10 @@ void __init mem_init(void)
21862 ((unsigned long)&__init_end -
21863 (unsigned long)&__init_begin) >> 10,
21864
21865 - (unsigned long)&_etext, (unsigned long)&_edata,
21866 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21867 + (unsigned long)&_sdata, (unsigned long)&_edata,
21868 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21869
21870 - (unsigned long)&_text, (unsigned long)&_etext,
21871 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21872 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21873
21874 /*
21875 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21876 if (!kernel_set_to_readonly)
21877 return;
21878
21879 + start = ktla_ktva(start);
21880 pr_debug("Set kernel text: %lx - %lx for read write\n",
21881 start, start+size);
21882
21883 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21884 if (!kernel_set_to_readonly)
21885 return;
21886
21887 + start = ktla_ktva(start);
21888 pr_debug("Set kernel text: %lx - %lx for read only\n",
21889 start, start+size);
21890
21891 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21892 unsigned long start = PFN_ALIGN(_text);
21893 unsigned long size = PFN_ALIGN(_etext) - start;
21894
21895 + start = ktla_ktva(start);
21896 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21897 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21898 size >> 10);
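For reference, the rewritten is_kernel_text() above answers "does [start, end) intersect any executable region (kernel text, init text, ACPI wakeup code, low BIOS/video memory)?" by chaining non-overlap tests. A compact sketch of the same idea using half-open ranges and illustrative addresses.

#include <stdio.h>

struct region { unsigned long start, end; };    /* half-open [start, end) */

/* Two half-open ranges overlap iff neither lies entirely before the other;
 * is_kernel_text() above chains the negation of this test over its regions. */
static int ranges_overlap(unsigned long s, unsigned long e,
                          const struct region *r)
{
        return s < r->end && r->start < e;
}

static int is_exec_range(unsigned long s, unsigned long e,
                         const struct region *exec, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (ranges_overlap(s, e, &exec[i]))
                        return 1;
        return 0;
}

int main(void)
{
        /* Illustrative addresses only. */
        const struct region exec[] = {
                { 0xc1000000, 0xc1400000 },     /* _stext.._etext */
                { 0xc1500000, 0xc1540000 },     /* init text      */
        };

        printf("%d\n", is_exec_range(0xc13ff000, 0xc1401000, exec, 2));
        printf("%d\n", is_exec_range(0xc2000000, 0xc2001000, exec, 2));
        return 0;
}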
21899 diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
21900 --- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21901 +++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21902 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21903 pmd = fill_pmd(pud, vaddr);
21904 pte = fill_pte(pmd, vaddr);
21905
21906 + pax_open_kernel();
21907 set_pte(pte, new_pte);
21908 + pax_close_kernel();
21909
21910 /*
21911 * It's enough to flush this one mapping.
21912 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21913 pgd = pgd_offset_k((unsigned long)__va(phys));
21914 if (pgd_none(*pgd)) {
21915 pud = (pud_t *) spp_getpage();
21916 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21917 - _PAGE_USER));
21918 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21919 }
21920 pud = pud_offset(pgd, (unsigned long)__va(phys));
21921 if (pud_none(*pud)) {
21922 pmd = (pmd_t *) spp_getpage();
21923 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21924 - _PAGE_USER));
21925 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21926 }
21927 pmd = pmd_offset(pud, phys);
21928 BUG_ON(!pmd_none(*pmd));
21929 @@ -675,6 +675,12 @@ void __init mem_init(void)
21930
21931 pci_iommu_alloc();
21932
21933 +#ifdef CONFIG_PAX_PER_CPU_PGD
21934 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21935 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21936 + KERNEL_PGD_PTRS);
21937 +#endif
21938 +
21939 /* clear_bss() already clear the empty_zero_page */
21940
21941 reservedpages = 0;
21942 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21943 static struct vm_area_struct gate_vma = {
21944 .vm_start = VSYSCALL_START,
21945 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21946 - .vm_page_prot = PAGE_READONLY_EXEC,
21947 - .vm_flags = VM_READ | VM_EXEC
21948 + .vm_page_prot = PAGE_READONLY,
21949 + .vm_flags = VM_READ
21950 };
21951
21952 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21953 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21954
21955 const char *arch_vma_name(struct vm_area_struct *vma)
21956 {
21957 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21958 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21959 return "[vdso]";
21960 if (vma == &gate_vma)
21961 return "[vsyscall]";
21962 diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
21963 --- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21964 +++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21965 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21966 * cause a hotspot and fill up ZONE_DMA. The page tables
21967 * need roughly 0.5KB per GB.
21968 */
21969 -#ifdef CONFIG_X86_32
21970 - start = 0x7000;
21971 -#else
21972 - start = 0x8000;
21973 -#endif
21974 + start = 0x100000;
21975 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21976 tables, PAGE_SIZE);
21977 if (e820_table_start == -1UL)
21978 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21979 #endif
21980
21981 set_nx();
21982 - if (nx_enabled)
21983 + if (nx_enabled && cpu_has_nx)
21984 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21985
21986 /* Enable PSE if available */
21987 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21988 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21989 * mmio resources as well as potential bios/acpi data regions.
21990 */
21991 +
21992 int devmem_is_allowed(unsigned long pagenr)
21993 {
21994 +#ifdef CONFIG_GRKERNSEC_KMEM
21995 + /* allow BDA */
21996 + if (!pagenr)
21997 + return 1;
21998 + /* allow EBDA */
21999 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22000 + return 1;
22001 + /* allow ISA/video mem */
22002 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22003 + return 1;
22004 + /* throw out everything else below 1MB */
22005 + if (pagenr <= 256)
22006 + return 0;
22007 +#else
22008 if (pagenr <= 256)
22009 return 1;
22010 +#endif
22011 +
22012 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22013 return 0;
22014 if (!page_is_ram(pagenr))
22015 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22016
22017 void free_initmem(void)
22018 {
22019 +
22020 +#ifdef CONFIG_PAX_KERNEXEC
22021 +#ifdef CONFIG_X86_32
22022 + /* PaX: limit KERNEL_CS to actual size */
22023 + unsigned long addr, limit;
22024 + struct desc_struct d;
22025 + int cpu;
22026 +
22027 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22028 + limit = (limit - 1UL) >> PAGE_SHIFT;
22029 +
22030 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22031 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22032 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22033 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22034 + }
22035 +
22036 + /* PaX: make KERNEL_CS read-only */
22037 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22038 + if (!paravirt_enabled())
22039 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22040 +/*
22041 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22042 + pgd = pgd_offset_k(addr);
22043 + pud = pud_offset(pgd, addr);
22044 + pmd = pmd_offset(pud, addr);
22045 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22046 + }
22047 +*/
22048 +#ifdef CONFIG_X86_PAE
22049 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22050 +/*
22051 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22052 + pgd = pgd_offset_k(addr);
22053 + pud = pud_offset(pgd, addr);
22054 + pmd = pmd_offset(pud, addr);
22055 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22056 + }
22057 +*/
22058 +#endif
22059 +
22060 +#ifdef CONFIG_MODULES
22061 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22062 +#endif
22063 +
22064 +#else
22065 + pgd_t *pgd;
22066 + pud_t *pud;
22067 + pmd_t *pmd;
22068 + unsigned long addr, end;
22069 +
22070 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22071 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22072 + pgd = pgd_offset_k(addr);
22073 + pud = pud_offset(pgd, addr);
22074 + pmd = pmd_offset(pud, addr);
22075 + if (!pmd_present(*pmd))
22076 + continue;
22077 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22078 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22079 + else
22080 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22081 + }
22082 +
22083 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22084 + end = addr + KERNEL_IMAGE_SIZE;
22085 + for (; addr < end; addr += PMD_SIZE) {
22086 + pgd = pgd_offset_k(addr);
22087 + pud = pud_offset(pgd, addr);
22088 + pmd = pmd_offset(pud, addr);
22089 + if (!pmd_present(*pmd))
22090 + continue;
22091 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22092 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22093 + }
22094 +#endif
22095 +
22096 + flush_tlb_all();
22097 +#endif
22098 +
22099 free_init_pages("unused kernel memory",
22100 (unsigned long)(&__init_begin),
22101 (unsigned long)(&__init_end));
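For reference, the GRKERNSEC_KMEM branch of devmem_is_allowed() above whitelists only the BIOS Data Area (page 0), the EBDA page at 0x9f000 and the ISA/video hole, and rejects the rest of the low megabyte instead of allowing all of it. A user-space rendering of that predicate, with the x86 ISA window constants spelled out and the iomem/page_is_ram checks that follow in the kernel omitted.

#include <stdio.h>

#define PAGE_SHIFT        12
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS   0x100000

static int low_page_allowed(unsigned long pagenr)
{
        if (!pagenr)                                     /* BDA             */
                return 1;
        if (pagenr == (0x9f000 >> PAGE_SHIFT))           /* EBDA            */
                return 1;
        if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
            pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT)) /* ISA/video mem   */
                return 1;
        if (pagenr <= 256)                               /* rest of low 1MB */
                return 0;
        return 1;                                        /* above 1MB: subject to later checks */
}

int main(void)
{
        unsigned long pages[] = { 0x0, 0x9f, 0xa0, 0x50, 0x300 };
        int i;

        for (i = 0; i < 5; i++)
                printf("page %#lx -> %s\n", pages[i],
                       low_page_allowed(pages[i]) ? "allowed" : "denied");
        return 0;
}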
22102 diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22103 --- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22104 +++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22105 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22106 debug_kmap_atomic(type);
22107 idx = type + KM_TYPE_NR * smp_processor_id();
22108 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22109 +
22110 + pax_open_kernel();
22111 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22112 + pax_close_kernel();
22113 +
22114 arch_flush_lazy_mmu_mode();
22115
22116 return (void *)vaddr;
22117 diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22118 --- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22119 +++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22120 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22121 * Second special case: Some BIOSen report the PC BIOS
22122 * area (640->1Mb) as ram even though it is not.
22123 */
22124 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22125 - pagenr < (BIOS_END >> PAGE_SHIFT))
22126 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22127 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22128 return 0;
22129
22130 for (i = 0; i < e820.nr_map; i++) {
22131 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22132 /*
22133 * Don't allow anybody to remap normal RAM that we're using..
22134 */
22135 - for (pfn = phys_addr >> PAGE_SHIFT;
22136 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22137 - pfn++) {
22138 -
22139 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22140 int is_ram = page_is_ram(pfn);
22141
22142 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22143 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22144 return NULL;
22145 WARN_ON_ONCE(is_ram);
22146 }
22147 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22148 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22149
22150 static __initdata int after_paging_init;
22151 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22152 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22153
22154 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22155 {
22156 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22157 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22158
22159 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22160 - memset(bm_pte, 0, sizeof(bm_pte));
22161 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22162 + pmd_populate_user(&init_mm, pmd, bm_pte);
22163
22164 /*
22165 * The boot-ioremap range spans multiple pmds, for which
22166 diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22167 --- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22168 +++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22169 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22170 * memory (e.g. tracked pages)? For now, we need this to avoid
22171 * invoking kmemcheck for PnP BIOS calls.
22172 */
22173 - if (regs->flags & X86_VM_MASK)
22174 + if (v8086_mode(regs))
22175 return false;
22176 - if (regs->cs != __KERNEL_CS)
22177 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22178 return false;
22179
22180 pte = kmemcheck_pte_lookup(address);
22181 diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22182 --- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22183 +++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22184 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22185 * Leave an at least ~128 MB hole with possible stack randomization.
22186 */
22187 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22188 -#define MAX_GAP (TASK_SIZE/6*5)
22189 +#define MAX_GAP (pax_task_size/6*5)
22190
22191 /*
22192 * True on X86_32 or when emulating IA32 on X86_64
22193 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22194 return rnd << PAGE_SHIFT;
22195 }
22196
22197 -static unsigned long mmap_base(void)
22198 +static unsigned long mmap_base(struct mm_struct *mm)
22199 {
22200 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22201 + unsigned long pax_task_size = TASK_SIZE;
22202 +
22203 +#ifdef CONFIG_PAX_SEGMEXEC
22204 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22205 + pax_task_size = SEGMEXEC_TASK_SIZE;
22206 +#endif
22207
22208 if (gap < MIN_GAP)
22209 gap = MIN_GAP;
22210 else if (gap > MAX_GAP)
22211 gap = MAX_GAP;
22212
22213 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22214 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22215 }
22216
22217 /*
22218 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22219 * does, but not when emulating X86_32
22220 */
22221 -static unsigned long mmap_legacy_base(void)
22222 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22223 {
22224 - if (mmap_is_ia32())
22225 + if (mmap_is_ia32()) {
22226 +
22227 +#ifdef CONFIG_PAX_SEGMEXEC
22228 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22229 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22230 + else
22231 +#endif
22232 +
22233 return TASK_UNMAPPED_BASE;
22234 - else
22235 + } else
22236 return TASK_UNMAPPED_BASE + mmap_rnd();
22237 }
22238
22239 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22240 void arch_pick_mmap_layout(struct mm_struct *mm)
22241 {
22242 if (mmap_is_legacy()) {
22243 - mm->mmap_base = mmap_legacy_base();
22244 + mm->mmap_base = mmap_legacy_base(mm);
22245 +
22246 +#ifdef CONFIG_PAX_RANDMMAP
22247 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22248 + mm->mmap_base += mm->delta_mmap;
22249 +#endif
22250 +
22251 mm->get_unmapped_area = arch_get_unmapped_area;
22252 mm->unmap_area = arch_unmap_area;
22253 } else {
22254 - mm->mmap_base = mmap_base();
22255 + mm->mmap_base = mmap_base(mm);
22256 +
22257 +#ifdef CONFIG_PAX_RANDMMAP
22258 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22259 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22260 +#endif
22261 +
22262 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22263 mm->unmap_area = arch_unmap_area_topdown;
22264 }
22265 diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22266 --- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22267 +++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22268 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22269 break;
22270 default:
22271 {
22272 - unsigned char *ip = (unsigned char *)instptr;
22273 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22274 my_trace->opcode = MMIO_UNKNOWN_OP;
22275 my_trace->width = 0;
22276 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22277 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22278 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22279 void __iomem *addr)
22280 {
22281 - static atomic_t next_id;
22282 + static atomic_unchecked_t next_id;
22283 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22284 /* These are page-unaligned. */
22285 struct mmiotrace_map map = {
22286 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22287 .private = trace
22288 },
22289 .phys = offset,
22290 - .id = atomic_inc_return(&next_id)
22291 + .id = atomic_inc_return_unchecked(&next_id)
22292 };
22293 map.map_id = trace->id;
22294
22295 diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22296 --- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22297 +++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22298 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22299 }
22300 #endif
22301
22302 -extern unsigned long find_max_low_pfn(void);
22303 extern unsigned long highend_pfn, highstart_pfn;
22304
22305 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22306 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22307 --- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22308 +++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22309 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22310 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22311 */
22312 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22313 - pgprot_val(forbidden) |= _PAGE_NX;
22314 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22315
22316 /*
22317 * The kernel text needs to be executable for obvious reasons
22318 * Does not cover __inittext since that is gone later on. On
22319 * 64bit we do not enforce !NX on the low mapping
22320 */
22321 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22322 - pgprot_val(forbidden) |= _PAGE_NX;
22323 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22324 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22325
22326 +#ifdef CONFIG_DEBUG_RODATA
22327 /*
22328 * The .rodata section needs to be read-only. Using the pfn
22329 * catches all aliases.
22330 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22331 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22332 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22333 pgprot_val(forbidden) |= _PAGE_RW;
22334 +#endif
22335 +
22336 +#ifdef CONFIG_PAX_KERNEXEC
22337 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22338 + pgprot_val(forbidden) |= _PAGE_RW;
22339 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22340 + }
22341 +#endif
22342
22343 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22344
22345 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22346 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22347 {
22348 /* change init_mm */
22349 + pax_open_kernel();
22350 set_pte_atomic(kpte, pte);
22351 +
22352 #ifdef CONFIG_X86_32
22353 if (!SHARED_KERNEL_PMD) {
22354 +
22355 +#ifdef CONFIG_PAX_PER_CPU_PGD
22356 + unsigned long cpu;
22357 +#else
22358 struct page *page;
22359 +#endif
22360
22361 +#ifdef CONFIG_PAX_PER_CPU_PGD
22362 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22363 + pgd_t *pgd = get_cpu_pgd(cpu);
22364 +#else
22365 list_for_each_entry(page, &pgd_list, lru) {
22366 - pgd_t *pgd;
22367 + pgd_t *pgd = (pgd_t *)page_address(page);
22368 +#endif
22369 +
22370 pud_t *pud;
22371 pmd_t *pmd;
22372
22373 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22374 + pgd += pgd_index(address);
22375 pud = pud_offset(pgd, address);
22376 pmd = pmd_offset(pud, address);
22377 set_pte_atomic((pte_t *)pmd, pte);
22378 }
22379 }
22380 #endif
22381 + pax_close_kernel();
22382 }
22383
22384 static int
22385 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22386 --- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22387 +++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22388 @@ -36,7 +36,7 @@ enum {
22389
22390 static int pte_testbit(pte_t pte)
22391 {
22392 - return pte_flags(pte) & _PAGE_UNUSED1;
22393 + return pte_flags(pte) & _PAGE_CPA_TEST;
22394 }
22395
22396 struct split_state {
22397 diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22398 --- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22399 +++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22400 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22401
22402 conflict:
22403 printk(KERN_INFO "%s:%d conflicting memory types "
22404 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22405 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22406 new->end, cattr_name(new->type), cattr_name(entry->type));
22407 return -EBUSY;
22408 }
22409 @@ -559,7 +559,7 @@ unlock_ret:
22410
22411 if (err) {
22412 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22413 - current->comm, current->pid, start, end);
22414 + current->comm, task_pid_nr(current), start, end);
22415 }
22416
22417 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22418 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22419 while (cursor < to) {
22420 if (!devmem_is_allowed(pfn)) {
22421 printk(KERN_INFO
22422 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22423 - current->comm, from, to);
22424 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22425 + current->comm, from, to, cursor);
22426 return 0;
22427 }
22428 cursor += PAGE_SIZE;
22429 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22430 printk(KERN_INFO
22431 "%s:%d ioremap_change_attr failed %s "
22432 "for %Lx-%Lx\n",
22433 - current->comm, current->pid,
22434 + current->comm, task_pid_nr(current),
22435 cattr_name(flags),
22436 base, (unsigned long long)(base + size));
22437 return -EINVAL;
22438 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22439 free_memtype(paddr, paddr + size);
22440 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22441 " for %Lx-%Lx, got %s\n",
22442 - current->comm, current->pid,
22443 + current->comm, task_pid_nr(current),
22444 cattr_name(want_flags),
22445 (unsigned long long)paddr,
22446 (unsigned long long)(paddr + size),
22447 diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22448 --- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22449 +++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22450 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22451 int i;
22452 enum reason_type rv = OTHERS;
22453
22454 - p = (unsigned char *)ins_addr;
22455 + p = (unsigned char *)ktla_ktva(ins_addr);
22456 p += skip_prefix(p, &prf);
22457 p += get_opcode(p, &opcode);
22458
22459 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22460 struct prefix_bits prf;
22461 int i;
22462
22463 - p = (unsigned char *)ins_addr;
22464 + p = (unsigned char *)ktla_ktva(ins_addr);
22465 p += skip_prefix(p, &prf);
22466 p += get_opcode(p, &opcode);
22467
22468 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22469 struct prefix_bits prf;
22470 int i;
22471
22472 - p = (unsigned char *)ins_addr;
22473 + p = (unsigned char *)ktla_ktva(ins_addr);
22474 p += skip_prefix(p, &prf);
22475 p += get_opcode(p, &opcode);
22476
22477 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22478 int i;
22479 unsigned long rv;
22480
22481 - p = (unsigned char *)ins_addr;
22482 + p = (unsigned char *)ktla_ktva(ins_addr);
22483 p += skip_prefix(p, &prf);
22484 p += get_opcode(p, &opcode);
22485 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22486 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22487 int i;
22488 unsigned long rv;
22489
22490 - p = (unsigned char *)ins_addr;
22491 + p = (unsigned char *)ktla_ktva(ins_addr);
22492 p += skip_prefix(p, &prf);
22493 p += get_opcode(p, &opcode);
22494 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22495 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22496 --- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22497 +++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22498 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22499 return;
22500 }
22501 pte = pte_offset_kernel(pmd, vaddr);
22502 +
22503 + pax_open_kernel();
22504 if (pte_val(pteval))
22505 set_pte_at(&init_mm, vaddr, pte, pteval);
22506 else
22507 pte_clear(&init_mm, vaddr, pte);
22508 + pax_close_kernel();
22509
22510 /*
22511 * It's enough to flush this one mapping.
22512 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22513 --- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22514 +++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22515 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22516 list_del(&page->lru);
22517 }
22518
22519 -#define UNSHARED_PTRS_PER_PGD \
22520 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22521 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22522 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22523
22524 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22525 +{
22526 + while (count--)
22527 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22528 +}
22529 +#endif
22530 +
22531 +#ifdef CONFIG_PAX_PER_CPU_PGD
22532 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22533 +{
22534 + while (count--)
22535 +
22536 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22537 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22538 +#else
22539 + *dst++ = *src++;
22540 +#endif
22541 +
22542 +}
22543 +#endif
22544 +
22545 +#ifdef CONFIG_X86_64
22546 +#define pxd_t pud_t
22547 +#define pyd_t pgd_t
22548 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22549 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22550 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22551 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22552 +#define PYD_SIZE PGDIR_SIZE
22553 +#else
22554 +#define pxd_t pmd_t
22555 +#define pyd_t pud_t
22556 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22557 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22558 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22559 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22560 +#define PYD_SIZE PUD_SIZE
22561 +#endif
22562 +
22563 +#ifdef CONFIG_PAX_PER_CPU_PGD
22564 +static inline void pgd_ctor(pgd_t *pgd) {}
22565 +static inline void pgd_dtor(pgd_t *pgd) {}
22566 +#else
22567 static void pgd_ctor(pgd_t *pgd)
22568 {
22569 /* If the pgd points to a shared pagetable level (either the
22570 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22571 pgd_list_del(pgd);
22572 spin_unlock_irqrestore(&pgd_lock, flags);
22573 }
22574 +#endif
22575
22576 /*
22577 * List of all pgd's needed for non-PAE so it can invalidate entries
22578 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22579 * -- wli
22580 */
22581
22582 -#ifdef CONFIG_X86_PAE
22583 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22584 /*
22585 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22586 * updating the top-level pagetable entries to guarantee the
22587 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22588 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22589 * and initialize the kernel pmds here.
22590 */
22591 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22592 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22593
22594 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22595 {
22596 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22597 */
22598 flush_tlb_mm(mm);
22599 }
22600 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22601 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22602 #else /* !CONFIG_X86_PAE */
22603
22604 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22605 -#define PREALLOCATED_PMDS 0
22606 +#define PREALLOCATED_PXDS 0
22607
22608 #endif /* CONFIG_X86_PAE */
22609
22610 -static void free_pmds(pmd_t *pmds[])
22611 +static void free_pxds(pxd_t *pxds[])
22612 {
22613 int i;
22614
22615 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22616 - if (pmds[i])
22617 - free_page((unsigned long)pmds[i]);
22618 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22619 + if (pxds[i])
22620 + free_page((unsigned long)pxds[i]);
22621 }
22622
22623 -static int preallocate_pmds(pmd_t *pmds[])
22624 +static int preallocate_pxds(pxd_t *pxds[])
22625 {
22626 int i;
22627 bool failed = false;
22628
22629 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22630 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22631 - if (pmd == NULL)
22632 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22633 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22634 + if (pxd == NULL)
22635 failed = true;
22636 - pmds[i] = pmd;
22637 + pxds[i] = pxd;
22638 }
22639
22640 if (failed) {
22641 - free_pmds(pmds);
22642 + free_pxds(pxds);
22643 return -ENOMEM;
22644 }
22645
22646 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22647 * preallocate which never got a corresponding vma will need to be
22648 * freed manually.
22649 */
22650 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22651 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22652 {
22653 int i;
22654
22655 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22656 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22657 pgd_t pgd = pgdp[i];
22658
22659 if (pgd_val(pgd) != 0) {
22660 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22661 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22662
22663 - pgdp[i] = native_make_pgd(0);
22664 + set_pgd(pgdp + i, native_make_pgd(0));
22665
22666 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22667 - pmd_free(mm, pmd);
22668 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22669 + pxd_free(mm, pxd);
22670 }
22671 }
22672 }
22673
22674 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22675 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22676 {
22677 - pud_t *pud;
22678 + pyd_t *pyd;
22679 unsigned long addr;
22680 int i;
22681
22682 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22683 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22684 return;
22685
22686 - pud = pud_offset(pgd, 0);
22687 +#ifdef CONFIG_X86_64
22688 + pyd = pyd_offset(mm, 0L);
22689 +#else
22690 + pyd = pyd_offset(pgd, 0L);
22691 +#endif
22692
22693 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22694 - i++, pud++, addr += PUD_SIZE) {
22695 - pmd_t *pmd = pmds[i];
22696 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22697 + i++, pyd++, addr += PYD_SIZE) {
22698 + pxd_t *pxd = pxds[i];
22699
22700 if (i >= KERNEL_PGD_BOUNDARY)
22701 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22702 - sizeof(pmd_t) * PTRS_PER_PMD);
22703 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22704 + sizeof(pxd_t) * PTRS_PER_PMD);
22705
22706 - pud_populate(mm, pud, pmd);
22707 + pyd_populate(mm, pyd, pxd);
22708 }
22709 }
22710
22711 pgd_t *pgd_alloc(struct mm_struct *mm)
22712 {
22713 pgd_t *pgd;
22714 - pmd_t *pmds[PREALLOCATED_PMDS];
22715 + pxd_t *pxds[PREALLOCATED_PXDS];
22716 +
22717 unsigned long flags;
22718
22719 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22720 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22721
22722 mm->pgd = pgd;
22723
22724 - if (preallocate_pmds(pmds) != 0)
22725 + if (preallocate_pxds(pxds) != 0)
22726 goto out_free_pgd;
22727
22728 if (paravirt_pgd_alloc(mm) != 0)
22729 - goto out_free_pmds;
22730 + goto out_free_pxds;
22731
22732 /*
22733 * Make sure that pre-populating the pmds is atomic with
22734 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22735 spin_lock_irqsave(&pgd_lock, flags);
22736
22737 pgd_ctor(pgd);
22738 - pgd_prepopulate_pmd(mm, pgd, pmds);
22739 + pgd_prepopulate_pxd(mm, pgd, pxds);
22740
22741 spin_unlock_irqrestore(&pgd_lock, flags);
22742
22743 return pgd;
22744
22745 -out_free_pmds:
22746 - free_pmds(pmds);
22747 +out_free_pxds:
22748 + free_pxds(pxds);
22749 out_free_pgd:
22750 free_page((unsigned long)pgd);
22751 out:
22752 @@ -287,7 +338,7 @@ out:
22753
22754 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22755 {
22756 - pgd_mop_up_pmds(mm, pgd);
22757 + pgd_mop_up_pxds(mm, pgd);
22758 pgd_dtor(pgd);
22759 paravirt_pgd_free(mm, pgd);
22760 free_page((unsigned long)pgd);
22761 diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22762 --- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22763 +++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22764 @@ -4,11 +4,10 @@
22765
22766 #include <asm/pgtable.h>
22767
22768 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22769 int nx_enabled;
22770
22771 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22772 -static int disable_nx __cpuinitdata;
22773 -
22774 +#ifndef CONFIG_PAX_PAGEEXEC
22775 /*
22776 * noexec = on|off
22777 *
22778 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22779 if (!str)
22780 return -EINVAL;
22781 if (!strncmp(str, "on", 2)) {
22782 - __supported_pte_mask |= _PAGE_NX;
22783 - disable_nx = 0;
22784 + nx_enabled = 1;
22785 } else if (!strncmp(str, "off", 3)) {
22786 - disable_nx = 1;
22787 - __supported_pte_mask &= ~_PAGE_NX;
22788 + nx_enabled = 0;
22789 }
22790 return 0;
22791 }
22792 early_param("noexec", noexec_setup);
22793 #endif
22794 +#endif
22795
22796 #ifdef CONFIG_X86_PAE
22797 void __init set_nx(void)
22798 {
22799 - unsigned int v[4], l, h;
22800 + if (!nx_enabled && cpu_has_nx) {
22801 + unsigned l, h;
22802
22803 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22804 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22805 -
22806 - if ((v[3] & (1 << 20)) && !disable_nx) {
22807 - rdmsr(MSR_EFER, l, h);
22808 - l |= EFER_NX;
22809 - wrmsr(MSR_EFER, l, h);
22810 - nx_enabled = 1;
22811 - __supported_pte_mask |= _PAGE_NX;
22812 - }
22813 + __supported_pte_mask &= ~_PAGE_NX;
22814 + rdmsr(MSR_EFER, l, h);
22815 + l &= ~EFER_NX;
22816 + wrmsr(MSR_EFER, l, h);
22817 }
22818 }
22819 #else
22820 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22821 unsigned long efer;
22822
22823 rdmsrl(MSR_EFER, efer);
22824 - if (!(efer & EFER_NX) || disable_nx)
22825 + if (!(efer & EFER_NX) || !nx_enabled)
22826 __supported_pte_mask &= ~_PAGE_NX;
22827 }
22828 #endif
22829 diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
22830 --- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22831 +++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22832 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22833 BUG();
22834 cpumask_clear_cpu(cpu,
22835 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22836 +
22837 +#ifndef CONFIG_PAX_PER_CPU_PGD
22838 load_cr3(swapper_pg_dir);
22839 +#endif
22840 +
22841 }
22842 EXPORT_SYMBOL_GPL(leave_mm);
22843
22844 diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
22845 --- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22846 +++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22847 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22848 struct frame_head bufhead[2];
22849
22850 /* Also check accessibility of one struct frame_head beyond */
22851 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22852 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22853 return NULL;
22854 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22855 return NULL;
22856 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22857 {
22858 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22859
22860 - if (!user_mode_vm(regs)) {
22861 + if (!user_mode(regs)) {
22862 unsigned long stack = kernel_stack_pointer(regs);
22863 if (depth)
22864 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22865 diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
22866 --- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22867 +++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22868 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22869 #endif
22870 }
22871
22872 -static int inline addr_increment(void)
22873 +static inline int addr_increment(void)
22874 {
22875 #ifdef CONFIG_SMP
22876 return smp_num_siblings == 2 ? 2 : 1;
22877 diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
22878 --- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22879 +++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22880 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22881 int pcibios_last_bus = -1;
22882 unsigned long pirq_table_addr;
22883 struct pci_bus *pci_root_bus;
22884 -struct pci_raw_ops *raw_pci_ops;
22885 -struct pci_raw_ops *raw_pci_ext_ops;
22886 +const struct pci_raw_ops *raw_pci_ops;
22887 +const struct pci_raw_ops *raw_pci_ext_ops;
22888
22889 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22890 int reg, int len, u32 *val)
22891 diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
22892 --- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22893 +++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22894 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22895
22896 #undef PCI_CONF1_ADDRESS
22897
22898 -struct pci_raw_ops pci_direct_conf1 = {
22899 +const struct pci_raw_ops pci_direct_conf1 = {
22900 .read = pci_conf1_read,
22901 .write = pci_conf1_write,
22902 };
22903 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22904
22905 #undef PCI_CONF2_ADDRESS
22906
22907 -struct pci_raw_ops pci_direct_conf2 = {
22908 +const struct pci_raw_ops pci_direct_conf2 = {
22909 .read = pci_conf2_read,
22910 .write = pci_conf2_write,
22911 };
22912 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22913 * This should be close to trivial, but it isn't, because there are buggy
22914 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22915 */
22916 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22917 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22918 {
22919 u32 x = 0;
22920 int year, devfn;
22921 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
22922 --- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22923 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22924 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22925 return 0;
22926 }
22927
22928 -static struct pci_raw_ops pci_mmcfg = {
22929 +static const struct pci_raw_ops pci_mmcfg = {
22930 .read = pci_mmcfg_read,
22931 .write = pci_mmcfg_write,
22932 };
22933 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
22934 --- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22935 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22936 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22937 return 0;
22938 }
22939
22940 -static struct pci_raw_ops pci_mmcfg = {
22941 +static const struct pci_raw_ops pci_mmcfg = {
22942 .read = pci_mmcfg_read,
22943 .write = pci_mmcfg_write,
22944 };
22945 diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
22946 --- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22947 +++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22948 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22949
22950 #undef PCI_CONF1_MQ_ADDRESS
22951
22952 -static struct pci_raw_ops pci_direct_conf1_mq = {
22953 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22954 .read = pci_conf1_mq_read,
22955 .write = pci_conf1_mq_write
22956 };
22957 diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
22958 --- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22959 +++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22960 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22961 return 0;
22962 }
22963
22964 -static struct pci_raw_ops pci_olpc_conf = {
22965 +static const struct pci_raw_ops pci_olpc_conf = {
22966 .read = pci_olpc_read,
22967 .write = pci_olpc_write,
22968 };
22969 diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
22970 --- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22971 +++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22972 @@ -56,50 +56,93 @@ union bios32 {
22973 static struct {
22974 unsigned long address;
22975 unsigned short segment;
22976 -} bios32_indirect = { 0, __KERNEL_CS };
22977 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22978
22979 /*
22980 * Returns the entry point for the given service, NULL on error
22981 */
22982
22983 -static unsigned long bios32_service(unsigned long service)
22984 +static unsigned long __devinit bios32_service(unsigned long service)
22985 {
22986 unsigned char return_code; /* %al */
22987 unsigned long address; /* %ebx */
22988 unsigned long length; /* %ecx */
22989 unsigned long entry; /* %edx */
22990 unsigned long flags;
22991 + struct desc_struct d, *gdt;
22992
22993 local_irq_save(flags);
22994 - __asm__("lcall *(%%edi); cld"
22995 +
22996 + gdt = get_cpu_gdt_table(smp_processor_id());
22997 +
22998 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22999 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23000 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23001 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23002 +
23003 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23004 : "=a" (return_code),
23005 "=b" (address),
23006 "=c" (length),
23007 "=d" (entry)
23008 : "0" (service),
23009 "1" (0),
23010 - "D" (&bios32_indirect));
23011 + "D" (&bios32_indirect),
23012 + "r"(__PCIBIOS_DS)
23013 + : "memory");
23014 +
23015 + pax_open_kernel();
23016 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23017 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23018 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23019 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23020 + pax_close_kernel();
23021 +
23022 local_irq_restore(flags);
23023
23024 switch (return_code) {
23025 - case 0:
23026 - return address + entry;
23027 - case 0x80: /* Not present */
23028 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23029 - return 0;
23030 - default: /* Shouldn't happen */
23031 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23032 - service, return_code);
23033 + case 0: {
23034 + int cpu;
23035 + unsigned char flags;
23036 +
23037 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23038 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23039 + printk(KERN_WARNING "bios32_service: not valid\n");
23040 return 0;
23041 + }
23042 + address = address + PAGE_OFFSET;
23043 + length += 16UL; /* some BIOSs underreport this... */
23044 + flags = 4;
23045 + if (length >= 64*1024*1024) {
23046 + length >>= PAGE_SHIFT;
23047 + flags |= 8;
23048 + }
23049 +
23050 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23051 + gdt = get_cpu_gdt_table(cpu);
23052 + pack_descriptor(&d, address, length, 0x9b, flags);
23053 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23054 + pack_descriptor(&d, address, length, 0x93, flags);
23055 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23056 + }
23057 + return entry;
23058 + }
23059 + case 0x80: /* Not present */
23060 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23061 + return 0;
23062 + default: /* Shouldn't happen */
23063 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23064 + service, return_code);
23065 + return 0;
23066 }
23067 }
23068
23069 static struct {
23070 unsigned long address;
23071 unsigned short segment;
23072 -} pci_indirect = { 0, __KERNEL_CS };
23073 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23074
23075 -static int pci_bios_present;
23076 +static int pci_bios_present __read_only;
23077
23078 static int __devinit check_pcibios(void)
23079 {
23080 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23081 unsigned long flags, pcibios_entry;
23082
23083 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23084 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23085 + pci_indirect.address = pcibios_entry;
23086
23087 local_irq_save(flags);
23088 - __asm__(
23089 - "lcall *(%%edi); cld\n\t"
23090 + __asm__("movw %w6, %%ds\n\t"
23091 + "lcall *%%ss:(%%edi); cld\n\t"
23092 + "push %%ss\n\t"
23093 + "pop %%ds\n\t"
23094 "jc 1f\n\t"
23095 "xor %%ah, %%ah\n"
23096 "1:"
23097 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23098 "=b" (ebx),
23099 "=c" (ecx)
23100 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23101 - "D" (&pci_indirect)
23102 + "D" (&pci_indirect),
23103 + "r" (__PCIBIOS_DS)
23104 : "memory");
23105 local_irq_restore(flags);
23106
23107 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23108
23109 switch (len) {
23110 case 1:
23111 - __asm__("lcall *(%%esi); cld\n\t"
23112 + __asm__("movw %w6, %%ds\n\t"
23113 + "lcall *%%ss:(%%esi); cld\n\t"
23114 + "push %%ss\n\t"
23115 + "pop %%ds\n\t"
23116 "jc 1f\n\t"
23117 "xor %%ah, %%ah\n"
23118 "1:"
23119 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23120 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23121 "b" (bx),
23122 "D" ((long)reg),
23123 - "S" (&pci_indirect));
23124 + "S" (&pci_indirect),
23125 + "r" (__PCIBIOS_DS));
23126 /*
23127 * Zero-extend the result beyond 8 bits, do not trust the
23128 * BIOS having done it:
23129 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23130 *value &= 0xff;
23131 break;
23132 case 2:
23133 - __asm__("lcall *(%%esi); cld\n\t"
23134 + __asm__("movw %w6, %%ds\n\t"
23135 + "lcall *%%ss:(%%esi); cld\n\t"
23136 + "push %%ss\n\t"
23137 + "pop %%ds\n\t"
23138 "jc 1f\n\t"
23139 "xor %%ah, %%ah\n"
23140 "1:"
23141 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23142 : "1" (PCIBIOS_READ_CONFIG_WORD),
23143 "b" (bx),
23144 "D" ((long)reg),
23145 - "S" (&pci_indirect));
23146 + "S" (&pci_indirect),
23147 + "r" (__PCIBIOS_DS));
23148 /*
23149 * Zero-extend the result beyond 16 bits, do not trust the
23150 * BIOS having done it:
23151 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23152 *value &= 0xffff;
23153 break;
23154 case 4:
23155 - __asm__("lcall *(%%esi); cld\n\t"
23156 + __asm__("movw %w6, %%ds\n\t"
23157 + "lcall *%%ss:(%%esi); cld\n\t"
23158 + "push %%ss\n\t"
23159 + "pop %%ds\n\t"
23160 "jc 1f\n\t"
23161 "xor %%ah, %%ah\n"
23162 "1:"
23163 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23164 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23165 "b" (bx),
23166 "D" ((long)reg),
23167 - "S" (&pci_indirect));
23168 + "S" (&pci_indirect),
23169 + "r" (__PCIBIOS_DS));
23170 break;
23171 }
23172
23173 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23174
23175 switch (len) {
23176 case 1:
23177 - __asm__("lcall *(%%esi); cld\n\t"
23178 + __asm__("movw %w6, %%ds\n\t"
23179 + "lcall *%%ss:(%%esi); cld\n\t"
23180 + "push %%ss\n\t"
23181 + "pop %%ds\n\t"
23182 "jc 1f\n\t"
23183 "xor %%ah, %%ah\n"
23184 "1:"
23185 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23186 "c" (value),
23187 "b" (bx),
23188 "D" ((long)reg),
23189 - "S" (&pci_indirect));
23190 + "S" (&pci_indirect),
23191 + "r" (__PCIBIOS_DS));
23192 break;
23193 case 2:
23194 - __asm__("lcall *(%%esi); cld\n\t"
23195 + __asm__("movw %w6, %%ds\n\t"
23196 + "lcall *%%ss:(%%esi); cld\n\t"
23197 + "push %%ss\n\t"
23198 + "pop %%ds\n\t"
23199 "jc 1f\n\t"
23200 "xor %%ah, %%ah\n"
23201 "1:"
23202 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23203 "c" (value),
23204 "b" (bx),
23205 "D" ((long)reg),
23206 - "S" (&pci_indirect));
23207 + "S" (&pci_indirect),
23208 + "r" (__PCIBIOS_DS));
23209 break;
23210 case 4:
23211 - __asm__("lcall *(%%esi); cld\n\t"
23212 + __asm__("movw %w6, %%ds\n\t"
23213 + "lcall *%%ss:(%%esi); cld\n\t"
23214 + "push %%ss\n\t"
23215 + "pop %%ds\n\t"
23216 "jc 1f\n\t"
23217 "xor %%ah, %%ah\n"
23218 "1:"
23219 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23220 "c" (value),
23221 "b" (bx),
23222 "D" ((long)reg),
23223 - "S" (&pci_indirect));
23224 + "S" (&pci_indirect),
23225 + "r" (__PCIBIOS_DS));
23226 break;
23227 }
23228
23229 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23230 * Function table for BIOS32 access
23231 */
23232
23233 -static struct pci_raw_ops pci_bios_access = {
23234 +static const struct pci_raw_ops pci_bios_access = {
23235 .read = pci_bios_read,
23236 .write = pci_bios_write
23237 };
23238 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23239 * Try to find PCI BIOS.
23240 */
23241
23242 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23243 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23244 {
23245 union bios32 *check;
23246 unsigned char sum;
23247 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23248
23249 DBG("PCI: Fetching IRQ routing table... ");
23250 __asm__("push %%es\n\t"
23251 + "movw %w8, %%ds\n\t"
23252 "push %%ds\n\t"
23253 "pop %%es\n\t"
23254 - "lcall *(%%esi); cld\n\t"
23255 + "lcall *%%ss:(%%esi); cld\n\t"
23256 "pop %%es\n\t"
23257 + "push %%ss\n\t"
23258 + "pop %%ds\n"
23259 "jc 1f\n\t"
23260 "xor %%ah, %%ah\n"
23261 "1:"
23262 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23263 "1" (0),
23264 "D" ((long) &opt),
23265 "S" (&pci_indirect),
23266 - "m" (opt)
23267 + "m" (opt),
23268 + "r" (__PCIBIOS_DS)
23269 : "memory");
23270 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23271 if (ret & 0xff00)
23272 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23273 {
23274 int ret;
23275
23276 - __asm__("lcall *(%%esi); cld\n\t"
23277 + __asm__("movw %w5, %%ds\n\t"
23278 + "lcall *%%ss:(%%esi); cld\n\t"
23279 + "push %%ss\n\t"
23280 + "pop %%ds\n"
23281 "jc 1f\n\t"
23282 "xor %%ah, %%ah\n"
23283 "1:"
23284 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23285 : "0" (PCIBIOS_SET_PCI_HW_INT),
23286 "b" ((dev->bus->number << 8) | dev->devfn),
23287 "c" ((irq << 8) | (pin + 10)),
23288 - "S" (&pci_indirect));
23289 + "S" (&pci_indirect),
23290 + "r" (__PCIBIOS_DS));
23291 return !(ret & 0xff00);
23292 }
23293 EXPORT_SYMBOL(pcibios_set_irq_routing);
23294 diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23295 --- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23296 +++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23297 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23298 static void fix_processor_context(void)
23299 {
23300 int cpu = smp_processor_id();
23301 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23302 + struct tss_struct *t = init_tss + cpu;
23303
23304 set_tss_desc(cpu, t); /*
23305 * This just modifies memory; should not be
23306 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23307 */
23308
23309 #ifdef CONFIG_X86_64
23310 + pax_open_kernel();
23311 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23312 + pax_close_kernel();
23313
23314 syscall_init(); /* This sets MSR_*STAR and related */
23315 #endif
23316 diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23317 --- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23318 +++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23319 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23320 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23321 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23322
23323 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23324 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23325 GCOV_PROFILE := n
23326
23327 #
23328 diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23329 --- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23330 +++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23331 @@ -22,24 +22,48 @@
23332 #include <asm/hpet.h>
23333 #include <asm/unistd.h>
23334 #include <asm/io.h>
23335 +#include <asm/fixmap.h>
23336 #include "vextern.h"
23337
23338 #define gtod vdso_vsyscall_gtod_data
23339
23340 +notrace noinline long __vdso_fallback_time(long *t)
23341 +{
23342 + long secs;
23343 + asm volatile("syscall"
23344 + : "=a" (secs)
23345 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23346 + return secs;
23347 +}
23348 +
23349 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23350 {
23351 long ret;
23352 asm("syscall" : "=a" (ret) :
23353 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23354 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23355 return ret;
23356 }
23357
23358 +notrace static inline cycle_t __vdso_vread_hpet(void)
23359 +{
23360 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23361 +}
23362 +
23363 +notrace static inline cycle_t __vdso_vread_tsc(void)
23364 +{
23365 + cycle_t ret = (cycle_t)vget_cycles();
23366 +
23367 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23368 +}
23369 +
23370 notrace static inline long vgetns(void)
23371 {
23372 long v;
23373 - cycles_t (*vread)(void);
23374 - vread = gtod->clock.vread;
23375 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23376 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23377 + v = __vdso_vread_tsc();
23378 + else
23379 + v = __vdso_vread_hpet();
23380 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23381 return (v * gtod->clock.mult) >> gtod->clock.shift;
23382 }
23383
23384 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23385
23386 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23387 {
23388 - if (likely(gtod->sysctl_enabled))
23389 + if (likely(gtod->sysctl_enabled &&
23390 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23391 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23392 switch (clock) {
23393 case CLOCK_REALTIME:
23394 if (likely(gtod->clock.vread))
23395 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23396 int clock_gettime(clockid_t, struct timespec *)
23397 __attribute__((weak, alias("__vdso_clock_gettime")));
23398
23399 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23400 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23401 {
23402 long ret;
23403 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23404 + asm("syscall" : "=a" (ret) :
23405 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23406 + return ret;
23407 +}
23408 +
23409 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23410 +{
23411 + if (likely(gtod->sysctl_enabled &&
23412 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23413 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23414 + {
23415 if (likely(tv != NULL)) {
23416 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23417 offsetof(struct timespec, tv_nsec) ||
23418 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23419 }
23420 return 0;
23421 }
23422 - asm("syscall" : "=a" (ret) :
23423 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23424 - return ret;
23425 + return __vdso_fallback_gettimeofday(tv, tz);
23426 }
23427 int gettimeofday(struct timeval *, struct timezone *)
23428 __attribute__((weak, alias("__vdso_gettimeofday")));
23429 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23430 --- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23431 +++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23432 @@ -25,6 +25,7 @@
23433 #include <asm/tlbflush.h>
23434 #include <asm/vdso.h>
23435 #include <asm/proto.h>
23436 +#include <asm/mman.h>
23437
23438 enum {
23439 VDSO_DISABLED = 0,
23440 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23441 void enable_sep_cpu(void)
23442 {
23443 int cpu = get_cpu();
23444 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23445 + struct tss_struct *tss = init_tss + cpu;
23446
23447 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23448 put_cpu();
23449 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23450 gate_vma.vm_start = FIXADDR_USER_START;
23451 gate_vma.vm_end = FIXADDR_USER_END;
23452 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23453 - gate_vma.vm_page_prot = __P101;
23454 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23455 /*
23456 * Make sure the vDSO gets into every core dump.
23457 * Dumping its contents makes post-mortem fully interpretable later
23458 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23459 if (compat)
23460 addr = VDSO_HIGH_BASE;
23461 else {
23462 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23463 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23464 if (IS_ERR_VALUE(addr)) {
23465 ret = addr;
23466 goto up_fail;
23467 }
23468 }
23469
23470 - current->mm->context.vdso = (void *)addr;
23471 + current->mm->context.vdso = addr;
23472
23473 if (compat_uses_vma || !compat) {
23474 /*
23475 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23476 }
23477
23478 current_thread_info()->sysenter_return =
23479 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23480 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23481
23482 up_fail:
23483 if (ret)
23484 - current->mm->context.vdso = NULL;
23485 + current->mm->context.vdso = 0;
23486
23487 up_write(&mm->mmap_sem);
23488
23489 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23490
23491 const char *arch_vma_name(struct vm_area_struct *vma)
23492 {
23493 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23494 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23495 return "[vdso]";
23496 +
23497 +#ifdef CONFIG_PAX_SEGMEXEC
23498 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23499 + return "[vdso]";
23500 +#endif
23501 +
23502 return NULL;
23503 }
23504
23505 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23506 struct mm_struct *mm = tsk->mm;
23507
23508 /* Check to see if this task was created in compat vdso mode */
23509 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23510 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23511 return &gate_vma;
23512 return NULL;
23513 }
23514 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23515 --- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23516 +++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23517 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23518 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23519 #include "vextern.h"
23520 #undef VEXTERN
23521 +
23522 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23523 +VEXTERN(fallback_gettimeofday)
23524 +VEXTERN(fallback_time)
23525 +VEXTERN(getcpu)
23526 +#undef VEXTERN
23527 diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23528 --- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23529 +++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23530 @@ -11,6 +11,5 @@
23531 put into vextern.h and be referenced as a pointer with vdso prefix.
23532 The main kernel later fills in the values. */
23533
23534 -VEXTERN(jiffies)
23535 VEXTERN(vgetcpu_mode)
23536 VEXTERN(vsyscall_gtod_data)
23537 diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23538 --- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23539 +++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23540 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23541 if (!vbase)
23542 goto oom;
23543
23544 - if (memcmp(vbase, "\177ELF", 4)) {
23545 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
23546 printk("VDSO: I'm broken; not ELF\n");
23547 vdso_enabled = 0;
23548 }
23549 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23550 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23551 #include "vextern.h"
23552 #undef VEXTERN
23553 + vunmap(vbase);
23554 return 0;
23555
23556 oom:
23557 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23558 goto up_fail;
23559 }
23560
23561 - current->mm->context.vdso = (void *)addr;
23562 + current->mm->context.vdso = addr;
23563
23564 ret = install_special_mapping(mm, addr, vdso_size,
23565 VM_READ|VM_EXEC|
23566 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23567 VM_ALWAYSDUMP,
23568 vdso_pages);
23569 if (ret) {
23570 - current->mm->context.vdso = NULL;
23571 + current->mm->context.vdso = 0;
23572 goto up_fail;
23573 }
23574
23575 @@ -132,10 +133,3 @@ up_fail:
23576 up_write(&mm->mmap_sem);
23577 return ret;
23578 }
23579 -
23580 -static __init int vdso_setup(char *s)
23581 -{
23582 - vdso_enabled = simple_strtoul(s, NULL, 0);
23583 - return 0;
23584 -}
23585 -__setup("vdso=", vdso_setup);
23586 diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23587 --- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23588 +++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23589 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23590
23591 struct shared_info xen_dummy_shared_info;
23592
23593 -void *xen_initial_gdt;
23594 -
23595 /*
23596 * Point at some empty memory to start with. We map the real shared_info
23597 * page as soon as fixmap is up and running.
23598 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23599
23600 preempt_disable();
23601
23602 - start = __get_cpu_var(idt_desc).address;
23603 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23604 end = start + __get_cpu_var(idt_desc).size + 1;
23605
23606 xen_mc_flush();
23607 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23608 #endif
23609 };
23610
23611 -static void xen_reboot(int reason)
23612 +static __noreturn void xen_reboot(int reason)
23613 {
23614 struct sched_shutdown r = { .reason = reason };
23615
23616 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23617 BUG();
23618 }
23619
23620 -static void xen_restart(char *msg)
23621 +static __noreturn void xen_restart(char *msg)
23622 {
23623 xen_reboot(SHUTDOWN_reboot);
23624 }
23625
23626 -static void xen_emergency_restart(void)
23627 +static __noreturn void xen_emergency_restart(void)
23628 {
23629 xen_reboot(SHUTDOWN_reboot);
23630 }
23631
23632 -static void xen_machine_halt(void)
23633 +static __noreturn void xen_machine_halt(void)
23634 {
23635 xen_reboot(SHUTDOWN_poweroff);
23636 }
23637 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23638 */
23639 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23640
23641 -#ifdef CONFIG_X86_64
23642 /* Work out if we support NX */
23643 - check_efer();
23644 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23645 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23646 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23647 + unsigned l, h;
23648 +
23649 +#ifdef CONFIG_X86_PAE
23650 + nx_enabled = 1;
23651 +#endif
23652 + __supported_pte_mask |= _PAGE_NX;
23653 + rdmsr(MSR_EFER, l, h);
23654 + l |= EFER_NX;
23655 + wrmsr(MSR_EFER, l, h);
23656 + }
23657 #endif
23658
23659 xen_setup_features();
23660 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23661
23662 machine_ops = xen_machine_ops;
23663
23664 - /*
23665 - * The only reliable way to retain the initial address of the
23666 - * percpu gdt_page is to remember it here, so we can go and
23667 - * mark it RW later, when the initial percpu area is freed.
23668 - */
23669 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23670 -
23671 xen_smp_init();
23672
23673 pgd = (pgd_t *)xen_start_info->pt_base;
23674 diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23675 --- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23676 +++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23677 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23678 convert_pfn_mfn(init_level4_pgt);
23679 convert_pfn_mfn(level3_ident_pgt);
23680 convert_pfn_mfn(level3_kernel_pgt);
23681 + convert_pfn_mfn(level3_vmalloc_pgt);
23682 + convert_pfn_mfn(level3_vmemmap_pgt);
23683
23684 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23685 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23686 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23687 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23688 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23689 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23690 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23691 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23692 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23693 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23694 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23695 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23696
23697 diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23698 --- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23699 +++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23700 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23701 {
23702 BUG_ON(smp_processor_id() != 0);
23703 native_smp_prepare_boot_cpu();
23704 -
23705 - /* We've switched to the "real" per-cpu gdt, so make sure the
23706 - old memory can be recycled */
23707 - make_lowmem_page_readwrite(xen_initial_gdt);
23708 -
23709 xen_setup_vcpu_info_placement();
23710 }
23711
23712 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23713 gdt = get_cpu_gdt_table(cpu);
23714
23715 ctxt->flags = VGCF_IN_KERNEL;
23716 - ctxt->user_regs.ds = __USER_DS;
23717 - ctxt->user_regs.es = __USER_DS;
23718 + ctxt->user_regs.ds = __KERNEL_DS;
23719 + ctxt->user_regs.es = __KERNEL_DS;
23720 ctxt->user_regs.ss = __KERNEL_DS;
23721 #ifdef CONFIG_X86_32
23722 ctxt->user_regs.fs = __KERNEL_PERCPU;
23723 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23724 + savesegment(gs, ctxt->user_regs.gs);
23725 #else
23726 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23727 #endif
23728 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23729 int rc;
23730
23731 per_cpu(current_task, cpu) = idle;
23732 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23733 #ifdef CONFIG_X86_32
23734 irq_ctx_init(cpu);
23735 #else
23736 clear_tsk_thread_flag(idle, TIF_FORK);
23737 - per_cpu(kernel_stack, cpu) =
23738 - (unsigned long)task_stack_page(idle) -
23739 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23740 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23741 #endif
23742 xen_setup_runstate_info(cpu);
23743 xen_setup_timer(cpu);
23744 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
23745 --- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23746 +++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23747 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23748 ESP_OFFSET=4 # bytes pushed onto stack
23749
23750 /*
23751 - * Store vcpu_info pointer for easy access. Do it this way to
23752 - * avoid having to reload %fs
23753 + * Store vcpu_info pointer for easy access.
23754 */
23755 #ifdef CONFIG_SMP
23756 - GET_THREAD_INFO(%eax)
23757 - movl TI_cpu(%eax), %eax
23758 - movl __per_cpu_offset(,%eax,4), %eax
23759 - mov per_cpu__xen_vcpu(%eax), %eax
23760 + push %fs
23761 + mov $(__KERNEL_PERCPU), %eax
23762 + mov %eax, %fs
23763 + mov PER_CPU_VAR(xen_vcpu), %eax
23764 + pop %fs
23765 #else
23766 movl per_cpu__xen_vcpu, %eax
23767 #endif
23768 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
23769 --- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23770 +++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23771 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23772 #ifdef CONFIG_X86_32
23773 mov %esi,xen_start_info
23774 mov $init_thread_union+THREAD_SIZE,%esp
23775 +#ifdef CONFIG_SMP
23776 + movl $cpu_gdt_table,%edi
23777 + movl $__per_cpu_load,%eax
23778 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23779 + rorl $16,%eax
23780 + movb %al,__KERNEL_PERCPU + 4(%edi)
23781 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23782 + movl $__per_cpu_end - 1,%eax
23783 + subl $__per_cpu_start,%eax
23784 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23785 +#endif
23786 #else
23787 mov %rsi,xen_start_info
23788 mov $init_thread_union+THREAD_SIZE,%rsp
23789 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
23790 --- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23791 +++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23792 @@ -10,8 +10,6 @@
23793 extern const char xen_hypervisor_callback[];
23794 extern const char xen_failsafe_callback[];
23795
23796 -extern void *xen_initial_gdt;
23797 -
23798 struct trap_info;
23799 void xen_copy_trap_info(struct trap_info *traps);
23800
23801 diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
23802 --- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23803 +++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23804 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23805 NULL,
23806 };
23807
23808 -static struct sysfs_ops integrity_ops = {
23809 +static const struct sysfs_ops integrity_ops = {
23810 .show = &integrity_attr_show,
23811 .store = &integrity_attr_store,
23812 };
23813 diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
23814 --- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23815 +++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23816 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23817 }
23818 EXPORT_SYMBOL(blk_iopoll_complete);
23819
23820 -static void blk_iopoll_softirq(struct softirq_action *h)
23821 +static void blk_iopoll_softirq(void)
23822 {
23823 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23824 int rearm = 0, budget = blk_iopoll_budget;
23825 diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
23826 --- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23827 +++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23828 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23829 * direct dma. else, set up kernel bounce buffers
23830 */
23831 uaddr = (unsigned long) ubuf;
23832 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23833 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23834 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23835 else
23836 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23837 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23838 for (i = 0; i < iov_count; i++) {
23839 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23840
23841 + if (!iov[i].iov_len)
23842 + return -EINVAL;
23843 +
23844 if (uaddr & queue_dma_alignment(q)) {
23845 unaligned = 1;
23846 break;
23847 }
23848 - if (!iov[i].iov_len)
23849 - return -EINVAL;
23850 }
23851
23852 if (unaligned || (q->dma_pad_mask & len) || map_data)
23853 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23854 if (!len || !kbuf)
23855 return -EINVAL;
23856
23857 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23858 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23859 if (do_copy)
23860 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23861 else
23862 diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
23863 --- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23864 +++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23865 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23866 * Softirq action handler - move entries to local list and loop over them
23867 * while passing them to the queue registered handler.
23868 */
23869 -static void blk_done_softirq(struct softirq_action *h)
23870 +static void blk_done_softirq(void)
23871 {
23872 struct list_head *cpu_list, local_list;
23873
23874 diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
23875 --- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23876 +++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23877 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23878 kmem_cache_free(blk_requestq_cachep, q);
23879 }
23880
23881 -static struct sysfs_ops queue_sysfs_ops = {
23882 +static const struct sysfs_ops queue_sysfs_ops = {
23883 .show = queue_attr_show,
23884 .store = queue_attr_store,
23885 };
23886 diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
23887 --- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23888 +++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23889 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23890 struct sg_io_v4 *hdr, struct bsg_device *bd,
23891 fmode_t has_write_perm)
23892 {
23893 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23894 + unsigned char *cmdptr;
23895 +
23896 if (hdr->request_len > BLK_MAX_CDB) {
23897 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23898 if (!rq->cmd)
23899 return -ENOMEM;
23900 - }
23901 + cmdptr = rq->cmd;
23902 + } else
23903 + cmdptr = tmpcmd;
23904
23905 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23906 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23907 hdr->request_len))
23908 return -EFAULT;
23909
23910 + if (cmdptr != rq->cmd)
23911 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23912 +
23913 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23914 if (blk_verify_command(rq->cmd, has_write_perm))
23915 return -EPERM;
23916 diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
23917 --- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23918 +++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23919 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23920 return error;
23921 }
23922
23923 -static struct sysfs_ops elv_sysfs_ops = {
23924 +static const struct sysfs_ops elv_sysfs_ops = {
23925 .show = elv_attr_show,
23926 .store = elv_attr_store,
23927 };
23928 diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
23929 --- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23930 +++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23931 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23932 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23933 struct sg_io_hdr *hdr, fmode_t mode)
23934 {
23935 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23936 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23937 + unsigned char *cmdptr;
23938 +
23939 + if (rq->cmd != rq->__cmd)
23940 + cmdptr = rq->cmd;
23941 + else
23942 + cmdptr = tmpcmd;
23943 +
23944 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23945 return -EFAULT;
23946 +
23947 + if (cmdptr != rq->cmd)
23948 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23949 +
23950 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23951 return -EPERM;
23952
23953 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23954 int err;
23955 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23956 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23957 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23958 + unsigned char *cmdptr;
23959
23960 if (!sic)
23961 return -EINVAL;
23962 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23963 */
23964 err = -EFAULT;
23965 rq->cmd_len = cmdlen;
23966 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23967 +
23968 + if (rq->cmd != rq->__cmd)
23969 + cmdptr = rq->cmd;
23970 + else
23971 + cmdptr = tmpcmd;
23972 +
23973 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23974 goto error;
23975
23976 + if (rq->cmd != cmdptr)
23977 + memcpy(rq->cmd, cmdptr, cmdlen);
23978 +
23979 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23980 goto error;
23981
23982 diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
23983 --- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
23984 +++ linux-2.6.32.45/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
23985 @@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
23986 struct cryptd_queue *queue;
23987
23988 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
23989 - rctx->complete = req->base.complete;
23990 + *(void **)&rctx->complete = req->base.complete;
23991 req->base.complete = complete;
23992
23993 return cryptd_enqueue_request(queue, &req->base);
23994 diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
23995 --- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
23996 +++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
23997 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23998 for (i = 0; i < 7; ++i)
23999 gf128mul_x_lle(&p[i + 1], &p[i]);
24000
24001 - memset(r, 0, sizeof(r));
24002 + memset(r, 0, sizeof(*r));
24003 for (i = 0;;) {
24004 u8 ch = ((u8 *)b)[15 - i];
24005
24006 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24007 for (i = 0; i < 7; ++i)
24008 gf128mul_x_bbe(&p[i + 1], &p[i]);
24009
24010 - memset(r, 0, sizeof(r));
24011 + memset(r, 0, sizeof(*r));
24012 for (i = 0;;) {
24013 u8 ch = ((u8 *)b)[i];
24014
24015 diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24016 --- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24017 +++ linux-2.6.32.45/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
24018 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
24019 u32 r0,r1,r2,r3,r4;
24020 int i;
24021
24022 + pax_track_stack();
24023 +
24024 /* Copy key, add padding */
24025
24026 for (i = 0; i < keylen; ++i)
24027 diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24028 --- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24029 +++ linux-2.6.32.45/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
24030 @@ -1,13 +1,16 @@
24031 *.a
24032 *.aux
24033 *.bin
24034 +*.cis
24035 *.cpio
24036 *.csp
24037 +*.dbg
24038 *.dsp
24039 *.dvi
24040 *.elf
24041 *.eps
24042 *.fw
24043 +*.gcno
24044 *.gen.S
24045 *.gif
24046 *.grep
24047 @@ -38,8 +41,10 @@
24048 *.tab.h
24049 *.tex
24050 *.ver
24051 +*.vim
24052 *.xml
24053 *_MODULES
24054 +*_reg_safe.h
24055 *_vga16.c
24056 *~
24057 *.9
24058 @@ -49,11 +54,16 @@
24059 53c700_d.h
24060 CVS
24061 ChangeSet
24062 +GPATH
24063 +GRTAGS
24064 +GSYMS
24065 +GTAGS
24066 Image
24067 Kerntypes
24068 Module.markers
24069 Module.symvers
24070 PENDING
24071 +PERF*
24072 SCCS
24073 System.map*
24074 TAGS
24075 @@ -76,7 +86,11 @@ btfixupprep
24076 build
24077 bvmlinux
24078 bzImage*
24079 +capability_names.h
24080 +capflags.c
24081 classlist.h*
24082 +clut_vga16.c
24083 +common-cmds.h
24084 comp*.log
24085 compile.h*
24086 conf
24087 @@ -103,13 +117,14 @@ gen_crc32table
24088 gen_init_cpio
24089 genksyms
24090 *_gray256.c
24091 +hash
24092 ihex2fw
24093 ikconfig.h*
24094 initramfs_data.cpio
24095 +initramfs_data.cpio.bz2
24096 initramfs_data.cpio.gz
24097 initramfs_list
24098 kallsyms
24099 -kconfig
24100 keywords.c
24101 ksym.c*
24102 ksym.h*
24103 @@ -133,7 +148,9 @@ mkboot
24104 mkbugboot
24105 mkcpustr
24106 mkdep
24107 +mkpiggy
24108 mkprep
24109 +mkregtable
24110 mktables
24111 mktree
24112 modpost
24113 @@ -149,6 +166,7 @@ patches*
24114 pca200e.bin
24115 pca200e_ecd.bin2
24116 piggy.gz
24117 +piggy.S
24118 piggyback
24119 pnmtologo
24120 ppc_defs.h*
24121 @@ -157,12 +175,15 @@ qconf
24122 raid6altivec*.c
24123 raid6int*.c
24124 raid6tables.c
24125 +regdb.c
24126 relocs
24127 +rlim_names.h
24128 series
24129 setup
24130 setup.bin
24131 setup.elf
24132 sImage
24133 +slabinfo
24134 sm_tbl*
24135 split-include
24136 syscalltab.h
24137 @@ -186,14 +207,20 @@ version.h*
24138 vmlinux
24139 vmlinux-*
24140 vmlinux.aout
24141 +vmlinux.bin.all
24142 +vmlinux.bin.bz2
24143 vmlinux.lds
24144 +vmlinux.relocs
24145 +voffset.h
24146 vsyscall.lds
24147 vsyscall_32.lds
24148 wanxlfw.inc
24149 uImage
24150 unifdef
24151 +utsrelease.h
24152 wakeup.bin
24153 wakeup.elf
24154 wakeup.lds
24155 zImage*
24156 zconf.hash.c
24157 +zoffset.h
24158 diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24159 --- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24160 +++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24161 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24162 the specified number of seconds. This is to be used if
24163 your oopses keep scrolling off the screen.
24164
24165 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24166 + virtualization environments that don't cope well with the
24167 + expand down segment used by UDEREF on X86-32 or the frequent
24168 + page table updates on X86-64.
24169 +
24170 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24171 +
24172 pcbit= [HW,ISDN]
24173
24174 pcd. [PARIDE]
24175 diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24176 --- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24177 +++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24178 @@ -30,7 +30,7 @@
24179 #include <acpi/acpi_bus.h>
24180 #include <acpi/acpi_drivers.h>
24181
24182 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24183 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24184 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24185 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24186 static DEFINE_MUTEX(isolated_cpus_lock);
24187 diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24188 --- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24189 +++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24190 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24191 }
24192
24193 static struct battery_file {
24194 - struct file_operations ops;
24195 + const struct file_operations ops;
24196 mode_t mode;
24197 const char *name;
24198 } acpi_battery_file[] = {
24199 diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24200 --- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24201 +++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24202 @@ -77,7 +77,7 @@ struct dock_dependent_device {
24203 struct list_head list;
24204 struct list_head hotplug_list;
24205 acpi_handle handle;
24206 - struct acpi_dock_ops *ops;
24207 + const struct acpi_dock_ops *ops;
24208 void *context;
24209 };
24210
24211 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24212 * the dock driver after _DCK is executed.
24213 */
24214 int
24215 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24216 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24217 void *context)
24218 {
24219 struct dock_dependent_device *dd;
24220 diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24221 --- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24222 +++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24223 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24224 void __iomem *virt_addr;
24225
24226 virt_addr = ioremap(phys_addr, width);
24227 + if (!virt_addr)
24228 + return AE_NO_MEMORY;
24229 if (!value)
24230 value = &dummy;
24231
24232 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24233 void __iomem *virt_addr;
24234
24235 virt_addr = ioremap(phys_addr, width);
24236 + if (!virt_addr)
24237 + return AE_NO_MEMORY;
24238
24239 switch (width) {
24240 case 8:
24241 diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24242 --- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24243 +++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24244 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24245 return res;
24246
24247 temp /= 1000;
24248 - if (temp < 0)
24249 - return -EINVAL;
24250
24251 mutex_lock(&resource->lock);
24252 resource->trip[attr->index - 7] = temp;
24253 diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24254 --- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24255 +++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24256 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24257 size_t count, loff_t * ppos)
24258 {
24259 struct list_head *node, *next;
24260 - char strbuf[5];
24261 - char str[5] = "";
24262 - unsigned int len = count;
24263 + char strbuf[5] = {0};
24264 struct acpi_device *found_dev = NULL;
24265
24266 - if (len > 4)
24267 - len = 4;
24268 - if (len < 0)
24269 - return -EFAULT;
24270 + if (count > 4)
24271 + count = 4;
24272
24273 - if (copy_from_user(strbuf, buffer, len))
24274 + if (copy_from_user(strbuf, buffer, count))
24275 return -EFAULT;
24276 - strbuf[len] = '\0';
24277 - sscanf(strbuf, "%s", str);
24278 + strbuf[count] = '\0';
24279
24280 mutex_lock(&acpi_device_lock);
24281 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24282 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24283 if (!dev->wakeup.flags.valid)
24284 continue;
24285
24286 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24287 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24288 dev->wakeup.state.enabled =
24289 dev->wakeup.state.enabled ? 0 : 1;
24290 found_dev = dev;
24291 diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24292 --- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24293 +++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24294 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24295 return 0;
24296 }
24297
24298 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24299 + BUG_ON(pr->id >= nr_cpu_ids);
24300
24301 /*
24302 * Buggy BIOS check
24303 diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24304 --- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24305 +++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24306 @@ -17,7 +17,7 @@
24307
24308 #define PREFIX "ACPI: "
24309
24310 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24311 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24312 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24313
24314 struct acpi_smb_hc {
24315 diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24316 --- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24317 +++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24318 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24319 }
24320 }
24321
24322 -static struct platform_suspend_ops acpi_suspend_ops = {
24323 +static const struct platform_suspend_ops acpi_suspend_ops = {
24324 .valid = acpi_suspend_state_valid,
24325 .begin = acpi_suspend_begin,
24326 .prepare_late = acpi_pm_prepare,
24327 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24328 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24329 * been requested.
24330 */
24331 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24332 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24333 .valid = acpi_suspend_state_valid,
24334 .begin = acpi_suspend_begin_old,
24335 .prepare_late = acpi_pm_disable_gpes,
24336 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24337 acpi_enable_all_runtime_gpes();
24338 }
24339
24340 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24341 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24342 .begin = acpi_hibernation_begin,
24343 .end = acpi_pm_end,
24344 .pre_snapshot = acpi_hibernation_pre_snapshot,
24345 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24346 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24347 * been requested.
24348 */
24349 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24350 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24351 .begin = acpi_hibernation_begin_old,
24352 .end = acpi_pm_end,
24353 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24354 diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24355 --- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24356 +++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24357 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24358 vd->brightness->levels[request_level]);
24359 }
24360
24361 -static struct backlight_ops acpi_backlight_ops = {
24362 +static const struct backlight_ops acpi_backlight_ops = {
24363 .get_brightness = acpi_video_get_brightness,
24364 .update_status = acpi_video_set_brightness,
24365 };
24366 diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24367 --- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24368 +++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24369 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24370 .sdev_attrs = ahci_sdev_attrs,
24371 };
24372
24373 -static struct ata_port_operations ahci_ops = {
24374 +static const struct ata_port_operations ahci_ops = {
24375 .inherits = &sata_pmp_port_ops,
24376
24377 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24378 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24379 .port_stop = ahci_port_stop,
24380 };
24381
24382 -static struct ata_port_operations ahci_vt8251_ops = {
24383 +static const struct ata_port_operations ahci_vt8251_ops = {
24384 .inherits = &ahci_ops,
24385 .hardreset = ahci_vt8251_hardreset,
24386 };
24387
24388 -static struct ata_port_operations ahci_p5wdh_ops = {
24389 +static const struct ata_port_operations ahci_p5wdh_ops = {
24390 .inherits = &ahci_ops,
24391 .hardreset = ahci_p5wdh_hardreset,
24392 };
24393
24394 -static struct ata_port_operations ahci_sb600_ops = {
24395 +static const struct ata_port_operations ahci_sb600_ops = {
24396 .inherits = &ahci_ops,
24397 .softreset = ahci_sb600_softreset,
24398 .pmp_softreset = ahci_sb600_softreset,
24399 diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24400 --- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24401 +++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24402 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24403 ATA_BMDMA_SHT(DRV_NAME),
24404 };
24405
24406 -static struct ata_port_operations generic_port_ops = {
24407 +static const struct ata_port_operations generic_port_ops = {
24408 .inherits = &ata_bmdma_port_ops,
24409 .cable_detect = ata_cable_unknown,
24410 .set_mode = generic_set_mode,
24411 diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24412 --- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24413 +++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24414 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24415 ATA_BMDMA_SHT(DRV_NAME),
24416 };
24417
24418 -static struct ata_port_operations piix_pata_ops = {
24419 +static const struct ata_port_operations piix_pata_ops = {
24420 .inherits = &ata_bmdma32_port_ops,
24421 .cable_detect = ata_cable_40wire,
24422 .set_piomode = piix_set_piomode,
24423 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24424 .prereset = piix_pata_prereset,
24425 };
24426
24427 -static struct ata_port_operations piix_vmw_ops = {
24428 +static const struct ata_port_operations piix_vmw_ops = {
24429 .inherits = &piix_pata_ops,
24430 .bmdma_status = piix_vmw_bmdma_status,
24431 };
24432
24433 -static struct ata_port_operations ich_pata_ops = {
24434 +static const struct ata_port_operations ich_pata_ops = {
24435 .inherits = &piix_pata_ops,
24436 .cable_detect = ich_pata_cable_detect,
24437 .set_dmamode = ich_set_dmamode,
24438 };
24439
24440 -static struct ata_port_operations piix_sata_ops = {
24441 +static const struct ata_port_operations piix_sata_ops = {
24442 .inherits = &ata_bmdma_port_ops,
24443 };
24444
24445 -static struct ata_port_operations piix_sidpr_sata_ops = {
24446 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24447 .inherits = &piix_sata_ops,
24448 .hardreset = sata_std_hardreset,
24449 .scr_read = piix_sidpr_scr_read,
24450 diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24451 --- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24452 +++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24453 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24454 ata_acpi_uevent(dev->link->ap, dev, event);
24455 }
24456
24457 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24458 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24459 .handler = ata_acpi_dev_notify_dock,
24460 .uevent = ata_acpi_dev_uevent,
24461 };
24462
24463 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24464 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24465 .handler = ata_acpi_ap_notify_dock,
24466 .uevent = ata_acpi_ap_uevent,
24467 };
24468 diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24469 --- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24470 +++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24471 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24472 struct ata_port *ap;
24473 unsigned int tag;
24474
24475 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24476 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24477 ap = qc->ap;
24478
24479 qc->flags = 0;
24480 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24481 struct ata_port *ap;
24482 struct ata_link *link;
24483
24484 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24485 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24486 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24487 ap = qc->ap;
24488 link = qc->dev->link;
24489 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24490 * LOCKING:
24491 * None.
24492 */
24493 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24494 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24495 {
24496 static DEFINE_SPINLOCK(lock);
24497 const struct ata_port_operations *cur;
24498 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24499 return;
24500
24501 spin_lock(&lock);
24502 + pax_open_kernel();
24503
24504 for (cur = ops->inherits; cur; cur = cur->inherits) {
24505 void **inherit = (void **)cur;
24506 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24507 if (IS_ERR(*pp))
24508 *pp = NULL;
24509
24510 - ops->inherits = NULL;
24511 + *(struct ata_port_operations **)&ops->inherits = NULL;
24512
24513 + pax_close_kernel();
24514 spin_unlock(&lock);
24515 }
24516
24517 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24518 */
24519 /* KILLME - the only user left is ipr */
24520 void ata_host_init(struct ata_host *host, struct device *dev,
24521 - unsigned long flags, struct ata_port_operations *ops)
24522 + unsigned long flags, const struct ata_port_operations *ops)
24523 {
24524 spin_lock_init(&host->lock);
24525 host->dev = dev;
24526 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24527 /* truly dummy */
24528 }
24529
24530 -struct ata_port_operations ata_dummy_port_ops = {
24531 +const struct ata_port_operations ata_dummy_port_ops = {
24532 .qc_prep = ata_noop_qc_prep,
24533 .qc_issue = ata_dummy_qc_issue,
24534 .error_handler = ata_dummy_error_handler,
24535 diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24536 --- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24537 +++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24538 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24539 {
24540 struct ata_link *link;
24541
24542 + pax_track_stack();
24543 +
24544 ata_for_each_link(link, ap, HOST_FIRST)
24545 ata_eh_link_report(link);
24546 }
24547 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24548 */
24549 void ata_std_error_handler(struct ata_port *ap)
24550 {
24551 - struct ata_port_operations *ops = ap->ops;
24552 + const struct ata_port_operations *ops = ap->ops;
24553 ata_reset_fn_t hardreset = ops->hardreset;
24554
24555 /* ignore built-in hardreset if SCR access is not available */
24556 diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24557 --- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24558 +++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24559 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24560 */
24561 static int sata_pmp_eh_recover(struct ata_port *ap)
24562 {
24563 - struct ata_port_operations *ops = ap->ops;
24564 + const struct ata_port_operations *ops = ap->ops;
24565 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24566 struct ata_link *pmp_link = &ap->link;
24567 struct ata_device *pmp_dev = pmp_link->device;
24568 diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24569 --- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24570 +++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24571 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24572 ATA_BMDMA_SHT(DRV_NAME),
24573 };
24574
24575 -static struct ata_port_operations pacpi_ops = {
24576 +static const struct ata_port_operations pacpi_ops = {
24577 .inherits = &ata_bmdma_port_ops,
24578 .qc_issue = pacpi_qc_issue,
24579 .cable_detect = pacpi_cable_detect,
24580 diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24581 --- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24582 +++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24583 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24584 * Port operations for PIO only ALi
24585 */
24586
24587 -static struct ata_port_operations ali_early_port_ops = {
24588 +static const struct ata_port_operations ali_early_port_ops = {
24589 .inherits = &ata_sff_port_ops,
24590 .cable_detect = ata_cable_40wire,
24591 .set_piomode = ali_set_piomode,
24592 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24593 * Port operations for DMA capable ALi without cable
24594 * detect
24595 */
24596 -static struct ata_port_operations ali_20_port_ops = {
24597 +static const struct ata_port_operations ali_20_port_ops = {
24598 .inherits = &ali_dma_base_ops,
24599 .cable_detect = ata_cable_40wire,
24600 .mode_filter = ali_20_filter,
24601 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24602 /*
24603 * Port operations for DMA capable ALi with cable detect
24604 */
24605 -static struct ata_port_operations ali_c2_port_ops = {
24606 +static const struct ata_port_operations ali_c2_port_ops = {
24607 .inherits = &ali_dma_base_ops,
24608 .check_atapi_dma = ali_check_atapi_dma,
24609 .cable_detect = ali_c2_cable_detect,
24610 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24611 /*
24612 * Port operations for DMA capable ALi with cable detect
24613 */
24614 -static struct ata_port_operations ali_c4_port_ops = {
24615 +static const struct ata_port_operations ali_c4_port_ops = {
24616 .inherits = &ali_dma_base_ops,
24617 .check_atapi_dma = ali_check_atapi_dma,
24618 .cable_detect = ali_c2_cable_detect,
24619 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24620 /*
24621 * Port operations for DMA capable ALi with cable detect and LBA48
24622 */
24623 -static struct ata_port_operations ali_c5_port_ops = {
24624 +static const struct ata_port_operations ali_c5_port_ops = {
24625 .inherits = &ali_dma_base_ops,
24626 .check_atapi_dma = ali_check_atapi_dma,
24627 .dev_config = ali_warn_atapi_dma,
24628 diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24629 --- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24630 +++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24631 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24632 .prereset = amd_pre_reset,
24633 };
24634
24635 -static struct ata_port_operations amd33_port_ops = {
24636 +static const struct ata_port_operations amd33_port_ops = {
24637 .inherits = &amd_base_port_ops,
24638 .cable_detect = ata_cable_40wire,
24639 .set_piomode = amd33_set_piomode,
24640 .set_dmamode = amd33_set_dmamode,
24641 };
24642
24643 -static struct ata_port_operations amd66_port_ops = {
24644 +static const struct ata_port_operations amd66_port_ops = {
24645 .inherits = &amd_base_port_ops,
24646 .cable_detect = ata_cable_unknown,
24647 .set_piomode = amd66_set_piomode,
24648 .set_dmamode = amd66_set_dmamode,
24649 };
24650
24651 -static struct ata_port_operations amd100_port_ops = {
24652 +static const struct ata_port_operations amd100_port_ops = {
24653 .inherits = &amd_base_port_ops,
24654 .cable_detect = ata_cable_unknown,
24655 .set_piomode = amd100_set_piomode,
24656 .set_dmamode = amd100_set_dmamode,
24657 };
24658
24659 -static struct ata_port_operations amd133_port_ops = {
24660 +static const struct ata_port_operations amd133_port_ops = {
24661 .inherits = &amd_base_port_ops,
24662 .cable_detect = amd_cable_detect,
24663 .set_piomode = amd133_set_piomode,
24664 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24665 .host_stop = nv_host_stop,
24666 };
24667
24668 -static struct ata_port_operations nv100_port_ops = {
24669 +static const struct ata_port_operations nv100_port_ops = {
24670 .inherits = &nv_base_port_ops,
24671 .set_piomode = nv100_set_piomode,
24672 .set_dmamode = nv100_set_dmamode,
24673 };
24674
24675 -static struct ata_port_operations nv133_port_ops = {
24676 +static const struct ata_port_operations nv133_port_ops = {
24677 .inherits = &nv_base_port_ops,
24678 .set_piomode = nv133_set_piomode,
24679 .set_dmamode = nv133_set_dmamode,
24680 diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24681 --- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24682 +++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24683 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24684 ATA_BMDMA_SHT(DRV_NAME),
24685 };
24686
24687 -static struct ata_port_operations artop6210_ops = {
24688 +static const struct ata_port_operations artop6210_ops = {
24689 .inherits = &ata_bmdma_port_ops,
24690 .cable_detect = ata_cable_40wire,
24691 .set_piomode = artop6210_set_piomode,
24692 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24693 .qc_defer = artop6210_qc_defer,
24694 };
24695
24696 -static struct ata_port_operations artop6260_ops = {
24697 +static const struct ata_port_operations artop6260_ops = {
24698 .inherits = &ata_bmdma_port_ops,
24699 .cable_detect = artop6260_cable_detect,
24700 .set_piomode = artop6260_set_piomode,
24701 diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24702 --- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24703 +++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24704 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24705 ATA_PIO_SHT(DRV_NAME),
24706 };
24707
24708 -static struct ata_port_operations at32_port_ops = {
24709 +static const struct ata_port_operations at32_port_ops = {
24710 .inherits = &ata_sff_port_ops,
24711 .cable_detect = ata_cable_40wire,
24712 .set_piomode = pata_at32_set_piomode,
24713 diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24714 --- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24715 +++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24716 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24717 ATA_PIO_SHT(DRV_NAME),
24718 };
24719
24720 -static struct ata_port_operations pata_at91_port_ops = {
24721 +static const struct ata_port_operations pata_at91_port_ops = {
24722 .inherits = &ata_sff_port_ops,
24723
24724 .sff_data_xfer = pata_at91_data_xfer_noirq,
24725 diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
24726 --- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24727 +++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24728 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24729 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24730 };
24731
24732 -static struct ata_port_operations atiixp_port_ops = {
24733 +static const struct ata_port_operations atiixp_port_ops = {
24734 .inherits = &ata_bmdma_port_ops,
24735
24736 .qc_prep = ata_sff_dumb_qc_prep,
24737 diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
24738 --- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24739 +++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24740 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24741 ATA_BMDMA_SHT(DRV_NAME),
24742 };
24743
24744 -static struct ata_port_operations atp867x_ops = {
24745 +static const struct ata_port_operations atp867x_ops = {
24746 .inherits = &ata_bmdma_port_ops,
24747 .cable_detect = atp867x_cable_detect,
24748 .set_piomode = atp867x_set_piomode,
24749 diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
24750 --- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24751 +++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24752 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24753 .dma_boundary = ATA_DMA_BOUNDARY,
24754 };
24755
24756 -static struct ata_port_operations bfin_pata_ops = {
24757 +static const struct ata_port_operations bfin_pata_ops = {
24758 .inherits = &ata_sff_port_ops,
24759
24760 .set_piomode = bfin_set_piomode,
24761 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
24762 --- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24763 +++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24764 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24765 ATA_BMDMA_SHT(DRV_NAME),
24766 };
24767
24768 -static struct ata_port_operations cmd640_port_ops = {
24769 +static const struct ata_port_operations cmd640_port_ops = {
24770 .inherits = &ata_bmdma_port_ops,
24771 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24772 .sff_data_xfer = ata_sff_data_xfer_noirq,
24773 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
24774 --- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24775 +++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24776 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24777 .set_dmamode = cmd64x_set_dmamode,
24778 };
24779
24780 -static struct ata_port_operations cmd64x_port_ops = {
24781 +static const struct ata_port_operations cmd64x_port_ops = {
24782 .inherits = &cmd64x_base_ops,
24783 .cable_detect = ata_cable_40wire,
24784 };
24785
24786 -static struct ata_port_operations cmd646r1_port_ops = {
24787 +static const struct ata_port_operations cmd646r1_port_ops = {
24788 .inherits = &cmd64x_base_ops,
24789 .bmdma_stop = cmd646r1_bmdma_stop,
24790 .cable_detect = ata_cable_40wire,
24791 };
24792
24793 -static struct ata_port_operations cmd648_port_ops = {
24794 +static const struct ata_port_operations cmd648_port_ops = {
24795 .inherits = &cmd64x_base_ops,
24796 .bmdma_stop = cmd648_bmdma_stop,
24797 .cable_detect = cmd648_cable_detect,
24798 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
24799 --- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24800 +++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24801 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24802 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24803 };
24804
24805 -static struct ata_port_operations cs5520_port_ops = {
24806 +static const struct ata_port_operations cs5520_port_ops = {
24807 .inherits = &ata_bmdma_port_ops,
24808 .qc_prep = ata_sff_dumb_qc_prep,
24809 .cable_detect = ata_cable_40wire,
24810 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
24811 --- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24812 +++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24813 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24814 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24815 };
24816
24817 -static struct ata_port_operations cs5530_port_ops = {
24818 +static const struct ata_port_operations cs5530_port_ops = {
24819 .inherits = &ata_bmdma_port_ops,
24820
24821 .qc_prep = ata_sff_dumb_qc_prep,
24822 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
24823 --- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24824 +++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24825 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24826 ATA_BMDMA_SHT(DRV_NAME),
24827 };
24828
24829 -static struct ata_port_operations cs5535_port_ops = {
24830 +static const struct ata_port_operations cs5535_port_ops = {
24831 .inherits = &ata_bmdma_port_ops,
24832 .cable_detect = cs5535_cable_detect,
24833 .set_piomode = cs5535_set_piomode,
24834 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
24835 --- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24836 +++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24837 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24838 ATA_BMDMA_SHT(DRV_NAME),
24839 };
24840
24841 -static struct ata_port_operations cs5536_port_ops = {
24842 +static const struct ata_port_operations cs5536_port_ops = {
24843 .inherits = &ata_bmdma_port_ops,
24844 .cable_detect = cs5536_cable_detect,
24845 .set_piomode = cs5536_set_piomode,
24846 diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
24847 --- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24848 +++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24849 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24850 ATA_BMDMA_SHT(DRV_NAME),
24851 };
24852
24853 -static struct ata_port_operations cy82c693_port_ops = {
24854 +static const struct ata_port_operations cy82c693_port_ops = {
24855 .inherits = &ata_bmdma_port_ops,
24856 .cable_detect = ata_cable_40wire,
24857 .set_piomode = cy82c693_set_piomode,
24858 diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
24859 --- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24860 +++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24861 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24862 ATA_BMDMA_SHT(DRV_NAME),
24863 };
24864
24865 -static struct ata_port_operations efar_ops = {
24866 +static const struct ata_port_operations efar_ops = {
24867 .inherits = &ata_bmdma_port_ops,
24868 .cable_detect = efar_cable_detect,
24869 .set_piomode = efar_set_piomode,
24870 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
24871 --- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24872 +++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24873 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24874 * Configuration for HPT366/68
24875 */
24876
24877 -static struct ata_port_operations hpt366_port_ops = {
24878 +static const struct ata_port_operations hpt366_port_ops = {
24879 .inherits = &ata_bmdma_port_ops,
24880 .cable_detect = hpt36x_cable_detect,
24881 .mode_filter = hpt366_filter,
24882 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
24883 --- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24884 +++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24885 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24886 * Configuration for HPT370
24887 */
24888
24889 -static struct ata_port_operations hpt370_port_ops = {
24890 +static const struct ata_port_operations hpt370_port_ops = {
24891 .inherits = &ata_bmdma_port_ops,
24892
24893 .bmdma_stop = hpt370_bmdma_stop,
24894 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24895 * Configuration for HPT370A. Close to 370 but less filters
24896 */
24897
24898 -static struct ata_port_operations hpt370a_port_ops = {
24899 +static const struct ata_port_operations hpt370a_port_ops = {
24900 .inherits = &hpt370_port_ops,
24901 .mode_filter = hpt370a_filter,
24902 };
24903 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24904 * and DMA mode setting functionality.
24905 */
24906
24907 -static struct ata_port_operations hpt372_port_ops = {
24908 +static const struct ata_port_operations hpt372_port_ops = {
24909 .inherits = &ata_bmdma_port_ops,
24910
24911 .bmdma_stop = hpt37x_bmdma_stop,
24912 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24913 * but we have a different cable detection procedure for function 1.
24914 */
24915
24916 -static struct ata_port_operations hpt374_fn1_port_ops = {
24917 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24918 .inherits = &hpt372_port_ops,
24919 .prereset = hpt374_fn1_pre_reset,
24920 };
24921 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
24922 --- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24923 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24924 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24925 * Configuration for HPT3x2n.
24926 */
24927
24928 -static struct ata_port_operations hpt3x2n_port_ops = {
24929 +static const struct ata_port_operations hpt3x2n_port_ops = {
24930 .inherits = &ata_bmdma_port_ops,
24931
24932 .bmdma_stop = hpt3x2n_bmdma_stop,
24933 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
24934 --- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24935 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24936 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24937 ATA_BMDMA_SHT(DRV_NAME),
24938 };
24939
24940 -static struct ata_port_operations hpt3x3_port_ops = {
24941 +static const struct ata_port_operations hpt3x3_port_ops = {
24942 .inherits = &ata_bmdma_port_ops,
24943 .cable_detect = ata_cable_40wire,
24944 .set_piomode = hpt3x3_set_piomode,
24945 diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
24946 --- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24947 +++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24948 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24949 }
24950 }
24951
24952 -static struct ata_port_operations pata_icside_port_ops = {
24953 +static const struct ata_port_operations pata_icside_port_ops = {
24954 .inherits = &ata_sff_port_ops,
24955 /* no need to build any PRD tables for DMA */
24956 .qc_prep = ata_noop_qc_prep,
24957 diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
24958 --- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24959 +++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24960 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24961 ATA_PIO_SHT(DRV_NAME),
24962 };
24963
24964 -static struct ata_port_operations isapnp_port_ops = {
24965 +static const struct ata_port_operations isapnp_port_ops = {
24966 .inherits = &ata_sff_port_ops,
24967 .cable_detect = ata_cable_40wire,
24968 };
24969
24970 -static struct ata_port_operations isapnp_noalt_port_ops = {
24971 +static const struct ata_port_operations isapnp_noalt_port_ops = {
24972 .inherits = &ata_sff_port_ops,
24973 .cable_detect = ata_cable_40wire,
24974 /* No altstatus so we don't want to use the lost interrupt poll */
24975 diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
24976 --- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24977 +++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24978 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24979 };
24980
24981
24982 -static struct ata_port_operations it8213_ops = {
24983 +static const struct ata_port_operations it8213_ops = {
24984 .inherits = &ata_bmdma_port_ops,
24985 .cable_detect = it8213_cable_detect,
24986 .set_piomode = it8213_set_piomode,
24987 diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
24988 --- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24989 +++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24990 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24991 ATA_BMDMA_SHT(DRV_NAME),
24992 };
24993
24994 -static struct ata_port_operations it821x_smart_port_ops = {
24995 +static const struct ata_port_operations it821x_smart_port_ops = {
24996 .inherits = &ata_bmdma_port_ops,
24997
24998 .check_atapi_dma= it821x_check_atapi_dma,
24999 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25000 .port_start = it821x_port_start,
25001 };
25002
25003 -static struct ata_port_operations it821x_passthru_port_ops = {
25004 +static const struct ata_port_operations it821x_passthru_port_ops = {
25005 .inherits = &ata_bmdma_port_ops,
25006
25007 .check_atapi_dma= it821x_check_atapi_dma,
25008 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25009 .port_start = it821x_port_start,
25010 };
25011
25012 -static struct ata_port_operations it821x_rdc_port_ops = {
25013 +static const struct ata_port_operations it821x_rdc_port_ops = {
25014 .inherits = &ata_bmdma_port_ops,
25015
25016 .check_atapi_dma= it821x_check_atapi_dma,
25017 diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25018 --- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25019 +++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25020 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25021 ATA_PIO_SHT(DRV_NAME),
25022 };
25023
25024 -static struct ata_port_operations ixp4xx_port_ops = {
25025 +static const struct ata_port_operations ixp4xx_port_ops = {
25026 .inherits = &ata_sff_port_ops,
25027 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25028 .cable_detect = ata_cable_40wire,
25029 diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25030 --- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25031 +++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25032 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25033 ATA_BMDMA_SHT(DRV_NAME),
25034 };
25035
25036 -static struct ata_port_operations jmicron_ops = {
25037 +static const struct ata_port_operations jmicron_ops = {
25038 .inherits = &ata_bmdma_port_ops,
25039 .prereset = jmicron_pre_reset,
25040 };
25041 diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25042 --- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25043 +++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25044 @@ -106,7 +106,7 @@ struct legacy_probe {
25045
25046 struct legacy_controller {
25047 const char *name;
25048 - struct ata_port_operations *ops;
25049 + const struct ata_port_operations *ops;
25050 unsigned int pio_mask;
25051 unsigned int flags;
25052 unsigned int pflags;
25053 @@ -223,12 +223,12 @@ static const struct ata_port_operations
25054 * pio_mask as well.
25055 */
25056
25057 -static struct ata_port_operations simple_port_ops = {
25058 +static const struct ata_port_operations simple_port_ops = {
25059 .inherits = &legacy_base_port_ops,
25060 .sff_data_xfer = ata_sff_data_xfer_noirq,
25061 };
25062
25063 -static struct ata_port_operations legacy_port_ops = {
25064 +static const struct ata_port_operations legacy_port_ops = {
25065 .inherits = &legacy_base_port_ops,
25066 .sff_data_xfer = ata_sff_data_xfer_noirq,
25067 .set_mode = legacy_set_mode,
25068 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25069 return buflen;
25070 }
25071
25072 -static struct ata_port_operations pdc20230_port_ops = {
25073 +static const struct ata_port_operations pdc20230_port_ops = {
25074 .inherits = &legacy_base_port_ops,
25075 .set_piomode = pdc20230_set_piomode,
25076 .sff_data_xfer = pdc_data_xfer_vlb,
25077 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25078 ioread8(ap->ioaddr.status_addr);
25079 }
25080
25081 -static struct ata_port_operations ht6560a_port_ops = {
25082 +static const struct ata_port_operations ht6560a_port_ops = {
25083 .inherits = &legacy_base_port_ops,
25084 .set_piomode = ht6560a_set_piomode,
25085 };
25086 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25087 ioread8(ap->ioaddr.status_addr);
25088 }
25089
25090 -static struct ata_port_operations ht6560b_port_ops = {
25091 +static const struct ata_port_operations ht6560b_port_ops = {
25092 .inherits = &legacy_base_port_ops,
25093 .set_piomode = ht6560b_set_piomode,
25094 };
25095 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25096 }
25097
25098
25099 -static struct ata_port_operations opti82c611a_port_ops = {
25100 +static const struct ata_port_operations opti82c611a_port_ops = {
25101 .inherits = &legacy_base_port_ops,
25102 .set_piomode = opti82c611a_set_piomode,
25103 };
25104 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25105 return ata_sff_qc_issue(qc);
25106 }
25107
25108 -static struct ata_port_operations opti82c46x_port_ops = {
25109 +static const struct ata_port_operations opti82c46x_port_ops = {
25110 .inherits = &legacy_base_port_ops,
25111 .set_piomode = opti82c46x_set_piomode,
25112 .qc_issue = opti82c46x_qc_issue,
25113 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25114 return 0;
25115 }
25116
25117 -static struct ata_port_operations qdi6500_port_ops = {
25118 +static const struct ata_port_operations qdi6500_port_ops = {
25119 .inherits = &legacy_base_port_ops,
25120 .set_piomode = qdi6500_set_piomode,
25121 .qc_issue = qdi_qc_issue,
25122 .sff_data_xfer = vlb32_data_xfer,
25123 };
25124
25125 -static struct ata_port_operations qdi6580_port_ops = {
25126 +static const struct ata_port_operations qdi6580_port_ops = {
25127 .inherits = &legacy_base_port_ops,
25128 .set_piomode = qdi6580_set_piomode,
25129 .sff_data_xfer = vlb32_data_xfer,
25130 };
25131
25132 -static struct ata_port_operations qdi6580dp_port_ops = {
25133 +static const struct ata_port_operations qdi6580dp_port_ops = {
25134 .inherits = &legacy_base_port_ops,
25135 .set_piomode = qdi6580dp_set_piomode,
25136 .sff_data_xfer = vlb32_data_xfer,
25137 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25138 return 0;
25139 }
25140
25141 -static struct ata_port_operations winbond_port_ops = {
25142 +static const struct ata_port_operations winbond_port_ops = {
25143 .inherits = &legacy_base_port_ops,
25144 .set_piomode = winbond_set_piomode,
25145 .sff_data_xfer = vlb32_data_xfer,
25146 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25147 int pio_modes = controller->pio_mask;
25148 unsigned long io = probe->port;
25149 u32 mask = (1 << probe->slot);
25150 - struct ata_port_operations *ops = controller->ops;
25151 + const struct ata_port_operations *ops = controller->ops;
25152 struct legacy_data *ld = &legacy_data[probe->slot];
25153 struct ata_host *host = NULL;
25154 struct ata_port *ap;
25155 diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25156 --- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25157 +++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25158 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25159 ATA_BMDMA_SHT(DRV_NAME),
25160 };
25161
25162 -static struct ata_port_operations marvell_ops = {
25163 +static const struct ata_port_operations marvell_ops = {
25164 .inherits = &ata_bmdma_port_ops,
25165 .cable_detect = marvell_cable_detect,
25166 .prereset = marvell_pre_reset,
25167 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25168 --- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25169 +++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25170 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25171 ATA_PIO_SHT(DRV_NAME),
25172 };
25173
25174 -static struct ata_port_operations mpc52xx_ata_port_ops = {
25175 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
25176 .inherits = &ata_bmdma_port_ops,
25177 .sff_dev_select = mpc52xx_ata_dev_select,
25178 .set_piomode = mpc52xx_ata_set_piomode,
25179 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25180 --- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25181 +++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25182 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25183 ATA_PIO_SHT(DRV_NAME),
25184 };
25185
25186 -static struct ata_port_operations mpiix_port_ops = {
25187 +static const struct ata_port_operations mpiix_port_ops = {
25188 .inherits = &ata_sff_port_ops,
25189 .qc_issue = mpiix_qc_issue,
25190 .cable_detect = ata_cable_40wire,
25191 diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25192 --- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25193 +++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25194 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25195 ATA_BMDMA_SHT(DRV_NAME),
25196 };
25197
25198 -static struct ata_port_operations netcell_ops = {
25199 +static const struct ata_port_operations netcell_ops = {
25200 .inherits = &ata_bmdma_port_ops,
25201 .cable_detect = ata_cable_80wire,
25202 .read_id = netcell_read_id,
25203 diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25204 --- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25205 +++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25206 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25207 ATA_BMDMA_SHT(DRV_NAME),
25208 };
25209
25210 -static struct ata_port_operations ninja32_port_ops = {
25211 +static const struct ata_port_operations ninja32_port_ops = {
25212 .inherits = &ata_bmdma_port_ops,
25213 .sff_dev_select = ninja32_dev_select,
25214 .cable_detect = ata_cable_40wire,
25215 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25216 --- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25217 +++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25218 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25219 ATA_PIO_SHT(DRV_NAME),
25220 };
25221
25222 -static struct ata_port_operations ns87410_port_ops = {
25223 +static const struct ata_port_operations ns87410_port_ops = {
25224 .inherits = &ata_sff_port_ops,
25225 .qc_issue = ns87410_qc_issue,
25226 .cable_detect = ata_cable_40wire,
25227 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25228 --- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25229 +++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25230 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25231 }
25232 #endif /* 87560 SuperIO Support */
25233
25234 -static struct ata_port_operations ns87415_pata_ops = {
25235 +static const struct ata_port_operations ns87415_pata_ops = {
25236 .inherits = &ata_bmdma_port_ops,
25237
25238 .check_atapi_dma = ns87415_check_atapi_dma,
25239 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25240 };
25241
25242 #if defined(CONFIG_SUPERIO)
25243 -static struct ata_port_operations ns87560_pata_ops = {
25244 +static const struct ata_port_operations ns87560_pata_ops = {
25245 .inherits = &ns87415_pata_ops,
25246 .sff_tf_read = ns87560_tf_read,
25247 .sff_check_status = ns87560_check_status,
25248 diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25249 --- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25250 +++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25251 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25252 return 0;
25253 }
25254
25255 +/* cannot be const */
25256 static struct ata_port_operations octeon_cf_ops = {
25257 .inherits = &ata_sff_port_ops,
25258 .check_atapi_dma = octeon_cf_check_atapi_dma,
25259 diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25260 --- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25261 +++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25262 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25263 ATA_BMDMA_SHT(DRV_NAME),
25264 };
25265
25266 -static struct ata_port_operations oldpiix_pata_ops = {
25267 +static const struct ata_port_operations oldpiix_pata_ops = {
25268 .inherits = &ata_bmdma_port_ops,
25269 .qc_issue = oldpiix_qc_issue,
25270 .cable_detect = ata_cable_40wire,
25271 diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25272 --- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25273 +++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25274 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25275 ATA_PIO_SHT(DRV_NAME),
25276 };
25277
25278 -static struct ata_port_operations opti_port_ops = {
25279 +static const struct ata_port_operations opti_port_ops = {
25280 .inherits = &ata_sff_port_ops,
25281 .cable_detect = ata_cable_40wire,
25282 .set_piomode = opti_set_piomode,
25283 diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25284 --- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25285 +++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25286 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25287 ATA_BMDMA_SHT(DRV_NAME),
25288 };
25289
25290 -static struct ata_port_operations optidma_port_ops = {
25291 +static const struct ata_port_operations optidma_port_ops = {
25292 .inherits = &ata_bmdma_port_ops,
25293 .cable_detect = ata_cable_40wire,
25294 .set_piomode = optidma_set_pio_mode,
25295 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25296 .prereset = optidma_pre_reset,
25297 };
25298
25299 -static struct ata_port_operations optiplus_port_ops = {
25300 +static const struct ata_port_operations optiplus_port_ops = {
25301 .inherits = &optidma_port_ops,
25302 .set_piomode = optiplus_set_pio_mode,
25303 .set_dmamode = optiplus_set_dma_mode,
25304 diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25305 --- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25306 +++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25307 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25308 ATA_PIO_SHT(DRV_NAME),
25309 };
25310
25311 -static struct ata_port_operations palmld_port_ops = {
25312 +static const struct ata_port_operations palmld_port_ops = {
25313 .inherits = &ata_sff_port_ops,
25314 .sff_data_xfer = ata_sff_data_xfer_noirq,
25315 .cable_detect = ata_cable_40wire,
25316 diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25317 --- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25318 +++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25319 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25320 ATA_PIO_SHT(DRV_NAME),
25321 };
25322
25323 -static struct ata_port_operations pcmcia_port_ops = {
25324 +static const struct ata_port_operations pcmcia_port_ops = {
25325 .inherits = &ata_sff_port_ops,
25326 .sff_data_xfer = ata_sff_data_xfer_noirq,
25327 .cable_detect = ata_cable_40wire,
25328 .set_mode = pcmcia_set_mode,
25329 };
25330
25331 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25332 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25333 .inherits = &ata_sff_port_ops,
25334 .sff_data_xfer = ata_data_xfer_8bit,
25335 .cable_detect = ata_cable_40wire,
25336 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25337 unsigned long io_base, ctl_base;
25338 void __iomem *io_addr, *ctl_addr;
25339 int n_ports = 1;
25340 - struct ata_port_operations *ops = &pcmcia_port_ops;
25341 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25342
25343 info = kzalloc(sizeof(*info), GFP_KERNEL);
25344 if (info == NULL)
25345 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25346 --- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25347 +++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25348 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25349 ATA_BMDMA_SHT(DRV_NAME),
25350 };
25351
25352 -static struct ata_port_operations pdc2027x_pata100_ops = {
25353 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25354 .inherits = &ata_bmdma_port_ops,
25355 .check_atapi_dma = pdc2027x_check_atapi_dma,
25356 .cable_detect = pdc2027x_cable_detect,
25357 .prereset = pdc2027x_prereset,
25358 };
25359
25360 -static struct ata_port_operations pdc2027x_pata133_ops = {
25361 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25362 .inherits = &pdc2027x_pata100_ops,
25363 .mode_filter = pdc2027x_mode_filter,
25364 .set_piomode = pdc2027x_set_piomode,
25365 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25366 --- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25367 +++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25368 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25369 ATA_BMDMA_SHT(DRV_NAME),
25370 };
25371
25372 -static struct ata_port_operations pdc2024x_port_ops = {
25373 +static const struct ata_port_operations pdc2024x_port_ops = {
25374 .inherits = &ata_bmdma_port_ops,
25375
25376 .cable_detect = ata_cable_40wire,
25377 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25378 .sff_exec_command = pdc202xx_exec_command,
25379 };
25380
25381 -static struct ata_port_operations pdc2026x_port_ops = {
25382 +static const struct ata_port_operations pdc2026x_port_ops = {
25383 .inherits = &pdc2024x_port_ops,
25384
25385 .check_atapi_dma = pdc2026x_check_atapi_dma,
25386 diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25387 --- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25388 +++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25389 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25390 ATA_PIO_SHT(DRV_NAME),
25391 };
25392
25393 -static struct ata_port_operations pata_platform_port_ops = {
25394 +static const struct ata_port_operations pata_platform_port_ops = {
25395 .inherits = &ata_sff_port_ops,
25396 .sff_data_xfer = ata_sff_data_xfer_noirq,
25397 .cable_detect = ata_cable_unknown,
25398 diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25399 --- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25400 +++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25401 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25402 ATA_PIO_SHT(DRV_NAME),
25403 };
25404
25405 -static struct ata_port_operations qdi6500_port_ops = {
25406 +static const struct ata_port_operations qdi6500_port_ops = {
25407 .inherits = &ata_sff_port_ops,
25408 .qc_issue = qdi_qc_issue,
25409 .sff_data_xfer = qdi_data_xfer,
25410 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25411 .set_piomode = qdi6500_set_piomode,
25412 };
25413
25414 -static struct ata_port_operations qdi6580_port_ops = {
25415 +static const struct ata_port_operations qdi6580_port_ops = {
25416 .inherits = &qdi6500_port_ops,
25417 .set_piomode = qdi6580_set_piomode,
25418 };
25419 diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25420 --- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25421 +++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25422 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25423 ATA_BMDMA_SHT(DRV_NAME),
25424 };
25425
25426 -static struct ata_port_operations radisys_pata_ops = {
25427 +static const struct ata_port_operations radisys_pata_ops = {
25428 .inherits = &ata_bmdma_port_ops,
25429 .qc_issue = radisys_qc_issue,
25430 .cable_detect = ata_cable_unknown,
25431 diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25432 --- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25433 +++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25434 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25435 return IRQ_HANDLED;
25436 }
25437
25438 -static struct ata_port_operations rb532_pata_port_ops = {
25439 +static const struct ata_port_operations rb532_pata_port_ops = {
25440 .inherits = &ata_sff_port_ops,
25441 .sff_data_xfer = ata_sff_data_xfer32,
25442 };
25443 diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25444 --- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25445 +++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25446 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25447 pci_write_config_byte(dev, 0x48, udma_enable);
25448 }
25449
25450 -static struct ata_port_operations rdc_pata_ops = {
25451 +static const struct ata_port_operations rdc_pata_ops = {
25452 .inherits = &ata_bmdma32_port_ops,
25453 .cable_detect = rdc_pata_cable_detect,
25454 .set_piomode = rdc_set_piomode,
25455 diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25456 --- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25457 +++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25458 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25459 ATA_PIO_SHT(DRV_NAME),
25460 };
25461
25462 -static struct ata_port_operations rz1000_port_ops = {
25463 +static const struct ata_port_operations rz1000_port_ops = {
25464 .inherits = &ata_sff_port_ops,
25465 .cable_detect = ata_cable_40wire,
25466 .set_mode = rz1000_set_mode,
25467 diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25468 --- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25469 +++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25470 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25471 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25472 };
25473
25474 -static struct ata_port_operations sc1200_port_ops = {
25475 +static const struct ata_port_operations sc1200_port_ops = {
25476 .inherits = &ata_bmdma_port_ops,
25477 .qc_prep = ata_sff_dumb_qc_prep,
25478 .qc_issue = sc1200_qc_issue,
25479 diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25480 --- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25481 +++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25482 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25483 ATA_BMDMA_SHT(DRV_NAME),
25484 };
25485
25486 -static struct ata_port_operations scc_pata_ops = {
25487 +static const struct ata_port_operations scc_pata_ops = {
25488 .inherits = &ata_bmdma_port_ops,
25489
25490 .set_piomode = scc_set_piomode,
25491 diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25492 --- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25493 +++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25494 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25495 ATA_BMDMA_SHT(DRV_NAME),
25496 };
25497
25498 -static struct ata_port_operations sch_pata_ops = {
25499 +static const struct ata_port_operations sch_pata_ops = {
25500 .inherits = &ata_bmdma_port_ops,
25501 .cable_detect = ata_cable_unknown,
25502 .set_piomode = sch_set_piomode,
25503 diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25504 --- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25505 +++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25506 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25507 ATA_BMDMA_SHT(DRV_NAME),
25508 };
25509
25510 -static struct ata_port_operations serverworks_osb4_port_ops = {
25511 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25512 .inherits = &ata_bmdma_port_ops,
25513 .cable_detect = serverworks_cable_detect,
25514 .mode_filter = serverworks_osb4_filter,
25515 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25516 .set_dmamode = serverworks_set_dmamode,
25517 };
25518
25519 -static struct ata_port_operations serverworks_csb_port_ops = {
25520 +static const struct ata_port_operations serverworks_csb_port_ops = {
25521 .inherits = &serverworks_osb4_port_ops,
25522 .mode_filter = serverworks_csb_filter,
25523 };
25524 diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25525 --- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25526 +++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25527 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25528 ATA_BMDMA_SHT(DRV_NAME),
25529 };
25530
25531 -static struct ata_port_operations sil680_port_ops = {
25532 +static const struct ata_port_operations sil680_port_ops = {
25533 .inherits = &ata_bmdma32_port_ops,
25534 .cable_detect = sil680_cable_detect,
25535 .set_piomode = sil680_set_piomode,
25536 diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25537 --- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25538 +++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25539 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25540 ATA_BMDMA_SHT(DRV_NAME),
25541 };
25542
25543 -static struct ata_port_operations sis_133_for_sata_ops = {
25544 +static const struct ata_port_operations sis_133_for_sata_ops = {
25545 .inherits = &ata_bmdma_port_ops,
25546 .set_piomode = sis_133_set_piomode,
25547 .set_dmamode = sis_133_set_dmamode,
25548 .cable_detect = sis_133_cable_detect,
25549 };
25550
25551 -static struct ata_port_operations sis_base_ops = {
25552 +static const struct ata_port_operations sis_base_ops = {
25553 .inherits = &ata_bmdma_port_ops,
25554 .prereset = sis_pre_reset,
25555 };
25556
25557 -static struct ata_port_operations sis_133_ops = {
25558 +static const struct ata_port_operations sis_133_ops = {
25559 .inherits = &sis_base_ops,
25560 .set_piomode = sis_133_set_piomode,
25561 .set_dmamode = sis_133_set_dmamode,
25562 .cable_detect = sis_133_cable_detect,
25563 };
25564
25565 -static struct ata_port_operations sis_133_early_ops = {
25566 +static const struct ata_port_operations sis_133_early_ops = {
25567 .inherits = &sis_base_ops,
25568 .set_piomode = sis_100_set_piomode,
25569 .set_dmamode = sis_133_early_set_dmamode,
25570 .cable_detect = sis_66_cable_detect,
25571 };
25572
25573 -static struct ata_port_operations sis_100_ops = {
25574 +static const struct ata_port_operations sis_100_ops = {
25575 .inherits = &sis_base_ops,
25576 .set_piomode = sis_100_set_piomode,
25577 .set_dmamode = sis_100_set_dmamode,
25578 .cable_detect = sis_66_cable_detect,
25579 };
25580
25581 -static struct ata_port_operations sis_66_ops = {
25582 +static const struct ata_port_operations sis_66_ops = {
25583 .inherits = &sis_base_ops,
25584 .set_piomode = sis_old_set_piomode,
25585 .set_dmamode = sis_66_set_dmamode,
25586 .cable_detect = sis_66_cable_detect,
25587 };
25588
25589 -static struct ata_port_operations sis_old_ops = {
25590 +static const struct ata_port_operations sis_old_ops = {
25591 .inherits = &sis_base_ops,
25592 .set_piomode = sis_old_set_piomode,
25593 .set_dmamode = sis_old_set_dmamode,
25594 diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25595 --- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25596 +++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25597 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25598 ATA_BMDMA_SHT(DRV_NAME),
25599 };
25600
25601 -static struct ata_port_operations sl82c105_port_ops = {
25602 +static const struct ata_port_operations sl82c105_port_ops = {
25603 .inherits = &ata_bmdma_port_ops,
25604 .qc_defer = sl82c105_qc_defer,
25605 .bmdma_start = sl82c105_bmdma_start,
25606 diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25607 --- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25608 +++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25609 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25610 ATA_BMDMA_SHT(DRV_NAME),
25611 };
25612
25613 -static struct ata_port_operations triflex_port_ops = {
25614 +static const struct ata_port_operations triflex_port_ops = {
25615 .inherits = &ata_bmdma_port_ops,
25616 .bmdma_start = triflex_bmdma_start,
25617 .bmdma_stop = triflex_bmdma_stop,
25618 diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25619 --- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25620 +++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25621 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25622 ATA_BMDMA_SHT(DRV_NAME),
25623 };
25624
25625 -static struct ata_port_operations via_port_ops = {
25626 +static const struct ata_port_operations via_port_ops = {
25627 .inherits = &ata_bmdma_port_ops,
25628 .cable_detect = via_cable_detect,
25629 .set_piomode = via_set_piomode,
25630 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25631 .port_start = via_port_start,
25632 };
25633
25634 -static struct ata_port_operations via_port_ops_noirq = {
25635 +static const struct ata_port_operations via_port_ops_noirq = {
25636 .inherits = &via_port_ops,
25637 .sff_data_xfer = ata_sff_data_xfer_noirq,
25638 };
25639 diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25640 --- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25641 +++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25642 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25643 ATA_PIO_SHT(DRV_NAME),
25644 };
25645
25646 -static struct ata_port_operations winbond_port_ops = {
25647 +static const struct ata_port_operations winbond_port_ops = {
25648 .inherits = &ata_sff_port_ops,
25649 .sff_data_xfer = winbond_data_xfer,
25650 .cable_detect = ata_cable_40wire,
25651 diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25652 --- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25653 +++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25654 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25655 .dma_boundary = ADMA_DMA_BOUNDARY,
25656 };
25657
25658 -static struct ata_port_operations adma_ata_ops = {
25659 +static const struct ata_port_operations adma_ata_ops = {
25660 .inherits = &ata_sff_port_ops,
25661
25662 .lost_interrupt = ATA_OP_NULL,
25663 diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25664 --- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25665 +++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25666 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25667 .dma_boundary = ATA_DMA_BOUNDARY,
25668 };
25669
25670 -static struct ata_port_operations sata_fsl_ops = {
25671 +static const struct ata_port_operations sata_fsl_ops = {
25672 .inherits = &sata_pmp_port_ops,
25673
25674 .qc_defer = ata_std_qc_defer,
25675 diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25676 --- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25677 +++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25678 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25679 return 0;
25680 }
25681
25682 -static struct ata_port_operations inic_port_ops = {
25683 +static const struct ata_port_operations inic_port_ops = {
25684 .inherits = &sata_port_ops,
25685
25686 .check_atapi_dma = inic_check_atapi_dma,
25687 diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25688 --- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25689 +++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25690 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25691 .dma_boundary = MV_DMA_BOUNDARY,
25692 };
25693
25694 -static struct ata_port_operations mv5_ops = {
25695 +static const struct ata_port_operations mv5_ops = {
25696 .inherits = &ata_sff_port_ops,
25697
25698 .lost_interrupt = ATA_OP_NULL,
25699 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25700 .port_stop = mv_port_stop,
25701 };
25702
25703 -static struct ata_port_operations mv6_ops = {
25704 +static const struct ata_port_operations mv6_ops = {
25705 .inherits = &mv5_ops,
25706 .dev_config = mv6_dev_config,
25707 .scr_read = mv_scr_read,
25708 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25709 .bmdma_status = mv_bmdma_status,
25710 };
25711
25712 -static struct ata_port_operations mv_iie_ops = {
25713 +static const struct ata_port_operations mv_iie_ops = {
25714 .inherits = &mv6_ops,
25715 .dev_config = ATA_OP_NULL,
25716 .qc_prep = mv_qc_prep_iie,
25717 diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
25718 --- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25719 +++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25720 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25721 * cases. Define nv_hardreset() which only kicks in for post-boot
25722 * probing and use it for all variants.
25723 */
25724 -static struct ata_port_operations nv_generic_ops = {
25725 +static const struct ata_port_operations nv_generic_ops = {
25726 .inherits = &ata_bmdma_port_ops,
25727 .lost_interrupt = ATA_OP_NULL,
25728 .scr_read = nv_scr_read,
25729 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25730 .hardreset = nv_hardreset,
25731 };
25732
25733 -static struct ata_port_operations nv_nf2_ops = {
25734 +static const struct ata_port_operations nv_nf2_ops = {
25735 .inherits = &nv_generic_ops,
25736 .freeze = nv_nf2_freeze,
25737 .thaw = nv_nf2_thaw,
25738 };
25739
25740 -static struct ata_port_operations nv_ck804_ops = {
25741 +static const struct ata_port_operations nv_ck804_ops = {
25742 .inherits = &nv_generic_ops,
25743 .freeze = nv_ck804_freeze,
25744 .thaw = nv_ck804_thaw,
25745 .host_stop = nv_ck804_host_stop,
25746 };
25747
25748 -static struct ata_port_operations nv_adma_ops = {
25749 +static const struct ata_port_operations nv_adma_ops = {
25750 .inherits = &nv_ck804_ops,
25751
25752 .check_atapi_dma = nv_adma_check_atapi_dma,
25753 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25754 .host_stop = nv_adma_host_stop,
25755 };
25756
25757 -static struct ata_port_operations nv_swncq_ops = {
25758 +static const struct ata_port_operations nv_swncq_ops = {
25759 .inherits = &nv_generic_ops,
25760
25761 .qc_defer = ata_std_qc_defer,
25762 diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
25763 --- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25764 +++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25765 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25766 .error_handler = pdc_error_handler,
25767 };
25768
25769 -static struct ata_port_operations pdc_sata_ops = {
25770 +static const struct ata_port_operations pdc_sata_ops = {
25771 .inherits = &pdc_common_ops,
25772 .cable_detect = pdc_sata_cable_detect,
25773 .freeze = pdc_sata_freeze,
25774 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25775
25776 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25777 and ->freeze/thaw that ignore the hotplug controls. */
25778 -static struct ata_port_operations pdc_old_sata_ops = {
25779 +static const struct ata_port_operations pdc_old_sata_ops = {
25780 .inherits = &pdc_sata_ops,
25781 .freeze = pdc_freeze,
25782 .thaw = pdc_thaw,
25783 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25784 };
25785
25786 -static struct ata_port_operations pdc_pata_ops = {
25787 +static const struct ata_port_operations pdc_pata_ops = {
25788 .inherits = &pdc_common_ops,
25789 .cable_detect = pdc_pata_cable_detect,
25790 .freeze = pdc_freeze,
25791 diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
25792 --- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25793 +++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25794 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25795 .dma_boundary = QS_DMA_BOUNDARY,
25796 };
25797
25798 -static struct ata_port_operations qs_ata_ops = {
25799 +static const struct ata_port_operations qs_ata_ops = {
25800 .inherits = &ata_sff_port_ops,
25801
25802 .check_atapi_dma = qs_check_atapi_dma,
25803 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
25804 --- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25805 +++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25806 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25807 .dma_boundary = ATA_DMA_BOUNDARY,
25808 };
25809
25810 -static struct ata_port_operations sil24_ops = {
25811 +static const struct ata_port_operations sil24_ops = {
25812 .inherits = &sata_pmp_port_ops,
25813
25814 .qc_defer = sil24_qc_defer,
25815 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
25816 --- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25817 +++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25818 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25819 .sg_tablesize = ATA_MAX_PRD
25820 };
25821
25822 -static struct ata_port_operations sil_ops = {
25823 +static const struct ata_port_operations sil_ops = {
25824 .inherits = &ata_bmdma32_port_ops,
25825 .dev_config = sil_dev_config,
25826 .set_mode = sil_set_mode,
25827 diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
25828 --- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25829 +++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25830 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25831 ATA_BMDMA_SHT(DRV_NAME),
25832 };
25833
25834 -static struct ata_port_operations sis_ops = {
25835 +static const struct ata_port_operations sis_ops = {
25836 .inherits = &ata_bmdma_port_ops,
25837 .scr_read = sis_scr_read,
25838 .scr_write = sis_scr_write,
25839 diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
25840 --- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25841 +++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25842 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25843 };
25844
25845
25846 -static struct ata_port_operations k2_sata_ops = {
25847 +static const struct ata_port_operations k2_sata_ops = {
25848 .inherits = &ata_bmdma_port_ops,
25849 .sff_tf_load = k2_sata_tf_load,
25850 .sff_tf_read = k2_sata_tf_read,
25851 diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
25852 --- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25853 +++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25854 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25855 };
25856
25857 /* TODO: inherit from base port_ops after converting to new EH */
25858 -static struct ata_port_operations pdc_20621_ops = {
25859 +static const struct ata_port_operations pdc_20621_ops = {
25860 .inherits = &ata_sff_port_ops,
25861
25862 .check_atapi_dma = pdc_check_atapi_dma,
25863 diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
25864 --- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25865 +++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25866 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25867 ATA_BMDMA_SHT(DRV_NAME),
25868 };
25869
25870 -static struct ata_port_operations uli_ops = {
25871 +static const struct ata_port_operations uli_ops = {
25872 .inherits = &ata_bmdma_port_ops,
25873 .scr_read = uli_scr_read,
25874 .scr_write = uli_scr_write,
25875 diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
25876 --- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25877 +++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25878 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25879 ATA_BMDMA_SHT(DRV_NAME),
25880 };
25881
25882 -static struct ata_port_operations svia_base_ops = {
25883 +static const struct ata_port_operations svia_base_ops = {
25884 .inherits = &ata_bmdma_port_ops,
25885 .sff_tf_load = svia_tf_load,
25886 };
25887
25888 -static struct ata_port_operations vt6420_sata_ops = {
25889 +static const struct ata_port_operations vt6420_sata_ops = {
25890 .inherits = &svia_base_ops,
25891 .freeze = svia_noop_freeze,
25892 .prereset = vt6420_prereset,
25893 .bmdma_start = vt6420_bmdma_start,
25894 };
25895
25896 -static struct ata_port_operations vt6421_pata_ops = {
25897 +static const struct ata_port_operations vt6421_pata_ops = {
25898 .inherits = &svia_base_ops,
25899 .cable_detect = vt6421_pata_cable_detect,
25900 .set_piomode = vt6421_set_pio_mode,
25901 .set_dmamode = vt6421_set_dma_mode,
25902 };
25903
25904 -static struct ata_port_operations vt6421_sata_ops = {
25905 +static const struct ata_port_operations vt6421_sata_ops = {
25906 .inherits = &svia_base_ops,
25907 .scr_read = svia_scr_read,
25908 .scr_write = svia_scr_write,
25909 };
25910
25911 -static struct ata_port_operations vt8251_ops = {
25912 +static const struct ata_port_operations vt8251_ops = {
25913 .inherits = &svia_base_ops,
25914 .hardreset = sata_std_hardreset,
25915 .scr_read = vt8251_scr_read,
25916 diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
25917 --- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25918 +++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25919 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25920 };
25921
25922
25923 -static struct ata_port_operations vsc_sata_ops = {
25924 +static const struct ata_port_operations vsc_sata_ops = {
25925 .inherits = &ata_bmdma_port_ops,
25926 /* The IRQ handling is not quite standard SFF behaviour so we
25927 cannot use the default lost interrupt handler */
25928 diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
25929 --- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25930 +++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25931 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25932 vcc->pop(vcc, skb);
25933 else
25934 dev_kfree_skb_any(skb);
25935 - atomic_inc(&vcc->stats->tx);
25936 + atomic_inc_unchecked(&vcc->stats->tx);
25937
25938 return 0;
25939 }
25940 diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
25941 --- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25942 +++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25943 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25944 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25945
25946 // VC layer stats
25947 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25948 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25949
25950 // free the descriptor
25951 kfree (tx_descr);
25952 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25953 dump_skb ("<<<", vc, skb);
25954
25955 // VC layer stats
25956 - atomic_inc(&atm_vcc->stats->rx);
25957 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25958 __net_timestamp(skb);
25959 // end of our responsability
25960 atm_vcc->push (atm_vcc, skb);
25961 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25962 } else {
25963 PRINTK (KERN_INFO, "dropped over-size frame");
25964 // should we count this?
25965 - atomic_inc(&atm_vcc->stats->rx_drop);
25966 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25967 }
25968
25969 } else {
25970 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25971 }
25972
25973 if (check_area (skb->data, skb->len)) {
25974 - atomic_inc(&atm_vcc->stats->tx_err);
25975 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25976 return -ENOMEM; // ?
25977 }
25978
25979 diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
25980 --- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25981 +++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25982 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25983 if (vcc->pop) vcc->pop(vcc,skb);
25984 else dev_kfree_skb(skb);
25985 if (dev_data) return 0;
25986 - atomic_inc(&vcc->stats->tx_err);
25987 + atomic_inc_unchecked(&vcc->stats->tx_err);
25988 return -ENOLINK;
25989 }
25990 size = skb->len+sizeof(struct atmtcp_hdr);
25991 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25992 if (!new_skb) {
25993 if (vcc->pop) vcc->pop(vcc,skb);
25994 else dev_kfree_skb(skb);
25995 - atomic_inc(&vcc->stats->tx_err);
25996 + atomic_inc_unchecked(&vcc->stats->tx_err);
25997 return -ENOBUFS;
25998 }
25999 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26000 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26001 if (vcc->pop) vcc->pop(vcc,skb);
26002 else dev_kfree_skb(skb);
26003 out_vcc->push(out_vcc,new_skb);
26004 - atomic_inc(&vcc->stats->tx);
26005 - atomic_inc(&out_vcc->stats->rx);
26006 + atomic_inc_unchecked(&vcc->stats->tx);
26007 + atomic_inc_unchecked(&out_vcc->stats->rx);
26008 return 0;
26009 }
26010
26011 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26012 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26013 read_unlock(&vcc_sklist_lock);
26014 if (!out_vcc) {
26015 - atomic_inc(&vcc->stats->tx_err);
26016 + atomic_inc_unchecked(&vcc->stats->tx_err);
26017 goto done;
26018 }
26019 skb_pull(skb,sizeof(struct atmtcp_hdr));
26020 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26021 __net_timestamp(new_skb);
26022 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26023 out_vcc->push(out_vcc,new_skb);
26024 - atomic_inc(&vcc->stats->tx);
26025 - atomic_inc(&out_vcc->stats->rx);
26026 + atomic_inc_unchecked(&vcc->stats->tx);
26027 + atomic_inc_unchecked(&out_vcc->stats->rx);
26028 done:
26029 if (vcc->pop) vcc->pop(vcc,skb);
26030 else dev_kfree_skb(skb);
26031 diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26032 --- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26033 +++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26034 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26035 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26036 vcc->dev->number);
26037 length = 0;
26038 - atomic_inc(&vcc->stats->rx_err);
26039 + atomic_inc_unchecked(&vcc->stats->rx_err);
26040 }
26041 else {
26042 length = ATM_CELL_SIZE-1; /* no HEC */
26043 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26044 size);
26045 }
26046 eff = length = 0;
26047 - atomic_inc(&vcc->stats->rx_err);
26048 + atomic_inc_unchecked(&vcc->stats->rx_err);
26049 }
26050 else {
26051 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26052 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26053 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26054 vcc->dev->number,vcc->vci,length,size << 2,descr);
26055 length = eff = 0;
26056 - atomic_inc(&vcc->stats->rx_err);
26057 + atomic_inc_unchecked(&vcc->stats->rx_err);
26058 }
26059 }
26060 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26061 @@ -770,7 +770,7 @@ rx_dequeued++;
26062 vcc->push(vcc,skb);
26063 pushed++;
26064 }
26065 - atomic_inc(&vcc->stats->rx);
26066 + atomic_inc_unchecked(&vcc->stats->rx);
26067 }
26068 wake_up(&eni_dev->rx_wait);
26069 }
26070 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26071 PCI_DMA_TODEVICE);
26072 if (vcc->pop) vcc->pop(vcc,skb);
26073 else dev_kfree_skb_irq(skb);
26074 - atomic_inc(&vcc->stats->tx);
26075 + atomic_inc_unchecked(&vcc->stats->tx);
26076 wake_up(&eni_dev->tx_wait);
26077 dma_complete++;
26078 }
26079 diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26080 --- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26081 +++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26082 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26083 }
26084 }
26085
26086 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26087 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26088
26089 fs_dprintk (FS_DEBUG_TXMEM, "i");
26090 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26091 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26092 #endif
26093 skb_put (skb, qe->p1 & 0xffff);
26094 ATM_SKB(skb)->vcc = atm_vcc;
26095 - atomic_inc(&atm_vcc->stats->rx);
26096 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26097 __net_timestamp(skb);
26098 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26099 atm_vcc->push (atm_vcc, skb);
26100 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26101 kfree (pe);
26102 }
26103 if (atm_vcc)
26104 - atomic_inc(&atm_vcc->stats->rx_drop);
26105 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26106 break;
26107 case 0x1f: /* Reassembly abort: no buffers. */
26108 /* Silently increment error counter. */
26109 if (atm_vcc)
26110 - atomic_inc(&atm_vcc->stats->rx_drop);
26111 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26112 break;
26113 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26114 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26115 diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26116 --- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26117 +++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26118 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26119 #endif
26120 /* check error condition */
26121 if (*entry->status & STATUS_ERROR)
26122 - atomic_inc(&vcc->stats->tx_err);
26123 + atomic_inc_unchecked(&vcc->stats->tx_err);
26124 else
26125 - atomic_inc(&vcc->stats->tx);
26126 + atomic_inc_unchecked(&vcc->stats->tx);
26127 }
26128 }
26129
26130 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26131 if (skb == NULL) {
26132 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26133
26134 - atomic_inc(&vcc->stats->rx_drop);
26135 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26136 return -ENOMEM;
26137 }
26138
26139 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26140
26141 dev_kfree_skb_any(skb);
26142
26143 - atomic_inc(&vcc->stats->rx_drop);
26144 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26145 return -ENOMEM;
26146 }
26147
26148 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26149
26150 vcc->push(vcc, skb);
26151 - atomic_inc(&vcc->stats->rx);
26152 + atomic_inc_unchecked(&vcc->stats->rx);
26153
26154 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26155
26156 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26157 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26158 fore200e->atm_dev->number,
26159 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26160 - atomic_inc(&vcc->stats->rx_err);
26161 + atomic_inc_unchecked(&vcc->stats->rx_err);
26162 }
26163 }
26164
26165 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26166 goto retry_here;
26167 }
26168
26169 - atomic_inc(&vcc->stats->tx_err);
26170 + atomic_inc_unchecked(&vcc->stats->tx_err);
26171
26172 fore200e->tx_sat++;
26173 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26174 diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26175 --- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26176 +++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26177 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26178
26179 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26180 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26181 - atomic_inc(&vcc->stats->rx_drop);
26182 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26183 goto return_host_buffers;
26184 }
26185
26186 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26187 RBRQ_LEN_ERR(he_dev->rbrq_head)
26188 ? "LEN_ERR" : "",
26189 vcc->vpi, vcc->vci);
26190 - atomic_inc(&vcc->stats->rx_err);
26191 + atomic_inc_unchecked(&vcc->stats->rx_err);
26192 goto return_host_buffers;
26193 }
26194
26195 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26196 vcc->push(vcc, skb);
26197 spin_lock(&he_dev->global_lock);
26198
26199 - atomic_inc(&vcc->stats->rx);
26200 + atomic_inc_unchecked(&vcc->stats->rx);
26201
26202 return_host_buffers:
26203 ++pdus_assembled;
26204 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26205 tpd->vcc->pop(tpd->vcc, tpd->skb);
26206 else
26207 dev_kfree_skb_any(tpd->skb);
26208 - atomic_inc(&tpd->vcc->stats->tx_err);
26209 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26210 }
26211 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26212 return;
26213 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26214 vcc->pop(vcc, skb);
26215 else
26216 dev_kfree_skb_any(skb);
26217 - atomic_inc(&vcc->stats->tx_err);
26218 + atomic_inc_unchecked(&vcc->stats->tx_err);
26219 return -EINVAL;
26220 }
26221
26222 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26223 vcc->pop(vcc, skb);
26224 else
26225 dev_kfree_skb_any(skb);
26226 - atomic_inc(&vcc->stats->tx_err);
26227 + atomic_inc_unchecked(&vcc->stats->tx_err);
26228 return -EINVAL;
26229 }
26230 #endif
26231 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26232 vcc->pop(vcc, skb);
26233 else
26234 dev_kfree_skb_any(skb);
26235 - atomic_inc(&vcc->stats->tx_err);
26236 + atomic_inc_unchecked(&vcc->stats->tx_err);
26237 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26238 return -ENOMEM;
26239 }
26240 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26241 vcc->pop(vcc, skb);
26242 else
26243 dev_kfree_skb_any(skb);
26244 - atomic_inc(&vcc->stats->tx_err);
26245 + atomic_inc_unchecked(&vcc->stats->tx_err);
26246 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26247 return -ENOMEM;
26248 }
26249 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26250 __enqueue_tpd(he_dev, tpd, cid);
26251 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26252
26253 - atomic_inc(&vcc->stats->tx);
26254 + atomic_inc_unchecked(&vcc->stats->tx);
26255
26256 return 0;
26257 }
26258 diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26259 --- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26260 +++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26261 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26262 {
26263 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26264 // VC layer stats
26265 - atomic_inc(&vcc->stats->rx);
26266 + atomic_inc_unchecked(&vcc->stats->rx);
26267 __net_timestamp(skb);
26268 // end of our responsability
26269 vcc->push (vcc, skb);
26270 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26271 dev->tx_iovec = NULL;
26272
26273 // VC layer stats
26274 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26275 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26276
26277 // free the skb
26278 hrz_kfree_skb (skb);
26279 diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26280 --- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26281 +++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26282 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26283 else
26284 dev_kfree_skb(skb);
26285
26286 - atomic_inc(&vcc->stats->tx);
26287 + atomic_inc_unchecked(&vcc->stats->tx);
26288 }
26289
26290 atomic_dec(&scq->used);
26291 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26292 if ((sb = dev_alloc_skb(64)) == NULL) {
26293 printk("%s: Can't allocate buffers for aal0.\n",
26294 card->name);
26295 - atomic_add(i, &vcc->stats->rx_drop);
26296 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26297 break;
26298 }
26299 if (!atm_charge(vcc, sb->truesize)) {
26300 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26301 card->name);
26302 - atomic_add(i - 1, &vcc->stats->rx_drop);
26303 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26304 dev_kfree_skb(sb);
26305 break;
26306 }
26307 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26308 ATM_SKB(sb)->vcc = vcc;
26309 __net_timestamp(sb);
26310 vcc->push(vcc, sb);
26311 - atomic_inc(&vcc->stats->rx);
26312 + atomic_inc_unchecked(&vcc->stats->rx);
26313
26314 cell += ATM_CELL_PAYLOAD;
26315 }
26316 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26317 "(CDC: %08x)\n",
26318 card->name, len, rpp->len, readl(SAR_REG_CDC));
26319 recycle_rx_pool_skb(card, rpp);
26320 - atomic_inc(&vcc->stats->rx_err);
26321 + atomic_inc_unchecked(&vcc->stats->rx_err);
26322 return;
26323 }
26324 if (stat & SAR_RSQE_CRC) {
26325 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26326 recycle_rx_pool_skb(card, rpp);
26327 - atomic_inc(&vcc->stats->rx_err);
26328 + atomic_inc_unchecked(&vcc->stats->rx_err);
26329 return;
26330 }
26331 if (skb_queue_len(&rpp->queue) > 1) {
26332 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26333 RXPRINTK("%s: Can't alloc RX skb.\n",
26334 card->name);
26335 recycle_rx_pool_skb(card, rpp);
26336 - atomic_inc(&vcc->stats->rx_err);
26337 + atomic_inc_unchecked(&vcc->stats->rx_err);
26338 return;
26339 }
26340 if (!atm_charge(vcc, skb->truesize)) {
26341 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26342 __net_timestamp(skb);
26343
26344 vcc->push(vcc, skb);
26345 - atomic_inc(&vcc->stats->rx);
26346 + atomic_inc_unchecked(&vcc->stats->rx);
26347
26348 return;
26349 }
26350 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26351 __net_timestamp(skb);
26352
26353 vcc->push(vcc, skb);
26354 - atomic_inc(&vcc->stats->rx);
26355 + atomic_inc_unchecked(&vcc->stats->rx);
26356
26357 if (skb->truesize > SAR_FB_SIZE_3)
26358 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26359 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26360 if (vcc->qos.aal != ATM_AAL0) {
26361 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26362 card->name, vpi, vci);
26363 - atomic_inc(&vcc->stats->rx_drop);
26364 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26365 goto drop;
26366 }
26367
26368 if ((sb = dev_alloc_skb(64)) == NULL) {
26369 printk("%s: Can't allocate buffers for AAL0.\n",
26370 card->name);
26371 - atomic_inc(&vcc->stats->rx_err);
26372 + atomic_inc_unchecked(&vcc->stats->rx_err);
26373 goto drop;
26374 }
26375
26376 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26377 ATM_SKB(sb)->vcc = vcc;
26378 __net_timestamp(sb);
26379 vcc->push(vcc, sb);
26380 - atomic_inc(&vcc->stats->rx);
26381 + atomic_inc_unchecked(&vcc->stats->rx);
26382
26383 drop:
26384 skb_pull(queue, 64);
26385 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26386
26387 if (vc == NULL) {
26388 printk("%s: NULL connection in send().\n", card->name);
26389 - atomic_inc(&vcc->stats->tx_err);
26390 + atomic_inc_unchecked(&vcc->stats->tx_err);
26391 dev_kfree_skb(skb);
26392 return -EINVAL;
26393 }
26394 if (!test_bit(VCF_TX, &vc->flags)) {
26395 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26396 - atomic_inc(&vcc->stats->tx_err);
26397 + atomic_inc_unchecked(&vcc->stats->tx_err);
26398 dev_kfree_skb(skb);
26399 return -EINVAL;
26400 }
26401 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26402 break;
26403 default:
26404 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26405 - atomic_inc(&vcc->stats->tx_err);
26406 + atomic_inc_unchecked(&vcc->stats->tx_err);
26407 dev_kfree_skb(skb);
26408 return -EINVAL;
26409 }
26410
26411 if (skb_shinfo(skb)->nr_frags != 0) {
26412 printk("%s: No scatter-gather yet.\n", card->name);
26413 - atomic_inc(&vcc->stats->tx_err);
26414 + atomic_inc_unchecked(&vcc->stats->tx_err);
26415 dev_kfree_skb(skb);
26416 return -EINVAL;
26417 }
26418 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26419
26420 err = queue_skb(card, vc, skb, oam);
26421 if (err) {
26422 - atomic_inc(&vcc->stats->tx_err);
26423 + atomic_inc_unchecked(&vcc->stats->tx_err);
26424 dev_kfree_skb(skb);
26425 return err;
26426 }
26427 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26428 skb = dev_alloc_skb(64);
26429 if (!skb) {
26430 printk("%s: Out of memory in send_oam().\n", card->name);
26431 - atomic_inc(&vcc->stats->tx_err);
26432 + atomic_inc_unchecked(&vcc->stats->tx_err);
26433 return -ENOMEM;
26434 }
26435 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26436 diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26437 --- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26438 +++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26439 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26440 status = (u_short) (buf_desc_ptr->desc_mode);
26441 if (status & (RX_CER | RX_PTE | RX_OFL))
26442 {
26443 - atomic_inc(&vcc->stats->rx_err);
26444 + atomic_inc_unchecked(&vcc->stats->rx_err);
26445 IF_ERR(printk("IA: bad packet, dropping it");)
26446 if (status & RX_CER) {
26447 IF_ERR(printk(" cause: packet CRC error\n");)
26448 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26449 len = dma_addr - buf_addr;
26450 if (len > iadev->rx_buf_sz) {
26451 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26452 - atomic_inc(&vcc->stats->rx_err);
26453 + atomic_inc_unchecked(&vcc->stats->rx_err);
26454 goto out_free_desc;
26455 }
26456
26457 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26458 ia_vcc = INPH_IA_VCC(vcc);
26459 if (ia_vcc == NULL)
26460 {
26461 - atomic_inc(&vcc->stats->rx_err);
26462 + atomic_inc_unchecked(&vcc->stats->rx_err);
26463 dev_kfree_skb_any(skb);
26464 atm_return(vcc, atm_guess_pdu2truesize(len));
26465 goto INCR_DLE;
26466 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26467 if ((length > iadev->rx_buf_sz) || (length >
26468 (skb->len - sizeof(struct cpcs_trailer))))
26469 {
26470 - atomic_inc(&vcc->stats->rx_err);
26471 + atomic_inc_unchecked(&vcc->stats->rx_err);
26472 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26473 length, skb->len);)
26474 dev_kfree_skb_any(skb);
26475 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26476
26477 IF_RX(printk("rx_dle_intr: skb push");)
26478 vcc->push(vcc,skb);
26479 - atomic_inc(&vcc->stats->rx);
26480 + atomic_inc_unchecked(&vcc->stats->rx);
26481 iadev->rx_pkt_cnt++;
26482 }
26483 INCR_DLE:
26484 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26485 {
26486 struct k_sonet_stats *stats;
26487 stats = &PRIV(_ia_dev[board])->sonet_stats;
26488 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26489 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26490 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26491 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26492 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26493 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26494 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26495 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26496 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26497 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26498 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26499 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26500 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26501 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26502 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26503 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26504 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26505 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26506 }
26507 ia_cmds.status = 0;
26508 break;
26509 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26510 if ((desc == 0) || (desc > iadev->num_tx_desc))
26511 {
26512 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26513 - atomic_inc(&vcc->stats->tx);
26514 + atomic_inc_unchecked(&vcc->stats->tx);
26515 if (vcc->pop)
26516 vcc->pop(vcc, skb);
26517 else
26518 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26519 ATM_DESC(skb) = vcc->vci;
26520 skb_queue_tail(&iadev->tx_dma_q, skb);
26521
26522 - atomic_inc(&vcc->stats->tx);
26523 + atomic_inc_unchecked(&vcc->stats->tx);
26524 iadev->tx_pkt_cnt++;
26525 /* Increment transaction counter */
26526 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26527
26528 #if 0
26529 /* add flow control logic */
26530 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26531 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26532 if (iavcc->vc_desc_cnt > 10) {
26533 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26534 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26535 diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26536 --- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26537 +++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26538 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26539 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26540 lanai_endtx(lanai, lvcc);
26541 lanai_free_skb(lvcc->tx.atmvcc, skb);
26542 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26543 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26544 }
26545
26546 /* Try to fill the buffer - don't call unless there is backlog */
26547 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26548 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26549 __net_timestamp(skb);
26550 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26551 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26552 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26553 out:
26554 lvcc->rx.buf.ptr = end;
26555 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26556 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26557 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26558 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26559 lanai->stats.service_rxnotaal5++;
26560 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26561 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26562 return 0;
26563 }
26564 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26565 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26566 int bytes;
26567 read_unlock(&vcc_sklist_lock);
26568 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26569 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26570 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26571 lvcc->stats.x.aal5.service_trash++;
26572 bytes = (SERVICE_GET_END(s) * 16) -
26573 (((unsigned long) lvcc->rx.buf.ptr) -
26574 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26575 }
26576 if (s & SERVICE_STREAM) {
26577 read_unlock(&vcc_sklist_lock);
26578 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26579 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26580 lvcc->stats.x.aal5.service_stream++;
26581 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26582 "PDU on VCI %d!\n", lanai->number, vci);
26583 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26584 return 0;
26585 }
26586 DPRINTK("got rx crc error on vci %d\n", vci);
26587 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26588 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26589 lvcc->stats.x.aal5.service_rxcrc++;
26590 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26591 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26592 diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26593 --- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26594 +++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26595 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26596 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26597 {
26598 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26599 - atomic_inc(&vcc->stats->tx_err);
26600 + atomic_inc_unchecked(&vcc->stats->tx_err);
26601 dev_kfree_skb_any(skb);
26602 return -EINVAL;
26603 }
26604 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26605 if (!vc->tx)
26606 {
26607 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26608 - atomic_inc(&vcc->stats->tx_err);
26609 + atomic_inc_unchecked(&vcc->stats->tx_err);
26610 dev_kfree_skb_any(skb);
26611 return -EINVAL;
26612 }
26613 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26614 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26615 {
26616 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26617 - atomic_inc(&vcc->stats->tx_err);
26618 + atomic_inc_unchecked(&vcc->stats->tx_err);
26619 dev_kfree_skb_any(skb);
26620 return -EINVAL;
26621 }
26622 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26623 if (skb_shinfo(skb)->nr_frags != 0)
26624 {
26625 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26626 - atomic_inc(&vcc->stats->tx_err);
26627 + atomic_inc_unchecked(&vcc->stats->tx_err);
26628 dev_kfree_skb_any(skb);
26629 return -EINVAL;
26630 }
26631 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26632
26633 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26634 {
26635 - atomic_inc(&vcc->stats->tx_err);
26636 + atomic_inc_unchecked(&vcc->stats->tx_err);
26637 dev_kfree_skb_any(skb);
26638 return -EIO;
26639 }
26640 - atomic_inc(&vcc->stats->tx);
26641 + atomic_inc_unchecked(&vcc->stats->tx);
26642
26643 return 0;
26644 }
26645 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26646 {
26647 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26648 card->index);
26649 - atomic_add(i,&vcc->stats->rx_drop);
26650 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26651 break;
26652 }
26653 if (!atm_charge(vcc, sb->truesize))
26654 {
26655 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26656 card->index);
26657 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26658 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26659 dev_kfree_skb_any(sb);
26660 break;
26661 }
26662 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26663 ATM_SKB(sb)->vcc = vcc;
26664 __net_timestamp(sb);
26665 vcc->push(vcc, sb);
26666 - atomic_inc(&vcc->stats->rx);
26667 + atomic_inc_unchecked(&vcc->stats->rx);
26668 cell += ATM_CELL_PAYLOAD;
26669 }
26670
26671 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26672 if (iovb == NULL)
26673 {
26674 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26675 - atomic_inc(&vcc->stats->rx_drop);
26676 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26677 recycle_rx_buf(card, skb);
26678 return;
26679 }
26680 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26681 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26682 {
26683 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26684 - atomic_inc(&vcc->stats->rx_err);
26685 + atomic_inc_unchecked(&vcc->stats->rx_err);
26686 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26687 NS_SKB(iovb)->iovcnt = 0;
26688 iovb->len = 0;
26689 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26690 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26691 card->index);
26692 which_list(card, skb);
26693 - atomic_inc(&vcc->stats->rx_err);
26694 + atomic_inc_unchecked(&vcc->stats->rx_err);
26695 recycle_rx_buf(card, skb);
26696 vc->rx_iov = NULL;
26697 recycle_iov_buf(card, iovb);
26698 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26699 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26700 card->index);
26701 which_list(card, skb);
26702 - atomic_inc(&vcc->stats->rx_err);
26703 + atomic_inc_unchecked(&vcc->stats->rx_err);
26704 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26705 NS_SKB(iovb)->iovcnt);
26706 vc->rx_iov = NULL;
26707 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26708 printk(" - PDU size mismatch.\n");
26709 else
26710 printk(".\n");
26711 - atomic_inc(&vcc->stats->rx_err);
26712 + atomic_inc_unchecked(&vcc->stats->rx_err);
26713 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26714 NS_SKB(iovb)->iovcnt);
26715 vc->rx_iov = NULL;
26716 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26717 if (!atm_charge(vcc, skb->truesize))
26718 {
26719 push_rxbufs(card, skb);
26720 - atomic_inc(&vcc->stats->rx_drop);
26721 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26722 }
26723 else
26724 {
26725 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26726 ATM_SKB(skb)->vcc = vcc;
26727 __net_timestamp(skb);
26728 vcc->push(vcc, skb);
26729 - atomic_inc(&vcc->stats->rx);
26730 + atomic_inc_unchecked(&vcc->stats->rx);
26731 }
26732 }
26733 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26734 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26735 if (!atm_charge(vcc, sb->truesize))
26736 {
26737 push_rxbufs(card, sb);
26738 - atomic_inc(&vcc->stats->rx_drop);
26739 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26740 }
26741 else
26742 {
26743 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26744 ATM_SKB(sb)->vcc = vcc;
26745 __net_timestamp(sb);
26746 vcc->push(vcc, sb);
26747 - atomic_inc(&vcc->stats->rx);
26748 + atomic_inc_unchecked(&vcc->stats->rx);
26749 }
26750
26751 push_rxbufs(card, skb);
26752 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26753 if (!atm_charge(vcc, skb->truesize))
26754 {
26755 push_rxbufs(card, skb);
26756 - atomic_inc(&vcc->stats->rx_drop);
26757 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26758 }
26759 else
26760 {
26761 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26762 ATM_SKB(skb)->vcc = vcc;
26763 __net_timestamp(skb);
26764 vcc->push(vcc, skb);
26765 - atomic_inc(&vcc->stats->rx);
26766 + atomic_inc_unchecked(&vcc->stats->rx);
26767 }
26768
26769 push_rxbufs(card, sb);
26770 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26771 if (hb == NULL)
26772 {
26773 printk("nicstar%d: Out of huge buffers.\n", card->index);
26774 - atomic_inc(&vcc->stats->rx_drop);
26775 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26776 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26777 NS_SKB(iovb)->iovcnt);
26778 vc->rx_iov = NULL;
26779 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26780 }
26781 else
26782 dev_kfree_skb_any(hb);
26783 - atomic_inc(&vcc->stats->rx_drop);
26784 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26785 }
26786 else
26787 {
26788 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26789 #endif /* NS_USE_DESTRUCTORS */
26790 __net_timestamp(hb);
26791 vcc->push(vcc, hb);
26792 - atomic_inc(&vcc->stats->rx);
26793 + atomic_inc_unchecked(&vcc->stats->rx);
26794 }
26795 }
26796
26797 diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
26798 --- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26799 +++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26800 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26801 }
26802 atm_charge(vcc, skb->truesize);
26803 vcc->push(vcc, skb);
26804 - atomic_inc(&vcc->stats->rx);
26805 + atomic_inc_unchecked(&vcc->stats->rx);
26806 break;
26807
26808 case PKT_STATUS:
26809 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26810 char msg[500];
26811 char item[10];
26812
26813 + pax_track_stack();
26814 +
26815 len = buf->len;
26816 for (i = 0; i < len; i++){
26817 if(i % 8 == 0)
26818 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26819 vcc = SKB_CB(oldskb)->vcc;
26820
26821 if (vcc) {
26822 - atomic_inc(&vcc->stats->tx);
26823 + atomic_inc_unchecked(&vcc->stats->tx);
26824 solos_pop(vcc, oldskb);
26825 } else
26826 dev_kfree_skb_irq(oldskb);
26827 diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
26828 --- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26829 +++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26830 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26831
26832
26833 #define ADD_LIMITED(s,v) \
26834 - atomic_add((v),&stats->s); \
26835 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26836 + atomic_add_unchecked((v),&stats->s); \
26837 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26838
26839
26840 static void suni_hz(unsigned long from_timer)
26841 diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
26842 --- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26843 +++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26844 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26845 struct sonet_stats tmp;
26846 int error = 0;
26847
26848 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26849 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26850 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26851 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26852 if (zero && !error) {
26853 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26854
26855
26856 #define ADD_LIMITED(s,v) \
26857 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26858 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26859 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26860 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26861 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26862 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26863
26864
26865 static void stat_event(struct atm_dev *dev)
26866 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26867 if (reason & uPD98402_INT_PFM) stat_event(dev);
26868 if (reason & uPD98402_INT_PCO) {
26869 (void) GET(PCOCR); /* clear interrupt cause */
26870 - atomic_add(GET(HECCT),
26871 + atomic_add_unchecked(GET(HECCT),
26872 &PRIV(dev)->sonet_stats.uncorr_hcs);
26873 }
26874 if ((reason & uPD98402_INT_RFO) &&
26875 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26876 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26877 uPD98402_INT_LOS),PIMR); /* enable them */
26878 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26879 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26880 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26881 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26882 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26883 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26884 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26885 return 0;
26886 }
26887
26888 diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
26889 --- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26890 +++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26891 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26892 }
26893 if (!size) {
26894 dev_kfree_skb_irq(skb);
26895 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26896 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26897 continue;
26898 }
26899 if (!atm_charge(vcc,skb->truesize)) {
26900 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26901 skb->len = size;
26902 ATM_SKB(skb)->vcc = vcc;
26903 vcc->push(vcc,skb);
26904 - atomic_inc(&vcc->stats->rx);
26905 + atomic_inc_unchecked(&vcc->stats->rx);
26906 }
26907 zout(pos & 0xffff,MTA(mbx));
26908 #if 0 /* probably a stupid idea */
26909 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26910 skb_queue_head(&zatm_vcc->backlog,skb);
26911 break;
26912 }
26913 - atomic_inc(&vcc->stats->tx);
26914 + atomic_inc_unchecked(&vcc->stats->tx);
26915 wake_up(&zatm_vcc->tx_wait);
26916 }
26917
26918 diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
26919 --- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26920 +++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26921 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26922 return ret;
26923 }
26924
26925 -static struct sysfs_ops driver_sysfs_ops = {
26926 +static const struct sysfs_ops driver_sysfs_ops = {
26927 .show = drv_attr_show,
26928 .store = drv_attr_store,
26929 };
26930 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26931 return ret;
26932 }
26933
26934 -static struct sysfs_ops bus_sysfs_ops = {
26935 +static const struct sysfs_ops bus_sysfs_ops = {
26936 .show = bus_attr_show,
26937 .store = bus_attr_store,
26938 };
26939 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26940 return 0;
26941 }
26942
26943 -static struct kset_uevent_ops bus_uevent_ops = {
26944 +static const struct kset_uevent_ops bus_uevent_ops = {
26945 .filter = bus_uevent_filter,
26946 };
26947
26948 diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
26949 --- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26950 +++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26951 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26952 kfree(cp);
26953 }
26954
26955 -static struct sysfs_ops class_sysfs_ops = {
26956 +static const struct sysfs_ops class_sysfs_ops = {
26957 .show = class_attr_show,
26958 .store = class_attr_store,
26959 };
26960 diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
26961 --- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26962 +++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26963 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26964 return ret;
26965 }
26966
26967 -static struct sysfs_ops dev_sysfs_ops = {
26968 +static const struct sysfs_ops dev_sysfs_ops = {
26969 .show = dev_attr_show,
26970 .store = dev_attr_store,
26971 };
26972 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26973 return retval;
26974 }
26975
26976 -static struct kset_uevent_ops device_uevent_ops = {
26977 +static const struct kset_uevent_ops device_uevent_ops = {
26978 .filter = dev_uevent_filter,
26979 .name = dev_uevent_name,
26980 .uevent = dev_uevent,
26981 diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
26982 --- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26983 +++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26984 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26985 return retval;
26986 }
26987
26988 -static struct kset_uevent_ops memory_uevent_ops = {
26989 +static const struct kset_uevent_ops memory_uevent_ops = {
26990 .name = memory_uevent_name,
26991 .uevent = memory_uevent,
26992 };
26993 diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
26994 --- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26995 +++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26996 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26997 return -EIO;
26998 }
26999
27000 -static struct sysfs_ops sysfs_ops = {
27001 +static const struct sysfs_ops sysfs_ops = {
27002 .show = sysdev_show,
27003 .store = sysdev_store,
27004 };
27005 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27006 return -EIO;
27007 }
27008
27009 -static struct sysfs_ops sysfs_class_ops = {
27010 +static const struct sysfs_ops sysfs_class_ops = {
27011 .show = sysdev_class_show,
27012 .store = sysdev_class_store,
27013 };
27014 diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27015 --- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27016 +++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27017 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27018 int err;
27019 u32 cp;
27020
27021 + memset(&arg64, 0, sizeof(arg64));
27022 +
27023 err = 0;
27024 err |=
27025 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27026 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27027 /* Wait (up to 20 seconds) for a command to complete */
27028
27029 for (i = 20 * HZ; i > 0; i--) {
27030 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27031 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27032 if (done == FIFO_EMPTY)
27033 schedule_timeout_uninterruptible(1);
27034 else
27035 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27036 resend_cmd1:
27037
27038 /* Disable interrupt on the board. */
27039 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27040 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27041
27042 /* Make sure there is room in the command FIFO */
27043 /* Actually it should be completely empty at this time */
27044 @@ -2884,13 +2886,13 @@ resend_cmd1:
27045 /* tape side of the driver. */
27046 for (i = 200000; i > 0; i--) {
27047 /* if fifo isn't full go */
27048 - if (!(h->access.fifo_full(h)))
27049 + if (!(h->access->fifo_full(h)))
27050 break;
27051 udelay(10);
27052 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27053 " waiting!\n", h->ctlr);
27054 }
27055 - h->access.submit_command(h, c); /* Send the cmd */
27056 + h->access->submit_command(h, c); /* Send the cmd */
27057 do {
27058 complete = pollcomplete(h->ctlr);
27059
27060 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27061 while (!hlist_empty(&h->reqQ)) {
27062 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27063 /* can't do anything if fifo is full */
27064 - if ((h->access.fifo_full(h))) {
27065 + if ((h->access->fifo_full(h))) {
27066 printk(KERN_WARNING "cciss: fifo full\n");
27067 break;
27068 }
27069 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27070 h->Qdepth--;
27071
27072 /* Tell the controller execute command */
27073 - h->access.submit_command(h, c);
27074 + h->access->submit_command(h, c);
27075
27076 /* Put job onto the completed Q */
27077 addQ(&h->cmpQ, c);
27078 @@ -3393,17 +3395,17 @@ startio:
27079
27080 static inline unsigned long get_next_completion(ctlr_info_t *h)
27081 {
27082 - return h->access.command_completed(h);
27083 + return h->access->command_completed(h);
27084 }
27085
27086 static inline int interrupt_pending(ctlr_info_t *h)
27087 {
27088 - return h->access.intr_pending(h);
27089 + return h->access->intr_pending(h);
27090 }
27091
27092 static inline long interrupt_not_for_us(ctlr_info_t *h)
27093 {
27094 - return (((h->access.intr_pending(h) == 0) ||
27095 + return (((h->access->intr_pending(h) == 0) ||
27096 (h->interrupts_enabled == 0)));
27097 }
27098
27099 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27100 */
27101 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27102 c->product_name = products[prod_index].product_name;
27103 - c->access = *(products[prod_index].access);
27104 + c->access = products[prod_index].access;
27105 c->nr_cmds = c->max_commands - 4;
27106 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27107 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27108 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27109 }
27110
27111 /* make sure the board interrupts are off */
27112 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27113 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27114 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27115 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27116 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27117 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27118 cciss_scsi_setup(i);
27119
27120 /* Turn the interrupts on so we can service requests */
27121 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27122 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27123
27124 /* Get the firmware version */
27125 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27126 diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27127 --- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27128 +++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27129 @@ -90,7 +90,7 @@ struct ctlr_info
27130 // information about each logical volume
27131 drive_info_struct *drv[CISS_MAX_LUN];
27132
27133 - struct access_method access;
27134 + struct access_method *access;
27135
27136 /* queue and queue Info */
27137 struct hlist_head reqQ;
27138 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27139 --- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27140 +++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27141 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27142 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27143 goto Enomem4;
27144 }
27145 - hba[i]->access.set_intr_mask(hba[i], 0);
27146 + hba[i]->access->set_intr_mask(hba[i], 0);
27147 if (request_irq(hba[i]->intr, do_ida_intr,
27148 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27149 {
27150 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27151 add_timer(&hba[i]->timer);
27152
27153 /* Enable IRQ now that spinlock and rate limit timer are set up */
27154 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27155 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27156
27157 for(j=0; j<NWD; j++) {
27158 struct gendisk *disk = ida_gendisk[i][j];
27159 @@ -695,7 +695,7 @@ DBGINFO(
27160 for(i=0; i<NR_PRODUCTS; i++) {
27161 if (board_id == products[i].board_id) {
27162 c->product_name = products[i].product_name;
27163 - c->access = *(products[i].access);
27164 + c->access = products[i].access;
27165 break;
27166 }
27167 }
27168 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27169 hba[ctlr]->intr = intr;
27170 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27171 hba[ctlr]->product_name = products[j].product_name;
27172 - hba[ctlr]->access = *(products[j].access);
27173 + hba[ctlr]->access = products[j].access;
27174 hba[ctlr]->ctlr = ctlr;
27175 hba[ctlr]->board_id = board_id;
27176 hba[ctlr]->pci_dev = NULL; /* not PCI */
27177 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27178 struct scatterlist tmp_sg[SG_MAX];
27179 int i, dir, seg;
27180
27181 + pax_track_stack();
27182 +
27183 if (blk_queue_plugged(q))
27184 goto startio;
27185
27186 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27187
27188 while((c = h->reqQ) != NULL) {
27189 /* Can't do anything if we're busy */
27190 - if (h->access.fifo_full(h) == 0)
27191 + if (h->access->fifo_full(h) == 0)
27192 return;
27193
27194 /* Get the first entry from the request Q */
27195 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27196 h->Qdepth--;
27197
27198 /* Tell the controller to do our bidding */
27199 - h->access.submit_command(h, c);
27200 + h->access->submit_command(h, c);
27201
27202 /* Get onto the completion Q */
27203 addQ(&h->cmpQ, c);
27204 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27205 unsigned long flags;
27206 __u32 a,a1;
27207
27208 - istat = h->access.intr_pending(h);
27209 + istat = h->access->intr_pending(h);
27210 /* Is this interrupt for us? */
27211 if (istat == 0)
27212 return IRQ_NONE;
27213 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27214 */
27215 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27216 if (istat & FIFO_NOT_EMPTY) {
27217 - while((a = h->access.command_completed(h))) {
27218 + while((a = h->access->command_completed(h))) {
27219 a1 = a; a &= ~3;
27220 if ((c = h->cmpQ) == NULL)
27221 {
27222 @@ -1434,11 +1436,11 @@ static int sendcmd(
27223 /*
27224 * Disable interrupt
27225 */
27226 - info_p->access.set_intr_mask(info_p, 0);
27227 + info_p->access->set_intr_mask(info_p, 0);
27228 /* Make sure there is room in the command FIFO */
27229 /* Actually it should be completely empty at this time. */
27230 for (i = 200000; i > 0; i--) {
27231 - temp = info_p->access.fifo_full(info_p);
27232 + temp = info_p->access->fifo_full(info_p);
27233 if (temp != 0) {
27234 break;
27235 }
27236 @@ -1451,7 +1453,7 @@ DBG(
27237 /*
27238 * Send the cmd
27239 */
27240 - info_p->access.submit_command(info_p, c);
27241 + info_p->access->submit_command(info_p, c);
27242 complete = pollcomplete(ctlr);
27243
27244 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27245 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27246 * we check the new geometry. Then turn interrupts back on when
27247 * we're done.
27248 */
27249 - host->access.set_intr_mask(host, 0);
27250 + host->access->set_intr_mask(host, 0);
27251 getgeometry(ctlr);
27252 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27253 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27254
27255 for(i=0; i<NWD; i++) {
27256 struct gendisk *disk = ida_gendisk[ctlr][i];
27257 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27258 /* Wait (up to 2 seconds) for a command to complete */
27259
27260 for (i = 200000; i > 0; i--) {
27261 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27262 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27263 if (done == 0) {
27264 udelay(10); /* a short fixed delay */
27265 } else
27266 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27267 --- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27268 +++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27269 @@ -99,7 +99,7 @@ struct ctlr_info {
27270 drv_info_t drv[NWD];
27271 struct proc_dir_entry *proc;
27272
27273 - struct access_method access;
27274 + struct access_method *access;
27275
27276 cmdlist_t *reqQ;
27277 cmdlist_t *cmpQ;
27278 diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27279 --- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27280 +++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27281 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27282 unsigned long flags;
27283 int Channel, TargetID;
27284
27285 + pax_track_stack();
27286 +
27287 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27288 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27289 sizeof(DAC960_SCSI_Inquiry_T) +
27290 diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27291 --- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27292 +++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27293 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27294 struct kvec iov;
27295 sigset_t blocked, oldset;
27296
27297 + pax_track_stack();
27298 +
27299 if (unlikely(!sock)) {
27300 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27301 lo->disk->disk_name, (send ? "send" : "recv"));
27302 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27303 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27304 unsigned int cmd, unsigned long arg)
27305 {
27306 + pax_track_stack();
27307 +
27308 switch (cmd) {
27309 case NBD_DISCONNECT: {
27310 struct request sreq;
27311 diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27312 --- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27313 +++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27314 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27315 return len;
27316 }
27317
27318 -static struct sysfs_ops kobj_pkt_ops = {
27319 +static const struct sysfs_ops kobj_pkt_ops = {
27320 .show = kobj_pkt_show,
27321 .store = kobj_pkt_store
27322 };
27323 diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27324 --- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27325 +++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27326 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27327 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27328 return -EFAULT;
27329
27330 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27331 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27332 return -EFAULT;
27333
27334 client = agp_find_client_by_pid(reserve.pid);
27335 diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27336 --- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27337 +++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27338 @@ -10,6 +10,7 @@
27339 #include <linux/types.h>
27340 #include <linux/errno.h>
27341 #include <linux/tty.h>
27342 +#include <linux/mutex.h>
27343 #include <linux/timer.h>
27344 #include <linux/kernel.h>
27345 #include <linux/wait.h>
27346 @@ -36,6 +37,7 @@ static int vfd_is_open;
27347 static unsigned char vfd[40];
27348 static int vfd_cursor;
27349 static unsigned char ledpb, led;
27350 +static DEFINE_MUTEX(vfd_mutex);
27351
27352 static void update_vfd(void)
27353 {
27354 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27355 if (!vfd_is_open)
27356 return -EBUSY;
27357
27358 + mutex_lock(&vfd_mutex);
27359 for (;;) {
27360 char c;
27361 if (!indx)
27362 break;
27363 - if (get_user(c, buf))
27364 + if (get_user(c, buf)) {
27365 + mutex_unlock(&vfd_mutex);
27366 return -EFAULT;
27367 + }
27368 if (esc) {
27369 set_led(c);
27370 esc = 0;
27371 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27372 buf++;
27373 }
27374 update_vfd();
27375 + mutex_unlock(&vfd_mutex);
27376
27377 return len;
27378 }
27379 diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27380 --- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27381 +++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27382 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27383 switch (cmd) {
27384
27385 case RTC_PLL_GET:
27386 + memset(&pll, 0, sizeof(pll));
27387 if (get_rtc_pll(&pll))
27388 return -EINVAL;
27389 else
27390 diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27391 --- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27392 +++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27393 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27394 return 0;
27395 }
27396
27397 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27398 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27399
27400 static int
27401 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27402 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27403 }
27404
27405 static int
27406 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27407 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27408 {
27409 struct hpet_timer __iomem *timer;
27410 struct hpet __iomem *hpet;
27411 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27412 {
27413 struct hpet_info info;
27414
27415 + memset(&info, 0, sizeof(info));
27416 +
27417 if (devp->hd_ireqfreq)
27418 info.hi_ireqfreq =
27419 hpet_time_div(hpetp, devp->hd_ireqfreq);
27420 - else
27421 - info.hi_ireqfreq = 0;
27422 info.hi_flags =
27423 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27424 info.hi_hpet = hpetp->hp_which;
27425 diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27426 --- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27427 +++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27428 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27429 return cnt;
27430 }
27431
27432 -static struct hv_ops hvc_beat_get_put_ops = {
27433 +static const struct hv_ops hvc_beat_get_put_ops = {
27434 .get_chars = hvc_beat_get_chars,
27435 .put_chars = hvc_beat_put_chars,
27436 };
27437 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27438 --- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27439 +++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27440 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27441 * console interfaces but can still be used as a tty device. This has to be
27442 * static because kmalloc will not work during early console init.
27443 */
27444 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27445 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27446 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27447 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27448
27449 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27450 * vty adapters do NOT get an hvc_instantiate() callback since they
27451 * appear after early console init.
27452 */
27453 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27454 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27455 {
27456 struct hvc_struct *hp;
27457
27458 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27459 };
27460
27461 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27462 - struct hv_ops *ops, int outbuf_size)
27463 + const struct hv_ops *ops, int outbuf_size)
27464 {
27465 struct hvc_struct *hp;
27466 int i;
27467 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27468 --- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27469 +++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27470 @@ -55,7 +55,7 @@ struct hvc_struct {
27471 int outbuf_size;
27472 int n_outbuf;
27473 uint32_t vtermno;
27474 - struct hv_ops *ops;
27475 + const struct hv_ops *ops;
27476 int irq_requested;
27477 int data;
27478 struct winsize ws;
27479 @@ -76,11 +76,11 @@ struct hv_ops {
27480 };
27481
27482 /* Register a vterm and a slot index for use as a console (console_init) */
27483 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27484 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27485
27486 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27487 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27488 - struct hv_ops *ops, int outbuf_size);
27489 + const struct hv_ops *ops, int outbuf_size);
27490 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27491 extern int hvc_remove(struct hvc_struct *hp);
27492
27493 diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27494 --- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27495 +++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27496 @@ -197,7 +197,7 @@ done:
27497 return sent;
27498 }
27499
27500 -static struct hv_ops hvc_get_put_ops = {
27501 +static const struct hv_ops hvc_get_put_ops = {
27502 .get_chars = get_chars,
27503 .put_chars = put_chars,
27504 .notifier_add = notifier_add_irq,
27505 diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27506 --- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27507 +++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27508 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27509
27510
27511 /* HVC operations */
27512 -static struct hv_ops hvc_iucv_ops = {
27513 +static const struct hv_ops hvc_iucv_ops = {
27514 .get_chars = hvc_iucv_get_chars,
27515 .put_chars = hvc_iucv_put_chars,
27516 .notifier_add = hvc_iucv_notifier_add,
27517 diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27518 --- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27519 +++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27520 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27521 return i;
27522 }
27523
27524 -static struct hv_ops hvc_rtas_get_put_ops = {
27525 +static const struct hv_ops hvc_rtas_get_put_ops = {
27526 .get_chars = hvc_rtas_read_console,
27527 .put_chars = hvc_rtas_write_console,
27528 };
27529 diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27530 --- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27531 +++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27532 @@ -82,6 +82,7 @@
27533 #include <asm/hvcserver.h>
27534 #include <asm/uaccess.h>
27535 #include <asm/vio.h>
27536 +#include <asm/local.h>
27537
27538 /*
27539 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27540 @@ -269,7 +270,7 @@ struct hvcs_struct {
27541 unsigned int index;
27542
27543 struct tty_struct *tty;
27544 - int open_count;
27545 + local_t open_count;
27546
27547 /*
27548 * Used to tell the driver kernel_thread what operations need to take
27549 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27550
27551 spin_lock_irqsave(&hvcsd->lock, flags);
27552
27553 - if (hvcsd->open_count > 0) {
27554 + if (local_read(&hvcsd->open_count) > 0) {
27555 spin_unlock_irqrestore(&hvcsd->lock, flags);
27556 printk(KERN_INFO "HVCS: vterm state unchanged. "
27557 "The hvcs device node is still in use.\n");
27558 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27559 if ((retval = hvcs_partner_connect(hvcsd)))
27560 goto error_release;
27561
27562 - hvcsd->open_count = 1;
27563 + local_set(&hvcsd->open_count, 1);
27564 hvcsd->tty = tty;
27565 tty->driver_data = hvcsd;
27566
27567 @@ -1169,7 +1170,7 @@ fast_open:
27568
27569 spin_lock_irqsave(&hvcsd->lock, flags);
27570 kref_get(&hvcsd->kref);
27571 - hvcsd->open_count++;
27572 + local_inc(&hvcsd->open_count);
27573 hvcsd->todo_mask |= HVCS_SCHED_READ;
27574 spin_unlock_irqrestore(&hvcsd->lock, flags);
27575
27576 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27577 hvcsd = tty->driver_data;
27578
27579 spin_lock_irqsave(&hvcsd->lock, flags);
27580 - if (--hvcsd->open_count == 0) {
27581 + if (local_dec_and_test(&hvcsd->open_count)) {
27582
27583 vio_disable_interrupts(hvcsd->vdev);
27584
27585 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27586 free_irq(irq, hvcsd);
27587 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27588 return;
27589 - } else if (hvcsd->open_count < 0) {
27590 + } else if (local_read(&hvcsd->open_count) < 0) {
27591 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27592 " is missmanaged.\n",
27593 - hvcsd->vdev->unit_address, hvcsd->open_count);
27594 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27595 }
27596
27597 spin_unlock_irqrestore(&hvcsd->lock, flags);
27598 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27599
27600 spin_lock_irqsave(&hvcsd->lock, flags);
27601 /* Preserve this so that we know how many kref refs to put */
27602 - temp_open_count = hvcsd->open_count;
27603 + temp_open_count = local_read(&hvcsd->open_count);
27604
27605 /*
27606 * Don't kref put inside the spinlock because the destruction
27607 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27608 hvcsd->tty->driver_data = NULL;
27609 hvcsd->tty = NULL;
27610
27611 - hvcsd->open_count = 0;
27612 + local_set(&hvcsd->open_count, 0);
27613
27614 /* This will drop any buffered data on the floor which is OK in a hangup
27615 * scenario. */
27616 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27617 * the middle of a write operation? This is a crummy place to do this
27618 * but we want to keep it all in the spinlock.
27619 */
27620 - if (hvcsd->open_count <= 0) {
27621 + if (local_read(&hvcsd->open_count) <= 0) {
27622 spin_unlock_irqrestore(&hvcsd->lock, flags);
27623 return -ENODEV;
27624 }
27625 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27626 {
27627 struct hvcs_struct *hvcsd = tty->driver_data;
27628
27629 - if (!hvcsd || hvcsd->open_count <= 0)
27630 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27631 return 0;
27632
27633 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27634 diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27635 --- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27636 +++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27637 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27638 return i;
27639 }
27640
27641 -static struct hv_ops hvc_udbg_ops = {
27642 +static const struct hv_ops hvc_udbg_ops = {
27643 .get_chars = hvc_udbg_get,
27644 .put_chars = hvc_udbg_put,
27645 };
27646 diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27647 --- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27648 +++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27649 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27650 return got;
27651 }
27652
27653 -static struct hv_ops hvc_get_put_ops = {
27654 +static const struct hv_ops hvc_get_put_ops = {
27655 .get_chars = filtered_get_chars,
27656 .put_chars = hvc_put_chars,
27657 .notifier_add = notifier_add_irq,
27658 diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27659 --- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27660 +++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27661 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27662 return recv;
27663 }
27664
27665 -static struct hv_ops hvc_ops = {
27666 +static const struct hv_ops hvc_ops = {
27667 .get_chars = read_console,
27668 .put_chars = write_console,
27669 .notifier_add = notifier_add_irq,
27670 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27671 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27672 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27673 @@ -414,7 +414,7 @@ struct ipmi_smi {
27674 struct proc_dir_entry *proc_dir;
27675 char proc_dir_name[10];
27676
27677 - atomic_t stats[IPMI_NUM_STATS];
27678 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27679
27680 /*
27681 * run_to_completion duplicate of smb_info, smi_info
27682 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27683
27684
27685 #define ipmi_inc_stat(intf, stat) \
27686 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27687 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27688 #define ipmi_get_stat(intf, stat) \
27689 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27690 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27691
27692 static int is_lan_addr(struct ipmi_addr *addr)
27693 {
27694 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27695 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27696 init_waitqueue_head(&intf->waitq);
27697 for (i = 0; i < IPMI_NUM_STATS; i++)
27698 - atomic_set(&intf->stats[i], 0);
27699 + atomic_set_unchecked(&intf->stats[i], 0);
27700
27701 intf->proc_dir = NULL;
27702
27703 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27704 struct ipmi_smi_msg smi_msg;
27705 struct ipmi_recv_msg recv_msg;
27706
27707 + pax_track_stack();
27708 +
27709 si = (struct ipmi_system_interface_addr *) &addr;
27710 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27711 si->channel = IPMI_BMC_CHANNEL;
27712 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27713 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27714 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27715 @@ -277,7 +277,7 @@ struct smi_info {
27716 unsigned char slave_addr;
27717
27718 /* Counters and things for the proc filesystem. */
27719 - atomic_t stats[SI_NUM_STATS];
27720 + atomic_unchecked_t stats[SI_NUM_STATS];
27721
27722 struct task_struct *thread;
27723
27724 @@ -285,9 +285,9 @@ struct smi_info {
27725 };
27726
27727 #define smi_inc_stat(smi, stat) \
27728 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27729 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27730 #define smi_get_stat(smi, stat) \
27731 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27732 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27733
27734 #define SI_MAX_PARMS 4
27735
27736 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27737 atomic_set(&new_smi->req_events, 0);
27738 new_smi->run_to_completion = 0;
27739 for (i = 0; i < SI_NUM_STATS; i++)
27740 - atomic_set(&new_smi->stats[i], 0);
27741 + atomic_set_unchecked(&new_smi->stats[i], 0);
27742
27743 new_smi->interrupt_disabled = 0;
27744 atomic_set(&new_smi->stop_operation, 0);
27745 diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
27746 --- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27747 +++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27748 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27749 * re-used for each stats call.
27750 */
27751 static comstats_t stli_comstats;
27752 -static combrd_t stli_brdstats;
27753 static struct asystats stli_cdkstats;
27754
27755 /*****************************************************************************/
27756 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27757 {
27758 struct stlibrd *brdp;
27759 unsigned int i;
27760 + combrd_t stli_brdstats;
27761
27762 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27763 return -EFAULT;
27764 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27765 struct stliport stli_dummyport;
27766 struct stliport *portp;
27767
27768 + pax_track_stack();
27769 +
27770 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27771 return -EFAULT;
27772 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27773 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27774 struct stlibrd stli_dummybrd;
27775 struct stlibrd *brdp;
27776
27777 + pax_track_stack();
27778 +
27779 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27780 return -EFAULT;
27781 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27782 diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
27783 --- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27784 +++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27785 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27786
27787 config DEVKMEM
27788 bool "/dev/kmem virtual device support"
27789 - default y
27790 + default n
27791 + depends on !GRKERNSEC_KMEM
27792 help
27793 Say Y here if you want to support the /dev/kmem device. The
27794 /dev/kmem device is rarely used, but can be used for certain
27795 @@ -1114,6 +1115,7 @@ config DEVPORT
27796 bool
27797 depends on !M68K
27798 depends on ISA || PCI
27799 + depends on !GRKERNSEC_KMEM
27800 default y
27801
27802 source "drivers/s390/char/Kconfig"
27803 diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
27804 --- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27805 +++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27806 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27807 kbd->kbdmode == VC_MEDIUMRAW) &&
27808 value != KVAL(K_SAK))
27809 return; /* SAK is allowed even in raw mode */
27810 +
27811 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27812 + {
27813 + void *func = fn_handler[value];
27814 + if (func == fn_show_state || func == fn_show_ptregs ||
27815 + func == fn_show_mem)
27816 + return;
27817 + }
27818 +#endif
27819 +
27820 fn_handler[value](vc);
27821 }
27822
27823 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27824 .evbit = { BIT_MASK(EV_SND) },
27825 },
27826
27827 - { }, /* Terminating entry */
27828 + { 0 }, /* Terminating entry */
27829 };
27830
27831 MODULE_DEVICE_TABLE(input, kbd_ids);
27832 diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
27833 --- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27834 +++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27835 @@ -18,6 +18,7 @@
27836 #include <linux/raw.h>
27837 #include <linux/tty.h>
27838 #include <linux/capability.h>
27839 +#include <linux/security.h>
27840 #include <linux/ptrace.h>
27841 #include <linux/device.h>
27842 #include <linux/highmem.h>
27843 @@ -35,6 +36,10 @@
27844 # include <linux/efi.h>
27845 #endif
27846
27847 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27848 +extern struct file_operations grsec_fops;
27849 +#endif
27850 +
27851 static inline unsigned long size_inside_page(unsigned long start,
27852 unsigned long size)
27853 {
27854 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27855
27856 while (cursor < to) {
27857 if (!devmem_is_allowed(pfn)) {
27858 +#ifdef CONFIG_GRKERNSEC_KMEM
27859 + gr_handle_mem_readwrite(from, to);
27860 +#else
27861 printk(KERN_INFO
27862 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27863 current->comm, from, to);
27864 +#endif
27865 return 0;
27866 }
27867 cursor += PAGE_SIZE;
27868 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27869 }
27870 return 1;
27871 }
27872 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27873 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27874 +{
27875 + return 0;
27876 +}
27877 #else
27878 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27879 {
27880 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27881 #endif
27882
27883 while (count > 0) {
27884 + char *temp;
27885 +
27886 /*
27887 * Handle first page in case it's not aligned
27888 */
27889 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27890 if (!ptr)
27891 return -EFAULT;
27892
27893 - if (copy_to_user(buf, ptr, sz)) {
27894 +#ifdef CONFIG_PAX_USERCOPY
27895 + temp = kmalloc(sz, GFP_KERNEL);
27896 + if (!temp) {
27897 + unxlate_dev_mem_ptr(p, ptr);
27898 + return -ENOMEM;
27899 + }
27900 + memcpy(temp, ptr, sz);
27901 +#else
27902 + temp = ptr;
27903 +#endif
27904 +
27905 + if (copy_to_user(buf, temp, sz)) {
27906 +
27907 +#ifdef CONFIG_PAX_USERCOPY
27908 + kfree(temp);
27909 +#endif
27910 +
27911 unxlate_dev_mem_ptr(p, ptr);
27912 return -EFAULT;
27913 }
27914
27915 +#ifdef CONFIG_PAX_USERCOPY
27916 + kfree(temp);
27917 +#endif
27918 +
27919 unxlate_dev_mem_ptr(p, ptr);
27920
27921 buf += sz;
27922 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27923 size_t count, loff_t *ppos)
27924 {
27925 unsigned long p = *ppos;
27926 - ssize_t low_count, read, sz;
27927 + ssize_t low_count, read, sz, err = 0;
27928 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27929 - int err = 0;
27930
27931 read = 0;
27932 if (p < (unsigned long) high_memory) {
27933 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27934 }
27935 #endif
27936 while (low_count > 0) {
27937 + char *temp;
27938 +
27939 sz = size_inside_page(p, low_count);
27940
27941 /*
27942 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27943 */
27944 kbuf = xlate_dev_kmem_ptr((char *)p);
27945
27946 - if (copy_to_user(buf, kbuf, sz))
27947 +#ifdef CONFIG_PAX_USERCOPY
27948 + temp = kmalloc(sz, GFP_KERNEL);
27949 + if (!temp)
27950 + return -ENOMEM;
27951 + memcpy(temp, kbuf, sz);
27952 +#else
27953 + temp = kbuf;
27954 +#endif
27955 +
27956 + err = copy_to_user(buf, temp, sz);
27957 +
27958 +#ifdef CONFIG_PAX_USERCOPY
27959 + kfree(temp);
27960 +#endif
27961 +
27962 + if (err)
27963 return -EFAULT;
27964 buf += sz;
27965 p += sz;
27966 @@ -889,6 +941,9 @@ static const struct memdev {
27967 #ifdef CONFIG_CRASH_DUMP
27968 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27969 #endif
27970 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27971 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27972 +#endif
27973 };
27974
27975 static int memory_open(struct inode *inode, struct file *filp)
27976 diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
27977 --- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
27978 +++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
27979 @@ -29,6 +29,7 @@
27980 #include <linux/tty_driver.h>
27981 #include <linux/tty_flip.h>
27982 #include <linux/uaccess.h>
27983 +#include <asm/local.h>
27984
27985 #include "tty.h"
27986 #include "network.h"
27987 @@ -51,7 +52,7 @@ struct ipw_tty {
27988 int tty_type;
27989 struct ipw_network *network;
27990 struct tty_struct *linux_tty;
27991 - int open_count;
27992 + local_t open_count;
27993 unsigned int control_lines;
27994 struct mutex ipw_tty_mutex;
27995 int tx_bytes_queued;
27996 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
27997 mutex_unlock(&tty->ipw_tty_mutex);
27998 return -ENODEV;
27999 }
28000 - if (tty->open_count == 0)
28001 + if (local_read(&tty->open_count) == 0)
28002 tty->tx_bytes_queued = 0;
28003
28004 - tty->open_count++;
28005 + local_inc(&tty->open_count);
28006
28007 tty->linux_tty = linux_tty;
28008 linux_tty->driver_data = tty;
28009 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28010
28011 static void do_ipw_close(struct ipw_tty *tty)
28012 {
28013 - tty->open_count--;
28014 -
28015 - if (tty->open_count == 0) {
28016 + if (local_dec_return(&tty->open_count) == 0) {
28017 struct tty_struct *linux_tty = tty->linux_tty;
28018
28019 if (linux_tty != NULL) {
28020 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28021 return;
28022
28023 mutex_lock(&tty->ipw_tty_mutex);
28024 - if (tty->open_count == 0) {
28025 + if (local_read(&tty->open_count) == 0) {
28026 mutex_unlock(&tty->ipw_tty_mutex);
28027 return;
28028 }
28029 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28030 return;
28031 }
28032
28033 - if (!tty->open_count) {
28034 + if (!local_read(&tty->open_count)) {
28035 mutex_unlock(&tty->ipw_tty_mutex);
28036 return;
28037 }
28038 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28039 return -ENODEV;
28040
28041 mutex_lock(&tty->ipw_tty_mutex);
28042 - if (!tty->open_count) {
28043 + if (!local_read(&tty->open_count)) {
28044 mutex_unlock(&tty->ipw_tty_mutex);
28045 return -EINVAL;
28046 }
28047 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28048 if (!tty)
28049 return -ENODEV;
28050
28051 - if (!tty->open_count)
28052 + if (!local_read(&tty->open_count))
28053 return -EINVAL;
28054
28055 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28056 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28057 if (!tty)
28058 return 0;
28059
28060 - if (!tty->open_count)
28061 + if (!local_read(&tty->open_count))
28062 return 0;
28063
28064 return tty->tx_bytes_queued;
28065 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28066 if (!tty)
28067 return -ENODEV;
28068
28069 - if (!tty->open_count)
28070 + if (!local_read(&tty->open_count))
28071 return -EINVAL;
28072
28073 return get_control_lines(tty);
28074 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28075 if (!tty)
28076 return -ENODEV;
28077
28078 - if (!tty->open_count)
28079 + if (!local_read(&tty->open_count))
28080 return -EINVAL;
28081
28082 return set_control_lines(tty, set, clear);
28083 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28084 if (!tty)
28085 return -ENODEV;
28086
28087 - if (!tty->open_count)
28088 + if (!local_read(&tty->open_count))
28089 return -EINVAL;
28090
28091 /* FIXME: Exactly how is the tty object locked here .. */
28092 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28093 against a parallel ioctl etc */
28094 mutex_lock(&ttyj->ipw_tty_mutex);
28095 }
28096 - while (ttyj->open_count)
28097 + while (local_read(&ttyj->open_count))
28098 do_ipw_close(ttyj);
28099 ipwireless_disassociate_network_ttys(network,
28100 ttyj->channel_idx);
28101 diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28102 --- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28103 +++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28104 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28105 register_sysctl_table(pty_root_table);
28106
28107 /* Now create the /dev/ptmx special device */
28108 + pax_open_kernel();
28109 tty_default_fops(&ptmx_fops);
28110 - ptmx_fops.open = ptmx_open;
28111 + *(void **)&ptmx_fops.open = ptmx_open;
28112 + pax_close_kernel();
28113
28114 cdev_init(&ptmx_cdev, &ptmx_fops);
28115 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28116 diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28117 --- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28118 +++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28119 @@ -254,8 +254,13 @@
28120 /*
28121 * Configuration information
28122 */
28123 +#ifdef CONFIG_GRKERNSEC_RANDNET
28124 +#define INPUT_POOL_WORDS 512
28125 +#define OUTPUT_POOL_WORDS 128
28126 +#else
28127 #define INPUT_POOL_WORDS 128
28128 #define OUTPUT_POOL_WORDS 32
28129 +#endif
28130 #define SEC_XFER_SIZE 512
28131
28132 /*
28133 @@ -292,10 +297,17 @@ static struct poolinfo {
28134 int poolwords;
28135 int tap1, tap2, tap3, tap4, tap5;
28136 } poolinfo_table[] = {
28137 +#ifdef CONFIG_GRKERNSEC_RANDNET
28138 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28139 + { 512, 411, 308, 208, 104, 1 },
28140 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28141 + { 128, 103, 76, 51, 25, 1 },
28142 +#else
28143 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28144 { 128, 103, 76, 51, 25, 1 },
28145 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28146 { 32, 26, 20, 14, 7, 1 },
28147 +#endif
28148 #if 0
28149 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28150 { 2048, 1638, 1231, 819, 411, 1 },
28151 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28152 #include <linux/sysctl.h>
28153
28154 static int min_read_thresh = 8, min_write_thresh;
28155 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28156 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28157 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28158 static char sysctl_bootid[16];
28159
28160 diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28161 --- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28162 +++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28163 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28164 struct rocket_ports tmp;
28165 int board;
28166
28167 + pax_track_stack();
28168 +
28169 if (!retports)
28170 return -EFAULT;
28171 memset(&tmp, 0, sizeof (tmp));
28172 diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28173 --- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28174 +++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28175 @@ -55,6 +55,7 @@
28176 #include <asm/uaccess.h>
28177 #include <asm/io.h>
28178 #include <asm/system.h>
28179 +#include <asm/local.h>
28180
28181 #include <linux/sonypi.h>
28182
28183 @@ -491,7 +492,7 @@ static struct sonypi_device {
28184 spinlock_t fifo_lock;
28185 wait_queue_head_t fifo_proc_list;
28186 struct fasync_struct *fifo_async;
28187 - int open_count;
28188 + local_t open_count;
28189 int model;
28190 struct input_dev *input_jog_dev;
28191 struct input_dev *input_key_dev;
28192 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28193 static int sonypi_misc_release(struct inode *inode, struct file *file)
28194 {
28195 mutex_lock(&sonypi_device.lock);
28196 - sonypi_device.open_count--;
28197 + local_dec(&sonypi_device.open_count);
28198 mutex_unlock(&sonypi_device.lock);
28199 return 0;
28200 }
28201 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28202 lock_kernel();
28203 mutex_lock(&sonypi_device.lock);
28204 /* Flush input queue on first open */
28205 - if (!sonypi_device.open_count)
28206 + if (!local_read(&sonypi_device.open_count))
28207 kfifo_reset(sonypi_device.fifo);
28208 - sonypi_device.open_count++;
28209 + local_inc(&sonypi_device.open_count);
28210 mutex_unlock(&sonypi_device.lock);
28211 unlock_kernel();
28212 return 0;
28213 diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28214 --- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28215 +++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28216 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28217 struct stlport stl_dummyport;
28218 struct stlport *portp;
28219
28220 + pax_track_stack();
28221 +
28222 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28223 return -EFAULT;
28224 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28225 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28226 --- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28227 +++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28228 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28229 event = addr;
28230
28231 if ((event->event_type == 0 && event->event_size == 0) ||
28232 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28233 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28234 return NULL;
28235
28236 return addr;
28237 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28238 return NULL;
28239
28240 if ((event->event_type == 0 && event->event_size == 0) ||
28241 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28242 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28243 return NULL;
28244
28245 (*pos)++;
28246 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28247 int i;
28248
28249 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28250 - seq_putc(m, data[i]);
28251 + if (!seq_putc(m, data[i]))
28252 + return -EFAULT;
28253
28254 return 0;
28255 }
28256 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28257 log->bios_event_log_end = log->bios_event_log + len;
28258
28259 virt = acpi_os_map_memory(start, len);
28260 + if (!virt) {
28261 + kfree(log->bios_event_log);
28262 + log->bios_event_log = NULL;
28263 + return -EFAULT;
28264 + }
28265
28266 memcpy(log->bios_event_log, virt, len);
28267
28268 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28269 --- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28270 +++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28271 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28272 chip->vendor.req_complete_val)
28273 goto out_recv;
28274
28275 - if ((status == chip->vendor.req_canceled)) {
28276 + if (status == chip->vendor.req_canceled) {
28277 dev_err(chip->dev, "Operation Canceled\n");
28278 rc = -ECANCELED;
28279 goto out;
28280 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28281
28282 struct tpm_chip *chip = dev_get_drvdata(dev);
28283
28284 + pax_track_stack();
28285 +
28286 tpm_cmd.header.in = tpm_readpubek_header;
28287 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28288 "attempting to read the PUBEK");
28289 diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28290 --- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28291 +++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28292 @@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28293 return retval;
28294 }
28295
28296 +EXPORT_SYMBOL(tty_ioctl);
28297 +
28298 #ifdef CONFIG_COMPAT
28299 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28300 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
28301 unsigned long arg)
28302 {
28303 struct inode *inode = file->f_dentry->d_inode;
28304 @@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28305
28306 return retval;
28307 }
28308 +
28309 +EXPORT_SYMBOL(tty_compat_ioctl);
28310 #endif
28311
28312 /*
28313 @@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28314
28315 void tty_default_fops(struct file_operations *fops)
28316 {
28317 - *fops = tty_fops;
28318 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28319 }
28320
28321 /*
28322 diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28323 --- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28324 +++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28325 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28326 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28327 struct tty_ldisc_ops *ldo = ld->ops;
28328
28329 - ldo->refcount--;
28330 + atomic_dec(&ldo->refcount);
28331 module_put(ldo->owner);
28332 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28333
28334 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28335 spin_lock_irqsave(&tty_ldisc_lock, flags);
28336 tty_ldiscs[disc] = new_ldisc;
28337 new_ldisc->num = disc;
28338 - new_ldisc->refcount = 0;
28339 + atomic_set(&new_ldisc->refcount, 0);
28340 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28341
28342 return ret;
28343 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28344 return -EINVAL;
28345
28346 spin_lock_irqsave(&tty_ldisc_lock, flags);
28347 - if (tty_ldiscs[disc]->refcount)
28348 + if (atomic_read(&tty_ldiscs[disc]->refcount))
28349 ret = -EBUSY;
28350 else
28351 tty_ldiscs[disc] = NULL;
28352 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28353 if (ldops) {
28354 ret = ERR_PTR(-EAGAIN);
28355 if (try_module_get(ldops->owner)) {
28356 - ldops->refcount++;
28357 + atomic_inc(&ldops->refcount);
28358 ret = ldops;
28359 }
28360 }
28361 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28362 unsigned long flags;
28363
28364 spin_lock_irqsave(&tty_ldisc_lock, flags);
28365 - ldops->refcount--;
28366 + atomic_dec(&ldops->refcount);
28367 module_put(ldops->owner);
28368 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28369 }
28370 diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28371 --- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28372 +++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28373 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28374 * virtqueue, so we let the drivers do some boutique early-output thing. */
28375 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28376 {
28377 - virtio_cons.put_chars = put_chars;
28378 + pax_open_kernel();
28379 + *(void **)&virtio_cons.put_chars = put_chars;
28380 + pax_close_kernel();
28381 return hvc_instantiate(0, 0, &virtio_cons);
28382 }
28383
28384 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28385 out_vq = vqs[1];
28386
28387 /* Start using the new console output. */
28388 - virtio_cons.get_chars = get_chars;
28389 - virtio_cons.put_chars = put_chars;
28390 - virtio_cons.notifier_add = notifier_add_vio;
28391 - virtio_cons.notifier_del = notifier_del_vio;
28392 - virtio_cons.notifier_hangup = notifier_del_vio;
28393 + pax_open_kernel();
28394 + *(void **)&virtio_cons.get_chars = get_chars;
28395 + *(void **)&virtio_cons.put_chars = put_chars;
28396 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28397 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28398 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28399 + pax_close_kernel();
28400
28401 /* The first argument of hvc_alloc() is the virtual console number, so
28402 * we use zero. The second argument is the parameter for the
28403 diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28404 --- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28405 +++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28406 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28407
28408 static void notify_write(struct vc_data *vc, unsigned int unicode)
28409 {
28410 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28411 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28412 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28413 }
28414
28415 diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28416 --- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28417 +++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28418 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28419 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28420 return -EFAULT;
28421
28422 - if (!capable(CAP_SYS_TTY_CONFIG))
28423 - perm = 0;
28424 -
28425 switch (cmd) {
28426 case KDGKBENT:
28427 key_map = key_maps[s];
28428 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28429 val = (i ? K_HOLE : K_NOSUCHMAP);
28430 return put_user(val, &user_kbe->kb_value);
28431 case KDSKBENT:
28432 + if (!capable(CAP_SYS_TTY_CONFIG))
28433 + perm = 0;
28434 +
28435 if (!perm)
28436 return -EPERM;
28437 +
28438 if (!i && v == K_NOSUCHMAP) {
28439 /* deallocate map */
28440 key_map = key_maps[s];
28441 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28442 int i, j, k;
28443 int ret;
28444
28445 - if (!capable(CAP_SYS_TTY_CONFIG))
28446 - perm = 0;
28447 -
28448 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28449 if (!kbs) {
28450 ret = -ENOMEM;
28451 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28452 kfree(kbs);
28453 return ((p && *p) ? -EOVERFLOW : 0);
28454 case KDSKBSENT:
28455 + if (!capable(CAP_SYS_TTY_CONFIG))
28456 + perm = 0;
28457 +
28458 if (!perm) {
28459 ret = -EPERM;
28460 goto reterr;
28461 diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28462 --- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28463 +++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28464 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28465 complete(&policy->kobj_unregister);
28466 }
28467
28468 -static struct sysfs_ops sysfs_ops = {
28469 +static const struct sysfs_ops sysfs_ops = {
28470 .show = show,
28471 .store = store,
28472 };
28473 diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28474 --- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28475 +++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28476 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28477 return ret;
28478 }
28479
28480 -static struct sysfs_ops cpuidle_sysfs_ops = {
28481 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28482 .show = cpuidle_show,
28483 .store = cpuidle_store,
28484 };
28485 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28486 return ret;
28487 }
28488
28489 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28490 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28491 .show = cpuidle_state_show,
28492 };
28493
28494 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28495 .release = cpuidle_state_sysfs_release,
28496 };
28497
28498 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28499 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28500 {
28501 kobject_put(&device->kobjs[i]->kobj);
28502 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28503 diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28504 --- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28505 +++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28506 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28507 0xCA, 0x34, 0x2B, 0x2E};
28508 struct scatterlist sg;
28509
28510 + pax_track_stack();
28511 +
28512 memset(src, 0, sizeof(src));
28513 memset(ctx.key, 0, sizeof(ctx.key));
28514
28515 diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28516 --- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28517 +++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28518 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28519 struct crypto_aes_ctx gen_aes;
28520 int cpu;
28521
28522 + pax_track_stack();
28523 +
28524 if (key_len % 8) {
28525 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28526 return -EINVAL;
28527 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28528 --- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28529 +++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28530 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28531 return entry->show(&chan->common, page);
28532 }
28533
28534 -struct sysfs_ops ioat_sysfs_ops = {
28535 +const struct sysfs_ops ioat_sysfs_ops = {
28536 .show = ioat_attr_show,
28537 };
28538
28539 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28540 --- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28541 +++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28542 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28543 unsigned long *phys_complete);
28544 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28545 void ioat_kobject_del(struct ioatdma_device *device);
28546 -extern struct sysfs_ops ioat_sysfs_ops;
28547 +extern const struct sysfs_ops ioat_sysfs_ops;
28548 extern struct ioat_sysfs_entry ioat_version_attr;
28549 extern struct ioat_sysfs_entry ioat_cap_attr;
28550 #endif /* IOATDMA_H */
28551 diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28552 --- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28553 +++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28554 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28555 }
28556
28557 /* edac_dev file operations for an 'ctl_info' */
28558 -static struct sysfs_ops device_ctl_info_ops = {
28559 +static const struct sysfs_ops device_ctl_info_ops = {
28560 .show = edac_dev_ctl_info_show,
28561 .store = edac_dev_ctl_info_store
28562 };
28563 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28564 }
28565
28566 /* edac_dev file operations for an 'instance' */
28567 -static struct sysfs_ops device_instance_ops = {
28568 +static const struct sysfs_ops device_instance_ops = {
28569 .show = edac_dev_instance_show,
28570 .store = edac_dev_instance_store
28571 };
28572 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28573 }
28574
28575 /* edac_dev file operations for a 'block' */
28576 -static struct sysfs_ops device_block_ops = {
28577 +static const struct sysfs_ops device_block_ops = {
28578 .show = edac_dev_block_show,
28579 .store = edac_dev_block_store
28580 };
28581 diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28582 --- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28583 +++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28584 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28585 return -EIO;
28586 }
28587
28588 -static struct sysfs_ops csrowfs_ops = {
28589 +static const struct sysfs_ops csrowfs_ops = {
28590 .show = csrowdev_show,
28591 .store = csrowdev_store
28592 };
28593 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28594 }
28595
28596 /* Intermediate show/store table */
28597 -static struct sysfs_ops mci_ops = {
28598 +static const struct sysfs_ops mci_ops = {
28599 .show = mcidev_show,
28600 .store = mcidev_store
28601 };
28602 diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28603 --- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28604 +++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28605 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28606 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28607 static int edac_pci_poll_msec = 1000; /* one second workq period */
28608
28609 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28610 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28611 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28612 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28613
28614 static struct kobject *edac_pci_top_main_kobj;
28615 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28616 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28617 }
28618
28619 /* fs_ops table */
28620 -static struct sysfs_ops pci_instance_ops = {
28621 +static const struct sysfs_ops pci_instance_ops = {
28622 .show = edac_pci_instance_show,
28623 .store = edac_pci_instance_store
28624 };
28625 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28626 return -EIO;
28627 }
28628
28629 -static struct sysfs_ops edac_pci_sysfs_ops = {
28630 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28631 .show = edac_pci_dev_show,
28632 .store = edac_pci_dev_store
28633 };
28634 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28635 edac_printk(KERN_CRIT, EDAC_PCI,
28636 "Signaled System Error on %s\n",
28637 pci_name(dev));
28638 - atomic_inc(&pci_nonparity_count);
28639 + atomic_inc_unchecked(&pci_nonparity_count);
28640 }
28641
28642 if (status & (PCI_STATUS_PARITY)) {
28643 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28644 "Master Data Parity Error on %s\n",
28645 pci_name(dev));
28646
28647 - atomic_inc(&pci_parity_count);
28648 + atomic_inc_unchecked(&pci_parity_count);
28649 }
28650
28651 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28652 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28653 "Detected Parity Error on %s\n",
28654 pci_name(dev));
28655
28656 - atomic_inc(&pci_parity_count);
28657 + atomic_inc_unchecked(&pci_parity_count);
28658 }
28659 }
28660
28661 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28662 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28663 "Signaled System Error on %s\n",
28664 pci_name(dev));
28665 - atomic_inc(&pci_nonparity_count);
28666 + atomic_inc_unchecked(&pci_nonparity_count);
28667 }
28668
28669 if (status & (PCI_STATUS_PARITY)) {
28670 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28671 "Master Data Parity Error on "
28672 "%s\n", pci_name(dev));
28673
28674 - atomic_inc(&pci_parity_count);
28675 + atomic_inc_unchecked(&pci_parity_count);
28676 }
28677
28678 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28679 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28680 "Detected Parity Error on %s\n",
28681 pci_name(dev));
28682
28683 - atomic_inc(&pci_parity_count);
28684 + atomic_inc_unchecked(&pci_parity_count);
28685 }
28686 }
28687 }
28688 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28689 if (!check_pci_errors)
28690 return;
28691
28692 - before_count = atomic_read(&pci_parity_count);
28693 + before_count = atomic_read_unchecked(&pci_parity_count);
28694
28695 /* scan all PCI devices looking for a Parity Error on devices and
28696 * bridges.
28697 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28698 /* Only if operator has selected panic on PCI Error */
28699 if (edac_pci_get_panic_on_pe()) {
28700 /* If the count is different 'after' from 'before' */
28701 - if (before_count != atomic_read(&pci_parity_count))
28702 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28703 panic("EDAC: PCI Parity Error");
28704 }
28705 }
28706 diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28707 --- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28708 +++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
28709 @@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
28710 mutex_unlock(&card_mutex);
28711
28712 /* Switch off most of the card driver interface. */
28713 - dummy_driver.free_iso_context = card->driver->free_iso_context;
28714 - dummy_driver.stop_iso = card->driver->stop_iso;
28715 + pax_open_kernel();
28716 + *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
28717 + *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
28718 + pax_close_kernel();
28719 card->driver = &dummy_driver;
28720
28721 fw_destroy_nodes(card);
28722 diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
28723 --- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28724 +++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28725 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28726 int ret;
28727
28728 if ((request->channels == 0 && request->bandwidth == 0) ||
28729 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28730 - request->bandwidth < 0)
28731 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28732 return -EINVAL;
28733
28734 r = kmalloc(sizeof(*r), GFP_KERNEL);
28735 diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
28736 --- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28737 +++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28738 @@ -36,6 +36,7 @@
28739 #include <linux/string.h>
28740 #include <linux/timer.h>
28741 #include <linux/types.h>
28742 +#include <linux/sched.h>
28743
28744 #include <asm/byteorder.h>
28745
28746 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28747 struct transaction_callback_data d;
28748 struct fw_transaction t;
28749
28750 + pax_track_stack();
28751 +
28752 init_completion(&d.done);
28753 d.payload = payload;
28754 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28755 diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
28756 --- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28757 +++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28758 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28759 }
28760 }
28761 else {
28762 - /*
28763 - * no iounmap() for that ioremap(); it would be a no-op, but
28764 - * it's so early in setup that sucker gets confused into doing
28765 - * what it shouldn't if we actually call it.
28766 - */
28767 p = dmi_ioremap(0xF0000, 0x10000);
28768 if (p == NULL)
28769 goto error;
28770 diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
28771 --- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28772 +++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28773 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28774 return ret;
28775 }
28776
28777 -static struct sysfs_ops edd_attr_ops = {
28778 +static const struct sysfs_ops edd_attr_ops = {
28779 .show = edd_attr_show,
28780 };
28781
28782 diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
28783 --- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28784 +++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28785 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28786 return ret;
28787 }
28788
28789 -static struct sysfs_ops efivar_attr_ops = {
28790 +static const struct sysfs_ops efivar_attr_ops = {
28791 .show = efivar_attr_show,
28792 .store = efivar_attr_store,
28793 };
28794 diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
28795 --- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28796 +++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28797 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28798 return ret;
28799 }
28800
28801 -static struct sysfs_ops ibft_attr_ops = {
28802 +static const struct sysfs_ops ibft_attr_ops = {
28803 .show = ibft_show_attribute,
28804 };
28805
28806 diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
28807 --- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28808 +++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28809 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28810 NULL
28811 };
28812
28813 -static struct sysfs_ops memmap_attr_ops = {
28814 +static const struct sysfs_ops memmap_attr_ops = {
28815 .show = memmap_attr_show,
28816 };
28817
28818 diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
28819 --- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28820 +++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28821 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28822 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28823 maskl, pendl, maskh, pendh);
28824
28825 - atomic_inc(&irq_err_count);
28826 + atomic_inc_unchecked(&irq_err_count);
28827
28828 return -EINVAL;
28829 }
28830 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
28831 --- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28832 +++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28833 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28834 struct drm_crtc *tmp;
28835 int crtc_mask = 1;
28836
28837 - WARN(!crtc, "checking null crtc?");
28838 + BUG_ON(!crtc);
28839
28840 dev = crtc->dev;
28841
28842 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28843
28844 adjusted_mode = drm_mode_duplicate(dev, mode);
28845
28846 + pax_track_stack();
28847 +
28848 crtc->enabled = drm_helper_crtc_in_use(crtc);
28849
28850 if (!crtc->enabled)
28851 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
28852 --- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28853 +++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28854 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28855 char *kdata = NULL;
28856
28857 atomic_inc(&dev->ioctl_count);
28858 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28859 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28860 ++file_priv->ioctl_count;
28861
28862 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28863 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
28864 --- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28865 +++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28866 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28867 }
28868
28869 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28870 - atomic_set(&dev->counts[i], 0);
28871 + atomic_set_unchecked(&dev->counts[i], 0);
28872
28873 dev->sigdata.lock = NULL;
28874
28875 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28876
28877 retcode = drm_open_helper(inode, filp, dev);
28878 if (!retcode) {
28879 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28880 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28881 spin_lock(&dev->count_lock);
28882 - if (!dev->open_count++) {
28883 + if (local_inc_return(&dev->open_count) == 1) {
28884 spin_unlock(&dev->count_lock);
28885 retcode = drm_setup(dev);
28886 goto out;
28887 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28888
28889 lock_kernel();
28890
28891 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28892 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28893
28894 if (dev->driver->preclose)
28895 dev->driver->preclose(dev, file_priv);
28896 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28897 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28898 task_pid_nr(current),
28899 (long)old_encode_dev(file_priv->minor->device),
28900 - dev->open_count);
28901 + local_read(&dev->open_count));
28902
28903 /* if the master has gone away we can't do anything with the lock */
28904 if (file_priv->minor->master)
28905 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28906 * End inline drm_release
28907 */
28908
28909 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28910 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28911 spin_lock(&dev->count_lock);
28912 - if (!--dev->open_count) {
28913 + if (local_dec_and_test(&dev->open_count)) {
28914 if (atomic_read(&dev->ioctl_count)) {
28915 DRM_ERROR("Device busy: %d\n",
28916 atomic_read(&dev->ioctl_count));
28917 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
28918 --- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28919 +++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28920 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28921 spin_lock_init(&dev->object_name_lock);
28922 idr_init(&dev->object_name_idr);
28923 atomic_set(&dev->object_count, 0);
28924 - atomic_set(&dev->object_memory, 0);
28925 + atomic_set_unchecked(&dev->object_memory, 0);
28926 atomic_set(&dev->pin_count, 0);
28927 - atomic_set(&dev->pin_memory, 0);
28928 + atomic_set_unchecked(&dev->pin_memory, 0);
28929 atomic_set(&dev->gtt_count, 0);
28930 - atomic_set(&dev->gtt_memory, 0);
28931 + atomic_set_unchecked(&dev->gtt_memory, 0);
28932
28933 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28934 if (!mm) {
28935 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28936 goto fput;
28937 }
28938 atomic_inc(&dev->object_count);
28939 - atomic_add(obj->size, &dev->object_memory);
28940 + atomic_add_unchecked(obj->size, &dev->object_memory);
28941 return obj;
28942 fput:
28943 fput(obj->filp);
28944 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28945
28946 fput(obj->filp);
28947 atomic_dec(&dev->object_count);
28948 - atomic_sub(obj->size, &dev->object_memory);
28949 + atomic_sub_unchecked(obj->size, &dev->object_memory);
28950 kfree(obj);
28951 }
28952 EXPORT_SYMBOL(drm_gem_object_free);
28953 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
28954 --- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28955 +++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28956 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28957 struct drm_local_map *map;
28958 struct drm_map_list *r_list;
28959
28960 - /* Hardcoded from _DRM_FRAME_BUFFER,
28961 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28962 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28963 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28964 + static const char * const types[] = {
28965 + [_DRM_FRAME_BUFFER] = "FB",
28966 + [_DRM_REGISTERS] = "REG",
28967 + [_DRM_SHM] = "SHM",
28968 + [_DRM_AGP] = "AGP",
28969 + [_DRM_SCATTER_GATHER] = "SG",
28970 + [_DRM_CONSISTENT] = "PCI",
28971 + [_DRM_GEM] = "GEM" };
28972 const char *type;
28973 int i;
28974
28975 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28976 map = r_list->map;
28977 if (!map)
28978 continue;
28979 - if (map->type < 0 || map->type > 5)
28980 + if (map->type >= ARRAY_SIZE(types))
28981 type = "??";
28982 else
28983 type = types[map->type];
28984 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28985 struct drm_device *dev = node->minor->dev;
28986
28987 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28988 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28989 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28990 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28991 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28992 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28993 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28994 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28995 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28996 return 0;
28997 }
28998 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28999 mutex_lock(&dev->struct_mutex);
29000 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29001 atomic_read(&dev->vma_count),
29002 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29003 + NULL, 0);
29004 +#else
29005 high_memory, (u64)virt_to_phys(high_memory));
29006 +#endif
29007
29008 list_for_each_entry(pt, &dev->vmalist, head) {
29009 vma = pt->vma;
29010 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29011 continue;
29012 seq_printf(m,
29013 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29014 - pt->pid, vma->vm_start, vma->vm_end,
29015 + pt->pid,
29016 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29017 + 0, 0,
29018 +#else
29019 + vma->vm_start, vma->vm_end,
29020 +#endif
29021 vma->vm_flags & VM_READ ? 'r' : '-',
29022 vma->vm_flags & VM_WRITE ? 'w' : '-',
29023 vma->vm_flags & VM_EXEC ? 'x' : '-',
29024 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29025 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29026 vma->vm_flags & VM_IO ? 'i' : '-',
29027 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29028 + 0);
29029 +#else
29030 vma->vm_pgoff);
29031 +#endif
29032
29033 #if defined(__i386__)
29034 pgprot = pgprot_val(vma->vm_page_prot);
29035 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29036 --- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29037 +++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29038 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29039 stats->data[i].value =
29040 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29041 else
29042 - stats->data[i].value = atomic_read(&dev->counts[i]);
29043 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29044 stats->data[i].type = dev->types[i];
29045 }
29046
29047 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29048 --- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29049 +++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29050 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29051 if (drm_lock_take(&master->lock, lock->context)) {
29052 master->lock.file_priv = file_priv;
29053 master->lock.lock_time = jiffies;
29054 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29055 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29056 break; /* Got lock */
29057 }
29058
29059 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29060 return -EINVAL;
29061 }
29062
29063 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29064 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29065
29066 /* kernel_context_switch isn't used by any of the x86 drm
29067 * modules but is required by the Sparc driver.
29068 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29069 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29070 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29071 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29072 dma->buflist[vertex->idx],
29073 vertex->discard, vertex->used);
29074
29075 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29076 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29077 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29078 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29079 sarea_priv->last_enqueue = dev_priv->counter - 1;
29080 sarea_priv->last_dispatch = (int)hw_status[5];
29081
29082 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29083 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29084 mc->last_render);
29085
29086 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29087 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29088 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29089 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29090 sarea_priv->last_enqueue = dev_priv->counter - 1;
29091 sarea_priv->last_dispatch = (int)hw_status[5];
29092
29093 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29094 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29095 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29096 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29097 int page_flipping;
29098
29099 wait_queue_head_t irq_queue;
29100 - atomic_t irq_received;
29101 - atomic_t irq_emitted;
29102 + atomic_unchecked_t irq_received;
29103 + atomic_unchecked_t irq_emitted;
29104
29105 int front_offset;
29106 } drm_i810_private_t;
29107 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29108 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29109 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29110 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29111 int page_flipping;
29112
29113 wait_queue_head_t irq_queue;
29114 - atomic_t irq_received;
29115 - atomic_t irq_emitted;
29116 + atomic_unchecked_t irq_received;
29117 + atomic_unchecked_t irq_emitted;
29118
29119 int use_mi_batchbuffer_start;
29120
29121 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29122 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29123 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29124 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29125
29126 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29127
29128 - atomic_inc(&dev_priv->irq_received);
29129 + atomic_inc_unchecked(&dev_priv->irq_received);
29130 wake_up_interruptible(&dev_priv->irq_queue);
29131
29132 return IRQ_HANDLED;
29133 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29134
29135 DRM_DEBUG("%s\n", __func__);
29136
29137 - atomic_inc(&dev_priv->irq_emitted);
29138 + atomic_inc_unchecked(&dev_priv->irq_emitted);
29139
29140 BEGIN_LP_RING(2);
29141 OUT_RING(0);
29142 OUT_RING(GFX_OP_USER_INTERRUPT);
29143 ADVANCE_LP_RING();
29144
29145 - return atomic_read(&dev_priv->irq_emitted);
29146 + return atomic_read_unchecked(&dev_priv->irq_emitted);
29147 }
29148
29149 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29150 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29151
29152 DRM_DEBUG("%s\n", __func__);
29153
29154 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29155 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29156 return 0;
29157
29158 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29159 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29160
29161 for (;;) {
29162 __set_current_state(TASK_INTERRUPTIBLE);
29163 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29164 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29165 break;
29166 if ((signed)(end - jiffies) <= 0) {
29167 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29168 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29169 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29170 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29171 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29172 - atomic_set(&dev_priv->irq_received, 0);
29173 - atomic_set(&dev_priv->irq_emitted, 0);
29174 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29175 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29176 init_waitqueue_head(&dev_priv->irq_queue);
29177 }
29178
29179 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29180 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29181 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29182 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29183 }
29184 }
29185
29186 -struct intel_dvo_dev_ops ch7017_ops = {
29187 +const struct intel_dvo_dev_ops ch7017_ops = {
29188 .init = ch7017_init,
29189 .detect = ch7017_detect,
29190 .mode_valid = ch7017_mode_valid,
29191 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29192 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29193 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29194 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29195 }
29196 }
29197
29198 -struct intel_dvo_dev_ops ch7xxx_ops = {
29199 +const struct intel_dvo_dev_ops ch7xxx_ops = {
29200 .init = ch7xxx_init,
29201 .detect = ch7xxx_detect,
29202 .mode_valid = ch7xxx_mode_valid,
29203 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29204 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29205 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29206 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29207 *
29208 * \return singly-linked list of modes or NULL if no modes found.
29209 */
29210 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29211 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29212
29213 /**
29214 * Clean up driver-specific bits of the output
29215 */
29216 - void (*destroy) (struct intel_dvo_device *dvo);
29217 + void (* const destroy) (struct intel_dvo_device *dvo);
29218
29219 /**
29220 * Debugging hook to dump device registers to log file
29221 */
29222 - void (*dump_regs)(struct intel_dvo_device *dvo);
29223 + void (* const dump_regs)(struct intel_dvo_device *dvo);
29224 };
29225
29226 -extern struct intel_dvo_dev_ops sil164_ops;
29227 -extern struct intel_dvo_dev_ops ch7xxx_ops;
29228 -extern struct intel_dvo_dev_ops ivch_ops;
29229 -extern struct intel_dvo_dev_ops tfp410_ops;
29230 -extern struct intel_dvo_dev_ops ch7017_ops;
29231 +extern const struct intel_dvo_dev_ops sil164_ops;
29232 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
29233 +extern const struct intel_dvo_dev_ops ivch_ops;
29234 +extern const struct intel_dvo_dev_ops tfp410_ops;
29235 +extern const struct intel_dvo_dev_ops ch7017_ops;
29236
29237 #endif /* _INTEL_DVO_H */
29238 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29239 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29240 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29241 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29242 }
29243 }
29244
29245 -struct intel_dvo_dev_ops ivch_ops= {
29246 +const struct intel_dvo_dev_ops ivch_ops= {
29247 .init = ivch_init,
29248 .dpms = ivch_dpms,
29249 .save = ivch_save,
29250 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29251 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29252 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29253 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29254 }
29255 }
29256
29257 -struct intel_dvo_dev_ops sil164_ops = {
29258 +const struct intel_dvo_dev_ops sil164_ops = {
29259 .init = sil164_init,
29260 .detect = sil164_detect,
29261 .mode_valid = sil164_mode_valid,
29262 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29263 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29264 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29265 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29266 }
29267 }
29268
29269 -struct intel_dvo_dev_ops tfp410_ops = {
29270 +const struct intel_dvo_dev_ops tfp410_ops = {
29271 .init = tfp410_init,
29272 .detect = tfp410_detect,
29273 .mode_valid = tfp410_mode_valid,
29274 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29275 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29276 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29277 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29278 I915_READ(GTIMR));
29279 }
29280 seq_printf(m, "Interrupts received: %d\n",
29281 - atomic_read(&dev_priv->irq_received));
29282 + atomic_read_unchecked(&dev_priv->irq_received));
29283 if (dev_priv->hw_status_page != NULL) {
29284 seq_printf(m, "Current sequence: %d\n",
29285 i915_get_gem_seqno(dev));
29286 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29287 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29288 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29289 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29290 return i915_resume(dev);
29291 }
29292
29293 -static struct vm_operations_struct i915_gem_vm_ops = {
29294 +static const struct vm_operations_struct i915_gem_vm_ops = {
29295 .fault = i915_gem_fault,
29296 .open = drm_gem_vm_open,
29297 .close = drm_gem_vm_close,
29298 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29299 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29300 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29301 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29302 /* display clock increase/decrease */
29303 /* pll clock increase/decrease */
29304 /* clock gating init */
29305 -};
29306 +} __no_const;
29307
29308 typedef struct drm_i915_private {
29309 struct drm_device *dev;
29310 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29311 int page_flipping;
29312
29313 wait_queue_head_t irq_queue;
29314 - atomic_t irq_received;
29315 + atomic_unchecked_t irq_received;
29316 /** Protects user_irq_refcount and irq_mask_reg */
29317 spinlock_t user_irq_lock;
29318 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29319 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29320 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29321 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29322 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29323
29324 args->aper_size = dev->gtt_total;
29325 args->aper_available_size = (args->aper_size -
29326 - atomic_read(&dev->pin_memory));
29327 + atomic_read_unchecked(&dev->pin_memory));
29328
29329 return 0;
29330 }
29331 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29332 return -EINVAL;
29333 }
29334
29335 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29336 + drm_gem_object_unreference(obj);
29337 + return -EFAULT;
29338 + }
29339 +
29340 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29341 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29342 } else {
29343 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29344 return -EINVAL;
29345 }
29346
29347 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29348 + drm_gem_object_unreference(obj);
29349 + return -EFAULT;
29350 + }
29351 +
29352 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29353 * it would end up going through the fenced access, and we'll get
29354 * different detiling behavior between reading and writing.
29355 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29356
29357 if (obj_priv->gtt_space) {
29358 atomic_dec(&dev->gtt_count);
29359 - atomic_sub(obj->size, &dev->gtt_memory);
29360 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29361
29362 drm_mm_put_block(obj_priv->gtt_space);
29363 obj_priv->gtt_space = NULL;
29364 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29365 goto search_free;
29366 }
29367 atomic_inc(&dev->gtt_count);
29368 - atomic_add(obj->size, &dev->gtt_memory);
29369 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
29370
29371 /* Assert that the object is not currently in any GPU domain. As it
29372 * wasn't in the GTT, there shouldn't be any way it could have been in
29373 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29374 "%d/%d gtt bytes\n",
29375 atomic_read(&dev->object_count),
29376 atomic_read(&dev->pin_count),
29377 - atomic_read(&dev->object_memory),
29378 - atomic_read(&dev->pin_memory),
29379 - atomic_read(&dev->gtt_memory),
29380 + atomic_read_unchecked(&dev->object_memory),
29381 + atomic_read_unchecked(&dev->pin_memory),
29382 + atomic_read_unchecked(&dev->gtt_memory),
29383 dev->gtt_total);
29384 }
29385 goto err;
29386 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29387 */
29388 if (obj_priv->pin_count == 1) {
29389 atomic_inc(&dev->pin_count);
29390 - atomic_add(obj->size, &dev->pin_memory);
29391 + atomic_add_unchecked(obj->size, &dev->pin_memory);
29392 if (!obj_priv->active &&
29393 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29394 !list_empty(&obj_priv->list))
29395 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29396 list_move_tail(&obj_priv->list,
29397 &dev_priv->mm.inactive_list);
29398 atomic_dec(&dev->pin_count);
29399 - atomic_sub(obj->size, &dev->pin_memory);
29400 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
29401 }
29402 i915_verify_inactive(dev, __FILE__, __LINE__);
29403 }
29404 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29405 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29406 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29407 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29408 int irq_received;
29409 int ret = IRQ_NONE;
29410
29411 - atomic_inc(&dev_priv->irq_received);
29412 + atomic_inc_unchecked(&dev_priv->irq_received);
29413
29414 if (IS_IGDNG(dev))
29415 return igdng_irq_handler(dev);
29416 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29417 {
29418 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29419
29420 - atomic_set(&dev_priv->irq_received, 0);
29421 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29422
29423 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29424 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29425 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29426 --- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29427 +++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29428 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29429 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29430
29431 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29432 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29433 + pax_open_kernel();
29434 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29435 + pax_close_kernel();
29436
29437 /* Read the regs to test if we can talk to the device */
29438 for (i = 0; i < 0x40; i++) {
29439 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29440 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29441 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29442 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29443 u32 clear_cmd;
29444 u32 maccess;
29445
29446 - atomic_t vbl_received; /**< Number of vblanks received. */
29447 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29448 wait_queue_head_t fence_queue;
29449 - atomic_t last_fence_retired;
29450 + atomic_unchecked_t last_fence_retired;
29451 u32 next_fence_to_post;
29452
29453 unsigned int fb_cpp;
29454 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29455 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29456 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29457 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29458 if (crtc != 0)
29459 return 0;
29460
29461 - return atomic_read(&dev_priv->vbl_received);
29462 + return atomic_read_unchecked(&dev_priv->vbl_received);
29463 }
29464
29465
29466 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29467 /* VBLANK interrupt */
29468 if (status & MGA_VLINEPEN) {
29469 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29470 - atomic_inc(&dev_priv->vbl_received);
29471 + atomic_inc_unchecked(&dev_priv->vbl_received);
29472 drm_handle_vblank(dev, 0);
29473 handled = 1;
29474 }
29475 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29476 MGA_WRITE(MGA_PRIMEND, prim_end);
29477 }
29478
29479 - atomic_inc(&dev_priv->last_fence_retired);
29480 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29481 DRM_WAKEUP(&dev_priv->fence_queue);
29482 handled = 1;
29483 }
29484 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29485 * using fences.
29486 */
29487 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29488 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29489 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29490 - *sequence) <= (1 << 23)));
29491
29492 *sequence = cur_fence;
29493 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29494 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29495 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29496 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29497
29498 /* GH: Simple idle check.
29499 */
29500 - atomic_set(&dev_priv->idle_count, 0);
29501 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29502
29503 /* We don't support anything other than bus-mastering ring mode,
29504 * but the ring can be in either AGP or PCI space for the ring
29505 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29506 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29507 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29508 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29509 int is_pci;
29510 unsigned long cce_buffers_offset;
29511
29512 - atomic_t idle_count;
29513 + atomic_unchecked_t idle_count;
29514
29515 int page_flipping;
29516 int current_page;
29517 u32 crtc_offset;
29518 u32 crtc_offset_cntl;
29519
29520 - atomic_t vbl_received;
29521 + atomic_unchecked_t vbl_received;
29522
29523 u32 color_fmt;
29524 unsigned int front_offset;
29525 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29526 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29527 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29528 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29529 if (crtc != 0)
29530 return 0;
29531
29532 - return atomic_read(&dev_priv->vbl_received);
29533 + return atomic_read_unchecked(&dev_priv->vbl_received);
29534 }
29535
29536 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29537 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29538 /* VBLANK interrupt */
29539 if (status & R128_CRTC_VBLANK_INT) {
29540 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29541 - atomic_inc(&dev_priv->vbl_received);
29542 + atomic_inc_unchecked(&dev_priv->vbl_received);
29543 drm_handle_vblank(dev, 0);
29544 return IRQ_HANDLED;
29545 }
29546 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29547 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29548 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29549 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29550
29551 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29552 {
29553 - if (atomic_read(&dev_priv->idle_count) == 0) {
29554 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29555 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29556 } else {
29557 - atomic_set(&dev_priv->idle_count, 0);
29558 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29559 }
29560 }
29561
29562 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29563 --- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29564 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29565 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29566 char name[512];
29567 int i;
29568
29569 + pax_track_stack();
29570 +
29571 ctx->card = card;
29572 ctx->bios = bios;
29573
29574 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29575 --- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29576 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29577 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29578 regex_t mask_rex;
29579 regmatch_t match[4];
29580 char buf[1024];
29581 - size_t end;
29582 + long end;
29583 int len;
29584 int done = 0;
29585 int r;
29586 unsigned o;
29587 struct offset *offset;
29588 char last_reg_s[10];
29589 - int last_reg;
29590 + unsigned long last_reg;
29591
29592 if (regcomp
29593 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29594 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29595 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29596 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29597 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29598 bool linkb;
29599 struct radeon_i2c_bus_rec ddc_bus;
29600
29601 + pax_track_stack();
29602 +
29603 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29604
29605 if (data_offset == 0)
29606 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29607 }
29608 }
29609
29610 -struct bios_connector {
29611 +static struct bios_connector {
29612 bool valid;
29613 uint16_t line_mux;
29614 uint16_t devices;
29615 int connector_type;
29616 struct radeon_i2c_bus_rec ddc_bus;
29617 -};
29618 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29619
29620 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29621 drm_device
29622 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29623 uint8_t dac;
29624 union atom_supported_devices *supported_devices;
29625 int i, j;
29626 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29627
29628 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29629
29630 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29631 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29632 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29633 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29634
29635 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29636 error = freq - current_freq;
29637 - error = error < 0 ? 0xffffffff : error;
29638 + error = (int32_t)error < 0 ? 0xffffffff : error;
29639 } else
29640 error = abs(current_freq - freq);
29641 vco_diff = abs(vco - best_vco);
29642 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29643 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29644 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29645 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29646
29647 /* SW interrupt */
29648 wait_queue_head_t swi_queue;
29649 - atomic_t swi_emitted;
29650 + atomic_unchecked_t swi_emitted;
29651 int vblank_crtc;
29652 uint32_t irq_enable_reg;
29653 uint32_t r500_disp_irq_reg;
29654 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29655 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29656 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29657 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29658 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29659 return 0;
29660 }
29661 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29662 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29663 if (!rdev->cp.ready) {
29664 /* FIXME: cp is not running assume everythings is done right
29665 * away
29666 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29667 return r;
29668 }
29669 WREG32(rdev->fence_drv.scratch_reg, 0);
29670 - atomic_set(&rdev->fence_drv.seq, 0);
29671 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29672 INIT_LIST_HEAD(&rdev->fence_drv.created);
29673 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29674 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29675 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29676 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29677 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29678 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29679 */
29680 struct radeon_fence_driver {
29681 uint32_t scratch_reg;
29682 - atomic_t seq;
29683 + atomic_unchecked_t seq;
29684 uint32_t last_seq;
29685 unsigned long count_timeout;
29686 wait_queue_head_t queue;
29687 @@ -640,7 +640,7 @@ struct radeon_asic {
29688 uint32_t offset, uint32_t obj_size);
29689 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29690 void (*bandwidth_update)(struct radeon_device *rdev);
29691 -};
29692 +} __no_const;
29693
29694 /*
29695 * Asic structures
29696 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29697 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29698 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29699 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29700 request = compat_alloc_user_space(sizeof(*request));
29701 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29702 || __put_user(req32.param, &request->param)
29703 - || __put_user((void __user *)(unsigned long)req32.value,
29704 + || __put_user((unsigned long)req32.value,
29705 &request->value))
29706 return -EFAULT;
29707
29708 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29709 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29710 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29711 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29712 unsigned int ret;
29713 RING_LOCALS;
29714
29715 - atomic_inc(&dev_priv->swi_emitted);
29716 - ret = atomic_read(&dev_priv->swi_emitted);
29717 + atomic_inc_unchecked(&dev_priv->swi_emitted);
29718 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29719
29720 BEGIN_RING(4);
29721 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29722 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29723 drm_radeon_private_t *dev_priv =
29724 (drm_radeon_private_t *) dev->dev_private;
29725
29726 - atomic_set(&dev_priv->swi_emitted, 0);
29727 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29728 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29729
29730 dev->max_vblank_count = 0x001fffff;
29731 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
29732 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29733 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29734 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29735 {
29736 drm_radeon_private_t *dev_priv = dev->dev_private;
29737 drm_radeon_getparam_t *param = data;
29738 - int value;
29739 + int value = 0;
29740
29741 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29742
29743 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
29744 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29745 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29746 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29747 DRM_INFO("radeon: ttm finalized\n");
29748 }
29749
29750 -static struct vm_operations_struct radeon_ttm_vm_ops;
29751 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
29752 -
29753 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29754 -{
29755 - struct ttm_buffer_object *bo;
29756 - int r;
29757 -
29758 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
29759 - if (bo == NULL) {
29760 - return VM_FAULT_NOPAGE;
29761 - }
29762 - r = ttm_vm_ops->fault(vma, vmf);
29763 - return r;
29764 -}
29765 -
29766 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29767 {
29768 struct drm_file *file_priv;
29769 struct radeon_device *rdev;
29770 - int r;
29771
29772 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29773 return drm_mmap(filp, vma);
29774 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29775
29776 file_priv = (struct drm_file *)filp->private_data;
29777 rdev = file_priv->minor->dev->dev_private;
29778 - if (rdev == NULL) {
29779 + if (!rdev)
29780 return -EINVAL;
29781 - }
29782 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29783 - if (unlikely(r != 0)) {
29784 - return r;
29785 - }
29786 - if (unlikely(ttm_vm_ops == NULL)) {
29787 - ttm_vm_ops = vma->vm_ops;
29788 - radeon_ttm_vm_ops = *ttm_vm_ops;
29789 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29790 - }
29791 - vma->vm_ops = &radeon_ttm_vm_ops;
29792 - return 0;
29793 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29794 }
29795
29796
29797 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
29798 --- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29799 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29800 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29801 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29802 rdev->pm.sideport_bandwidth.full)
29803 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29804 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29805 + read_delay_latency.full = rfixed_const(800 * 1000);
29806 read_delay_latency.full = rfixed_div(read_delay_latency,
29807 rdev->pm.igp_sideport_mclk);
29808 + a.full = rfixed_const(370);
29809 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29810 } else {
29811 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29812 rdev->pm.k8_bandwidth.full)
29813 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
29814 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29815 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29816 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29817 NULL
29818 };
29819
29820 -static struct sysfs_ops ttm_bo_global_ops = {
29821 +static const struct sysfs_ops ttm_bo_global_ops = {
29822 .show = &ttm_bo_global_show
29823 };
29824
29825 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
29826 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29827 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29828 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29829 {
29830 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29831 vma->vm_private_data;
29832 - struct ttm_bo_device *bdev = bo->bdev;
29833 + struct ttm_bo_device *bdev;
29834 unsigned long bus_base;
29835 unsigned long bus_offset;
29836 unsigned long bus_size;
29837 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29838 unsigned long address = (unsigned long)vmf->virtual_address;
29839 int retval = VM_FAULT_NOPAGE;
29840
29841 + if (!bo)
29842 + return VM_FAULT_NOPAGE;
29843 + bdev = bo->bdev;
29844 +
29845 /*
29846 * Work around locking order reversal in fault / nopfn
29847 * between mmap_sem and bo_reserve: Perform a trylock operation
29848 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
29849 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29850 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29851 @@ -36,7 +36,7 @@
29852 struct ttm_global_item {
29853 struct mutex mutex;
29854 void *object;
29855 - int refcount;
29856 + atomic_t refcount;
29857 };
29858
29859 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29860 @@ -49,7 +49,7 @@ void ttm_global_init(void)
29861 struct ttm_global_item *item = &glob[i];
29862 mutex_init(&item->mutex);
29863 item->object = NULL;
29864 - item->refcount = 0;
29865 + atomic_set(&item->refcount, 0);
29866 }
29867 }
29868
29869 @@ -59,7 +59,7 @@ void ttm_global_release(void)
29870 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29871 struct ttm_global_item *item = &glob[i];
29872 BUG_ON(item->object != NULL);
29873 - BUG_ON(item->refcount != 0);
29874 + BUG_ON(atomic_read(&item->refcount) != 0);
29875 }
29876 }
29877
29878 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29879 void *object;
29880
29881 mutex_lock(&item->mutex);
29882 - if (item->refcount == 0) {
29883 + if (atomic_read(&item->refcount) == 0) {
29884 item->object = kzalloc(ref->size, GFP_KERNEL);
29885 if (unlikely(item->object == NULL)) {
29886 ret = -ENOMEM;
29887 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29888 goto out_err;
29889
29890 }
29891 - ++item->refcount;
29892 + atomic_inc(&item->refcount);
29893 ref->object = item->object;
29894 object = item->object;
29895 mutex_unlock(&item->mutex);
29896 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29897 struct ttm_global_item *item = &glob[ref->global_type];
29898
29899 mutex_lock(&item->mutex);
29900 - BUG_ON(item->refcount == 0);
29901 + BUG_ON(atomic_read(&item->refcount) == 0);
29902 BUG_ON(ref->object != item->object);
29903 - if (--item->refcount == 0) {
29904 + if (atomic_dec_and_test(&item->refcount)) {
29905 ref->release(ref);
29906 item->object = NULL;
29907 }
29908 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
29909 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29910 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29911 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29912 NULL
29913 };
29914
29915 -static struct sysfs_ops ttm_mem_zone_ops = {
29916 +static const struct sysfs_ops ttm_mem_zone_ops = {
29917 .show = &ttm_mem_zone_show,
29918 .store = &ttm_mem_zone_store
29919 };
29920 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
29921 --- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29922 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29923 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29924 typedef uint32_t maskarray_t[5];
29925
29926 typedef struct drm_via_irq {
29927 - atomic_t irq_received;
29928 + atomic_unchecked_t irq_received;
29929 uint32_t pending_mask;
29930 uint32_t enable_mask;
29931 wait_queue_head_t irq_queue;
29932 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29933 struct timeval last_vblank;
29934 int last_vblank_valid;
29935 unsigned usec_per_vblank;
29936 - atomic_t vbl_received;
29937 + atomic_unchecked_t vbl_received;
29938 drm_via_state_t hc_state;
29939 char pci_buf[VIA_PCI_BUF_SIZE];
29940 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29941 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
29942 --- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29943 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29944 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29945 if (crtc != 0)
29946 return 0;
29947
29948 - return atomic_read(&dev_priv->vbl_received);
29949 + return atomic_read_unchecked(&dev_priv->vbl_received);
29950 }
29951
29952 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29953 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29954
29955 status = VIA_READ(VIA_REG_INTERRUPT);
29956 if (status & VIA_IRQ_VBLANK_PENDING) {
29957 - atomic_inc(&dev_priv->vbl_received);
29958 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29959 + atomic_inc_unchecked(&dev_priv->vbl_received);
29960 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29961 do_gettimeofday(&cur_vblank);
29962 if (dev_priv->last_vblank_valid) {
29963 dev_priv->usec_per_vblank =
29964 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29965 dev_priv->last_vblank = cur_vblank;
29966 dev_priv->last_vblank_valid = 1;
29967 }
29968 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29969 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29970 DRM_DEBUG("US per vblank is: %u\n",
29971 dev_priv->usec_per_vblank);
29972 }
29973 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29974
29975 for (i = 0; i < dev_priv->num_irqs; ++i) {
29976 if (status & cur_irq->pending_mask) {
29977 - atomic_inc(&cur_irq->irq_received);
29978 + atomic_inc_unchecked(&cur_irq->irq_received);
29979 DRM_WAKEUP(&cur_irq->irq_queue);
29980 handled = 1;
29981 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29982 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29983 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29984 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29985 masks[irq][4]));
29986 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29987 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29988 } else {
29989 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29990 (((cur_irq_sequence =
29991 - atomic_read(&cur_irq->irq_received)) -
29992 + atomic_read_unchecked(&cur_irq->irq_received)) -
29993 *sequence) <= (1 << 23)));
29994 }
29995 *sequence = cur_irq_sequence;
29996 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29997 }
29998
29999 for (i = 0; i < dev_priv->num_irqs; ++i) {
30000 - atomic_set(&cur_irq->irq_received, 0);
30001 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30002 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30003 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30004 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30005 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30006 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30007 case VIA_IRQ_RELATIVE:
30008 irqwait->request.sequence +=
30009 - atomic_read(&cur_irq->irq_received);
30010 + atomic_read_unchecked(&cur_irq->irq_received);
30011 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30012 case VIA_IRQ_ABSOLUTE:
30013 break;
30014 diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30015 --- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30016 +++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30017 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30018
30019 int hid_add_device(struct hid_device *hdev)
30020 {
30021 - static atomic_t id = ATOMIC_INIT(0);
30022 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30023 int ret;
30024
30025 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30026 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30027 /* XXX hack, any other cleaner solution after the driver core
30028 * is converted to allow more than 20 bytes as the device name? */
30029 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30030 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30031 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30032
30033 ret = device_add(&hdev->dev);
30034 if (!ret)
30035 diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30036 --- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30037 +++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30038 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30039 return put_user(HID_VERSION, (int __user *)arg);
30040
30041 case HIDIOCAPPLICATION:
30042 - if (arg < 0 || arg >= hid->maxapplication)
30043 + if (arg >= hid->maxapplication)
30044 return -EINVAL;
30045
30046 for (i = 0; i < hid->maxcollection; i++)
30047 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30048 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30049 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30050 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30051 * the lid is closed. This leads to interrupts as soon as a little move
30052 * is done.
30053 */
30054 - atomic_inc(&lis3_dev.count);
30055 + atomic_inc_unchecked(&lis3_dev.count);
30056
30057 wake_up_interruptible(&lis3_dev.misc_wait);
30058 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30059 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30060 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30061 return -EBUSY; /* already open */
30062
30063 - atomic_set(&lis3_dev.count, 0);
30064 + atomic_set_unchecked(&lis3_dev.count, 0);
30065
30066 /*
30067 * The sensor can generate interrupts for free-fall and direction
30068 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30069 add_wait_queue(&lis3_dev.misc_wait, &wait);
30070 while (true) {
30071 set_current_state(TASK_INTERRUPTIBLE);
30072 - data = atomic_xchg(&lis3_dev.count, 0);
30073 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30074 if (data)
30075 break;
30076
30077 @@ -244,7 +244,7 @@ out:
30078 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30079 {
30080 poll_wait(file, &lis3_dev.misc_wait, wait);
30081 - if (atomic_read(&lis3_dev.count))
30082 + if (atomic_read_unchecked(&lis3_dev.count))
30083 return POLLIN | POLLRDNORM;
30084 return 0;
30085 }
30086 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30087 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30088 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30089 @@ -201,7 +201,7 @@ struct lis3lv02d {
30090
30091 struct input_polled_dev *idev; /* input device */
30092 struct platform_device *pdev; /* platform device */
30093 - atomic_t count; /* interrupt count after last read */
30094 + atomic_unchecked_t count; /* interrupt count after last read */
30095 int xcalib; /* calibrated null value for x */
30096 int ycalib; /* calibrated null value for y */
30097 int zcalib; /* calibrated null value for z */
30098 diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30099 --- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30100 +++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30101 @@ -112,7 +112,7 @@ struct sht15_data {
30102 int supply_uV;
30103 int supply_uV_valid;
30104 struct work_struct update_supply_work;
30105 - atomic_t interrupt_handled;
30106 + atomic_unchecked_t interrupt_handled;
30107 };
30108
30109 /**
30110 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30111 return ret;
30112
30113 gpio_direction_input(data->pdata->gpio_data);
30114 - atomic_set(&data->interrupt_handled, 0);
30115 + atomic_set_unchecked(&data->interrupt_handled, 0);
30116
30117 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30118 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30119 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30120 /* Only relevant if the interrupt hasn't occured. */
30121 - if (!atomic_read(&data->interrupt_handled))
30122 + if (!atomic_read_unchecked(&data->interrupt_handled))
30123 schedule_work(&data->read_work);
30124 }
30125 ret = wait_event_timeout(data->wait_queue,
30126 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30127 struct sht15_data *data = d;
30128 /* First disable the interrupt */
30129 disable_irq_nosync(irq);
30130 - atomic_inc(&data->interrupt_handled);
30131 + atomic_inc_unchecked(&data->interrupt_handled);
30132 /* Then schedule a reading work struct */
30133 if (data->flag != SHT15_READING_NOTHING)
30134 schedule_work(&data->read_work);
30135 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30136 here as could have gone low in meantime so verify
30137 it hasn't!
30138 */
30139 - atomic_set(&data->interrupt_handled, 0);
30140 + atomic_set_unchecked(&data->interrupt_handled, 0);
30141 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30142 /* If still not occured or another handler has been scheduled */
30143 if (gpio_get_value(data->pdata->gpio_data)
30144 - || atomic_read(&data->interrupt_handled))
30145 + || atomic_read_unchecked(&data->interrupt_handled))
30146 return;
30147 }
30148 /* Read the data back from the device */
30149 diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30150 --- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30151 +++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30152 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30153 struct i2c_board_info *info);
30154 static int w83791d_remove(struct i2c_client *client);
30155
30156 -static int w83791d_read(struct i2c_client *client, u8 register);
30157 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30158 +static int w83791d_read(struct i2c_client *client, u8 reg);
30159 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30160 static struct w83791d_data *w83791d_update_device(struct device *dev);
30161
30162 #ifdef DEBUG
30163 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30164 --- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30165 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30166 @@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30167 }
30168
30169 /* Fill in the new structures */
30170 - s4882_algo[0] = *(amd756_smbus.algo);
30171 - s4882_algo[0].smbus_xfer = amd756_access_virt0;
30172 + memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30173 + *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30174 s4882_adapter[0] = amd756_smbus;
30175 s4882_adapter[0].algo = s4882_algo;
30176 - s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30177 + *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30178 for (i = 1; i < 5; i++) {
30179 - s4882_algo[i] = *(amd756_smbus.algo);
30180 + memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30181 s4882_adapter[i] = amd756_smbus;
30182 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30183 "SMBus 8111 adapter (CPU%d)", i-1);
30184 s4882_adapter[i].algo = s4882_algo+i;
30185 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30186 }
30187 - s4882_algo[1].smbus_xfer = amd756_access_virt1;
30188 - s4882_algo[2].smbus_xfer = amd756_access_virt2;
30189 - s4882_algo[3].smbus_xfer = amd756_access_virt3;
30190 - s4882_algo[4].smbus_xfer = amd756_access_virt4;
30191 + *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30192 + *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30193 + *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30194 + *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30195
30196 /* Register virtual adapters */
30197 for (i = 0; i < 5; i++) {
30198 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30199 --- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30200 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30201 @@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30202 }
30203
30204 /* Fill in the new structures */
30205 - s4985_algo[0] = *(nforce2_smbus->algo);
30206 - s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30207 + memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30208 + *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30209 s4985_adapter[0] = *nforce2_smbus;
30210 s4985_adapter[0].algo = s4985_algo;
30211 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30212 for (i = 1; i < 5; i++) {
30213 - s4985_algo[i] = *(nforce2_smbus->algo);
30214 + memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30215 s4985_adapter[i] = *nforce2_smbus;
30216 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30217 "SMBus nForce2 adapter (CPU%d)", i - 1);
30218 s4985_adapter[i].algo = s4985_algo + i;
30219 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30220 }
30221 - s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30222 - s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30223 - s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30224 - s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30225 + *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30226 + *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30227 + *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30228 + *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30229
30230 /* Register virtual adapters */
30231 for (i = 0; i < 5; i++) {
30232 diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30233 --- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30234 +++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30235 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30236 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30237 if ((unsigned long)buf & alignment
30238 || blk_rq_bytes(rq) & q->dma_pad_mask
30239 - || object_is_on_stack(buf))
30240 + || object_starts_on_stack(buf))
30241 drive->dma = 0;
30242 }
30243 }
30244 diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30245 --- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30246 +++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30247 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30248 u8 pc_buf[256], header_len, desc_cnt;
30249 int i, rc = 1, blocks, length;
30250
30251 + pax_track_stack();
30252 +
30253 ide_debug_log(IDE_DBG_FUNC, "enter");
30254
30255 drive->bios_cyl = 0;
30256 diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30257 --- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30258 +++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30259 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30260 int ret, i, n_ports = dev2 ? 4 : 2;
30261 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30262
30263 + pax_track_stack();
30264 +
30265 for (i = 0; i < n_ports / 2; i++) {
30266 ret = ide_setup_pci_controller(pdev[i], d, !i);
30267 if (ret < 0)
30268 diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30269 --- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30270 +++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30271 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30272 based upon DIF section and sequence
30273 */
30274
30275 -static void inline
30276 +static inline void
30277 frame_put_packet (struct frame *f, struct packet *p)
30278 {
30279 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30280 diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30281 --- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30282 +++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30283 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30284 }
30285
30286 static struct hpsb_host_driver dummy_driver = {
30287 + .name = "dummy",
30288 .transmit_packet = dummy_transmit_packet,
30289 .devctl = dummy_devctl,
30290 .isoctl = dummy_isoctl
30291 diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30292 --- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30293 +++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30294 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30295 for (func = 0; func < 8; func++) {
30296 u32 class = read_pci_config(num,slot,func,
30297 PCI_CLASS_REVISION);
30298 - if ((class == 0xffffffff))
30299 + if (class == 0xffffffff)
30300 continue; /* No device at this func */
30301
30302 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30303 diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30304 --- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30305 +++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30306 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30307 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30308
30309 /* Module Parameters */
30310 -static int phys_dma = 1;
30311 +static int phys_dma;
30312 module_param(phys_dma, int, 0444);
30313 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30314 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30315
30316 static void dma_trm_tasklet(unsigned long data);
30317 static void dma_trm_reset(struct dma_trm_ctx *d);
30318 diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30319 --- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30320 +++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30321 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30322 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30323 MODULE_LICENSE("GPL");
30324
30325 -static int sbp2_module_init(void)
30326 +static int __init sbp2_module_init(void)
30327 {
30328 int ret;
30329
30330 diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30331 --- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30332 +++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30333 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
30334
30335 struct cm_counter_group {
30336 struct kobject obj;
30337 - atomic_long_t counter[CM_ATTR_COUNT];
30338 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30339 };
30340
30341 struct cm_counter_attribute {
30342 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30343 struct ib_mad_send_buf *msg = NULL;
30344 int ret;
30345
30346 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30347 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30348 counter[CM_REQ_COUNTER]);
30349
30350 /* Quick state check to discard duplicate REQs. */
30351 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30352 if (!cm_id_priv)
30353 return;
30354
30355 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30356 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30357 counter[CM_REP_COUNTER]);
30358 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30359 if (ret)
30360 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30361 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30362 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30363 spin_unlock_irq(&cm_id_priv->lock);
30364 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30365 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30366 counter[CM_RTU_COUNTER]);
30367 goto out;
30368 }
30369 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30370 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30371 dreq_msg->local_comm_id);
30372 if (!cm_id_priv) {
30373 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30374 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30375 counter[CM_DREQ_COUNTER]);
30376 cm_issue_drep(work->port, work->mad_recv_wc);
30377 return -EINVAL;
30378 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30379 case IB_CM_MRA_REP_RCVD:
30380 break;
30381 case IB_CM_TIMEWAIT:
30382 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30383 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30384 counter[CM_DREQ_COUNTER]);
30385 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30386 goto unlock;
30387 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30388 cm_free_msg(msg);
30389 goto deref;
30390 case IB_CM_DREQ_RCVD:
30391 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30392 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30393 counter[CM_DREQ_COUNTER]);
30394 goto unlock;
30395 default:
30396 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30397 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30398 cm_id_priv->msg, timeout)) {
30399 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30400 - atomic_long_inc(&work->port->
30401 + atomic_long_inc_unchecked(&work->port->
30402 counter_group[CM_RECV_DUPLICATES].
30403 counter[CM_MRA_COUNTER]);
30404 goto out;
30405 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30406 break;
30407 case IB_CM_MRA_REQ_RCVD:
30408 case IB_CM_MRA_REP_RCVD:
30409 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30410 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30411 counter[CM_MRA_COUNTER]);
30412 /* fall through */
30413 default:
30414 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30415 case IB_CM_LAP_IDLE:
30416 break;
30417 case IB_CM_MRA_LAP_SENT:
30418 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30419 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30420 counter[CM_LAP_COUNTER]);
30421 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30422 goto unlock;
30423 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30424 cm_free_msg(msg);
30425 goto deref;
30426 case IB_CM_LAP_RCVD:
30427 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30428 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30429 counter[CM_LAP_COUNTER]);
30430 goto unlock;
30431 default:
30432 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30433 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30434 if (cur_cm_id_priv) {
30435 spin_unlock_irq(&cm.lock);
30436 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30437 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30438 counter[CM_SIDR_REQ_COUNTER]);
30439 goto out; /* Duplicate message. */
30440 }
30441 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30442 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30443 msg->retries = 1;
30444
30445 - atomic_long_add(1 + msg->retries,
30446 + atomic_long_add_unchecked(1 + msg->retries,
30447 &port->counter_group[CM_XMIT].counter[attr_index]);
30448 if (msg->retries)
30449 - atomic_long_add(msg->retries,
30450 + atomic_long_add_unchecked(msg->retries,
30451 &port->counter_group[CM_XMIT_RETRIES].
30452 counter[attr_index]);
30453
30454 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30455 }
30456
30457 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30458 - atomic_long_inc(&port->counter_group[CM_RECV].
30459 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30460 counter[attr_id - CM_ATTR_ID_OFFSET]);
30461
30462 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30463 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30464 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30465
30466 return sprintf(buf, "%ld\n",
30467 - atomic_long_read(&group->counter[cm_attr->index]));
30468 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30469 }
30470
30471 -static struct sysfs_ops cm_counter_ops = {
30472 +static const struct sysfs_ops cm_counter_ops = {
30473 .show = cm_show_counter
30474 };
30475
30476 diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30477 --- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30478 +++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30479 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30480
30481 struct task_struct *thread;
30482
30483 - atomic_t req_ser;
30484 - atomic_t flush_ser;
30485 + atomic_unchecked_t req_ser;
30486 + atomic_unchecked_t flush_ser;
30487
30488 wait_queue_head_t force_wait;
30489 };
30490 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30491 struct ib_fmr_pool *pool = pool_ptr;
30492
30493 do {
30494 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30495 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30496 ib_fmr_batch_release(pool);
30497
30498 - atomic_inc(&pool->flush_ser);
30499 + atomic_inc_unchecked(&pool->flush_ser);
30500 wake_up_interruptible(&pool->force_wait);
30501
30502 if (pool->flush_function)
30503 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30504 }
30505
30506 set_current_state(TASK_INTERRUPTIBLE);
30507 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30508 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30509 !kthread_should_stop())
30510 schedule();
30511 __set_current_state(TASK_RUNNING);
30512 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30513 pool->dirty_watermark = params->dirty_watermark;
30514 pool->dirty_len = 0;
30515 spin_lock_init(&pool->pool_lock);
30516 - atomic_set(&pool->req_ser, 0);
30517 - atomic_set(&pool->flush_ser, 0);
30518 + atomic_set_unchecked(&pool->req_ser, 0);
30519 + atomic_set_unchecked(&pool->flush_ser, 0);
30520 init_waitqueue_head(&pool->force_wait);
30521
30522 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30523 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30524 }
30525 spin_unlock_irq(&pool->pool_lock);
30526
30527 - serial = atomic_inc_return(&pool->req_ser);
30528 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30529 wake_up_process(pool->thread);
30530
30531 if (wait_event_interruptible(pool->force_wait,
30532 - atomic_read(&pool->flush_ser) - serial >= 0))
30533 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30534 return -EINTR;
30535
30536 return 0;
30537 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30538 } else {
30539 list_add_tail(&fmr->list, &pool->dirty_list);
30540 if (++pool->dirty_len >= pool->dirty_watermark) {
30541 - atomic_inc(&pool->req_ser);
30542 + atomic_inc_unchecked(&pool->req_ser);
30543 wake_up_process(pool->thread);
30544 }
30545 }
30546 diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30547 --- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30548 +++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30549 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30550 return port_attr->show(p, port_attr, buf);
30551 }
30552
30553 -static struct sysfs_ops port_sysfs_ops = {
30554 +static const struct sysfs_ops port_sysfs_ops = {
30555 .show = port_attr_show
30556 };
30557
30558 diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30559 --- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30560 +++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30561 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30562 dst->grh.sgid_index = src->grh.sgid_index;
30563 dst->grh.hop_limit = src->grh.hop_limit;
30564 dst->grh.traffic_class = src->grh.traffic_class;
30565 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30566 dst->dlid = src->dlid;
30567 dst->sl = src->sl;
30568 dst->src_path_bits = src->src_path_bits;
30569 dst->static_rate = src->static_rate;
30570 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30571 dst->port_num = src->port_num;
30572 + dst->reserved = 0;
30573 }
30574 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30575
30576 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30577 struct ib_qp_attr *src)
30578 {
30579 + dst->qp_state = src->qp_state;
30580 dst->cur_qp_state = src->cur_qp_state;
30581 dst->path_mtu = src->path_mtu;
30582 dst->path_mig_state = src->path_mig_state;
30583 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30584 dst->rnr_retry = src->rnr_retry;
30585 dst->alt_port_num = src->alt_port_num;
30586 dst->alt_timeout = src->alt_timeout;
30587 + memset(dst->reserved, 0, sizeof(dst->reserved));
30588 }
30589 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30590
30591 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30592 --- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30593 +++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30594 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30595 struct infinipath_counters counters;
30596 struct ipath_devdata *dd;
30597
30598 + pax_track_stack();
30599 +
30600 dd = file->f_path.dentry->d_inode->i_private;
30601 dd->ipath_f_read_counters(dd, &counters);
30602
30603 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30604 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30605 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30606 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30607 LIST_HEAD(nes_adapter_list);
30608 static LIST_HEAD(nes_dev_list);
30609
30610 -atomic_t qps_destroyed;
30611 +atomic_unchecked_t qps_destroyed;
30612
30613 static unsigned int ee_flsh_adapter;
30614 static unsigned int sysfs_nonidx_addr;
30615 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30616 struct nes_adapter *nesadapter = nesdev->nesadapter;
30617 u32 qp_id;
30618
30619 - atomic_inc(&qps_destroyed);
30620 + atomic_inc_unchecked(&qps_destroyed);
30621
30622 /* Free the control structures */
30623
30624 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30625 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30626 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30627 @@ -69,11 +69,11 @@ u32 cm_packets_received;
30628 u32 cm_listens_created;
30629 u32 cm_listens_destroyed;
30630 u32 cm_backlog_drops;
30631 -atomic_t cm_loopbacks;
30632 -atomic_t cm_nodes_created;
30633 -atomic_t cm_nodes_destroyed;
30634 -atomic_t cm_accel_dropped_pkts;
30635 -atomic_t cm_resets_recvd;
30636 +atomic_unchecked_t cm_loopbacks;
30637 +atomic_unchecked_t cm_nodes_created;
30638 +atomic_unchecked_t cm_nodes_destroyed;
30639 +atomic_unchecked_t cm_accel_dropped_pkts;
30640 +atomic_unchecked_t cm_resets_recvd;
30641
30642 static inline int mini_cm_accelerated(struct nes_cm_core *,
30643 struct nes_cm_node *);
30644 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30645
30646 static struct nes_cm_core *g_cm_core;
30647
30648 -atomic_t cm_connects;
30649 -atomic_t cm_accepts;
30650 -atomic_t cm_disconnects;
30651 -atomic_t cm_closes;
30652 -atomic_t cm_connecteds;
30653 -atomic_t cm_connect_reqs;
30654 -atomic_t cm_rejects;
30655 +atomic_unchecked_t cm_connects;
30656 +atomic_unchecked_t cm_accepts;
30657 +atomic_unchecked_t cm_disconnects;
30658 +atomic_unchecked_t cm_closes;
30659 +atomic_unchecked_t cm_connecteds;
30660 +atomic_unchecked_t cm_connect_reqs;
30661 +atomic_unchecked_t cm_rejects;
30662
30663
30664 /**
30665 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30666 cm_node->rem_mac);
30667
30668 add_hte_node(cm_core, cm_node);
30669 - atomic_inc(&cm_nodes_created);
30670 + atomic_inc_unchecked(&cm_nodes_created);
30671
30672 return cm_node;
30673 }
30674 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30675 }
30676
30677 atomic_dec(&cm_core->node_cnt);
30678 - atomic_inc(&cm_nodes_destroyed);
30679 + atomic_inc_unchecked(&cm_nodes_destroyed);
30680 nesqp = cm_node->nesqp;
30681 if (nesqp) {
30682 nesqp->cm_node = NULL;
30683 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30684
30685 static void drop_packet(struct sk_buff *skb)
30686 {
30687 - atomic_inc(&cm_accel_dropped_pkts);
30688 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30689 dev_kfree_skb_any(skb);
30690 }
30691
30692 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30693
30694 int reset = 0; /* whether to send reset in case of err.. */
30695 int passive_state;
30696 - atomic_inc(&cm_resets_recvd);
30697 + atomic_inc_unchecked(&cm_resets_recvd);
30698 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30699 " refcnt=%d\n", cm_node, cm_node->state,
30700 atomic_read(&cm_node->ref_count));
30701 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30702 rem_ref_cm_node(cm_node->cm_core, cm_node);
30703 return NULL;
30704 }
30705 - atomic_inc(&cm_loopbacks);
30706 + atomic_inc_unchecked(&cm_loopbacks);
30707 loopbackremotenode->loopbackpartner = cm_node;
30708 loopbackremotenode->tcp_cntxt.rcv_wscale =
30709 NES_CM_DEFAULT_RCV_WND_SCALE;
30710 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30711 add_ref_cm_node(cm_node);
30712 } else if (cm_node->state == NES_CM_STATE_TSA) {
30713 rem_ref_cm_node(cm_core, cm_node);
30714 - atomic_inc(&cm_accel_dropped_pkts);
30715 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30716 dev_kfree_skb_any(skb);
30717 break;
30718 }
30719 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30720
30721 if ((cm_id) && (cm_id->event_handler)) {
30722 if (issue_disconn) {
30723 - atomic_inc(&cm_disconnects);
30724 + atomic_inc_unchecked(&cm_disconnects);
30725 cm_event.event = IW_CM_EVENT_DISCONNECT;
30726 cm_event.status = disconn_status;
30727 cm_event.local_addr = cm_id->local_addr;
30728 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30729 }
30730
30731 if (issue_close) {
30732 - atomic_inc(&cm_closes);
30733 + atomic_inc_unchecked(&cm_closes);
30734 nes_disconnect(nesqp, 1);
30735
30736 cm_id->provider_data = nesqp;
30737 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30738
30739 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30740 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30741 - atomic_inc(&cm_accepts);
30742 + atomic_inc_unchecked(&cm_accepts);
30743
30744 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30745 atomic_read(&nesvnic->netdev->refcnt));
30746 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30747
30748 struct nes_cm_core *cm_core;
30749
30750 - atomic_inc(&cm_rejects);
30751 + atomic_inc_unchecked(&cm_rejects);
30752 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30753 loopback = cm_node->loopbackpartner;
30754 cm_core = cm_node->cm_core;
30755 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30756 ntohl(cm_id->local_addr.sin_addr.s_addr),
30757 ntohs(cm_id->local_addr.sin_port));
30758
30759 - atomic_inc(&cm_connects);
30760 + atomic_inc_unchecked(&cm_connects);
30761 nesqp->active_conn = 1;
30762
30763 /* cache the cm_id in the qp */
30764 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30765 if (nesqp->destroyed) {
30766 return;
30767 }
30768 - atomic_inc(&cm_connecteds);
30769 + atomic_inc_unchecked(&cm_connecteds);
30770 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30771 " local port 0x%04X. jiffies = %lu.\n",
30772 nesqp->hwqp.qp_id,
30773 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30774
30775 ret = cm_id->event_handler(cm_id, &cm_event);
30776 cm_id->add_ref(cm_id);
30777 - atomic_inc(&cm_closes);
30778 + atomic_inc_unchecked(&cm_closes);
30779 cm_event.event = IW_CM_EVENT_CLOSE;
30780 cm_event.status = IW_CM_EVENT_STATUS_OK;
30781 cm_event.provider_data = cm_id->provider_data;
30782 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30783 return;
30784 cm_id = cm_node->cm_id;
30785
30786 - atomic_inc(&cm_connect_reqs);
30787 + atomic_inc_unchecked(&cm_connect_reqs);
30788 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30789 cm_node, cm_id, jiffies);
30790
30791 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30792 return;
30793 cm_id = cm_node->cm_id;
30794
30795 - atomic_inc(&cm_connect_reqs);
30796 + atomic_inc_unchecked(&cm_connect_reqs);
30797 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30798 cm_node, cm_id, jiffies);
30799
30800 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
30801 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30802 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30803 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30804 extern unsigned int wqm_quanta;
30805 extern struct list_head nes_adapter_list;
30806
30807 -extern atomic_t cm_connects;
30808 -extern atomic_t cm_accepts;
30809 -extern atomic_t cm_disconnects;
30810 -extern atomic_t cm_closes;
30811 -extern atomic_t cm_connecteds;
30812 -extern atomic_t cm_connect_reqs;
30813 -extern atomic_t cm_rejects;
30814 -extern atomic_t mod_qp_timouts;
30815 -extern atomic_t qps_created;
30816 -extern atomic_t qps_destroyed;
30817 -extern atomic_t sw_qps_destroyed;
30818 +extern atomic_unchecked_t cm_connects;
30819 +extern atomic_unchecked_t cm_accepts;
30820 +extern atomic_unchecked_t cm_disconnects;
30821 +extern atomic_unchecked_t cm_closes;
30822 +extern atomic_unchecked_t cm_connecteds;
30823 +extern atomic_unchecked_t cm_connect_reqs;
30824 +extern atomic_unchecked_t cm_rejects;
30825 +extern atomic_unchecked_t mod_qp_timouts;
30826 +extern atomic_unchecked_t qps_created;
30827 +extern atomic_unchecked_t qps_destroyed;
30828 +extern atomic_unchecked_t sw_qps_destroyed;
30829 extern u32 mh_detected;
30830 extern u32 mh_pauses_sent;
30831 extern u32 cm_packets_sent;
30832 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30833 extern u32 cm_listens_created;
30834 extern u32 cm_listens_destroyed;
30835 extern u32 cm_backlog_drops;
30836 -extern atomic_t cm_loopbacks;
30837 -extern atomic_t cm_nodes_created;
30838 -extern atomic_t cm_nodes_destroyed;
30839 -extern atomic_t cm_accel_dropped_pkts;
30840 -extern atomic_t cm_resets_recvd;
30841 +extern atomic_unchecked_t cm_loopbacks;
30842 +extern atomic_unchecked_t cm_nodes_created;
30843 +extern atomic_unchecked_t cm_nodes_destroyed;
30844 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30845 +extern atomic_unchecked_t cm_resets_recvd;
30846
30847 extern u32 int_mod_timer_init;
30848 extern u32 int_mod_cq_depth_256;
30849 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
30850 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30851 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30852 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30853 target_stat_values[++index] = mh_detected;
30854 target_stat_values[++index] = mh_pauses_sent;
30855 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30856 - target_stat_values[++index] = atomic_read(&cm_connects);
30857 - target_stat_values[++index] = atomic_read(&cm_accepts);
30858 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30859 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30860 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30861 - target_stat_values[++index] = atomic_read(&cm_rejects);
30862 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30863 - target_stat_values[++index] = atomic_read(&qps_created);
30864 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30865 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30866 - target_stat_values[++index] = atomic_read(&cm_closes);
30867 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30868 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30869 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30870 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30871 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30872 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30873 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30874 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30875 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30876 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30877 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30878 target_stat_values[++index] = cm_packets_sent;
30879 target_stat_values[++index] = cm_packets_bounced;
30880 target_stat_values[++index] = cm_packets_created;
30881 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30882 target_stat_values[++index] = cm_listens_created;
30883 target_stat_values[++index] = cm_listens_destroyed;
30884 target_stat_values[++index] = cm_backlog_drops;
30885 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30886 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30887 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30888 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30889 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30890 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30891 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30892 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30893 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30894 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30895 target_stat_values[++index] = int_mod_timer_init;
30896 target_stat_values[++index] = int_mod_cq_depth_1;
30897 target_stat_values[++index] = int_mod_cq_depth_4;
30898 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
30899 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30900 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30901 @@ -45,9 +45,9 @@
30902
30903 #include <rdma/ib_umem.h>
30904
30905 -atomic_t mod_qp_timouts;
30906 -atomic_t qps_created;
30907 -atomic_t sw_qps_destroyed;
30908 +atomic_unchecked_t mod_qp_timouts;
30909 +atomic_unchecked_t qps_created;
30910 +atomic_unchecked_t sw_qps_destroyed;
30911
30912 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30913
30914 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30915 if (init_attr->create_flags)
30916 return ERR_PTR(-EINVAL);
30917
30918 - atomic_inc(&qps_created);
30919 + atomic_inc_unchecked(&qps_created);
30920 switch (init_attr->qp_type) {
30921 case IB_QPT_RC:
30922 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30923 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30924 struct iw_cm_event cm_event;
30925 int ret;
30926
30927 - atomic_inc(&sw_qps_destroyed);
30928 + atomic_inc_unchecked(&sw_qps_destroyed);
30929 nesqp->destroyed = 1;
30930
30931 /* Blow away the connection if it exists. */
30932 diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
30933 --- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30934 +++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30935 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30936 */
30937 static void gameport_init_port(struct gameport *gameport)
30938 {
30939 - static atomic_t gameport_no = ATOMIC_INIT(0);
30940 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30941
30942 __module_get(THIS_MODULE);
30943
30944 mutex_init(&gameport->drv_mutex);
30945 device_initialize(&gameport->dev);
30946 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30947 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30948 gameport->dev.bus = &gameport_bus;
30949 gameport->dev.release = gameport_release_port;
30950 if (gameport->parent)
30951 diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
30952 --- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30953 +++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30954 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30955 */
30956 int input_register_device(struct input_dev *dev)
30957 {
30958 - static atomic_t input_no = ATOMIC_INIT(0);
30959 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30960 struct input_handler *handler;
30961 const char *path;
30962 int error;
30963 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30964 dev->setkeycode = input_default_setkeycode;
30965
30966 dev_set_name(&dev->dev, "input%ld",
30967 - (unsigned long) atomic_inc_return(&input_no) - 1);
30968 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30969
30970 error = device_add(&dev->dev);
30971 if (error)
30972 diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
30973 --- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
30974 +++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
30975 @@ -30,6 +30,7 @@
30976 #include <linux/kernel.h>
30977 #include <linux/module.h>
30978 #include <linux/slab.h>
30979 +#include <linux/sched.h>
30980 #include <linux/init.h>
30981 #include <linux/input.h>
30982 #include <linux/gameport.h>
30983 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30984 unsigned char buf[SW_LENGTH];
30985 int i;
30986
30987 + pax_track_stack();
30988 +
30989 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30990
30991 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30992 diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
30993 --- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
30994 +++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
30995 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
30996
30997 static int xpad_led_probe(struct usb_xpad *xpad)
30998 {
30999 - static atomic_t led_seq = ATOMIC_INIT(0);
31000 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31001 long led_no;
31002 struct xpad_led *led;
31003 struct led_classdev *led_cdev;
31004 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31005 if (!led)
31006 return -ENOMEM;
31007
31008 - led_no = (long)atomic_inc_return(&led_seq) - 1;
31009 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31010
31011 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31012 led->xpad = xpad;
31013 diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31014 --- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31015 +++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31016 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
31017 */
31018 static void serio_init_port(struct serio *serio)
31019 {
31020 - static atomic_t serio_no = ATOMIC_INIT(0);
31021 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31022
31023 __module_get(THIS_MODULE);
31024
31025 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31026 mutex_init(&serio->drv_mutex);
31027 device_initialize(&serio->dev);
31028 dev_set_name(&serio->dev, "serio%ld",
31029 - (long)atomic_inc_return(&serio_no) - 1);
31030 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
31031 serio->dev.bus = &serio_bus;
31032 serio->dev.release = serio_release_port;
31033 if (serio->parent) {
31034 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31035 --- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31036 +++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31037 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31038 cs->commands_pending = 0;
31039 cs->cur_at_seq = 0;
31040 cs->gotfwver = -1;
31041 - cs->open_count = 0;
31042 + local_set(&cs->open_count, 0);
31043 cs->dev = NULL;
31044 cs->tty = NULL;
31045 cs->tty_dev = NULL;
31046 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31047 --- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31048 +++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31049 @@ -34,6 +34,7 @@
31050 #include <linux/tty_driver.h>
31051 #include <linux/list.h>
31052 #include <asm/atomic.h>
31053 +#include <asm/local.h>
31054
31055 #define GIG_VERSION {0,5,0,0}
31056 #define GIG_COMPAT {0,4,0,0}
31057 @@ -446,7 +447,7 @@ struct cardstate {
31058 spinlock_t cmdlock;
31059 unsigned curlen, cmdbytes;
31060
31061 - unsigned open_count;
31062 + local_t open_count;
31063 struct tty_struct *tty;
31064 struct tasklet_struct if_wake_tasklet;
31065 unsigned control_state;
31066 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31067 --- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31068 +++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31069 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31070 return -ERESTARTSYS; // FIXME -EINTR?
31071 tty->driver_data = cs;
31072
31073 - ++cs->open_count;
31074 -
31075 - if (cs->open_count == 1) {
31076 + if (local_inc_return(&cs->open_count) == 1) {
31077 spin_lock_irqsave(&cs->lock, flags);
31078 cs->tty = tty;
31079 spin_unlock_irqrestore(&cs->lock, flags);
31080 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31081
31082 if (!cs->connected)
31083 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31084 - else if (!cs->open_count)
31085 + else if (!local_read(&cs->open_count))
31086 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31087 else {
31088 - if (!--cs->open_count) {
31089 + if (!local_dec_return(&cs->open_count)) {
31090 spin_lock_irqsave(&cs->lock, flags);
31091 cs->tty = NULL;
31092 spin_unlock_irqrestore(&cs->lock, flags);
31093 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31094 if (!cs->connected) {
31095 gig_dbg(DEBUG_IF, "not connected");
31096 retval = -ENODEV;
31097 - } else if (!cs->open_count)
31098 + } else if (!local_read(&cs->open_count))
31099 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31100 else {
31101 retval = 0;
31102 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31103 if (!cs->connected) {
31104 gig_dbg(DEBUG_IF, "not connected");
31105 retval = -ENODEV;
31106 - } else if (!cs->open_count)
31107 + } else if (!local_read(&cs->open_count))
31108 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31109 else if (cs->mstate != MS_LOCKED) {
31110 dev_warn(cs->dev, "can't write to unlocked device\n");
31111 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31112 if (!cs->connected) {
31113 gig_dbg(DEBUG_IF, "not connected");
31114 retval = -ENODEV;
31115 - } else if (!cs->open_count)
31116 + } else if (!local_read(&cs->open_count))
31117 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31118 else if (cs->mstate != MS_LOCKED) {
31119 dev_warn(cs->dev, "can't write to unlocked device\n");
31120 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31121
31122 if (!cs->connected)
31123 gig_dbg(DEBUG_IF, "not connected");
31124 - else if (!cs->open_count)
31125 + else if (!local_read(&cs->open_count))
31126 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31127 else if (cs->mstate != MS_LOCKED)
31128 dev_warn(cs->dev, "can't write to unlocked device\n");
31129 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31130
31131 if (!cs->connected)
31132 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31133 - else if (!cs->open_count)
31134 + else if (!local_read(&cs->open_count))
31135 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31136 else {
31137 //FIXME
31138 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31139
31140 if (!cs->connected)
31141 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31142 - else if (!cs->open_count)
31143 + else if (!local_read(&cs->open_count))
31144 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31145 else {
31146 //FIXME
31147 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31148 goto out;
31149 }
31150
31151 - if (!cs->open_count) {
31152 + if (!local_read(&cs->open_count)) {
31153 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31154 goto out;
31155 }
31156 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31157 --- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31158 +++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31159 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31160 }
31161 if (left) {
31162 if (t4file->user) {
31163 - if (copy_from_user(buf, dp, left))
31164 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31165 return -EFAULT;
31166 } else {
31167 memcpy(buf, dp, left);
31168 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31169 }
31170 if (left) {
31171 if (config->user) {
31172 - if (copy_from_user(buf, dp, left))
31173 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31174 return -EFAULT;
31175 } else {
31176 memcpy(buf, dp, left);
31177 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31178 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31179 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31180 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31181 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31182 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31183
31184 + pax_track_stack();
31185
31186 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31187 {
31188 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31189 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31190 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31191 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31192 IDI_SYNC_REQ req;
31193 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31194
31195 + pax_track_stack();
31196 +
31197 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31198
31199 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31200 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31201 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31202 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31203 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31204 IDI_SYNC_REQ req;
31205 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31206
31207 + pax_track_stack();
31208 +
31209 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31210
31211 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31212 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31213 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31214 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31215 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31216 IDI_SYNC_REQ req;
31217 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31218
31219 + pax_track_stack();
31220 +
31221 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31222
31223 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31224 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31225 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31226 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31227 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31228 } diva_didd_add_adapter_t;
31229 typedef struct _diva_didd_remove_adapter {
31230 IDI_CALL p_request;
31231 -} diva_didd_remove_adapter_t;
31232 +} __no_const diva_didd_remove_adapter_t;
31233 typedef struct _diva_didd_read_adapter_array {
31234 void * buffer;
31235 dword length;
31236 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31237 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31238 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31239 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31240 IDI_SYNC_REQ req;
31241 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31242
31243 + pax_track_stack();
31244 +
31245 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31246
31247 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31248 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31249 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31250 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31251 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31252 dword d;
31253 word w;
31254
31255 + pax_track_stack();
31256 +
31257 a = plci->adapter;
31258 Id = ((word)plci->Id<<8)|a->Id;
31259 PUT_WORD(&SS_Ind[4],0x0000);
31260 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31261 word j, n, w;
31262 dword d;
31263
31264 + pax_track_stack();
31265 +
31266
31267 for(i=0;i<8;i++) bp_parms[i].length = 0;
31268 for(i=0;i<2;i++) global_config[i].length = 0;
31269 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31270 const byte llc3[] = {4,3,2,2,6,6,0};
31271 const byte header[] = {0,2,3,3,0,0,0};
31272
31273 + pax_track_stack();
31274 +
31275 for(i=0;i<8;i++) bp_parms[i].length = 0;
31276 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31277 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31278 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31279 word appl_number_group_type[MAX_APPL];
31280 PLCI *auxplci;
31281
31282 + pax_track_stack();
31283 +
31284 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31285
31286 if(!a->group_optimization_enabled)
31287 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31288 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31289 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31290 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31291 IDI_SYNC_REQ req;
31292 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31293
31294 + pax_track_stack();
31295 +
31296 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31297
31298 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31299 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31300 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31301 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31302 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31303 typedef struct _diva_os_idi_adapter_interface {
31304 diva_init_card_proc_t cleanup_adapter_proc;
31305 diva_cmd_card_proc_t cmd_proc;
31306 -} diva_os_idi_adapter_interface_t;
31307 +} __no_const diva_os_idi_adapter_interface_t;
31308
31309 typedef struct _diva_os_xdi_adapter {
31310 struct list_head link;
31311 diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31312 --- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31313 +++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31314 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31315 } iocpar;
31316 void __user *argp = (void __user *)arg;
31317
31318 + pax_track_stack();
31319 +
31320 #define name iocpar.name
31321 #define bname iocpar.bname
31322 #define iocts iocpar.iocts
31323 diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31324 --- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31325 +++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31326 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31327 if (count > len)
31328 count = len;
31329 if (user) {
31330 - if (copy_from_user(msg, buf, count))
31331 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31332 return -EFAULT;
31333 } else
31334 memcpy(msg, buf, count);
31335 diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31336 --- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31337 +++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31338 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31339 if (dev) {
31340 struct mISDN_devinfo di;
31341
31342 + memset(&di, 0, sizeof(di));
31343 di.id = dev->id;
31344 di.Dprotocols = dev->Dprotocols;
31345 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31346 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31347 if (dev) {
31348 struct mISDN_devinfo di;
31349
31350 + memset(&di, 0, sizeof(di));
31351 di.id = dev->id;
31352 di.Dprotocols = dev->Dprotocols;
31353 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31354 diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31355 --- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31356 +++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31357 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31358 }
31359 else if(callid>=0x0000 && callid<=0x7FFF)
31360 {
31361 + int len;
31362 +
31363 pr_debug("%s: Got Incoming Call\n",
31364 sc_adapter[card]->devicename);
31365 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31366 - strcpy(setup.eazmsn,
31367 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31368 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31369 + sizeof(setup.phone));
31370 + if (len >= sizeof(setup.phone))
31371 + continue;
31372 + len = strlcpy(setup.eazmsn,
31373 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31374 + sizeof(setup.eazmsn));
31375 + if (len >= sizeof(setup.eazmsn))
31376 + continue;
31377 setup.si1 = 7;
31378 setup.si2 = 0;
31379 setup.plan = 0;
31380 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31381 * Handle a GetMyNumber Rsp
31382 */
31383 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31384 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31385 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31386 + rcvmsg.msg_data.byte_array,
31387 + sizeof(rcvmsg.msg_data.byte_array));
31388 continue;
31389 }
31390
31391 diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31392 --- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31393 +++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31394 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
31395 * it's worked so far. The end address needs +1 because __get_vm_area
31396 * allocates an extra guard page, so we need space for that.
31397 */
31398 +
31399 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31400 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31401 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31402 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31403 +#else
31404 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31405 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31406 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31407 +#endif
31408 +
31409 if (!switcher_vma) {
31410 err = -ENOMEM;
31411 printk("lguest: could not map switcher pages high\n");
31412 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
31413 * Now the Switcher is mapped at the right address, we can't fail!
31414 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31415 */
31416 - memcpy(switcher_vma->addr, start_switcher_text,
31417 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31418 end_switcher_text - start_switcher_text);
31419
31420 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31421 diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31422 --- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31423 +++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31424 @@ -59,7 +59,7 @@ static struct {
31425 /* Offset from where switcher.S was compiled to where we've copied it */
31426 static unsigned long switcher_offset(void)
31427 {
31428 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31429 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31430 }
31431
31432 /* This cpu's struct lguest_pages. */
31433 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31434 * These copies are pretty cheap, so we do them unconditionally: */
31435 /* Save the current Host top-level page directory.
31436 */
31437 +
31438 +#ifdef CONFIG_PAX_PER_CPU_PGD
31439 + pages->state.host_cr3 = read_cr3();
31440 +#else
31441 pages->state.host_cr3 = __pa(current->mm->pgd);
31442 +#endif
31443 +
31444 /*
31445 * Set up the Guest's page tables to see this CPU's pages (and no
31446 * other CPU's pages).
31447 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31448 * compiled-in switcher code and the high-mapped copy we just made.
31449 */
31450 for (i = 0; i < IDT_ENTRIES; i++)
31451 - default_idt_entries[i] += switcher_offset();
31452 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31453
31454 /*
31455 * Set up the Switcher's per-cpu areas.
31456 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31457 * it will be undisturbed when we switch. To change %cs and jump we
31458 * need this structure to feed to Intel's "lcall" instruction.
31459 */
31460 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31461 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31462 lguest_entry.segment = LGUEST_CS;
31463
31464 /*
31465 diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31466 --- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31467 +++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31468 @@ -87,6 +87,7 @@
31469 #include <asm/page.h>
31470 #include <asm/segment.h>
31471 #include <asm/lguest.h>
31472 +#include <asm/processor-flags.h>
31473
31474 // We mark the start of the code to copy
31475 // It's placed in .text tho it's never run here
31476 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31477 // Changes type when we load it: damn Intel!
31478 // For after we switch over our page tables
31479 // That entry will be read-only: we'd crash.
31480 +
31481 +#ifdef CONFIG_PAX_KERNEXEC
31482 + mov %cr0, %edx
31483 + xor $X86_CR0_WP, %edx
31484 + mov %edx, %cr0
31485 +#endif
31486 +
31487 movl $(GDT_ENTRY_TSS*8), %edx
31488 ltr %dx
31489
31490 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31491 // Let's clear it again for our return.
31492 // The GDT descriptor of the Host
31493 // Points to the table after two "size" bytes
31494 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31495 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31496 // Clear "used" from type field (byte 5, bit 2)
31497 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31498 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31499 +
31500 +#ifdef CONFIG_PAX_KERNEXEC
31501 + mov %cr0, %eax
31502 + xor $X86_CR0_WP, %eax
31503 + mov %eax, %cr0
31504 +#endif
31505
31506 // Once our page table's switched, the Guest is live!
31507 // The Host fades as we run this final step.
31508 @@ -295,13 +309,12 @@ deliver_to_host:
31509 // I consulted gcc, and it gave
31510 // These instructions, which I gladly credit:
31511 leal (%edx,%ebx,8), %eax
31512 - movzwl (%eax),%edx
31513 - movl 4(%eax), %eax
31514 - xorw %ax, %ax
31515 - orl %eax, %edx
31516 + movl 4(%eax), %edx
31517 + movw (%eax), %dx
31518 // Now the address of the handler's in %edx
31519 // We call it now: its "iret" drops us home.
31520 - jmp *%edx
31521 + ljmp $__KERNEL_CS, $1f
31522 +1: jmp *%edx
31523
31524 // Every interrupt can come to us here
31525 // But we must truly tell each apart.
31526 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31527 --- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31528 +++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31529 @@ -15,7 +15,7 @@
31530
31531 #define MAX_PMU_LEVEL 0xFF
31532
31533 -static struct backlight_ops pmu_backlight_data;
31534 +static const struct backlight_ops pmu_backlight_data;
31535 static DEFINE_SPINLOCK(pmu_backlight_lock);
31536 static int sleeping, uses_pmu_bl;
31537 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31538 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31539 return bd->props.brightness;
31540 }
31541
31542 -static struct backlight_ops pmu_backlight_data = {
31543 +static const struct backlight_ops pmu_backlight_data = {
31544 .get_brightness = pmu_backlight_get_brightness,
31545 .update_status = pmu_backlight_update_status,
31546
31547 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31548 --- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31549 +++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31550 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31551 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31552 }
31553
31554 -static struct platform_suspend_ops pmu_pm_ops = {
31555 +static const struct platform_suspend_ops pmu_pm_ops = {
31556 .enter = powerbook_sleep,
31557 .valid = pmu_sleep_valid,
31558 };
31559 diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31560 --- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31561 +++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31562 @@ -165,9 +165,9 @@ struct mapped_device {
31563 /*
31564 * Event handling.
31565 */
31566 - atomic_t event_nr;
31567 + atomic_unchecked_t event_nr;
31568 wait_queue_head_t eventq;
31569 - atomic_t uevent_seq;
31570 + atomic_unchecked_t uevent_seq;
31571 struct list_head uevent_list;
31572 spinlock_t uevent_lock; /* Protect access to uevent_list */
31573
31574 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31575 rwlock_init(&md->map_lock);
31576 atomic_set(&md->holders, 1);
31577 atomic_set(&md->open_count, 0);
31578 - atomic_set(&md->event_nr, 0);
31579 - atomic_set(&md->uevent_seq, 0);
31580 + atomic_set_unchecked(&md->event_nr, 0);
31581 + atomic_set_unchecked(&md->uevent_seq, 0);
31582 INIT_LIST_HEAD(&md->uevent_list);
31583 spin_lock_init(&md->uevent_lock);
31584
31585 @@ -1927,7 +1927,7 @@ static void event_callback(void *context
31586
31587 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31588
31589 - atomic_inc(&md->event_nr);
31590 + atomic_inc_unchecked(&md->event_nr);
31591 wake_up(&md->eventq);
31592 }
31593
31594 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31595
31596 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31597 {
31598 - return atomic_add_return(1, &md->uevent_seq);
31599 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31600 }
31601
31602 uint32_t dm_get_event_nr(struct mapped_device *md)
31603 {
31604 - return atomic_read(&md->event_nr);
31605 + return atomic_read_unchecked(&md->event_nr);
31606 }
31607
31608 int dm_wait_event(struct mapped_device *md, int event_nr)
31609 {
31610 return wait_event_interruptible(md->eventq,
31611 - (event_nr != atomic_read(&md->event_nr)));
31612 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31613 }
31614
31615 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31616 diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31617 --- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31618 +++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31619 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31620 cmd == DM_LIST_VERSIONS_CMD)
31621 return 0;
31622
31623 - if ((cmd == DM_DEV_CREATE_CMD)) {
31624 + if (cmd == DM_DEV_CREATE_CMD) {
31625 if (!*param->name) {
31626 DMWARN("name not supplied when creating device");
31627 return -EINVAL;
31628 diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31629 --- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31630 +++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31631 @@ -41,7 +41,7 @@ enum dm_raid1_error {
31632
31633 struct mirror {
31634 struct mirror_set *ms;
31635 - atomic_t error_count;
31636 + atomic_unchecked_t error_count;
31637 unsigned long error_type;
31638 struct dm_dev *dev;
31639 sector_t offset;
31640 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31641 * simple way to tell if a device has encountered
31642 * errors.
31643 */
31644 - atomic_inc(&m->error_count);
31645 + atomic_inc_unchecked(&m->error_count);
31646
31647 if (test_and_set_bit(error_type, &m->error_type))
31648 return;
31649 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31650 }
31651
31652 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31653 - if (!atomic_read(&new->error_count)) {
31654 + if (!atomic_read_unchecked(&new->error_count)) {
31655 set_default_mirror(new);
31656 break;
31657 }
31658 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31659 struct mirror *m = get_default_mirror(ms);
31660
31661 do {
31662 - if (likely(!atomic_read(&m->error_count)))
31663 + if (likely(!atomic_read_unchecked(&m->error_count)))
31664 return m;
31665
31666 if (m-- == ms->mirror)
31667 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31668 {
31669 struct mirror *default_mirror = get_default_mirror(m->ms);
31670
31671 - return !atomic_read(&default_mirror->error_count);
31672 + return !atomic_read_unchecked(&default_mirror->error_count);
31673 }
31674
31675 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31676 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31677 */
31678 if (likely(region_in_sync(ms, region, 1)))
31679 m = choose_mirror(ms, bio->bi_sector);
31680 - else if (m && atomic_read(&m->error_count))
31681 + else if (m && atomic_read_unchecked(&m->error_count))
31682 m = NULL;
31683
31684 if (likely(m))
31685 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31686 }
31687
31688 ms->mirror[mirror].ms = ms;
31689 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31690 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31691 ms->mirror[mirror].error_type = 0;
31692 ms->mirror[mirror].offset = offset;
31693
31694 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31695 */
31696 static char device_status_char(struct mirror *m)
31697 {
31698 - if (!atomic_read(&(m->error_count)))
31699 + if (!atomic_read_unchecked(&(m->error_count)))
31700 return 'A';
31701
31702 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31703 diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31704 --- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31705 +++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31706 @@ -20,7 +20,7 @@ struct stripe {
31707 struct dm_dev *dev;
31708 sector_t physical_start;
31709
31710 - atomic_t error_count;
31711 + atomic_unchecked_t error_count;
31712 };
31713
31714 struct stripe_c {
31715 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31716 kfree(sc);
31717 return r;
31718 }
31719 - atomic_set(&(sc->stripe[i].error_count), 0);
31720 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31721 }
31722
31723 ti->private = sc;
31724 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31725 DMEMIT("%d ", sc->stripes);
31726 for (i = 0; i < sc->stripes; i++) {
31727 DMEMIT("%s ", sc->stripe[i].dev->name);
31728 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31729 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31730 'D' : 'A';
31731 }
31732 buffer[i] = '\0';
31733 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31734 */
31735 for (i = 0; i < sc->stripes; i++)
31736 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31737 - atomic_inc(&(sc->stripe[i].error_count));
31738 - if (atomic_read(&(sc->stripe[i].error_count)) <
31739 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31740 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31741 DM_IO_ERROR_THRESHOLD)
31742 queue_work(kstriped, &sc->kstriped_ws);
31743 }
31744 diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31745 --- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31746 +++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31747 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31748 NULL,
31749 };
31750
31751 -static struct sysfs_ops dm_sysfs_ops = {
31752 +static const struct sysfs_ops dm_sysfs_ops = {
31753 .show = dm_attr_show,
31754 };
31755
31756 diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
31757 --- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31758 +++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31759 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31760 if (!dev_size)
31761 return 0;
31762
31763 - if ((start >= dev_size) || (start + len > dev_size)) {
31764 + if ((start >= dev_size) || (len > dev_size - start)) {
31765 DMWARN("%s: %s too small for target: "
31766 "start=%llu, len=%llu, dev_size=%llu",
31767 dm_device_name(ti->table->md), bdevname(bdev, b),
31768 diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
31769 --- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31770 +++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31771 @@ -153,10 +153,10 @@ static int start_readonly;
31772 * start build, activate spare
31773 */
31774 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31775 -static atomic_t md_event_count;
31776 +static atomic_unchecked_t md_event_count;
31777 void md_new_event(mddev_t *mddev)
31778 {
31779 - atomic_inc(&md_event_count);
31780 + atomic_inc_unchecked(&md_event_count);
31781 wake_up(&md_event_waiters);
31782 }
31783 EXPORT_SYMBOL_GPL(md_new_event);
31784 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31785 */
31786 static void md_new_event_inintr(mddev_t *mddev)
31787 {
31788 - atomic_inc(&md_event_count);
31789 + atomic_inc_unchecked(&md_event_count);
31790 wake_up(&md_event_waiters);
31791 }
31792
31793 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31794
31795 rdev->preferred_minor = 0xffff;
31796 rdev->data_offset = le64_to_cpu(sb->data_offset);
31797 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31798 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31799
31800 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31801 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31802 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31803 else
31804 sb->resync_offset = cpu_to_le64(0);
31805
31806 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31807 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31808
31809 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31810 sb->size = cpu_to_le64(mddev->dev_sectors);
31811 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31812 static ssize_t
31813 errors_show(mdk_rdev_t *rdev, char *page)
31814 {
31815 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31816 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31817 }
31818
31819 static ssize_t
31820 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31821 char *e;
31822 unsigned long n = simple_strtoul(buf, &e, 10);
31823 if (*buf && (*e == 0 || *e == '\n')) {
31824 - atomic_set(&rdev->corrected_errors, n);
31825 + atomic_set_unchecked(&rdev->corrected_errors, n);
31826 return len;
31827 }
31828 return -EINVAL;
31829 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31830 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31831 kfree(rdev);
31832 }
31833 -static struct sysfs_ops rdev_sysfs_ops = {
31834 +static const struct sysfs_ops rdev_sysfs_ops = {
31835 .show = rdev_attr_show,
31836 .store = rdev_attr_store,
31837 };
31838 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31839 rdev->data_offset = 0;
31840 rdev->sb_events = 0;
31841 atomic_set(&rdev->nr_pending, 0);
31842 - atomic_set(&rdev->read_errors, 0);
31843 - atomic_set(&rdev->corrected_errors, 0);
31844 + atomic_set_unchecked(&rdev->read_errors, 0);
31845 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31846
31847 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31848 if (!size) {
31849 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31850 kfree(mddev);
31851 }
31852
31853 -static struct sysfs_ops md_sysfs_ops = {
31854 +static const struct sysfs_ops md_sysfs_ops = {
31855 .show = md_attr_show,
31856 .store = md_attr_store,
31857 };
31858 @@ -4474,7 +4474,8 @@ out:
31859 err = 0;
31860 blk_integrity_unregister(disk);
31861 md_new_event(mddev);
31862 - sysfs_notify_dirent(mddev->sysfs_state);
31863 + if (mddev->sysfs_state)
31864 + sysfs_notify_dirent(mddev->sysfs_state);
31865 return err;
31866 }
31867
31868 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31869
31870 spin_unlock(&pers_lock);
31871 seq_printf(seq, "\n");
31872 - mi->event = atomic_read(&md_event_count);
31873 + mi->event = atomic_read_unchecked(&md_event_count);
31874 return 0;
31875 }
31876 if (v == (void*)2) {
31877 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31878 chunk_kb ? "KB" : "B");
31879 if (bitmap->file) {
31880 seq_printf(seq, ", file: ");
31881 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31882 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31883 }
31884
31885 seq_printf(seq, "\n");
31886 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31887 else {
31888 struct seq_file *p = file->private_data;
31889 p->private = mi;
31890 - mi->event = atomic_read(&md_event_count);
31891 + mi->event = atomic_read_unchecked(&md_event_count);
31892 }
31893 return error;
31894 }
31895 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31896 /* always allow read */
31897 mask = POLLIN | POLLRDNORM;
31898
31899 - if (mi->event != atomic_read(&md_event_count))
31900 + if (mi->event != atomic_read_unchecked(&md_event_count))
31901 mask |= POLLERR | POLLPRI;
31902 return mask;
31903 }
31904 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31905 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31906 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31907 (int)part_stat_read(&disk->part0, sectors[1]) -
31908 - atomic_read(&disk->sync_io);
31909 + atomic_read_unchecked(&disk->sync_io);
31910 /* sync IO will cause sync_io to increase before the disk_stats
31911 * as sync_io is counted when a request starts, and
31912 * disk_stats is counted when it completes.
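
[Editorial sketch, not part of the patch] The md.c changes above, and the md.h/raid1/raid10/raid5 hunks that follow, convert counters that are pure statistics (md_event_count, read_errors, corrected_errors, sync_io) from atomic_t to atomic_unchecked_t. Under PaX REFCOUNT hardening, plain atomic_t operations are treated as reference counts whose overflow must trap; the _unchecked variants keep ordinary wrapping semantics for values where wrap-around is harmless. A rough userspace model of that distinction (an assumption about the behaviour, not the PaX implementation, which is arch-specific assembly):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct atomic_checked   { int counter; };          /* refcount-style: overflow is a bug */
struct atomic_unchecked { unsigned int counter; }; /* statistic: wrap-around is harmless */

static void checked_inc(struct atomic_checked *v)
{
        if (v->counter == INT_MAX) {   /* the increment would overflow */
                fprintf(stderr, "refcount overflow caught\n");
                abort();
        }
        v->counter++;
}

static void unchecked_inc(struct atomic_unchecked *v)
{
        v->counter++;                  /* unsigned wrap is well defined and tolerated */
}

int main(void)
{
        struct atomic_unchecked events = { UINT_MAX };
        unchecked_inc(&events);
        printf("event counter wrapped to %u\n", events.counter);

        struct atomic_checked refs = { INT_MAX };
        checked_inc(&refs);            /* aborts: a wrapping refcount means use-after-free */
        return 0;
}
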
31913 diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
31914 --- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31915 +++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31916 @@ -94,10 +94,10 @@ struct mdk_rdev_s
31917 * only maintained for arrays that
31918 * support hot removal
31919 */
31920 - atomic_t read_errors; /* number of consecutive read errors that
31921 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31922 * we have tried to ignore.
31923 */
31924 - atomic_t corrected_errors; /* number of corrected read errors,
31925 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31926 * for reporting to userspace and storing
31927 * in superblock.
31928 */
31929 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31930
31931 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31932 {
31933 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31934 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31935 }
31936
31937 struct mdk_personality
31938 diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
31939 --- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31940 +++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31941 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31942 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31943 set_bit(R10BIO_Uptodate, &r10_bio->state);
31944 else {
31945 - atomic_add(r10_bio->sectors,
31946 + atomic_add_unchecked(r10_bio->sectors,
31947 &conf->mirrors[d].rdev->corrected_errors);
31948 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31949 md_error(r10_bio->mddev,
31950 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31951 test_bit(In_sync, &rdev->flags)) {
31952 atomic_inc(&rdev->nr_pending);
31953 rcu_read_unlock();
31954 - atomic_add(s, &rdev->corrected_errors);
31955 + atomic_add_unchecked(s, &rdev->corrected_errors);
31956 if (sync_page_io(rdev->bdev,
31957 r10_bio->devs[sl].addr +
31958 sect + rdev->data_offset,
31959 diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
31960 --- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31961 +++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31962 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31963 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31964 continue;
31965 rdev = conf->mirrors[d].rdev;
31966 - atomic_add(s, &rdev->corrected_errors);
31967 + atomic_add_unchecked(s, &rdev->corrected_errors);
31968 if (sync_page_io(rdev->bdev,
31969 sect + rdev->data_offset,
31970 s<<9,
31971 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
31972 /* Well, this device is dead */
31973 md_error(mddev, rdev);
31974 else {
31975 - atomic_add(s, &rdev->corrected_errors);
31976 + atomic_add_unchecked(s, &rdev->corrected_errors);
31977 printk(KERN_INFO
31978 "raid1:%s: read error corrected "
31979 "(%d sectors at %llu on %s)\n",
31980 diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
31981 --- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
31982 +++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
31983 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
31984 bi->bi_next = NULL;
31985 if ((rw & WRITE) &&
31986 test_bit(R5_ReWrite, &sh->dev[i].flags))
31987 - atomic_add(STRIPE_SECTORS,
31988 + atomic_add_unchecked(STRIPE_SECTORS,
31989 &rdev->corrected_errors);
31990 generic_make_request(bi);
31991 } else {
31992 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
31993 clear_bit(R5_ReadError, &sh->dev[i].flags);
31994 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31995 }
31996 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31997 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31998 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31999 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32000 } else {
32001 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32002 int retry = 0;
32003 rdev = conf->disks[i].rdev;
32004
32005 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32006 - atomic_inc(&rdev->read_errors);
32007 + atomic_inc_unchecked(&rdev->read_errors);
32008 if (conf->mddev->degraded >= conf->max_degraded)
32009 printk_rl(KERN_WARNING
32010 "raid5:%s: read error not correctable "
32011 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32012 (unsigned long long)(sh->sector
32013 + rdev->data_offset),
32014 bdn);
32015 - else if (atomic_read(&rdev->read_errors)
32016 + else if (atomic_read_unchecked(&rdev->read_errors)
32017 > conf->max_nr_stripes)
32018 printk(KERN_WARNING
32019 "raid5:%s: Too many read errors, failing device %s.\n",
32020 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32021 sector_t r_sector;
32022 struct stripe_head sh2;
32023
32024 + pax_track_stack();
32025
32026 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32027 stripe = new_sector;
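
[Editorial sketch, not part of the patch] Many hunks in this patch, raid5's compute_blocknr() above included, insert pax_track_stack() at the top of functions that place large arrays or structures on the stack; it ties into PaX's stack-usage tracking and clearing. A hedged sketch of how such a marker can compile away when the option is off (the real definition lives in the PaX headers and may differ):

#ifdef CONFIG_PAX_MEMORY_STACKLEAK
void pax_track_stack(void);                  /* provided by the PaX runtime in the patched kernel */
#else
#define pax_track_stack() do { } while (0)   /* compiles away when the feature is off */
#endif

static void calculate_clipping(void)
{
        int x[32], y[32], w[32], h[32];      /* large on-stack working set */

        pax_track_stack();                   /* record/clear the deep stack usage */
        x[0] = y[0] = w[0] = h[0] = 0;
        (void)x; (void)y; (void)w; (void)h;
}

int main(void)
{
        calculate_clipping();
        return 0;
}
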
32028 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_fops.c linux-2.6.32.45/drivers/media/common/saa7146_fops.c
32029 --- linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
32030 +++ linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
32031 @@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
32032 ERR(("out of memory. aborting.\n"));
32033 return -ENOMEM;
32034 }
32035 - ext_vv->ops = saa7146_video_ioctl_ops;
32036 + memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32037 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32038
32039 DEB_EE(("dev:%p\n",dev));
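
[Editorial sketch, not part of the patch] The saa7146 hunk above belongs to the constification theme of this patch: structures made purely of function pointers are turned const so they can be placed in read-only memory, and the few callers that legitimately fill them at init time are rewritten to go through explicit memcpy()/*(void **) casts. The safe end state is a callback table fully initialised at compile time, as in this standalone sketch (all names are invented for illustration):

#include <stdio.h>

struct video_ioctl_ops {
        int (*enum_input)(int index);
        int (*g_input)(void);
};

static int demo_enum_input(int index) { return index; }
static int demo_g_input(void)         { return 0; }

/* fully initialised at compile time and const: the linker can place it in
   read-only memory, so no runtime bug can redirect the callbacks */
static const struct video_ioctl_ops demo_ops = {
        .enum_input = demo_enum_input,
        .g_input    = demo_g_input,
};

int main(void)
{
        printf("%d %d\n", demo_ops.enum_input(3), demo_ops.g_input());
        return 0;
}
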
32040 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32041 --- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32042 +++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32043 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
32044
32045 int x[32], y[32], w[32], h[32];
32046
32047 + pax_track_stack();
32048 +
32049 /* clear out memory */
32050 memset(&line_list[0], 0x00, sizeof(u32)*32);
32051 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32052 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32053 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32054 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32055 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32056 u8 buf[HOST_LINK_BUF_SIZE];
32057 int i;
32058
32059 + pax_track_stack();
32060 +
32061 dprintk("%s\n", __func__);
32062
32063 /* check if we have space for a link buf in the rx_buffer */
32064 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32065 unsigned long timeout;
32066 int written;
32067
32068 + pax_track_stack();
32069 +
32070 dprintk("%s\n", __func__);
32071
32072 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32073 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32074 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32075 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32076 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
32077 union {
32078 dmx_ts_cb ts;
32079 dmx_section_cb sec;
32080 - } cb;
32081 + } __no_const cb;
32082
32083 struct dvb_demux *demux;
32084 void *priv;
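
[Editorial sketch, not part of the patch] __no_const, applied to the dvb_demux callback union above and to several adapter-state structs below, is the opt-out for the same constification pass: it marks function-pointer aggregates that must stay writable because they are assigned per device at run time. Outside the constify plugin the annotation can simply expand to nothing; a hedged sketch of that shape:

/* outside the constify gcc plugin the annotation can expand to nothing */
#ifndef __no_const
#define __no_const
#endif

struct xfer_ops {
        int (*fifo_ctrl)(int onoff);
        int (*pid_ctrl)(int index, int pid, int onoff);
} __no_const;                           /* stays writable: filled in per device at probe time */

static int demo_fifo_ctrl(int onoff) { return onoff; }

int main(void)
{
        struct xfer_ops ops = { 0 };
        ops.fifo_ctrl = demo_fifo_ctrl; /* run-time assignment is exactly why it is __no_const */
        return ops.fifo_ctrl(1) - 1;
}
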
32085 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32086 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32087 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32088 @@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32089 dvbdev->fops = dvbdevfops;
32090 init_waitqueue_head (&dvbdev->wait_queue);
32091
32092 - memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32093 - dvbdevfops->owner = adap->module;
32094 + memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32095 + *(void **)&dvbdevfops->owner = adap->module;
32096
32097 list_add_tail (&dvbdev->list_head, &adap->device_list);
32098
32099 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32100 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32101 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32102 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32103 struct dib0700_adapter_state {
32104 int (*set_param_save) (struct dvb_frontend *,
32105 struct dvb_frontend_parameters *);
32106 -};
32107 +} __no_const;
32108
32109 static int dib7070_set_param_override(struct dvb_frontend *fe,
32110 struct dvb_frontend_parameters *fep)
32111 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32112 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32113 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32114 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32115
32116 u8 buf[260];
32117
32118 + pax_track_stack();
32119 +
32120 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32121 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32122
32123 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32124 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32125 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32126 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32127
32128 struct dib0700_adapter_state {
32129 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32130 -};
32131 +} __no_const;
32132
32133 /* Hauppauge Nova-T 500 (aka Bristol)
32134 * has a LNA on GPIO0 which is enabled by setting 1 */
32135 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32136 --- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32137 +++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32138 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32139 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32140 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32141 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32142 -};
32143 +} __no_const;
32144
32145 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32146 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32147 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32148 --- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32149 +++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32150 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32151 u8 tudata[585];
32152 int i;
32153
32154 + pax_track_stack();
32155 +
32156 dprintk("Firmware is %zd bytes\n",fw->size);
32157
32158 /* Get eprom data */
32159 diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c
32160 --- linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32161 +++ linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32162 @@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32163 ERR(("cannot init capture device. skipping.\n"));
32164 return -ENODEV;
32165 }
32166 - vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32167 - vv_data->ops.vidioc_g_input = vidioc_g_input;
32168 - vv_data->ops.vidioc_s_input = vidioc_s_input;
32169 - vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32170 - vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32171 - vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32172 - vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32173 - vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32174 - vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32175 - vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32176 - vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32177 - vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32178 + *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32179 + *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32180 + *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32181 + *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32182 + *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32183 + *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32184 + *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32185 + *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32186 + *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32187 + *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32188 + *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32189 + *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32190
32191 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32192 ERR(("cannot register capture device. skipping.\n"));
32193 diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c
32194 --- linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32195 +++ linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32196 @@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32197 ERR(("cannot init vv subsystem.\n"));
32198 return err;
32199 }
32200 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32201 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32202 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32203 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32204 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32205 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32206
32207 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32208 /* fixme: proper cleanup here */
32209 diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32210 --- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32211 +++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32212 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32213 while (i < count && dev->rdsin != dev->rdsout)
32214 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32215
32216 - if (copy_to_user(data, readbuf, i))
32217 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32218 return -EFAULT;
32219 return i;
32220 }
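
[Editorial sketch, not part of the patch] The radio-cadet hunk above bounds the computed length against the on-stack buffer before handing it to copy_to_user(), so a miscounted i can no longer read past readbuf. A userspace analogue of the pattern (buffer size and names are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define RDS_BUF_SZ 64                  /* illustrative size, not the driver's */

static ssize_t bounded_read(char *dst, size_t want)
{
        unsigned char readbuf[RDS_BUF_SZ];
        size_t i = want;               /* imagine this was computed from device state */

        memset(readbuf, 0xAA, sizeof(readbuf));

        if (i > sizeof(readbuf))       /* the added guard */
                return -1;             /* stands in for -EFAULT */

        memcpy(dst, readbuf, i);       /* stands in for copy_to_user() */
        return (ssize_t)i;
}

int main(void)
{
        char out[128];

        printf("copy 32   -> %zd\n", bounded_read(out, 32));
        printf("copy 4096 -> %zd\n", bounded_read(out, 4096));  /* rejected */
        return 0;
}
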
32221 diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32222 --- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32223 +++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32224 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32225
32226 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32227
32228 -static atomic_t cx18_instance = ATOMIC_INIT(0);
32229 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32230
32231 /* Parameter declarations */
32232 static int cardtype[CX18_MAX_CARDS];
32233 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32234 struct i2c_client c;
32235 u8 eedata[256];
32236
32237 + pax_track_stack();
32238 +
32239 memset(&c, 0, sizeof(c));
32240 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32241 c.adapter = &cx->i2c_adap[0];
32242 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32243 struct cx18 *cx;
32244
32245 /* FIXME - module parameter arrays constrain max instances */
32246 - i = atomic_inc_return(&cx18_instance) - 1;
32247 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32248 if (i >= CX18_MAX_CARDS) {
32249 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32250 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32251 diff -urNp linux-2.6.32.45/drivers/media/video/hexium_gemini.c linux-2.6.32.45/drivers/media/video/hexium_gemini.c
32252 --- linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32253 +++ linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32254 @@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32255 hexium->cur_input = 0;
32256
32257 saa7146_vv_init(dev, &vv_data);
32258 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32259 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32260 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32261 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32262 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32263 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32264 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32265 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32266 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32267 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32268 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32269 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32270 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32271 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32272 return -1;
32273 diff -urNp linux-2.6.32.45/drivers/media/video/hexium_orion.c linux-2.6.32.45/drivers/media/video/hexium_orion.c
32274 --- linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32275 +++ linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32276 @@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32277 DEB_EE((".\n"));
32278
32279 saa7146_vv_init(dev, &vv_data);
32280 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32281 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32282 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32283 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32284 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32285 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32286 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32287 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32288 return -1;
32289 diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32290 --- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32291 +++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32292 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32293 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32294
32295 /* ivtv instance counter */
32296 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
32297 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32298
32299 /* Parameter declarations */
32300 static int cardtype[IVTV_MAX_CARDS];
32301 diff -urNp linux-2.6.32.45/drivers/media/video/mxb.c linux-2.6.32.45/drivers/media/video/mxb.c
32302 --- linux-2.6.32.45/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32303 +++ linux-2.6.32.45/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32304 @@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32305 already did this in "mxb_vl42_probe" */
32306
32307 saa7146_vv_init(dev, &vv_data);
32308 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32309 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32310 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32311 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32312 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32313 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32314 - vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32315 - vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32316 - vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32317 - vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32318 - vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32319 - vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32320 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32321 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32322 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32323 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32324 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32325 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32326 + *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32327 + *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32328 + *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32329 + *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32330 + *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32331 + *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32332 #ifdef CONFIG_VIDEO_ADV_DEBUG
32333 - vv_data.ops.vidioc_g_register = vidioc_g_register;
32334 - vv_data.ops.vidioc_s_register = vidioc_s_register;
32335 + *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32336 + *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32337 #endif
32338 - vv_data.ops.vidioc_default = vidioc_default;
32339 + *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32340 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32341 ERR(("cannot register capture v4l2 device. skipping.\n"));
32342 return -1;
32343 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32344 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32345 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32346 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32347 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32348
32349 do_gettimeofday(&vb->ts);
32350 - vb->field_count = atomic_add_return(2, &fh->field_count);
32351 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32352 if (csr & csr_error) {
32353 vb->state = VIDEOBUF_ERROR;
32354 if (!atomic_read(&fh->cam->in_reset)) {
32355 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32356 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32357 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32358 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32359 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32360 struct videobuf_queue vbq;
32361 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32362 - atomic_t field_count; /* field counter for videobuf_buffer */
32363 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32364 /* accessing cam here doesn't need serialisation: it's constant */
32365 struct omap24xxcam_device *cam;
32366 };
32367 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32368 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32369 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32370 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32371 u8 *eeprom;
32372 struct tveeprom tvdata;
32373
32374 + pax_track_stack();
32375 +
32376 memset(&tvdata,0,sizeof(tvdata));
32377
32378 eeprom = pvr2_eeprom_fetch(hdw);
32379 diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32380 --- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32381 +++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32382 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32383 unsigned char localPAT[256];
32384 unsigned char localPMT[256];
32385
32386 + pax_track_stack();
32387 +
32388 /* Set video format - must be done first as it resets other settings */
32389 set_reg8(client, 0x41, h->video_format);
32390
32391 diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32392 --- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32393 +++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32394 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32395 wait_queue_head_t *q = 0;
32396 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32397
32398 + pax_track_stack();
32399 +
32400 /* While any outstand message on the bus exists... */
32401 do {
32402
32403 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32404 u8 tmp[512];
32405 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32406
32407 + pax_track_stack();
32408 +
32409 while (loop) {
32410
32411 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32412 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32413 --- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32414 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32415 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32416 static int __init ibmcam_init(void)
32417 {
32418 struct usbvideo_cb cbTbl;
32419 - memset(&cbTbl, 0, sizeof(cbTbl));
32420 - cbTbl.probe = ibmcam_probe;
32421 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
32422 - cbTbl.videoStart = ibmcam_video_start;
32423 - cbTbl.videoStop = ibmcam_video_stop;
32424 - cbTbl.processData = ibmcam_ProcessIsocData;
32425 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32426 - cbTbl.adjustPicture = ibmcam_adjust_picture;
32427 - cbTbl.getFPS = ibmcam_calculate_fps;
32428 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32429 + *(void **)&cbTbl.probe = ibmcam_probe;
32430 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32431 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
32432 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32433 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32434 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32435 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32436 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32437 return usbvideo_register(
32438 &cams,
32439 MAX_IBMCAM,
32440 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32441 --- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32442 +++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32443 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32444 int error;
32445
32446 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32447 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32448 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32449
32450 cam->input = input_dev = input_allocate_device();
32451 if (!input_dev) {
32452 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32453 struct usbvideo_cb cbTbl;
32454 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32455 DRIVER_DESC "\n");
32456 - memset(&cbTbl, 0, sizeof(cbTbl));
32457 - cbTbl.probe = konicawc_probe;
32458 - cbTbl.setupOnOpen = konicawc_setup_on_open;
32459 - cbTbl.processData = konicawc_process_isoc;
32460 - cbTbl.getFPS = konicawc_calculate_fps;
32461 - cbTbl.setVideoMode = konicawc_set_video_mode;
32462 - cbTbl.startDataPump = konicawc_start_data;
32463 - cbTbl.stopDataPump = konicawc_stop_data;
32464 - cbTbl.adjustPicture = konicawc_adjust_picture;
32465 - cbTbl.userFree = konicawc_free_uvd;
32466 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32467 + *(void **)&cbTbl.probe = konicawc_probe;
32468 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32469 + *(void **)&cbTbl.processData = konicawc_process_isoc;
32470 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32471 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32472 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
32473 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32474 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32475 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
32476 return usbvideo_register(
32477 &cams,
32478 MAX_CAMERAS,
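
[Editorial sketch, not part of the patch] The konicawc and quickcam_messenger hunks swap strncat() for strlcat() when building the input phys path: strncat's third argument limits the bytes appended, not the total size of the destination, so passing sizeof(dest) can still overflow it. A standalone sketch (strlcat is re-implemented here because glibc does not provide it; the buffer size and path are made up):

#include <stdio.h>
#include <string.h>

/* minimal strlcat: 'size' is the TOTAL size of dst, truncation is guaranteed */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);

        if (dlen >= size)
                return size + slen;            /* dst was not terminated within size */
        if (slen >= size - dlen)
                slen = size - dlen - 1;
        memcpy(dst + dlen, src, slen);
        dst[dlen + slen] = '\0';
        return dlen + strlen(src);             /* length it tried to create */
}

int main(void)
{
        char phys[24]  = "usb-0000:00:1d.7-6.2";   /* made-up path and size */
        char phys2[24] = "usb-0000:00:1d.7-6.2";

        /* safe: bounded by the total size of the destination */
        my_strlcat(phys, "/input0", sizeof(phys));
        printf("%s\n", phys);

        /* for comparison, the correct strncat bound limits the APPENDED bytes;
           strncat(dst, src, sizeof(dst)) would not */
        strncat(phys2, "/input0", sizeof(phys2) - strlen(phys2) - 1);
        printf("%s\n", phys2);
        return 0;
}
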
32479 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32480 --- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32481 +++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32482 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32483 int error;
32484
32485 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32486 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32487 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32488
32489 cam->input = input_dev = input_allocate_device();
32490 if (!input_dev) {
32491 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32492 --- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32493 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32494 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32495 {
32496 struct usbvideo_cb cbTbl;
32497 memset(&cbTbl, 0, sizeof(cbTbl));
32498 - cbTbl.probe = ultracam_probe;
32499 - cbTbl.setupOnOpen = ultracam_setup_on_open;
32500 - cbTbl.videoStart = ultracam_video_start;
32501 - cbTbl.videoStop = ultracam_video_stop;
32502 - cbTbl.processData = ultracam_ProcessIsocData;
32503 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32504 - cbTbl.adjustPicture = ultracam_adjust_picture;
32505 - cbTbl.getFPS = ultracam_calculate_fps;
32506 + *(void **)&cbTbl.probe = ultracam_probe;
32507 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32508 + *(void **)&cbTbl.videoStart = ultracam_video_start;
32509 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
32510 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32511 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32512 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32513 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32514 return usbvideo_register(
32515 &cams,
32516 MAX_CAMERAS,
32517 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32518 --- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32519 +++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32520 @@ -697,15 +697,15 @@ int usbvideo_register(
32521 __func__, cams, base_size, num_cams);
32522
32523 /* Copy callbacks, apply defaults for those that are not set */
32524 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32525 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32526 if (cams->cb.getFrame == NULL)
32527 - cams->cb.getFrame = usbvideo_GetFrame;
32528 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32529 if (cams->cb.disconnect == NULL)
32530 - cams->cb.disconnect = usbvideo_Disconnect;
32531 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32532 if (cams->cb.startDataPump == NULL)
32533 - cams->cb.startDataPump = usbvideo_StartDataPump;
32534 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32535 if (cams->cb.stopDataPump == NULL)
32536 - cams->cb.stopDataPump = usbvideo_StopDataPump;
32537 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32538
32539 cams->num_cameras = num_cams;
32540 cams->cam = (struct uvd *) &cams[1];
32541 diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32542 --- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32543 +++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32544 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32545 unsigned char rv, gv, bv;
32546 static unsigned char *Y, *U, *V;
32547
32548 + pax_track_stack();
32549 +
32550 frame = usbvision->curFrame;
32551 imageSize = frame->frmwidth * frame->frmheight;
32552 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32553 diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32554 --- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32555 +++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32556 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32557 EXPORT_SYMBOL_GPL(v4l2_device_register);
32558
32559 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32560 - atomic_t *instance)
32561 + atomic_unchecked_t *instance)
32562 {
32563 - int num = atomic_inc_return(instance) - 1;
32564 + int num = atomic_inc_return_unchecked(instance) - 1;
32565 int len = strlen(basename);
32566
32567 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32568 diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32569 --- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32570 +++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32571 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32572 {
32573 struct videobuf_queue q;
32574
32575 + pax_track_stack();
32576 +
32577 /* Required to make generic handler to call __videobuf_alloc */
32578 q.int_ops = &sg_ops;
32579
32580 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32581 --- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32582 +++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32583 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32584 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32585 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32586
32587 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32588 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32589 + NULL, NULL);
32590 +#else
32591 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32592 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32593 +#endif
32594 +
32595 /*
32596 * Rounding UP to nearest 4-kB boundary here...
32597 */
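
[Editorial sketch, not part of the patch] The mptbase.c hunk is a GRKERNSEC_HIDESYM change: when symbol hiding is enabled, the /proc output prints NULL in place of the real request-frame addresses, removing an easy kernel-pointer leak. The shape of the idea in a few lines (illustrative only; the macro name is invented):

#include <stdio.h>

#ifdef HIDESYM                                  /* stands in for CONFIG_GRKERNSEC_HIDESYM */
#define shown(p) ((void)(p), (void *)0)         /* redact the address */
#else
#define shown(p) ((void *)(p))
#endif

int main(void)
{
        static int req_frames[4];               /* placeholder for ioc->req_frames */

        printf("RequestFrames @ %p\n", shown(req_frames));
        return 0;
}
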
32598 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32599 --- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32600 +++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32601 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32602 return 0;
32603 }
32604
32605 +static inline void
32606 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32607 +{
32608 + if (phy_info->port_details) {
32609 + phy_info->port_details->rphy = rphy;
32610 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32611 + ioc->name, rphy));
32612 + }
32613 +
32614 + if (rphy) {
32615 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32616 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32617 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32618 + ioc->name, rphy, rphy->dev.release));
32619 + }
32620 +}
32621 +
32622 /* no mutex */
32623 static void
32624 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32625 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32626 return NULL;
32627 }
32628
32629 -static inline void
32630 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32631 -{
32632 - if (phy_info->port_details) {
32633 - phy_info->port_details->rphy = rphy;
32634 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32635 - ioc->name, rphy));
32636 - }
32637 -
32638 - if (rphy) {
32639 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32640 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32641 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32642 - ioc->name, rphy, rphy->dev.release));
32643 - }
32644 -}
32645 -
32646 static inline struct sas_port *
32647 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32648 {
32649 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32650 --- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32651 +++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32652 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32653
32654 h = shost_priv(SChost);
32655
32656 - if (h) {
32657 - if (h->info_kbuf == NULL)
32658 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32659 - return h->info_kbuf;
32660 - h->info_kbuf[0] = '\0';
32661 + if (!h)
32662 + return NULL;
32663
32664 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32665 - h->info_kbuf[size-1] = '\0';
32666 - }
32667 + if (h->info_kbuf == NULL)
32668 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32669 + return h->info_kbuf;
32670 + h->info_kbuf[0] = '\0';
32671 +
32672 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32673 + h->info_kbuf[size-1] = '\0';
32674
32675 return h->info_kbuf;
32676 }
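
[Editorial sketch, not part of the patch] The mptscsih_info() hunk above does not change behaviour; it converts the nested if (h) { ... } body into a guard clause that returns early when the host private data is missing, leaving the success path at one indentation level. Generic shape of that refactor (struct and buffer contents are placeholders):

#include <stdio.h>
#include <stdlib.h>

struct host { char *info_kbuf; };       /* placeholder for the MPT host private data */

static const char *host_info(struct host *h)
{
        if (!h)
                return NULL;            /* exceptional case handled first */

        if (h->info_kbuf == NULL)
                if ((h->info_kbuf = malloc(0x1000)) == NULL)
                        return h->info_kbuf;    /* i.e. NULL */
        h->info_kbuf[0] = '\0';

        snprintf(h->info_kbuf, 0x1000, "ioc summary would go here");
        return h->info_kbuf;
}

int main(void)
{
        struct host h = { NULL };

        printf("%s\n", host_info(&h));
        printf("%s\n", host_info(NULL) ? "unexpected" : "NULL for a missing host");
        free(h.info_kbuf);
        return 0;
}
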
32677 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32678 --- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32679 +++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32680 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32681 struct i2o_message *msg;
32682 unsigned int iop;
32683
32684 + pax_track_stack();
32685 +
32686 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32687 return -EFAULT;
32688
32689 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32690 --- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32691 +++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32692 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32693 "Array Controller Device"
32694 };
32695
32696 -static char *chtostr(u8 * chars, int n)
32697 -{
32698 - char tmp[256];
32699 - tmp[0] = 0;
32700 - return strncat(tmp, (char *)chars, n);
32701 -}
32702 -
32703 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32704 char *group)
32705 {
32706 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32707
32708 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32709 seq_printf(seq, "%-#8x", ddm_table.module_id);
32710 - seq_printf(seq, "%-29s",
32711 - chtostr(ddm_table.module_name_version, 28));
32712 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32713 seq_printf(seq, "%9d ", ddm_table.data_size);
32714 seq_printf(seq, "%8d", ddm_table.code_size);
32715
32716 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32717
32718 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32719 seq_printf(seq, "%-#8x", dst->module_id);
32720 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32721 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32722 + seq_printf(seq, "%-.28s", dst->module_name_version);
32723 + seq_printf(seq, "%-.8s", dst->date);
32724 seq_printf(seq, "%8d ", dst->module_size);
32725 seq_printf(seq, "%8d ", dst->mpb_size);
32726 seq_printf(seq, "0x%04x", dst->module_flags);
32727 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32728 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32729 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32730 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32731 - seq_printf(seq, "Vendor info : %s\n",
32732 - chtostr((u8 *) (work32 + 2), 16));
32733 - seq_printf(seq, "Product info : %s\n",
32734 - chtostr((u8 *) (work32 + 6), 16));
32735 - seq_printf(seq, "Description : %s\n",
32736 - chtostr((u8 *) (work32 + 10), 16));
32737 - seq_printf(seq, "Product rev. : %s\n",
32738 - chtostr((u8 *) (work32 + 14), 8));
32739 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32740 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32741 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32742 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32743
32744 seq_printf(seq, "Serial number : ");
32745 print_serial_number(seq, (u8 *) (work32 + 16),
32746 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32747 }
32748
32749 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32750 - seq_printf(seq, "Module name : %s\n",
32751 - chtostr(result.module_name, 24));
32752 - seq_printf(seq, "Module revision : %s\n",
32753 - chtostr(result.module_rev, 8));
32754 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32755 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32756
32757 seq_printf(seq, "Serial number : ");
32758 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32759 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32760 return 0;
32761 }
32762
32763 - seq_printf(seq, "Device name : %s\n",
32764 - chtostr(result.device_name, 64));
32765 - seq_printf(seq, "Service name : %s\n",
32766 - chtostr(result.service_name, 64));
32767 - seq_printf(seq, "Physical name : %s\n",
32768 - chtostr(result.physical_location, 64));
32769 - seq_printf(seq, "Instance number : %s\n",
32770 - chtostr(result.instance_number, 4));
32771 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32772 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32773 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32774 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32775
32776 return 0;
32777 }
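
[Editorial sketch, not part of the patch] The i2o_proc.c hunk drops chtostr() — which, besides the extra copy, returned a pointer into its own stack buffer — and prints the fixed-width, possibly unterminated fields directly with a printf precision such as %.28s, which stops after N bytes or at the first NUL. A minimal demonstration (field contents are invented):

#include <stdio.h>

int main(void)
{
        /* an 8-byte field with no terminating NUL, as the I2O tables store it */
        char module_rev[8] = { '1', '.', '0', '.', '3', ' ', ' ', ' ' };

        /* the precision stops printf after 8 bytes (or at an earlier NUL),
           so no temporary copy and no dangling chtostr() pointer is needed */
        printf("Module revision : %.8s\n", module_rev);
        return 0;
}
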
32778 diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32779 --- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32780 +++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32781 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32782
32783 spin_lock_irqsave(&c->context_list_lock, flags);
32784
32785 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32786 - atomic_inc(&c->context_list_counter);
32787 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32788 + atomic_inc_unchecked(&c->context_list_counter);
32789
32790 - entry->context = atomic_read(&c->context_list_counter);
32791 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32792
32793 list_add(&entry->list, &c->context_list);
32794
32795 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32796
32797 #if BITS_PER_LONG == 64
32798 spin_lock_init(&c->context_list_lock);
32799 - atomic_set(&c->context_list_counter, 0);
32800 + atomic_set_unchecked(&c->context_list_counter, 0);
32801 INIT_LIST_HEAD(&c->context_list);
32802 #endif
32803
32804 diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32805 --- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32806 +++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32807 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32808 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32809 int ret;
32810
32811 + pax_track_stack();
32812 +
32813 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32814 return -EINVAL;
32815
32816 diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32817 --- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32818 +++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32819 @@ -118,7 +118,7 @@
32820 } while (0)
32821 #define MAX_CONFIG_LEN 40
32822
32823 -static struct kgdb_io kgdbts_io_ops;
32824 +static const struct kgdb_io kgdbts_io_ops;
32825 static char get_buf[BUFMAX];
32826 static int get_buf_cnt;
32827 static char put_buf[BUFMAX];
32828 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32829 module_put(THIS_MODULE);
32830 }
32831
32832 -static struct kgdb_io kgdbts_io_ops = {
32833 +static const struct kgdb_io kgdbts_io_ops = {
32834 .name = "kgdbts",
32835 .read_char = kgdbts_get_char,
32836 .write_char = kgdbts_put_char,
32837 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32838 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32839 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32840 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32841
32842 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32843 {
32844 - atomic_long_inc(&mcs_op_statistics[op].count);
32845 - atomic_long_add(clks, &mcs_op_statistics[op].total);
32846 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32847 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32848 if (mcs_op_statistics[op].max < clks)
32849 mcs_op_statistics[op].max = clks;
32850 }
32851 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32852 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32853 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32854 @@ -32,9 +32,9 @@
32855
32856 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32857
32858 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32859 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32860 {
32861 - unsigned long val = atomic_long_read(v);
32862 + unsigned long val = atomic_long_read_unchecked(v);
32863
32864 if (val)
32865 seq_printf(s, "%16lu %s\n", val, id);
32866 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32867 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32868
32869 for (op = 0; op < mcsop_last; op++) {
32870 - count = atomic_long_read(&mcs_op_statistics[op].count);
32871 - total = atomic_long_read(&mcs_op_statistics[op].total);
32872 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32873 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32874 max = mcs_op_statistics[op].max;
32875 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32876 count ? total / count : 0, max);
32877 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32878 --- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32879 +++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32880 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32881 * GRU statistics.
32882 */
32883 struct gru_stats_s {
32884 - atomic_long_t vdata_alloc;
32885 - atomic_long_t vdata_free;
32886 - atomic_long_t gts_alloc;
32887 - atomic_long_t gts_free;
32888 - atomic_long_t vdata_double_alloc;
32889 - atomic_long_t gts_double_allocate;
32890 - atomic_long_t assign_context;
32891 - atomic_long_t assign_context_failed;
32892 - atomic_long_t free_context;
32893 - atomic_long_t load_user_context;
32894 - atomic_long_t load_kernel_context;
32895 - atomic_long_t lock_kernel_context;
32896 - atomic_long_t unlock_kernel_context;
32897 - atomic_long_t steal_user_context;
32898 - atomic_long_t steal_kernel_context;
32899 - atomic_long_t steal_context_failed;
32900 - atomic_long_t nopfn;
32901 - atomic_long_t break_cow;
32902 - atomic_long_t asid_new;
32903 - atomic_long_t asid_next;
32904 - atomic_long_t asid_wrap;
32905 - atomic_long_t asid_reuse;
32906 - atomic_long_t intr;
32907 - atomic_long_t intr_mm_lock_failed;
32908 - atomic_long_t call_os;
32909 - atomic_long_t call_os_offnode_reference;
32910 - atomic_long_t call_os_check_for_bug;
32911 - atomic_long_t call_os_wait_queue;
32912 - atomic_long_t user_flush_tlb;
32913 - atomic_long_t user_unload_context;
32914 - atomic_long_t user_exception;
32915 - atomic_long_t set_context_option;
32916 - atomic_long_t migrate_check;
32917 - atomic_long_t migrated_retarget;
32918 - atomic_long_t migrated_unload;
32919 - atomic_long_t migrated_unload_delay;
32920 - atomic_long_t migrated_nopfn_retarget;
32921 - atomic_long_t migrated_nopfn_unload;
32922 - atomic_long_t tlb_dropin;
32923 - atomic_long_t tlb_dropin_fail_no_asid;
32924 - atomic_long_t tlb_dropin_fail_upm;
32925 - atomic_long_t tlb_dropin_fail_invalid;
32926 - atomic_long_t tlb_dropin_fail_range_active;
32927 - atomic_long_t tlb_dropin_fail_idle;
32928 - atomic_long_t tlb_dropin_fail_fmm;
32929 - atomic_long_t tlb_dropin_fail_no_exception;
32930 - atomic_long_t tlb_dropin_fail_no_exception_war;
32931 - atomic_long_t tfh_stale_on_fault;
32932 - atomic_long_t mmu_invalidate_range;
32933 - atomic_long_t mmu_invalidate_page;
32934 - atomic_long_t mmu_clear_flush_young;
32935 - atomic_long_t flush_tlb;
32936 - atomic_long_t flush_tlb_gru;
32937 - atomic_long_t flush_tlb_gru_tgh;
32938 - atomic_long_t flush_tlb_gru_zero_asid;
32939 -
32940 - atomic_long_t copy_gpa;
32941 -
32942 - atomic_long_t mesq_receive;
32943 - atomic_long_t mesq_receive_none;
32944 - atomic_long_t mesq_send;
32945 - atomic_long_t mesq_send_failed;
32946 - atomic_long_t mesq_noop;
32947 - atomic_long_t mesq_send_unexpected_error;
32948 - atomic_long_t mesq_send_lb_overflow;
32949 - atomic_long_t mesq_send_qlimit_reached;
32950 - atomic_long_t mesq_send_amo_nacked;
32951 - atomic_long_t mesq_send_put_nacked;
32952 - atomic_long_t mesq_qf_not_full;
32953 - atomic_long_t mesq_qf_locked;
32954 - atomic_long_t mesq_qf_noop_not_full;
32955 - atomic_long_t mesq_qf_switch_head_failed;
32956 - atomic_long_t mesq_qf_unexpected_error;
32957 - atomic_long_t mesq_noop_unexpected_error;
32958 - atomic_long_t mesq_noop_lb_overflow;
32959 - atomic_long_t mesq_noop_qlimit_reached;
32960 - atomic_long_t mesq_noop_amo_nacked;
32961 - atomic_long_t mesq_noop_put_nacked;
32962 + atomic_long_unchecked_t vdata_alloc;
32963 + atomic_long_unchecked_t vdata_free;
32964 + atomic_long_unchecked_t gts_alloc;
32965 + atomic_long_unchecked_t gts_free;
32966 + atomic_long_unchecked_t vdata_double_alloc;
32967 + atomic_long_unchecked_t gts_double_allocate;
32968 + atomic_long_unchecked_t assign_context;
32969 + atomic_long_unchecked_t assign_context_failed;
32970 + atomic_long_unchecked_t free_context;
32971 + atomic_long_unchecked_t load_user_context;
32972 + atomic_long_unchecked_t load_kernel_context;
32973 + atomic_long_unchecked_t lock_kernel_context;
32974 + atomic_long_unchecked_t unlock_kernel_context;
32975 + atomic_long_unchecked_t steal_user_context;
32976 + atomic_long_unchecked_t steal_kernel_context;
32977 + atomic_long_unchecked_t steal_context_failed;
32978 + atomic_long_unchecked_t nopfn;
32979 + atomic_long_unchecked_t break_cow;
32980 + atomic_long_unchecked_t asid_new;
32981 + atomic_long_unchecked_t asid_next;
32982 + atomic_long_unchecked_t asid_wrap;
32983 + atomic_long_unchecked_t asid_reuse;
32984 + atomic_long_unchecked_t intr;
32985 + atomic_long_unchecked_t intr_mm_lock_failed;
32986 + atomic_long_unchecked_t call_os;
32987 + atomic_long_unchecked_t call_os_offnode_reference;
32988 + atomic_long_unchecked_t call_os_check_for_bug;
32989 + atomic_long_unchecked_t call_os_wait_queue;
32990 + atomic_long_unchecked_t user_flush_tlb;
32991 + atomic_long_unchecked_t user_unload_context;
32992 + atomic_long_unchecked_t user_exception;
32993 + atomic_long_unchecked_t set_context_option;
32994 + atomic_long_unchecked_t migrate_check;
32995 + atomic_long_unchecked_t migrated_retarget;
32996 + atomic_long_unchecked_t migrated_unload;
32997 + atomic_long_unchecked_t migrated_unload_delay;
32998 + atomic_long_unchecked_t migrated_nopfn_retarget;
32999 + atomic_long_unchecked_t migrated_nopfn_unload;
33000 + atomic_long_unchecked_t tlb_dropin;
33001 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33002 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33003 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33004 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33005 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33006 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33007 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33008 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33009 + atomic_long_unchecked_t tfh_stale_on_fault;
33010 + atomic_long_unchecked_t mmu_invalidate_range;
33011 + atomic_long_unchecked_t mmu_invalidate_page;
33012 + atomic_long_unchecked_t mmu_clear_flush_young;
33013 + atomic_long_unchecked_t flush_tlb;
33014 + atomic_long_unchecked_t flush_tlb_gru;
33015 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33016 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33017 +
33018 + atomic_long_unchecked_t copy_gpa;
33019 +
33020 + atomic_long_unchecked_t mesq_receive;
33021 + atomic_long_unchecked_t mesq_receive_none;
33022 + atomic_long_unchecked_t mesq_send;
33023 + atomic_long_unchecked_t mesq_send_failed;
33024 + atomic_long_unchecked_t mesq_noop;
33025 + atomic_long_unchecked_t mesq_send_unexpected_error;
33026 + atomic_long_unchecked_t mesq_send_lb_overflow;
33027 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33028 + atomic_long_unchecked_t mesq_send_amo_nacked;
33029 + atomic_long_unchecked_t mesq_send_put_nacked;
33030 + atomic_long_unchecked_t mesq_qf_not_full;
33031 + atomic_long_unchecked_t mesq_qf_locked;
33032 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33033 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33034 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33035 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33036 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33037 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33038 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33039 + atomic_long_unchecked_t mesq_noop_put_nacked;
33040
33041 };
33042
33043 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33044 cchop_deallocate, tghop_invalidate, mcsop_last};
33045
33046 struct mcs_op_statistic {
33047 - atomic_long_t count;
33048 - atomic_long_t total;
33049 + atomic_long_unchecked_t count;
33050 + atomic_long_unchecked_t total;
33051 unsigned long max;
33052 };
33053
33054 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33055
33056 #define STAT(id) do { \
33057 if (gru_options & OPT_STATS) \
33058 - atomic_long_inc(&gru_stats.id); \
33059 + atomic_long_inc_unchecked(&gru_stats.id); \
33060 } while (0)
33061
33062 #ifdef CONFIG_SGI_GRU_DEBUG
33063 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33064 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33065 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33066 @@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33067 /* found in xpc_main.c */
33068 extern struct device *xpc_part;
33069 extern struct device *xpc_chan;
33070 -extern struct xpc_arch_operations xpc_arch_ops;
33071 +extern const struct xpc_arch_operations xpc_arch_ops;
33072 extern int xpc_disengage_timelimit;
33073 extern int xpc_disengage_timedout;
33074 extern int xpc_activate_IRQ_rcvd;
33075 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33076 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33077 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33078 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33079 .notifier_call = xpc_system_die,
33080 };
33081
33082 -struct xpc_arch_operations xpc_arch_ops;
33083 +const struct xpc_arch_operations xpc_arch_ops;
33084
33085 /*
33086 * Timer function to enforce the timelimit on the partition disengage.
33087 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33088 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33089 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33090 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33091 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33092 }
33093
33094 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33095 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33096 .setup_partitions = xpc_setup_partitions_sn2,
33097 .teardown_partitions = xpc_teardown_partitions_sn2,
33098 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33099 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33100 int ret;
33101 size_t buf_size;
33102
33103 - xpc_arch_ops = xpc_arch_ops_sn2;
33104 + pax_open_kernel();
33105 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33106 + pax_close_kernel();
33107
33108 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33109 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33110 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33111 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33112 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33113 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33114 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33115 }
33116
33117 -static struct xpc_arch_operations xpc_arch_ops_uv = {
33118 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
33119 .setup_partitions = xpc_setup_partitions_uv,
33120 .teardown_partitions = xpc_teardown_partitions_uv,
33121 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33122 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33123 int
33124 xpc_init_uv(void)
33125 {
33126 - xpc_arch_ops = xpc_arch_ops_uv;
33127 + pax_open_kernel();
33128 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33129 + pax_close_kernel();
33130
33131 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33132 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33133 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33134 --- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33135 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33136 @@ -289,7 +289,7 @@ struct xpc_interface {
33137 xpc_notify_func, void *);
33138 void (*received) (short, int, void *);
33139 enum xp_retval (*partid_to_nasids) (short, void *);
33140 -};
33141 +} __no_const;
33142
33143 extern struct xpc_interface xpc_interface;
33144
33145 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33146 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33147 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33148 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33149 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33150 unsigned long timeo = jiffies + HZ;
33151
33152 + pax_track_stack();
33153 +
33154 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33155 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33156 goto sleep;
33157 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33158 unsigned long initial_adr;
33159 int initial_len = len;
33160
33161 + pax_track_stack();
33162 +
33163 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33164 adr += chip->start;
33165 initial_adr = adr;
33166 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33167 int retries = 3;
33168 int ret;
33169
33170 + pax_track_stack();
33171 +
33172 adr += chip->start;
33173
33174 retry:
33175 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33176 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33177 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33178 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33179 unsigned long cmd_addr;
33180 struct cfi_private *cfi = map->fldrv_priv;
33181
33182 + pax_track_stack();
33183 +
33184 adr += chip->start;
33185
33186 /* Ensure cmd read/writes are aligned. */
33187 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33188 DECLARE_WAITQUEUE(wait, current);
33189 int wbufsize, z;
33190
33191 + pax_track_stack();
33192 +
33193 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33194 if (adr & (map_bankwidth(map)-1))
33195 return -EINVAL;
33196 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33197 DECLARE_WAITQUEUE(wait, current);
33198 int ret = 0;
33199
33200 + pax_track_stack();
33201 +
33202 adr += chip->start;
33203
33204 /* Let's determine this according to the interleave only once */
33205 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33206 unsigned long timeo = jiffies + HZ;
33207 DECLARE_WAITQUEUE(wait, current);
33208
33209 + pax_track_stack();
33210 +
33211 adr += chip->start;
33212
33213 /* Let's determine this according to the interleave only once */
33214 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33215 unsigned long timeo = jiffies + HZ;
33216 DECLARE_WAITQUEUE(wait, current);
33217
33218 + pax_track_stack();
33219 +
33220 adr += chip->start;
33221
33222 /* Let's determine this according to the interleave only once */
33223 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33224 --- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33225 +++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33226 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33227
33228 /* The ECC will not be calculated correctly if less than 512 is written */
33229 /* DBB-
33230 - if (len != 0x200 && eccbuf)
33231 + if (len != 0x200)
33232 printk(KERN_WARNING
33233 "ECC needs a full sector write (adr: %lx size %lx)\n",
33234 (long) to, (long) len);
33235 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33236 --- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33237 +++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33238 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33239 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33240
33241 /* Don't allow read past end of device */
33242 - if (from >= this->totlen)
33243 + if (from >= this->totlen || !len)
33244 return -EINVAL;
33245
33246 /* Don't allow a single read to cross a 512-byte block boundary */
33247 diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33248 --- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33249 +++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33250 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33251 loff_t offset;
33252 uint16_t srcunitswap = cpu_to_le16(srcunit);
33253
33254 + pax_track_stack();
33255 +
33256 eun = &part->EUNInfo[srcunit];
33257 xfer = &part->XferInfo[xferunit];
33258 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33259 diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33260 --- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33261 +++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33262 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33263 struct inftl_oob oob;
33264 size_t retlen;
33265
33266 + pax_track_stack();
33267 +
33268 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33269 "pending=%d)\n", inftl, thisVUC, pendingblock);
33270
33271 diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33272 --- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33273 +++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33274 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33275 struct INFTLPartition *ip;
33276 size_t retlen;
33277
33278 + pax_track_stack();
33279 +
33280 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33281
33282 /*
33283 diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33284 --- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33285 +++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33286 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33287 {
33288 map_word pfow_val[4];
33289
33290 + pax_track_stack();
33291 +
33292 /* Check identification string */
33293 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33294 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33295 diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33296 --- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33297 +++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33298 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33299 u_long size;
33300 struct mtd_info_user info;
33301
33302 + pax_track_stack();
33303 +
33304 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33305
33306 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33307 diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33308 --- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33309 +++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33310 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33311 int inplace = 1;
33312 size_t retlen;
33313
33314 + pax_track_stack();
33315 +
33316 memset(BlockMap, 0xff, sizeof(BlockMap));
33317 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33318
33319 diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33320 --- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33321 +++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33322 @@ -23,6 +23,7 @@
33323 #include <asm/errno.h>
33324 #include <linux/delay.h>
33325 #include <linux/slab.h>
33326 +#include <linux/sched.h>
33327 #include <linux/mtd/mtd.h>
33328 #include <linux/mtd/nand.h>
33329 #include <linux/mtd/nftl.h>
33330 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33331 struct mtd_info *mtd = nftl->mbd.mtd;
33332 unsigned int i;
33333
33334 + pax_track_stack();
33335 +
33336 /* Assume logical EraseSize == physical erasesize for starting the scan.
33337 We'll sort it out later if we find a MediaHeader which says otherwise */
33338 /* Actually, we won't. The new DiskOnChip driver has already scanned
33339 diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33340 --- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33341 +++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33342 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33343 static int __init bytes_str_to_int(const char *str)
33344 {
33345 char *endp;
33346 - unsigned long result;
33347 + unsigned long result, scale = 1;
33348
33349 result = simple_strtoul(str, &endp, 0);
33350 if (str == endp || result >= INT_MAX) {
33351 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33352
33353 switch (*endp) {
33354 case 'G':
33355 - result *= 1024;
33356 + scale *= 1024;
33357 case 'M':
33358 - result *= 1024;
33359 + scale *= 1024;
33360 case 'K':
33361 - result *= 1024;
33362 + scale *= 1024;
33363 if (endp[1] == 'i' && endp[2] == 'B')
33364 endp += 2;
33365 case '\0':
33366 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33367 return -EINVAL;
33368 }
33369
33370 - return result;
33371 + if ((intoverflow_t)result*scale >= INT_MAX) {
33372 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33373 + str);
33374 + return -EINVAL;
33375 + }
33376 +
33377 + return result*scale;
33378 }
33379
33380 /**
33381 diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33382 --- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33383 +++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33384 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33385 int rc = 0;
33386 u32 magic, csum;
33387
33388 + pax_track_stack();
33389 +
33390 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33391 goto test_nvram_done;
33392
33393 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33394 --- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33395 +++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33396 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33397 */
33398 struct l2t_skb_cb {
33399 arp_failure_handler_func arp_failure_handler;
33400 -};
33401 +} __no_const;
33402
33403 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33404
33405 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33406 --- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33407 +++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33408 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33409 int i, addr, ret;
33410 struct t3_vpd vpd;
33411
33412 + pax_track_stack();
33413 +
33414 /*
33415 * Card information is normally at VPD_BASE but some early cards had
33416 * it at 0.
33417 diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33418 --- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33419 +++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33420 @@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33421 /* check for link */
33422 switch (hw->phy.media_type) {
33423 case e1000_media_type_copper:
33424 - func->setup_physical_interface = e1000_setup_copper_link_82571;
33425 - func->check_for_link = e1000e_check_for_copper_link;
33426 - func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33427 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33428 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33429 + *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33430 break;
33431 case e1000_media_type_fiber:
33432 - func->setup_physical_interface =
33433 + *(void **)&func->setup_physical_interface =
33434 e1000_setup_fiber_serdes_link_82571;
33435 - func->check_for_link = e1000e_check_for_fiber_link;
33436 - func->get_link_up_info =
33437 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33438 + *(void **)&func->get_link_up_info =
33439 e1000e_get_speed_and_duplex_fiber_serdes;
33440 break;
33441 case e1000_media_type_internal_serdes:
33442 - func->setup_physical_interface =
33443 + *(void **)&func->setup_physical_interface =
33444 e1000_setup_fiber_serdes_link_82571;
33445 - func->check_for_link = e1000_check_for_serdes_link_82571;
33446 - func->get_link_up_info =
33447 + *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33448 + *(void **)&func->get_link_up_info =
33449 e1000e_get_speed_and_duplex_fiber_serdes;
33450 break;
33451 default:
33452 @@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33453 switch (hw->mac.type) {
33454 case e1000_82574:
33455 case e1000_82583:
33456 - func->check_mng_mode = e1000_check_mng_mode_82574;
33457 - func->led_on = e1000_led_on_82574;
33458 + *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33459 + *(void **)&func->led_on = e1000_led_on_82574;
33460 break;
33461 default:
33462 - func->check_mng_mode = e1000e_check_mng_mode_generic;
33463 - func->led_on = e1000e_led_on_generic;
33464 + *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33465 + *(void **)&func->led_on = e1000e_led_on_generic;
33466 break;
33467 }
33468
33469 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33470 temp = er32(ICRXDMTC);
33471 }
33472
33473 -static struct e1000_mac_operations e82571_mac_ops = {
33474 +static const struct e1000_mac_operations e82571_mac_ops = {
33475 /* .check_mng_mode: mac type dependent */
33476 /* .check_for_link: media type dependent */
33477 .id_led_init = e1000e_id_led_init,
33478 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33479 .setup_led = e1000e_setup_led_generic,
33480 };
33481
33482 -static struct e1000_phy_operations e82_phy_ops_igp = {
33483 +static const struct e1000_phy_operations e82_phy_ops_igp = {
33484 .acquire_phy = e1000_get_hw_semaphore_82571,
33485 .check_reset_block = e1000e_check_reset_block_generic,
33486 .commit_phy = NULL,
33487 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33488 .cfg_on_link_up = NULL,
33489 };
33490
33491 -static struct e1000_phy_operations e82_phy_ops_m88 = {
33492 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
33493 .acquire_phy = e1000_get_hw_semaphore_82571,
33494 .check_reset_block = e1000e_check_reset_block_generic,
33495 .commit_phy = e1000e_phy_sw_reset,
33496 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33497 .cfg_on_link_up = NULL,
33498 };
33499
33500 -static struct e1000_phy_operations e82_phy_ops_bm = {
33501 +static const struct e1000_phy_operations e82_phy_ops_bm = {
33502 .acquire_phy = e1000_get_hw_semaphore_82571,
33503 .check_reset_block = e1000e_check_reset_block_generic,
33504 .commit_phy = e1000e_phy_sw_reset,
33505 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33506 .cfg_on_link_up = NULL,
33507 };
33508
33509 -static struct e1000_nvm_operations e82571_nvm_ops = {
33510 +static const struct e1000_nvm_operations e82571_nvm_ops = {
33511 .acquire_nvm = e1000_acquire_nvm_82571,
33512 .read_nvm = e1000e_read_nvm_eerd,
33513 .release_nvm = e1000_release_nvm_82571,
33514 diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33515 --- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33516 +++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33517 @@ -375,9 +375,9 @@ struct e1000_info {
33518 u32 pba;
33519 u32 max_hw_frame_size;
33520 s32 (*get_variants)(struct e1000_adapter *);
33521 - struct e1000_mac_operations *mac_ops;
33522 - struct e1000_phy_operations *phy_ops;
33523 - struct e1000_nvm_operations *nvm_ops;
33524 + const struct e1000_mac_operations *mac_ops;
33525 + const struct e1000_phy_operations *phy_ops;
33526 + const struct e1000_nvm_operations *nvm_ops;
33527 };
33528
33529 /* hardware capability, feature, and workaround flags */
33530 diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33531 --- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33532 +++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
33533 @@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
33534 /* check for link */
33535 switch (hw->phy.media_type) {
33536 case e1000_media_type_copper:
33537 - func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33538 - func->check_for_link = e1000e_check_for_copper_link;
33539 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33540 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33541 break;
33542 case e1000_media_type_fiber:
33543 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33544 - func->check_for_link = e1000e_check_for_fiber_link;
33545 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33546 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33547 break;
33548 case e1000_media_type_internal_serdes:
33549 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33550 - func->check_for_link = e1000e_check_for_serdes_link;
33551 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33552 + *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
33553 break;
33554 default:
33555 return -E1000_ERR_CONFIG;
33556 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33557 temp = er32(ICRXDMTC);
33558 }
33559
33560 -static struct e1000_mac_operations es2_mac_ops = {
33561 +static const struct e1000_mac_operations es2_mac_ops = {
33562 .id_led_init = e1000e_id_led_init,
33563 .check_mng_mode = e1000e_check_mng_mode_generic,
33564 /* check_for_link dependent on media type */
33565 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33566 .setup_led = e1000e_setup_led_generic,
33567 };
33568
33569 -static struct e1000_phy_operations es2_phy_ops = {
33570 +static const struct e1000_phy_operations es2_phy_ops = {
33571 .acquire_phy = e1000_acquire_phy_80003es2lan,
33572 .check_reset_block = e1000e_check_reset_block_generic,
33573 .commit_phy = e1000e_phy_sw_reset,
33574 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33575 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33576 };
33577
33578 -static struct e1000_nvm_operations es2_nvm_ops = {
33579 +static const struct e1000_nvm_operations es2_nvm_ops = {
33580 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33581 .read_nvm = e1000e_read_nvm_eerd,
33582 .release_nvm = e1000_release_nvm_80003es2lan,
33583 diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33584 --- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33585 +++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
33586 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
33587
33588 /* Function pointers for the PHY. */
33589 struct e1000_phy_operations {
33590 - s32 (*acquire_phy)(struct e1000_hw *);
33591 - s32 (*check_polarity)(struct e1000_hw *);
33592 - s32 (*check_reset_block)(struct e1000_hw *);
33593 - s32 (*commit_phy)(struct e1000_hw *);
33594 - s32 (*force_speed_duplex)(struct e1000_hw *);
33595 - s32 (*get_cfg_done)(struct e1000_hw *hw);
33596 - s32 (*get_cable_length)(struct e1000_hw *);
33597 - s32 (*get_phy_info)(struct e1000_hw *);
33598 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
33599 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33600 - void (*release_phy)(struct e1000_hw *);
33601 - s32 (*reset_phy)(struct e1000_hw *);
33602 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
33603 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33604 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
33605 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33606 - s32 (*cfg_on_link_up)(struct e1000_hw *);
33607 + s32 (* acquire_phy)(struct e1000_hw *);
33608 + s32 (* check_polarity)(struct e1000_hw *);
33609 + s32 (* check_reset_block)(struct e1000_hw *);
33610 + s32 (* commit_phy)(struct e1000_hw *);
33611 + s32 (* force_speed_duplex)(struct e1000_hw *);
33612 + s32 (* get_cfg_done)(struct e1000_hw *hw);
33613 + s32 (* get_cable_length)(struct e1000_hw *);
33614 + s32 (* get_phy_info)(struct e1000_hw *);
33615 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
33616 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33617 + void (* release_phy)(struct e1000_hw *);
33618 + s32 (* reset_phy)(struct e1000_hw *);
33619 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
33620 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
33621 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
33622 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33623 + s32 (* cfg_on_link_up)(struct e1000_hw *);
33624 };
33625
33626 /* Function pointers for the NVM. */
33627 struct e1000_nvm_operations {
33628 - s32 (*acquire_nvm)(struct e1000_hw *);
33629 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33630 - void (*release_nvm)(struct e1000_hw *);
33631 - s32 (*update_nvm)(struct e1000_hw *);
33632 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
33633 - s32 (*validate_nvm)(struct e1000_hw *);
33634 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33635 + s32 (* const acquire_nvm)(struct e1000_hw *);
33636 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33637 + void (* const release_nvm)(struct e1000_hw *);
33638 + s32 (* const update_nvm)(struct e1000_hw *);
33639 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
33640 + s32 (* const validate_nvm)(struct e1000_hw *);
33641 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33642 };
33643
33644 struct e1000_mac_info {
33645 diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33646 --- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33647 +++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
33648 @@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
33649 phy->addr = 1;
33650 phy->reset_delay_us = 100;
33651
33652 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33653 - phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33654 - phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33655 - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33656 - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33657 - phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33658 - phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33659 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33660 + *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33661 + *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33662 + *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33663 + *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33664 + *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33665 + *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33666 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33667
33668 /*
33669 @@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
33670 phy->type = e1000e_get_phy_type_from_id(phy->id);
33671
33672 if (phy->type == e1000_phy_82577) {
33673 - phy->ops.check_polarity = e1000_check_polarity_82577;
33674 - phy->ops.force_speed_duplex =
33675 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
33676 + *(void **)&phy->ops.force_speed_duplex =
33677 e1000_phy_force_speed_duplex_82577;
33678 - phy->ops.get_cable_length = e1000_get_cable_length_82577;
33679 - phy->ops.get_phy_info = e1000_get_phy_info_82577;
33680 - phy->ops.commit_phy = e1000e_phy_sw_reset;
33681 + *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
33682 + *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
33683 + *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
33684 }
33685
33686 out:
33687 @@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
33688 */
33689 ret_val = e1000e_determine_phy_address(hw);
33690 if (ret_val) {
33691 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33692 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33693 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33694 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33695 ret_val = e1000e_determine_phy_address(hw);
33696 if (ret_val)
33697 return ret_val;
33698 @@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
33699 case IGP03E1000_E_PHY_ID:
33700 phy->type = e1000_phy_igp_3;
33701 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33702 - phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33703 - phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33704 + *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33705 + *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33706 break;
33707 case IFE_E_PHY_ID:
33708 case IFE_PLUS_E_PHY_ID:
33709 @@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
33710 case BME1000_E_PHY_ID:
33711 phy->type = e1000_phy_bm;
33712 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33713 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33714 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33715 - hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33716 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33717 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33718 + *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33719 break;
33720 default:
33721 return -E1000_ERR_PHY;
33722 break;
33723 }
33724
33725 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33726 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33727
33728 return 0;
33729 }
33730 @@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
33731 case e1000_ich9lan:
33732 case e1000_ich10lan:
33733 /* ID LED init */
33734 - mac->ops.id_led_init = e1000e_id_led_init;
33735 + *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
33736 /* setup LED */
33737 - mac->ops.setup_led = e1000e_setup_led_generic;
33738 + *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
33739 /* cleanup LED */
33740 - mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33741 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33742 /* turn on/off LED */
33743 - mac->ops.led_on = e1000_led_on_ich8lan;
33744 - mac->ops.led_off = e1000_led_off_ich8lan;
33745 + *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
33746 + *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
33747 break;
33748 case e1000_pchlan:
33749 /* ID LED init */
33750 - mac->ops.id_led_init = e1000_id_led_init_pchlan;
33751 + *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
33752 /* setup LED */
33753 - mac->ops.setup_led = e1000_setup_led_pchlan;
33754 + *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
33755 /* cleanup LED */
33756 - mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33757 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33758 /* turn on/off LED */
33759 - mac->ops.led_on = e1000_led_on_pchlan;
33760 - mac->ops.led_off = e1000_led_off_pchlan;
33761 + *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
33762 + *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
33763 break;
33764 default:
33765 break;
33766 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33767 }
33768 }
33769
33770 -static struct e1000_mac_operations ich8_mac_ops = {
33771 +static const struct e1000_mac_operations ich8_mac_ops = {
33772 .id_led_init = e1000e_id_led_init,
33773 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33774 .check_for_link = e1000_check_for_copper_link_ich8lan,
33775 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33776 /* id_led_init dependent on mac type */
33777 };
33778
33779 -static struct e1000_phy_operations ich8_phy_ops = {
33780 +static const struct e1000_phy_operations ich8_phy_ops = {
33781 .acquire_phy = e1000_acquire_swflag_ich8lan,
33782 .check_reset_block = e1000_check_reset_block_ich8lan,
33783 .commit_phy = NULL,
33784 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33785 .write_phy_reg = e1000e_write_phy_reg_igp,
33786 };
33787
33788 -static struct e1000_nvm_operations ich8_nvm_ops = {
33789 +static const struct e1000_nvm_operations ich8_nvm_ops = {
33790 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33791 .read_nvm = e1000_read_nvm_ich8lan,
33792 .release_nvm = e1000_release_nvm_ich8lan,
33793 diff -urNp linux-2.6.32.45/drivers/net/e1000e/netdev.c linux-2.6.32.45/drivers/net/e1000e/netdev.c
33794 --- linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
33795 +++ linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
33796 @@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
33797
33798 err = -EIO;
33799
33800 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33801 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33802 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33803 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33804 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33805 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33806
33807 err = ei->get_variants(adapter);
33808 if (err)
33809 diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33810 --- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33811 +++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33812 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33813 unsigned char buf[512];
33814 int count1;
33815
33816 + pax_track_stack();
33817 +
33818 if (!count)
33819 return;
33820
33821 diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33822 --- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33823 +++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33824 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33825 NULL,
33826 };
33827
33828 -static struct sysfs_ops veth_pool_ops = {
33829 +static const struct sysfs_ops veth_pool_ops = {
33830 .show = veth_pool_show,
33831 .store = veth_pool_store,
33832 };
33833 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33834 --- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33835 +++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
33836 @@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
33837 ? true : false;
33838
33839 /* physical interface link setup */
33840 - mac->ops.setup_physical_interface =
33841 + *(void **)&mac->ops.setup_physical_interface =
33842 (hw->phy.media_type == e1000_media_type_copper)
33843 ? igb_setup_copper_link_82575
33844 : igb_setup_serdes_link_82575;
33845 @@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
33846
33847 /* PHY function pointers */
33848 if (igb_sgmii_active_82575(hw)) {
33849 - phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33850 - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33851 - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33852 + *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33853 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33854 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33855 } else {
33856 - phy->ops.reset = igb_phy_hw_reset;
33857 - phy->ops.read_reg = igb_read_phy_reg_igp;
33858 - phy->ops.write_reg = igb_write_phy_reg_igp;
33859 + *(void **)&phy->ops.reset = igb_phy_hw_reset;
33860 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
33861 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
33862 }
33863
33864 /* set lan id */
33865 @@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
33866 switch (phy->id) {
33867 case M88E1111_I_PHY_ID:
33868 phy->type = e1000_phy_m88;
33869 - phy->ops.get_phy_info = igb_get_phy_info_m88;
33870 - phy->ops.get_cable_length = igb_get_cable_length_m88;
33871 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33872 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
33873 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
33874 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33875 break;
33876 case IGP03E1000_E_PHY_ID:
33877 phy->type = e1000_phy_igp_3;
33878 - phy->ops.get_phy_info = igb_get_phy_info_igp;
33879 - phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33880 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33881 - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33882 - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33883 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
33884 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33885 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33886 + *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33887 + *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33888 break;
33889 default:
33890 return -E1000_ERR_PHY;
33891 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33892 wr32(E1000_VT_CTL, vt_ctl);
33893 }
33894
33895 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
33896 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33897 .reset_hw = igb_reset_hw_82575,
33898 .init_hw = igb_init_hw_82575,
33899 .check_for_link = igb_check_for_link_82575,
33900 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33901 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33902 };
33903
33904 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
33905 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33906 .acquire = igb_acquire_phy_82575,
33907 .get_cfg_done = igb_get_cfg_done_82575,
33908 .release = igb_release_phy_82575,
33909 };
33910
33911 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33912 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33913 .acquire = igb_acquire_nvm_82575,
33914 .read = igb_read_nvm_eerd,
33915 .release = igb_release_nvm_82575,
33916 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33917 --- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33918 +++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
33919 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
33920 };
33921
33922 struct e1000_nvm_operations {
33923 - s32 (*acquire)(struct e1000_hw *);
33924 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
33925 - void (*release)(struct e1000_hw *);
33926 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33927 + s32 (* const acquire)(struct e1000_hw *);
33928 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
33929 + void (* const release)(struct e1000_hw *);
33930 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
33931 };
33932
33933 struct e1000_info {
33934 s32 (*get_invariants)(struct e1000_hw *);
33935 - struct e1000_mac_operations *mac_ops;
33936 - struct e1000_phy_operations *phy_ops;
33937 - struct e1000_nvm_operations *nvm_ops;
33938 + const struct e1000_mac_operations *mac_ops;
33939 + const struct e1000_phy_operations *phy_ops;
33940 + const struct e1000_nvm_operations *nvm_ops;
33941 };
33942
33943 extern const struct e1000_info e1000_82575_info;
33944 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_mbx.c linux-2.6.32.45/drivers/net/igb/e1000_mbx.c
33945 --- linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
33946 +++ linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
33947 @@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
33948
33949 mbx->size = E1000_VFMAILBOX_SIZE;
33950
33951 - mbx->ops.read = igb_read_mbx_pf;
33952 - mbx->ops.write = igb_write_mbx_pf;
33953 - mbx->ops.read_posted = igb_read_posted_mbx;
33954 - mbx->ops.write_posted = igb_write_posted_mbx;
33955 - mbx->ops.check_for_msg = igb_check_for_msg_pf;
33956 - mbx->ops.check_for_ack = igb_check_for_ack_pf;
33957 - mbx->ops.check_for_rst = igb_check_for_rst_pf;
33958 + *(void **)&mbx->ops.read = igb_read_mbx_pf;
33959 + *(void **)&mbx->ops.write = igb_write_mbx_pf;
33960 + *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
33961 + *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
33962 + *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
33963 + *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
33964 + *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
33965
33966 mbx->stats.msgs_tx = 0;
33967 mbx->stats.msgs_rx = 0;
33968 diff -urNp linux-2.6.32.45/drivers/net/igb/igb_main.c linux-2.6.32.45/drivers/net/igb/igb_main.c
33969 --- linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
33970 +++ linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
33971 @@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
33972 /* setup the private structure */
33973 hw->back = adapter;
33974 /* Copy the default MAC, PHY and NVM function pointers */
33975 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33976 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33977 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33978 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33979 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33980 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33981 /* Initialize skew-specific constants */
33982 err = ei->get_invariants(hw);
33983 if (err)
33984 diff -urNp linux-2.6.32.45/drivers/net/igbvf/mbx.c linux-2.6.32.45/drivers/net/igbvf/mbx.c
33985 --- linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
33986 +++ linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
33987 @@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
33988
33989 mbx->size = E1000_VFMAILBOX_SIZE;
33990
33991 - mbx->ops.read = e1000_read_mbx_vf;
33992 - mbx->ops.write = e1000_write_mbx_vf;
33993 - mbx->ops.read_posted = e1000_read_posted_mbx;
33994 - mbx->ops.write_posted = e1000_write_posted_mbx;
33995 - mbx->ops.check_for_msg = e1000_check_for_msg_vf;
33996 - mbx->ops.check_for_ack = e1000_check_for_ack_vf;
33997 - mbx->ops.check_for_rst = e1000_check_for_rst_vf;
33998 + *(void **)&mbx->ops.read = e1000_read_mbx_vf;
33999 + *(void **)&mbx->ops.write = e1000_write_mbx_vf;
34000 + *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
34001 + *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
34002 + *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34003 + *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34004 + *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34005
34006 mbx->stats.msgs_tx = 0;
34007 mbx->stats.msgs_rx = 0;
34008 diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.c linux-2.6.32.45/drivers/net/igbvf/vf.c
34009 --- linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
34010 +++ linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
34011 @@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
34012
34013 /* Function pointers */
34014 /* reset */
34015 - mac->ops.reset_hw = e1000_reset_hw_vf;
34016 + *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
34017 /* hw initialization */
34018 - mac->ops.init_hw = e1000_init_hw_vf;
34019 + *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
34020 /* check for link */
34021 - mac->ops.check_for_link = e1000_check_for_link_vf;
34022 + *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
34023 /* link info */
34024 - mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34025 + *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34026 /* multicast address update */
34027 - mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34028 + *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34029 /* set mac address */
34030 - mac->ops.rar_set = e1000_rar_set_vf;
34031 + *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
34032 /* read mac address */
34033 - mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34034 + *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34035 /* set vlan filter table array */
34036 - mac->ops.set_vfta = e1000_set_vfta_vf;
34037 + *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34038
34039 return E1000_SUCCESS;
34040 }
34041 @@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34042 **/
34043 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34044 {
34045 - hw->mac.ops.init_params = e1000_init_mac_params_vf;
34046 - hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34047 + *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34048 + *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34049 }
34050
34051 /**
34052 diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
34053 --- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34054 +++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34055 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34056 NULL
34057 };
34058
34059 -static struct sysfs_ops veth_cnx_sysfs_ops = {
34060 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
34061 .show = veth_cnx_attribute_show
34062 };
34063
34064 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34065 NULL
34066 };
34067
34068 -static struct sysfs_ops veth_port_sysfs_ops = {
34069 +static const struct sysfs_ops veth_port_sysfs_ops = {
34070 .show = veth_port_attribute_show
34071 };
34072
34073 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
34074 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34075 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34076 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34077 u32 rctl;
34078 int i;
34079
34080 + pax_track_stack();
34081 +
34082 /* Check for Promiscuous and All Multicast modes */
34083
34084 rctl = IXGB_READ_REG(hw, RCTL);
34085 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
34086 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34087 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34088 @@ -260,6 +260,9 @@ void __devinit
34089 ixgb_check_options(struct ixgb_adapter *adapter)
34090 {
34091 int bd = adapter->bd_number;
34092 +
34093 + pax_track_stack();
34094 +
34095 if (bd >= IXGB_MAX_NIC) {
34096 printk(KERN_NOTICE
34097 "Warning: no configuration for board #%i\n", bd);
34098 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c
34099 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34100 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34101 @@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34102
34103 /* Overwrite the link function pointers if copper PHY */
34104 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34105 - mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34106 - mac->ops.get_link_capabilities =
34107 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34108 + *(void **)&mac->ops.get_link_capabilities =
34109 &ixgbe_get_copper_link_capabilities_82598;
34110 }
34111
34112 switch (hw->phy.type) {
34113 case ixgbe_phy_tn:
34114 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34115 - phy->ops.get_firmware_version =
34116 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34117 + *(void **)&phy->ops.get_firmware_version =
34118 &ixgbe_get_phy_firmware_version_tnx;
34119 break;
34120 case ixgbe_phy_nl:
34121 - phy->ops.reset = &ixgbe_reset_phy_nl;
34122 + *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34123
34124 /* Call SFP+ identify routine to get the SFP+ module type */
34125 ret_val = phy->ops.identify_sfp(hw);
34126 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c
34127 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34128 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34129 @@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34130 struct ixgbe_mac_info *mac = &hw->mac;
34131 if (hw->phy.multispeed_fiber) {
34132 /* Set up dual speed SFP+ support */
34133 - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34134 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34135 } else {
34136 - mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34137 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34138 }
34139 }
34140
34141 @@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34142 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34143 ixgbe_init_mac_link_ops_82599(hw);
34144
34145 - hw->phy.ops.reset = NULL;
34146 + *(void **)&hw->phy.ops.reset = NULL;
34147
34148 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34149 &data_offset);
34150 @@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34151
34152 /* If copper media, overwrite with copper function pointers */
34153 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34154 - mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34155 - mac->ops.get_link_capabilities =
34156 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34157 + *(void **)&mac->ops.get_link_capabilities =
34158 &ixgbe_get_copper_link_capabilities_82599;
34159 }
34160
34161 /* Set necessary function pointers based on phy type */
34162 switch (hw->phy.type) {
34163 case ixgbe_phy_tn:
34164 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34165 - phy->ops.get_firmware_version =
34166 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34167 + *(void **)&phy->ops.get_firmware_version =
34168 &ixgbe_get_phy_firmware_version_tnx;
34169 break;
34170 default:
34171 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c
34172 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34173 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34174 @@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34175 adapter->bd_number = cards_found;
34176
34177 /* Setup hw api */
34178 - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34179 + memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34180 hw->mac.type = ii->mac;
34181
34182 /* EEPROM */
34183 - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34184 + memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34185 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34186 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34187 if (!(eec & (1 << 8)))
34188 - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34189 + *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34190
34191 /* PHY */
34192 - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34193 + memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34194 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34195 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34196 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
34197 diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
34198 --- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34199 +++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34200 @@ -38,6 +38,7 @@
34201 #include <linux/errno.h>
34202 #include <linux/pci.h>
34203 #include <linux/dma-mapping.h>
34204 +#include <linux/sched.h>
34205
34206 #include <linux/mlx4/device.h>
34207 #include <linux/mlx4/doorbell.h>
34208 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34209 u64 icm_size;
34210 int err;
34211
34212 + pax_track_stack();
34213 +
34214 err = mlx4_QUERY_FW(dev);
34215 if (err) {
34216 if (err == -EACCES)
34217 diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
34218 --- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34219 +++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34220 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34221 int i, num_irqs, err;
34222 u8 first_ldg;
34223
34224 + pax_track_stack();
34225 +
34226 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34227 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34228 ldg_num_map[i] = first_ldg + i;
34229 diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34230 --- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34231 +++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34232 @@ -79,7 +79,7 @@ static int cards_found;
34233 /*
34234 * VLB I/O addresses
34235 */
34236 -static unsigned int pcnet32_portlist[] __initdata =
34237 +static unsigned int pcnet32_portlist[] __devinitdata =
34238 { 0x300, 0x320, 0x340, 0x360, 0 };
34239
34240 static int pcnet32_debug = 0;
34241 @@ -267,7 +267,7 @@ struct pcnet32_private {
34242 struct sk_buff **rx_skbuff;
34243 dma_addr_t *tx_dma_addr;
34244 dma_addr_t *rx_dma_addr;
34245 - struct pcnet32_access a;
34246 + struct pcnet32_access *a;
34247 spinlock_t lock; /* Guard lock */
34248 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34249 unsigned int rx_ring_size; /* current rx ring size */
34250 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34251 u16 val;
34252
34253 netif_wake_queue(dev);
34254 - val = lp->a.read_csr(ioaddr, CSR3);
34255 + val = lp->a->read_csr(ioaddr, CSR3);
34256 val &= 0x00ff;
34257 - lp->a.write_csr(ioaddr, CSR3, val);
34258 + lp->a->write_csr(ioaddr, CSR3, val);
34259 napi_enable(&lp->napi);
34260 }
34261
34262 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34263 r = mii_link_ok(&lp->mii_if);
34264 } else if (lp->chip_version >= PCNET32_79C970A) {
34265 ulong ioaddr = dev->base_addr; /* card base I/O address */
34266 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34267 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34268 } else { /* can not detect link on really old chips */
34269 r = 1;
34270 }
34271 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34272 pcnet32_netif_stop(dev);
34273
34274 spin_lock_irqsave(&lp->lock, flags);
34275 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34276 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34277
34278 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34279
34280 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34281 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34282 {
34283 struct pcnet32_private *lp = netdev_priv(dev);
34284 - struct pcnet32_access *a = &lp->a; /* access to registers */
34285 + struct pcnet32_access *a = lp->a; /* access to registers */
34286 ulong ioaddr = dev->base_addr; /* card base I/O address */
34287 struct sk_buff *skb; /* sk buff */
34288 int x, i; /* counters */
34289 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34290 pcnet32_netif_stop(dev);
34291
34292 spin_lock_irqsave(&lp->lock, flags);
34293 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34294 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34295
34296 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34297
34298 /* Reset the PCNET32 */
34299 - lp->a.reset(ioaddr);
34300 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34301 + lp->a->reset(ioaddr);
34302 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34303
34304 /* switch pcnet32 to 32bit mode */
34305 - lp->a.write_bcr(ioaddr, 20, 2);
34306 + lp->a->write_bcr(ioaddr, 20, 2);
34307
34308 /* purge & init rings but don't actually restart */
34309 pcnet32_restart(dev, 0x0000);
34310
34311 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34312 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34313
34314 /* Initialize Transmit buffers. */
34315 size = data_len + 15;
34316 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34317
34318 /* set int loopback in CSR15 */
34319 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34320 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34321 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34322
34323 teststatus = cpu_to_le16(0x8000);
34324 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34325 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34326
34327 /* Check status of descriptors */
34328 for (x = 0; x < numbuffs; x++) {
34329 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34330 }
34331 }
34332
34333 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34334 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34335 wmb();
34336 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34337 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34338 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34339 pcnet32_restart(dev, CSR0_NORMAL);
34340 } else {
34341 pcnet32_purge_rx_ring(dev);
34342 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34343 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34344 }
34345 spin_unlock_irqrestore(&lp->lock, flags);
34346
34347 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34348 static void pcnet32_led_blink_callback(struct net_device *dev)
34349 {
34350 struct pcnet32_private *lp = netdev_priv(dev);
34351 - struct pcnet32_access *a = &lp->a;
34352 + struct pcnet32_access *a = lp->a;
34353 ulong ioaddr = dev->base_addr;
34354 unsigned long flags;
34355 int i;
34356 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34357 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34358 {
34359 struct pcnet32_private *lp = netdev_priv(dev);
34360 - struct pcnet32_access *a = &lp->a;
34361 + struct pcnet32_access *a = lp->a;
34362 ulong ioaddr = dev->base_addr;
34363 unsigned long flags;
34364 int i, regs[4];
34365 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34366 {
34367 int csr5;
34368 struct pcnet32_private *lp = netdev_priv(dev);
34369 - struct pcnet32_access *a = &lp->a;
34370 + struct pcnet32_access *a = lp->a;
34371 ulong ioaddr = dev->base_addr;
34372 int ticks;
34373
34374 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34375 spin_lock_irqsave(&lp->lock, flags);
34376 if (pcnet32_tx(dev)) {
34377 /* reset the chip to clear the error condition, then restart */
34378 - lp->a.reset(ioaddr);
34379 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34380 + lp->a->reset(ioaddr);
34381 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34382 pcnet32_restart(dev, CSR0_START);
34383 netif_wake_queue(dev);
34384 }
34385 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34386 __napi_complete(napi);
34387
34388 /* clear interrupt masks */
34389 - val = lp->a.read_csr(ioaddr, CSR3);
34390 + val = lp->a->read_csr(ioaddr, CSR3);
34391 val &= 0x00ff;
34392 - lp->a.write_csr(ioaddr, CSR3, val);
34393 + lp->a->write_csr(ioaddr, CSR3, val);
34394
34395 /* Set interrupt enable. */
34396 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34397 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34398
34399 spin_unlock_irqrestore(&lp->lock, flags);
34400 }
34401 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34402 int i, csr0;
34403 u16 *buff = ptr;
34404 struct pcnet32_private *lp = netdev_priv(dev);
34405 - struct pcnet32_access *a = &lp->a;
34406 + struct pcnet32_access *a = lp->a;
34407 ulong ioaddr = dev->base_addr;
34408 unsigned long flags;
34409
34410 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34411 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34412 if (lp->phymask & (1 << j)) {
34413 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34414 - lp->a.write_bcr(ioaddr, 33,
34415 + lp->a->write_bcr(ioaddr, 33,
34416 (j << 5) | i);
34417 - *buff++ = lp->a.read_bcr(ioaddr, 34);
34418 + *buff++ = lp->a->read_bcr(ioaddr, 34);
34419 }
34420 }
34421 }
34422 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34423 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34424 lp->options |= PCNET32_PORT_FD;
34425
34426 - lp->a = *a;
34427 + lp->a = a;
34428
34429 /* prior to register_netdev, dev->name is not yet correct */
34430 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34431 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34432 if (lp->mii) {
34433 /* lp->phycount and lp->phymask are set to 0 by memset above */
34434
34435 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34436 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34437 /* scan for PHYs */
34438 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34439 unsigned short id1, id2;
34440 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34441 "Found PHY %04x:%04x at address %d.\n",
34442 id1, id2, i);
34443 }
34444 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34445 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34446 if (lp->phycount > 1) {
34447 lp->options |= PCNET32_PORT_MII;
34448 }
34449 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34450 }
34451
34452 /* Reset the PCNET32 */
34453 - lp->a.reset(ioaddr);
34454 + lp->a->reset(ioaddr);
34455
34456 /* switch pcnet32 to 32bit mode */
34457 - lp->a.write_bcr(ioaddr, 20, 2);
34458 + lp->a->write_bcr(ioaddr, 20, 2);
34459
34460 if (netif_msg_ifup(lp))
34461 printk(KERN_DEBUG
34462 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34463 (u32) (lp->init_dma_addr));
34464
34465 /* set/reset autoselect bit */
34466 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
34467 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
34468 if (lp->options & PCNET32_PORT_ASEL)
34469 val |= 2;
34470 - lp->a.write_bcr(ioaddr, 2, val);
34471 + lp->a->write_bcr(ioaddr, 2, val);
34472
34473 /* handle full duplex setting */
34474 if (lp->mii_if.full_duplex) {
34475 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
34476 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
34477 if (lp->options & PCNET32_PORT_FD) {
34478 val |= 1;
34479 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34480 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34481 if (lp->chip_version == 0x2627)
34482 val |= 3;
34483 }
34484 - lp->a.write_bcr(ioaddr, 9, val);
34485 + lp->a->write_bcr(ioaddr, 9, val);
34486 }
34487
34488 /* set/reset GPSI bit in test register */
34489 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34490 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34491 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34492 val |= 0x10;
34493 - lp->a.write_csr(ioaddr, 124, val);
34494 + lp->a->write_csr(ioaddr, 124, val);
34495
34496 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34497 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34498 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34499 * duplex, and/or enable auto negotiation, and clear DANAS
34500 */
34501 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34502 - lp->a.write_bcr(ioaddr, 32,
34503 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
34504 + lp->a->write_bcr(ioaddr, 32,
34505 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
34506 /* disable Auto Negotiation, set 10Mpbs, HD */
34507 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34508 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34509 if (lp->options & PCNET32_PORT_FD)
34510 val |= 0x10;
34511 if (lp->options & PCNET32_PORT_100)
34512 val |= 0x08;
34513 - lp->a.write_bcr(ioaddr, 32, val);
34514 + lp->a->write_bcr(ioaddr, 32, val);
34515 } else {
34516 if (lp->options & PCNET32_PORT_ASEL) {
34517 - lp->a.write_bcr(ioaddr, 32,
34518 - lp->a.read_bcr(ioaddr,
34519 + lp->a->write_bcr(ioaddr, 32,
34520 + lp->a->read_bcr(ioaddr,
34521 32) | 0x0080);
34522 /* enable auto negotiate, setup, disable fd */
34523 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34524 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34525 val |= 0x20;
34526 - lp->a.write_bcr(ioaddr, 32, val);
34527 + lp->a->write_bcr(ioaddr, 32, val);
34528 }
34529 }
34530 } else {
34531 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34532 * There is really no good other way to handle multiple PHYs
34533 * other than turning off all automatics
34534 */
34535 - val = lp->a.read_bcr(ioaddr, 2);
34536 - lp->a.write_bcr(ioaddr, 2, val & ~2);
34537 - val = lp->a.read_bcr(ioaddr, 32);
34538 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34539 + val = lp->a->read_bcr(ioaddr, 2);
34540 + lp->a->write_bcr(ioaddr, 2, val & ~2);
34541 + val = lp->a->read_bcr(ioaddr, 32);
34542 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34543
34544 if (!(lp->options & PCNET32_PORT_ASEL)) {
34545 /* setup ecmd */
34546 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34547 ecmd.speed =
34548 lp->
34549 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34550 - bcr9 = lp->a.read_bcr(ioaddr, 9);
34551 + bcr9 = lp->a->read_bcr(ioaddr, 9);
34552
34553 if (lp->options & PCNET32_PORT_FD) {
34554 ecmd.duplex = DUPLEX_FULL;
34555 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34556 ecmd.duplex = DUPLEX_HALF;
34557 bcr9 |= ~(1 << 0);
34558 }
34559 - lp->a.write_bcr(ioaddr, 9, bcr9);
34560 + lp->a->write_bcr(ioaddr, 9, bcr9);
34561 }
34562
34563 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34564 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34565
34566 #ifdef DO_DXSUFLO
34567 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34568 - val = lp->a.read_csr(ioaddr, CSR3);
34569 + val = lp->a->read_csr(ioaddr, CSR3);
34570 val |= 0x40;
34571 - lp->a.write_csr(ioaddr, CSR3, val);
34572 + lp->a->write_csr(ioaddr, CSR3, val);
34573 }
34574 #endif
34575
34576 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34577 napi_enable(&lp->napi);
34578
34579 /* Re-initialize the PCNET32, and start it when done. */
34580 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34581 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34582 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34583 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34584
34585 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34586 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34587 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34588 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34589
34590 netif_start_queue(dev);
34591
34592 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34593
34594 i = 0;
34595 while (i++ < 100)
34596 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34597 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34598 break;
34599 /*
34600 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34601 * reports that doing so triggers a bug in the '974.
34602 */
34603 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34604 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34605
34606 if (netif_msg_ifup(lp))
34607 printk(KERN_DEBUG
34608 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34609 dev->name, i,
34610 (u32) (lp->init_dma_addr),
34611 - lp->a.read_csr(ioaddr, CSR0));
34612 + lp->a->read_csr(ioaddr, CSR0));
34613
34614 spin_unlock_irqrestore(&lp->lock, flags);
34615
34616 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34617 * Switch back to 16bit mode to avoid problems with dumb
34618 * DOS packet driver after a warm reboot
34619 */
34620 - lp->a.write_bcr(ioaddr, 20, 4);
34621 + lp->a->write_bcr(ioaddr, 20, 4);
34622
34623 err_free_irq:
34624 spin_unlock_irqrestore(&lp->lock, flags);
34625 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34626
34627 /* wait for stop */
34628 for (i = 0; i < 100; i++)
34629 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34630 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34631 break;
34632
34633 if (i >= 100 && netif_msg_drv(lp))
34634 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34635 return;
34636
34637 /* ReInit Ring */
34638 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34639 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34640 i = 0;
34641 while (i++ < 1000)
34642 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34643 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34644 break;
34645
34646 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34647 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34648 }
34649
34650 static void pcnet32_tx_timeout(struct net_device *dev)
34651 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34652 if (pcnet32_debug & NETIF_MSG_DRV)
34653 printk(KERN_ERR
34654 "%s: transmit timed out, status %4.4x, resetting.\n",
34655 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34656 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34657 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34658 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34659 dev->stats.tx_errors++;
34660 if (netif_msg_tx_err(lp)) {
34661 int i;
34662 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34663 if (netif_msg_tx_queued(lp)) {
34664 printk(KERN_DEBUG
34665 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34666 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34667 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34668 }
34669
34670 /* Default status -- will not enable Successful-TxDone
34671 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34672 dev->stats.tx_bytes += skb->len;
34673
34674 /* Trigger an immediate send poll. */
34675 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34676 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34677
34678 dev->trans_start = jiffies;
34679
34680 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34681
34682 spin_lock(&lp->lock);
34683
34684 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34685 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34686 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34687 if (csr0 == 0xffff) {
34688 break; /* PCMCIA remove happened */
34689 }
34690 /* Acknowledge all of the current interrupt sources ASAP. */
34691 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34692 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34693
34694 if (netif_msg_intr(lp))
34695 printk(KERN_DEBUG
34696 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34697 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34698 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34699
34700 /* Log misc errors. */
34701 if (csr0 & 0x4000)
34702 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34703 if (napi_schedule_prep(&lp->napi)) {
34704 u16 val;
34705 /* set interrupt masks */
34706 - val = lp->a.read_csr(ioaddr, CSR3);
34707 + val = lp->a->read_csr(ioaddr, CSR3);
34708 val |= 0x5f00;
34709 - lp->a.write_csr(ioaddr, CSR3, val);
34710 + lp->a->write_csr(ioaddr, CSR3, val);
34711
34712 __napi_schedule(&lp->napi);
34713 break;
34714 }
34715 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34716 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34717 }
34718
34719 if (netif_msg_intr(lp))
34720 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34721 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34722 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34723
34724 spin_unlock(&lp->lock);
34725
34726 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34727
34728 spin_lock_irqsave(&lp->lock, flags);
34729
34730 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34731 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34732
34733 if (netif_msg_ifdown(lp))
34734 printk(KERN_DEBUG
34735 "%s: Shutting down ethercard, status was %2.2x.\n",
34736 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34737 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34738
34739 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34740 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34741 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34742
34743 /*
34744 * Switch back to 16bit mode to avoid problems with dumb
34745 * DOS packet driver after a warm reboot
34746 */
34747 - lp->a.write_bcr(ioaddr, 20, 4);
34748 + lp->a->write_bcr(ioaddr, 20, 4);
34749
34750 spin_unlock_irqrestore(&lp->lock, flags);
34751
34752 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34753 unsigned long flags;
34754
34755 spin_lock_irqsave(&lp->lock, flags);
34756 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34757 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34758 spin_unlock_irqrestore(&lp->lock, flags);
34759
34760 return &dev->stats;
34761 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34762 if (dev->flags & IFF_ALLMULTI) {
34763 ib->filter[0] = cpu_to_le32(~0U);
34764 ib->filter[1] = cpu_to_le32(~0U);
34765 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34766 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34767 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34768 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34769 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34770 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34771 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34772 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34773 return;
34774 }
34775 /* clear the multicast filter */
34776 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34777 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34778 }
34779 for (i = 0; i < 4; i++)
34780 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34781 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34782 le16_to_cpu(mcast_table[i]));
34783 return;
34784 }
34785 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34786
34787 spin_lock_irqsave(&lp->lock, flags);
34788 suspended = pcnet32_suspend(dev, &flags, 0);
34789 - csr15 = lp->a.read_csr(ioaddr, CSR15);
34790 + csr15 = lp->a->read_csr(ioaddr, CSR15);
34791 if (dev->flags & IFF_PROMISC) {
34792 /* Log any net taps. */
34793 if (netif_msg_hw(lp))
34794 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34795 lp->init_block->mode =
34796 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34797 7);
34798 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34799 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34800 } else {
34801 lp->init_block->mode =
34802 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34803 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34804 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34805 pcnet32_load_multicast(dev);
34806 }
34807
34808 if (suspended) {
34809 int csr5;
34810 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34811 - csr5 = lp->a.read_csr(ioaddr, CSR5);
34812 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34813 + csr5 = lp->a->read_csr(ioaddr, CSR5);
34814 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34815 } else {
34816 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34817 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34818 pcnet32_restart(dev, CSR0_NORMAL);
34819 netif_wake_queue(dev);
34820 }
34821 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34822 if (!lp->mii)
34823 return 0;
34824
34825 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34826 - val_out = lp->a.read_bcr(ioaddr, 34);
34827 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34828 + val_out = lp->a->read_bcr(ioaddr, 34);
34829
34830 return val_out;
34831 }
34832 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34833 if (!lp->mii)
34834 return;
34835
34836 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34837 - lp->a.write_bcr(ioaddr, 34, val);
34838 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34839 + lp->a->write_bcr(ioaddr, 34, val);
34840 }
34841
34842 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34843 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34844 curr_link = mii_link_ok(&lp->mii_if);
34845 } else {
34846 ulong ioaddr = dev->base_addr; /* card base I/O address */
34847 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34848 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34849 }
34850 if (!curr_link) {
34851 if (prev_link || verbose) {
34852 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34853 (ecmd.duplex ==
34854 DUPLEX_FULL) ? "full" : "half");
34855 }
34856 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34857 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34858 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34859 if (lp->mii_if.full_duplex)
34860 bcr9 |= (1 << 0);
34861 else
34862 bcr9 &= ~(1 << 0);
34863 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34864 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34865 }
34866 } else {
34867 if (netif_msg_link(lp))
34868 diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34869 --- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34870 +++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34871 @@ -95,6 +95,7 @@
34872 #define CHIPREV_ID_5750_A0 0x4000
34873 #define CHIPREV_ID_5750_A1 0x4001
34874 #define CHIPREV_ID_5750_A3 0x4003
34875 +#define CHIPREV_ID_5750_C1 0x4201
34876 #define CHIPREV_ID_5750_C2 0x4202
34877 #define CHIPREV_ID_5752_A0_HW 0x5000
34878 #define CHIPREV_ID_5752_A0 0x6000
34879 diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34880 --- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34881 +++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34882 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34883
34884 static int __init abyss_init (void)
34885 {
34886 - abyss_netdev_ops = tms380tr_netdev_ops;
34887 + pax_open_kernel();
34888 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34889
34890 - abyss_netdev_ops.ndo_open = abyss_open;
34891 - abyss_netdev_ops.ndo_stop = abyss_close;
34892 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34893 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34894 + pax_close_kernel();
34895
34896 return pci_register_driver(&abyss_driver);
34897 }
34898 diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34899 --- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34900 +++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34901 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34902
34903 static int __init madgemc_init (void)
34904 {
34905 - madgemc_netdev_ops = tms380tr_netdev_ops;
34906 - madgemc_netdev_ops.ndo_open = madgemc_open;
34907 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34908 + pax_open_kernel();
34909 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34910 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34911 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34912 + pax_close_kernel();
34913
34914 return mca_register_driver (&madgemc_driver);
34915 }
34916 diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34917 --- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34918 +++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34919 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34920 struct platform_device *pdev;
34921 int i, num = 0, err = 0;
34922
34923 - proteon_netdev_ops = tms380tr_netdev_ops;
34924 - proteon_netdev_ops.ndo_open = proteon_open;
34925 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34926 + pax_open_kernel();
34927 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34928 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34929 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34930 + pax_close_kernel();
34931
34932 err = platform_driver_register(&proteon_driver);
34933 if (err)
34934 diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34935 --- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34936 +++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34937 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34938 struct platform_device *pdev;
34939 int i, num = 0, err = 0;
34940
34941 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34942 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34943 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34944 + pax_open_kernel();
34945 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34946 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34947 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34948 + pax_close_kernel();
34949
34950 err = platform_driver_register(&sk_isa_driver);
34951 if (err)
34952 diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34953 --- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34954 +++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34955 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34956 struct de_srom_info_leaf *il;
34957 void *bufp;
34958
34959 + pax_track_stack();
34960 +
34961 /* download entire eeprom */
34962 for (i = 0; i < DE_EEPROM_WORDS; i++)
34963 ((__le16 *)ee_data)[i] =
34964 diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34965 --- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34966 +++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34967 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34968 for (i=0; i<ETH_ALEN; i++) {
34969 tmp.addr[i] = dev->dev_addr[i];
34970 }
34971 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34972 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34973 break;
34974
34975 case DE4X5_SET_HWADDR: /* Set the hardware address */
34976 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34977 spin_lock_irqsave(&lp->lock, flags);
34978 memcpy(&statbuf, &lp->pktStats, ioc->len);
34979 spin_unlock_irqrestore(&lp->lock, flags);
34980 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34981 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34982 return -EFAULT;
34983 break;
34984 }
34985 diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
34986 --- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34987 +++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34988 @@ -71,7 +71,7 @@
34989 #include <asm/byteorder.h>
34990 #include <linux/serial_core.h>
34991 #include <linux/serial.h>
34992 -
34993 +#include <asm/local.h>
34994
34995 #define DRIVER_VERSION "1.2"
34996 #define MOD_AUTHOR "Option Wireless"
34997 @@ -258,7 +258,7 @@ struct hso_serial {
34998
34999 /* from usb_serial_port */
35000 struct tty_struct *tty;
35001 - int open_count;
35002 + local_t open_count;
35003 spinlock_t serial_lock;
35004
35005 int (*write_data) (struct hso_serial *serial);
35006 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35007 struct urb *urb;
35008
35009 urb = serial->rx_urb[0];
35010 - if (serial->open_count > 0) {
35011 + if (local_read(&serial->open_count) > 0) {
35012 count = put_rxbuf_data(urb, serial);
35013 if (count == -1)
35014 return;
35015 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35016 DUMP1(urb->transfer_buffer, urb->actual_length);
35017
35018 /* Anyone listening? */
35019 - if (serial->open_count == 0)
35020 + if (local_read(&serial->open_count) == 0)
35021 return;
35022
35023 if (status == 0) {
35024 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35025 spin_unlock_irq(&serial->serial_lock);
35026
35027 /* check for port already opened, if not set the termios */
35028 - serial->open_count++;
35029 - if (serial->open_count == 1) {
35030 + if (local_inc_return(&serial->open_count) == 1) {
35031 tty->low_latency = 1;
35032 serial->rx_state = RX_IDLE;
35033 /* Force default termio settings */
35034 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35035 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35036 if (result) {
35037 hso_stop_serial_device(serial->parent);
35038 - serial->open_count--;
35039 + local_dec(&serial->open_count);
35040 kref_put(&serial->parent->ref, hso_serial_ref_free);
35041 }
35042 } else {
35043 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35044
35045 /* reset the rts and dtr */
35046 /* do the actual close */
35047 - serial->open_count--;
35048 + local_dec(&serial->open_count);
35049
35050 - if (serial->open_count <= 0) {
35051 - serial->open_count = 0;
35052 + if (local_read(&serial->open_count) <= 0) {
35053 + local_set(&serial->open_count, 0);
35054 spin_lock_irq(&serial->serial_lock);
35055 if (serial->tty == tty) {
35056 serial->tty->driver_data = NULL;
35057 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35058
35059 /* the actual setup */
35060 spin_lock_irqsave(&serial->serial_lock, flags);
35061 - if (serial->open_count)
35062 + if (local_read(&serial->open_count))
35063 _hso_serial_set_termios(tty, old);
35064 else
35065 tty->termios = old;
35066 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35067 /* Start all serial ports */
35068 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35069 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35070 - if (dev2ser(serial_table[i])->open_count) {
35071 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35072 result =
35073 hso_start_serial_device(serial_table[i], GFP_NOIO);
35074 hso_kick_transmit(dev2ser(serial_table[i]));
35075 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
35076 --- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35077 +++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35078 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35079 void (*link_down)(struct __vxge_hw_device *devh);
35080 void (*crit_err)(struct __vxge_hw_device *devh,
35081 enum vxge_hw_event type, u64 ext_data);
35082 -};
35083 +} __no_const;
35084
35085 /*
35086 * struct __vxge_hw_blockpool_entry - Block private data structure
35087 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
35088 --- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35089 +++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35090 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35091 struct sk_buff *completed[NR_SKB_COMPLETED];
35092 int more;
35093
35094 + pax_track_stack();
35095 +
35096 do {
35097 more = 0;
35098 skb_ptr = completed;
35099 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35100 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35101 int index;
35102
35103 + pax_track_stack();
35104 +
35105 /*
35106 * Filling
35107 * - itable with bucket numbers
35108 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
35109 --- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35110 +++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35111 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35112 struct vxge_hw_mempool_dma *dma_object,
35113 u32 index,
35114 u32 is_last);
35115 -};
35116 +} __no_const;
35117
35118 void
35119 __vxge_hw_mempool_destroy(
35120 diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
35121 --- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35122 +++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35123 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35124 unsigned char hex[1024],
35125 * phex = hex;
35126
35127 + pax_track_stack();
35128 +
35129 if (len >= (sizeof(hex) / 2))
35130 len = (sizeof(hex) / 2) - 1;
35131
35132 diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
35133 --- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35134 +++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35135 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35136
35137 static int x25_open(struct net_device *dev)
35138 {
35139 - struct lapb_register_struct cb;
35140 + static struct lapb_register_struct cb = {
35141 + .connect_confirmation = x25_connected,
35142 + .connect_indication = x25_connected,
35143 + .disconnect_confirmation = x25_disconnected,
35144 + .disconnect_indication = x25_disconnected,
35145 + .data_indication = x25_data_indication,
35146 + .data_transmit = x25_data_transmit
35147 + };
35148 int result;
35149
35150 - cb.connect_confirmation = x25_connected;
35151 - cb.connect_indication = x25_connected;
35152 - cb.disconnect_confirmation = x25_disconnected;
35153 - cb.disconnect_indication = x25_disconnected;
35154 - cb.data_indication = x25_data_indication;
35155 - cb.data_transmit = x25_data_transmit;
35156 -
35157 result = lapb_register(dev, &cb);
35158 if (result != LAPB_OK)
35159 return result;
35160 diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
35161 --- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35162 +++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35163 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35164 int do_autopm = 1;
35165 DECLARE_COMPLETION_ONSTACK(notif_completion);
35166
35167 + pax_track_stack();
35168 +
35169 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35170 i2400m, ack, ack_size);
35171 BUG_ON(_ack == i2400m->bm_ack_buf);
35172 diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
35173 --- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35174 +++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35175 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35176 BSSListElement * loop_net;
35177 BSSListElement * tmp_net;
35178
35179 + pax_track_stack();
35180 +
35181 /* Blow away current list of scan results */
35182 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35183 list_move_tail (&loop_net->list, &ai->network_free_list);
35184 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35185 WepKeyRid wkr;
35186 int rc;
35187
35188 + pax_track_stack();
35189 +
35190 memset( &mySsid, 0, sizeof( mySsid ) );
35191 kfree (ai->flash);
35192 ai->flash = NULL;
35193 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35194 __le32 *vals = stats.vals;
35195 int len;
35196
35197 + pax_track_stack();
35198 +
35199 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35200 return -ENOMEM;
35201 data = (struct proc_data *)file->private_data;
35202 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35203 /* If doLoseSync is not 1, we won't do a Lose Sync */
35204 int doLoseSync = -1;
35205
35206 + pax_track_stack();
35207 +
35208 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35209 return -ENOMEM;
35210 data = (struct proc_data *)file->private_data;
35211 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35212 int i;
35213 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35214
35215 + pax_track_stack();
35216 +
35217 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35218 if (!qual)
35219 return -ENOMEM;
35220 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35221 CapabilityRid cap_rid;
35222 __le32 *vals = stats_rid.vals;
35223
35224 + pax_track_stack();
35225 +
35226 /* Get stats out of the card */
35227 clear_bit(JOB_WSTATS, &local->jobs);
35228 if (local->power.event) {
35229 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35230 --- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35231 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35232 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35233 unsigned int v;
35234 u64 tsf;
35235
35236 + pax_track_stack();
35237 +
35238 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35239 len += snprintf(buf+len, sizeof(buf)-len,
35240 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35241 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35242 unsigned int len = 0;
35243 unsigned int i;
35244
35245 + pax_track_stack();
35246 +
35247 len += snprintf(buf+len, sizeof(buf)-len,
35248 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35249
35250 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35251 --- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35252 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35253 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35254 char buf[512];
35255 unsigned int len = 0;
35256
35257 + pax_track_stack();
35258 +
35259 len += snprintf(buf + len, sizeof(buf) - len,
35260 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35261 len += snprintf(buf + len, sizeof(buf) - len,
35262 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35263 int i;
35264 u8 addr[ETH_ALEN];
35265
35266 + pax_track_stack();
35267 +
35268 len += snprintf(buf + len, sizeof(buf) - len,
35269 "primary: %s (%s chan=%d ht=%d)\n",
35270 wiphy_name(sc->pri_wiphy->hw->wiphy),
35271 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35272 --- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35273 +++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35274 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
35275 struct b43_debugfs_fops {
35276 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35277 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35278 - struct file_operations fops;
35279 + const struct file_operations fops;
35280 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35281 size_t file_struct_offset;
35282 };
35283 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35284 --- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35285 +++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35286 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
35287 struct b43legacy_debugfs_fops {
35288 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35289 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35290 - struct file_operations fops;
35291 + const struct file_operations fops;
35292 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35293 size_t file_struct_offset;
35294 /* Take wl->irq_lock before calling read/write? */
35295 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35296 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35297 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35298 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35299 int err;
35300 DECLARE_SSID_BUF(ssid);
35301
35302 + pax_track_stack();
35303 +
35304 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35305
35306 if (ssid_len)
35307 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35308 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35309 int err;
35310
35311 + pax_track_stack();
35312 +
35313 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35314 idx, keylen, len);
35315
35316 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35317 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35318 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35319 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35320 unsigned long flags;
35321 DECLARE_SSID_BUF(ssid);
35322
35323 + pax_track_stack();
35324 +
35325 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35326 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35327 print_ssid(ssid, info_element->data, info_element->len),
35328 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35329 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35330 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35331 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35332 },
35333 };
35334
35335 -static struct iwl_ops iwl1000_ops = {
35336 +static const struct iwl_ops iwl1000_ops = {
35337 .ucode = &iwl5000_ucode,
35338 .lib = &iwl1000_lib,
35339 .hcmd = &iwl5000_hcmd,
35340 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35341 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35342 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35343 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35344 */
35345 if (iwl3945_mod_params.disable_hw_scan) {
35346 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35347 - iwl3945_hw_ops.hw_scan = NULL;
35348 + pax_open_kernel();
35349 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35350 + pax_close_kernel();
35351 }
35352
35353
35354 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35355 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35356 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35357 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35358 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35359 };
35360
35361 -static struct iwl_ops iwl3945_ops = {
35362 +static const struct iwl_ops iwl3945_ops = {
35363 .ucode = &iwl3945_ucode,
35364 .lib = &iwl3945_lib,
35365 .hcmd = &iwl3945_hcmd,
35366 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35367 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35368 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35369 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35370 },
35371 };
35372
35373 -static struct iwl_ops iwl4965_ops = {
35374 +static const struct iwl_ops iwl4965_ops = {
35375 .ucode = &iwl4965_ucode,
35376 .lib = &iwl4965_lib,
35377 .hcmd = &iwl4965_hcmd,
35378 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35379 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35380 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35381 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35382 },
35383 };
35384
35385 -struct iwl_ops iwl5000_ops = {
35386 +const struct iwl_ops iwl5000_ops = {
35387 .ucode = &iwl5000_ucode,
35388 .lib = &iwl5000_lib,
35389 .hcmd = &iwl5000_hcmd,
35390 .utils = &iwl5000_hcmd_utils,
35391 };
35392
35393 -static struct iwl_ops iwl5150_ops = {
35394 +static const struct iwl_ops iwl5150_ops = {
35395 .ucode = &iwl5000_ucode,
35396 .lib = &iwl5150_lib,
35397 .hcmd = &iwl5000_hcmd,
35398 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35399 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35400 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35401 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35402 .calc_rssi = iwl5000_calc_rssi,
35403 };
35404
35405 -static struct iwl_ops iwl6000_ops = {
35406 +static const struct iwl_ops iwl6000_ops = {
35407 .ucode = &iwl5000_ucode,
35408 .lib = &iwl6000_lib,
35409 .hcmd = &iwl5000_hcmd,
35410 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35411 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35412 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35413 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35414 if (iwl_debug_level & IWL_DL_INFO)
35415 dev_printk(KERN_DEBUG, &(pdev->dev),
35416 "Disabling hw_scan\n");
35417 - iwl_hw_ops.hw_scan = NULL;
35418 + pax_open_kernel();
35419 + *(void **)&iwl_hw_ops.hw_scan = NULL;
35420 + pax_close_kernel();
35421 }
35422
35423 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35424 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35425 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35426 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35427 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35428 u8 active_index = 0;
35429 s32 tpt = 0;
35430
35431 + pax_track_stack();
35432 +
35433 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35434
35435 if (!ieee80211_is_data(hdr->frame_control) ||
35436 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35437 u8 valid_tx_ant = 0;
35438 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35439
35440 + pax_track_stack();
35441 +
35442 /* Override starting rate (index 0) if needed for debug purposes */
35443 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35444
35445 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35446 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35447 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35448 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35449 int pos = 0;
35450 const size_t bufsz = sizeof(buf);
35451
35452 + pax_track_stack();
35453 +
35454 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35455 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35456 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35457 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35458 const size_t bufsz = sizeof(buf);
35459 ssize_t ret;
35460
35461 + pax_track_stack();
35462 +
35463 for (i = 0; i < AC_NUM; i++) {
35464 pos += scnprintf(buf + pos, bufsz - pos,
35465 "\tcw_min\tcw_max\taifsn\ttxop\n");
35466 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35467 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35468 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35469 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35470 #endif
35471
35472 #else
35473 -#define IWL_DEBUG(__priv, level, fmt, args...)
35474 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35475 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35476 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35477 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35478 void *p, u32 len)
35479 {}
35480 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35481 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35482 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35483 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
35484
35485 /* shared structures from iwl-5000.c */
35486 extern struct iwl_mod_params iwl50_mod_params;
35487 -extern struct iwl_ops iwl5000_ops;
35488 +extern const struct iwl_ops iwl5000_ops;
35489 extern struct iwl_ucode_ops iwl5000_ucode;
35490 extern struct iwl_lib_ops iwl5000_lib;
35491 extern struct iwl_hcmd_ops iwl5000_hcmd;
35492 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35493 --- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35494 +++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35495 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35496 int buf_len = 512;
35497 size_t len = 0;
35498
35499 + pax_track_stack();
35500 +
35501 if (*ppos != 0)
35502 return 0;
35503 if (count < sizeof(buf))
35504 diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35505 --- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35506 +++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35507 @@ -708,7 +708,7 @@ out_unlock:
35508 struct lbs_debugfs_files {
35509 const char *name;
35510 int perm;
35511 - struct file_operations fops;
35512 + const struct file_operations fops;
35513 };
35514
35515 static const struct lbs_debugfs_files debugfs_files[] = {
35516 diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35517 --- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35518 +++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35519 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35520
35521 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35522
35523 - if (rts_threshold < 0 || rts_threshold > 2347)
35524 + if (rts_threshold > 2347)
35525 rts_threshold = 2347;
35526
35527 tmp = cpu_to_le32(rts_threshold);
35528 diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35529 --- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35530 +++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35531 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35532 if (cookie == NO_COOKIE)
35533 offset = pc;
35534 if (cookie == INVALID_COOKIE) {
35535 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35536 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35537 offset = pc;
35538 }
35539 if (cookie != last_cookie) {
35540 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35541 /* add userspace sample */
35542
35543 if (!mm) {
35544 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35545 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35546 return 0;
35547 }
35548
35549 cookie = lookup_dcookie(mm, s->eip, &offset);
35550
35551 if (cookie == INVALID_COOKIE) {
35552 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35553 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35554 return 0;
35555 }
35556
35557 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35558 /* ignore backtraces if failed to add a sample */
35559 if (state == sb_bt_start) {
35560 state = sb_bt_ignore;
35561 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35562 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35563 }
35564 }
35565 release_mm(mm);
35566 diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35567 --- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35568 +++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35569 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35570 }
35571
35572 if (buffer_pos == buffer_size) {
35573 - atomic_inc(&oprofile_stats.event_lost_overflow);
35574 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35575 return;
35576 }
35577
35578 diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35579 --- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35580 +++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35581 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35582 if (oprofile_ops.switch_events())
35583 return;
35584
35585 - atomic_inc(&oprofile_stats.multiplex_counter);
35586 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35587 start_switch_worker();
35588 }
35589
35590 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35591 --- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35592 +++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35593 @@ -187,7 +187,7 @@ static const struct file_operations atom
35594
35595
35596 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35597 - char const *name, atomic_t *val)
35598 + char const *name, atomic_unchecked_t *val)
35599 {
35600 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35601 &atomic_ro_fops, 0444);
35602 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35603 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35604 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35605 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35606 cpu_buf->sample_invalid_eip = 0;
35607 }
35608
35609 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35610 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35611 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35612 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35613 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35614 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35615 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35616 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35617 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35618 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35619 }
35620
35621
35622 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35623 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35624 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35625 @@ -13,11 +13,11 @@
35626 #include <asm/atomic.h>
35627
35628 struct oprofile_stat_struct {
35629 - atomic_t sample_lost_no_mm;
35630 - atomic_t sample_lost_no_mapping;
35631 - atomic_t bt_lost_no_mapping;
35632 - atomic_t event_lost_overflow;
35633 - atomic_t multiplex_counter;
35634 + atomic_unchecked_t sample_lost_no_mm;
35635 + atomic_unchecked_t sample_lost_no_mapping;
35636 + atomic_unchecked_t bt_lost_no_mapping;
35637 + atomic_unchecked_t event_lost_overflow;
35638 + atomic_unchecked_t multiplex_counter;
35639 };
35640
35641 extern struct oprofile_stat_struct oprofile_stats;
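The oprofile hunks above switch purely statistical counters from atomic_t to atomic_unchecked_t and use the matching *_unchecked accessors. With the PaX REFCOUNT feature, plain atomic_t operations gain overflow detection; the _unchecked variants opt out for counters where wrap-around is harmless, such as these lost-sample statistics. A hedged sketch of the idiom, not from the patch itself, using the hypothetical names example_stats and note_drop:

    struct example_stats {
            atomic_unchecked_t events_dropped;      /* statistics only, may wrap */
    };

    static struct example_stats stats;

    static void note_drop(void)
    {
            atomic_inc_unchecked(&stats.events_dropped);    /* no overflow trap */
    }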
35642 diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35643 --- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35644 +++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35645 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35646 return ret;
35647 }
35648
35649 -static struct sysfs_ops pdcspath_attr_ops = {
35650 +static const struct sysfs_ops pdcspath_attr_ops = {
35651 .show = pdcspath_attr_show,
35652 .store = pdcspath_attr_store,
35653 };
35654 diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35655 --- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35656 +++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35657 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35658
35659 *ppos += len;
35660
35661 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35662 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35663 }
35664
35665 #ifdef CONFIG_PARPORT_1284
35666 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35667
35668 *ppos += len;
35669
35670 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35671 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35672 }
35673 #endif /* IEEE1284.3 support. */
35674
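Both parport procfs hunks add a len > sizeof buffer test in front of copy_to_user(), so a miscomputed length can never copy kernel stack beyond the local buffer. A hedged sketch of the guard, not from the patch itself, where fill_report() and result are hypothetical:

    static int example_read(void __user *result)
    {
            char buffer[64];
            size_t len = fill_report(buffer, sizeof(buffer));   /* hypothetical helper */

            if (len > sizeof(buffer) || copy_to_user(result, buffer, len))
                    return -EFAULT;
            return 0;
    }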
35675 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35676 --- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35677 +++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35678 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35679 }
35680
35681
35682 -static struct acpi_dock_ops acpiphp_dock_ops = {
35683 +static const struct acpi_dock_ops acpiphp_dock_ops = {
35684 .handler = handle_hotplug_event_func,
35685 };
35686
35687 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35688 --- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35689 +++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35690 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35691 int (*hardware_test) (struct slot* slot, u32 value);
35692 u8 (*get_power) (struct slot* slot);
35693 int (*set_power) (struct slot* slot, int value);
35694 -};
35695 +} __no_const;
35696
35697 struct cpci_hp_controller {
35698 unsigned int irq;
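cpci_hp_controller_ops consists almost entirely of function pointers, which the constify GCC plugin bundled with this patch would normally force to be read-only. The __no_const annotation, defined elsewhere in the patch, exempts a structure whose members really are assigned at runtime. A hedged sketch of the distinction, not from the patch itself, with hypothetical structure names:

    struct probe_ops {                      /* only function pointers: gets constified */
            int (*probe)(void *dev);
    };

    struct runtime_ops {                    /* filled in by a driver at runtime */
            int (*handler)(int irq, void *ctx);
    } __no_const;                           /* ask the constify plugin to leave it writable */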
35699 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35700 --- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35701 +++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35702 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35703
35704 void compaq_nvram_init (void __iomem *rom_start)
35705 {
35706 +
35707 +#ifndef CONFIG_PAX_KERNEXEC
35708 if (rom_start) {
35709 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35710 }
35711 +#endif
35712 +
35713 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35714
35715 /* initialize our int15 lock */
35716 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35717 --- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35718 +++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35719 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35720 }
35721
35722 static struct kobj_type legacy_ktype = {
35723 - .sysfs_ops = &(struct sysfs_ops){
35724 + .sysfs_ops = &(const struct sysfs_ops){
35725 .store = legacy_store, .show = legacy_show
35726 },
35727 .release = &legacy_release,
35728 diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35729 --- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35730 +++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35731 @@ -2643,7 +2643,7 @@ error:
35732 return 0;
35733 }
35734
35735 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35736 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
35737 unsigned long offset, size_t size,
35738 enum dma_data_direction dir,
35739 struct dma_attrs *attrs)
35740 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35741 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35742 }
35743
35744 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35745 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35746 size_t size, enum dma_data_direction dir,
35747 struct dma_attrs *attrs)
35748 {
35749 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35750 }
35751 }
35752
35753 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35754 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
35755 dma_addr_t *dma_handle, gfp_t flags)
35756 {
35757 void *vaddr;
35758 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35759 return NULL;
35760 }
35761
35762 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35763 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35764 dma_addr_t dma_handle)
35765 {
35766 int order;
35767 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35768 free_pages((unsigned long)vaddr, order);
35769 }
35770
35771 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35772 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35773 int nelems, enum dma_data_direction dir,
35774 struct dma_attrs *attrs)
35775 {
35776 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35777 return nelems;
35778 }
35779
35780 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35781 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35782 enum dma_data_direction dir, struct dma_attrs *attrs)
35783 {
35784 int i;
35785 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35786 return nelems;
35787 }
35788
35789 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35790 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35791 {
35792 return !dma_addr;
35793 }
35794
35795 -struct dma_map_ops intel_dma_ops = {
35796 +const struct dma_map_ops intel_dma_ops = {
35797 .alloc_coherent = intel_alloc_coherent,
35798 .free_coherent = intel_free_coherent,
35799 .map_sg = intel_map_sg,
35800 diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35801 --- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35802 +++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35803 @@ -27,9 +27,9 @@
35804 #define MODULE_PARAM_PREFIX "pcie_aspm."
35805
35806 /* Note: those are not register definitions */
35807 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35808 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35809 -#define ASPM_STATE_L1 (4) /* L1 state */
35810 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35811 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35812 +#define ASPM_STATE_L1 (4U) /* L1 state */
35813 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35814 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35815
35816 diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35817 --- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35818 +++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35819 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35820 return ret;
35821 }
35822
35823 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35824 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35825 struct device_attribute *attr,
35826 char *buf)
35827 {
35828 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35829 }
35830
35831 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35832 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35833 struct device_attribute *attr,
35834 char *buf)
35835 {
35836 diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35837 --- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35838 +++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35839 @@ -480,7 +480,16 @@ static const struct file_operations proc
35840 static int __init pci_proc_init(void)
35841 {
35842 struct pci_dev *dev = NULL;
35843 +
35844 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35845 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35846 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35847 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35848 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35849 +#endif
35850 +#else
35851 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35852 +#endif
35853 proc_create("devices", 0, proc_bus_pci_dir,
35854 &proc_bus_pci_dev_operations);
35855 proc_initialized = 1;
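With CONFIG_GRKERNSEC_PROC_ADD the hunk above creates /proc/bus/pci through proc_mkdir_mode() instead of proc_mkdir(), limiting the directory to root (GRKERNSEC_PROC_USER) or to root plus one configured group (GRKERNSEC_PROC_USERGROUP). A hedged sketch of the same idiom, not from the patch itself, for a hypothetical directory name:

    struct proc_dir_entry *dir;

    #ifdef CONFIG_GRKERNSEC_PROC_USER
            dir = proc_mkdir_mode("bus/example", S_IRUSR | S_IXUSR, NULL);
    #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
            dir = proc_mkdir_mode("bus/example",
                                  S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
    #else
            dir = proc_mkdir("bus/example", NULL);
    #endif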
35856 diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35857 --- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35858 +++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35859 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35860 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35861 }
35862
35863 -static struct sysfs_ops pci_slot_sysfs_ops = {
35864 +static const struct sysfs_ops pci_slot_sysfs_ops = {
35865 .show = pci_slot_attr_show,
35866 .store = pci_slot_attr_store,
35867 };
35868 diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35869 --- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35870 +++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35871 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35872 return -EFAULT;
35873 }
35874 }
35875 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35876 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35877 if (!buf)
35878 return -ENOMEM;
35879
35880 diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35881 --- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35882 +++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35883 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35884 return 0;
35885 }
35886
35887 -static struct backlight_ops acer_bl_ops = {
35888 +static const struct backlight_ops acer_bl_ops = {
35889 .get_brightness = read_brightness,
35890 .update_status = update_bl_status,
35891 };
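This hunk, and the run of platform-driver hunks that follow it, make the same one-line change: backlight_ops tables that are never modified are declared const so they end up in read-only data. A hedged sketch of the resulting declaration, not from the patch itself, with the hypothetical callbacks ex_get_brightness and ex_update_status:

    static int ex_get_brightness(struct backlight_device *bd) { return 0; }
    static int ex_update_status(struct backlight_device *bd)  { return 0; }

    static const struct backlight_ops ex_bl_ops = {    /* const: lives in .rodata */
            .get_brightness = ex_get_brightness,
            .update_status  = ex_update_status,
    };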
35892 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35893 --- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35894 +++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35895 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35896 return 0;
35897 }
35898
35899 -static struct backlight_ops asus_backlight_data = {
35900 +static const struct backlight_ops asus_backlight_data = {
35901 .get_brightness = read_brightness,
35902 .update_status = set_brightness_status,
35903 };
35904 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35905 --- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35906 +++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35907 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35908 */
35909 static int read_brightness(struct backlight_device *bd);
35910 static int update_bl_status(struct backlight_device *bd);
35911 -static struct backlight_ops asusbl_ops = {
35912 +static const struct backlight_ops asusbl_ops = {
35913 .get_brightness = read_brightness,
35914 .update_status = update_bl_status,
35915 };
35916 diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35917 --- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35918 +++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35919 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35920 return set_lcd_level(b->props.brightness);
35921 }
35922
35923 -static struct backlight_ops compalbl_ops = {
35924 +static const struct backlight_ops compalbl_ops = {
35925 .get_brightness = bl_get_brightness,
35926 .update_status = bl_update_status,
35927 };
35928 diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35929 --- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35930 +++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35931 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35932 return buffer.output[1];
35933 }
35934
35935 -static struct backlight_ops dell_ops = {
35936 +static const struct backlight_ops dell_ops = {
35937 .get_brightness = dell_get_intensity,
35938 .update_status = dell_send_intensity,
35939 };
35940 diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35941 --- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35942 +++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35943 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35944 */
35945 static int read_brightness(struct backlight_device *bd);
35946 static int update_bl_status(struct backlight_device *bd);
35947 -static struct backlight_ops eeepcbl_ops = {
35948 +static const struct backlight_ops eeepcbl_ops = {
35949 .get_brightness = read_brightness,
35950 .update_status = update_bl_status,
35951 };
35952 diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35953 --- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35954 +++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35955 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35956 return ret;
35957 }
35958
35959 -static struct backlight_ops fujitsubl_ops = {
35960 +static const struct backlight_ops fujitsubl_ops = {
35961 .get_brightness = bl_get_brightness,
35962 .update_status = bl_update_status,
35963 };
35964 diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35965 --- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35966 +++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35967 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35968 return set_lcd_level(b->props.brightness);
35969 }
35970
35971 -static struct backlight_ops msibl_ops = {
35972 +static const struct backlight_ops msibl_ops = {
35973 .get_brightness = bl_get_brightness,
35974 .update_status = bl_update_status,
35975 };
35976 diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
35977 --- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35978 +++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35979 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35980 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35981 }
35982
35983 -static struct backlight_ops pcc_backlight_ops = {
35984 +static const struct backlight_ops pcc_backlight_ops = {
35985 .get_brightness = bl_get,
35986 .update_status = bl_set_status,
35987 };
35988 diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
35989 --- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35990 +++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35991 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35992 }
35993
35994 static struct backlight_device *sony_backlight_device;
35995 -static struct backlight_ops sony_backlight_ops = {
35996 +static const struct backlight_ops sony_backlight_ops = {
35997 .update_status = sony_backlight_update_status,
35998 .get_brightness = sony_backlight_get_brightness,
35999 };
36000 diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
36001 --- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
36002 +++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
36003 @@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
36004 return 0;
36005 }
36006
36007 -void static hotkey_mask_warn_incomplete_mask(void)
36008 +static void hotkey_mask_warn_incomplete_mask(void)
36009 {
36010 /* log only what the user can fix... */
36011 const u32 wantedmask = hotkey_driver_mask &
36012 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
36013 BACKLIGHT_UPDATE_HOTKEY);
36014 }
36015
36016 -static struct backlight_ops ibm_backlight_data = {
36017 +static const struct backlight_ops ibm_backlight_data = {
36018 .get_brightness = brightness_get,
36019 .update_status = brightness_update_status,
36020 };
36021 diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
36022 --- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
36023 +++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
36024 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36025 return AE_OK;
36026 }
36027
36028 -static struct backlight_ops toshiba_backlight_data = {
36029 +static const struct backlight_ops toshiba_backlight_data = {
36030 .get_brightness = get_lcd,
36031 .update_status = set_lcd_status,
36032 };
36033 diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
36034 --- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36035 +++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36036 @@ -60,7 +60,7 @@ do { \
36037 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36038 } while(0)
36039
36040 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36041 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36042 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36043
36044 /*
36045 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36046
36047 cpu = get_cpu();
36048 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36049 +
36050 + pax_open_kernel();
36051 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36052 + pax_close_kernel();
36053
36054 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36055 spin_lock_irqsave(&pnp_bios_lock, flags);
36056 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36057 :"memory");
36058 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36059
36060 + pax_open_kernel();
36061 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36062 + pax_close_kernel();
36063 +
36064 put_cpu();
36065
36066 /* If we get here and this is set then the PnP BIOS faulted on us. */
36067 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36068 return status;
36069 }
36070
36071 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36072 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36073 {
36074 int i;
36075
36076 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36077 pnp_bios_callpoint.offset = header->fields.pm16offset;
36078 pnp_bios_callpoint.segment = PNP_CS16;
36079
36080 + pax_open_kernel();
36081 +
36082 for_each_possible_cpu(i) {
36083 struct desc_struct *gdt = get_cpu_gdt_table(i);
36084 if (!gdt)
36085 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36086 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36087 (unsigned long)__va(header->fields.pm16dseg));
36088 }
36089 +
36090 + pax_close_kernel();
36091 }
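The bioscalls.c hunks bracket every write to the per-CPU GDT with pax_open_kernel()/pax_close_kernel(). Under KERNEXEC those tables sit in read-only memory, so the PaX helpers lift write protection only for the enclosed stores and restore it immediately afterwards. A hedged sketch of the bracketing idiom, not from the patch itself, with read_only_slot and new_value standing in for the protected data:

    pax_open_kernel();              /* temporarily allow writes to read-only kernel data */
    *read_only_slot = new_value;    /* e.g. a GDT descriptor, as in the hunks above */
    pax_close_kernel();             /* re-protect */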
36092 diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
36093 --- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36094 +++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36095 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36096 return 1;
36097
36098 /* check if the resource is valid */
36099 - if (*irq < 0 || *irq > 15)
36100 + if (*irq > 15)
36101 return 0;
36102
36103 /* check if the resource is reserved */
36104 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36105 return 1;
36106
36107 /* check if the resource is valid */
36108 - if (*dma < 0 || *dma == 4 || *dma > 7)
36109 + if (*dma == 4 || *dma > 7)
36110 return 0;
36111
36112 /* check if the resource is reserved */
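The rndis_wlan and pnp/resource hunks drop `< 0` tests on unsigned values (rts_threshold, *irq, *dma); such comparisons are always false, so the remaining upper-bound check already rejects every out-of-range input, including a "negative" one that wraps to a huge value. A small self-contained user-space illustration (not from the patch itself) of why the dropped test was dead code:

    #include <stdio.h>

    int main(void)
    {
            unsigned int irq = (unsigned int)-1;    /* "negative" input wraps around */

            if (irq < 0)                            /* always false for unsigned types */
                    puts("never printed");
            if (irq > 15)                           /* the upper bound alone catches it */
                    puts("rejected: out of range");
            return 0;
    }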
36113 diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
36114 --- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36115 +++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36116 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
36117 struct bq27x00_access_methods {
36118 int (*read)(u8 reg, int *rt_value, int b_single,
36119 struct bq27x00_device_info *di);
36120 -};
36121 +} __no_const;
36122
36123 struct bq27x00_device_info {
36124 struct device *dev;
36125 diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
36126 --- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36127 +++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36128 @@ -14,6 +14,7 @@
36129 #include <linux/module.h>
36130 #include <linux/rtc.h>
36131 #include <linux/sched.h>
36132 +#include <linux/grsecurity.h>
36133 #include "rtc-core.h"
36134
36135 static dev_t rtc_devt;
36136 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36137 if (copy_from_user(&tm, uarg, sizeof(tm)))
36138 return -EFAULT;
36139
36140 + gr_log_timechange();
36141 +
36142 return rtc_set_time(rtc, &tm);
36143
36144 case RTC_PIE_ON:
36145 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
36146 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36147 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36148 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36149 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36150 {
36151 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36152 - (long)atomic_long_read(&perf_stats.qdio_int));
36153 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36154 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36155 - (long)atomic_long_read(&perf_stats.pci_int));
36156 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36157 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36158 - (long)atomic_long_read(&perf_stats.thin_int));
36159 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36160 seq_printf(m, "\n");
36161 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36162 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
36163 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36164 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36165 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
36166 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36167 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36168 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
36169 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36170 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36171 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36172 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36173 - (long)atomic_long_read(&perf_stats.thinint_inbound),
36174 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36175 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36176 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36177 seq_printf(m, "\n");
36178 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36179 - (long)atomic_long_read(&perf_stats.siga_in));
36180 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36181 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36182 - (long)atomic_long_read(&perf_stats.siga_out));
36183 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36184 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36185 - (long)atomic_long_read(&perf_stats.siga_sync));
36186 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36187 seq_printf(m, "\n");
36188 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36189 - (long)atomic_long_read(&perf_stats.inbound_handler));
36190 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36191 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36192 - (long)atomic_long_read(&perf_stats.outbound_handler));
36193 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36194 seq_printf(m, "\n");
36195 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36196 - (long)atomic_long_read(&perf_stats.fast_requeue));
36197 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36198 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36199 - (long)atomic_long_read(&perf_stats.outbound_target_full));
36200 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36201 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36202 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36203 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36204 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36205 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
36206 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36207 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36208 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36209 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36210 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36211 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36212 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36213 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36214 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36215 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36216 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36217 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36218 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36219 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36220 seq_printf(m, "\n");
36221 return 0;
36222 }
36223 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36224 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36225 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36226 @@ -13,46 +13,46 @@
36227
36228 struct qdio_perf_stats {
36229 /* interrupt handler calls */
36230 - atomic_long_t qdio_int;
36231 - atomic_long_t pci_int;
36232 - atomic_long_t thin_int;
36233 + atomic_long_unchecked_t qdio_int;
36234 + atomic_long_unchecked_t pci_int;
36235 + atomic_long_unchecked_t thin_int;
36236
36237 /* tasklet runs */
36238 - atomic_long_t tasklet_inbound;
36239 - atomic_long_t tasklet_outbound;
36240 - atomic_long_t tasklet_thinint;
36241 - atomic_long_t tasklet_thinint_loop;
36242 - atomic_long_t thinint_inbound;
36243 - atomic_long_t thinint_inbound_loop;
36244 - atomic_long_t thinint_inbound_loop2;
36245 + atomic_long_unchecked_t tasklet_inbound;
36246 + atomic_long_unchecked_t tasklet_outbound;
36247 + atomic_long_unchecked_t tasklet_thinint;
36248 + atomic_long_unchecked_t tasklet_thinint_loop;
36249 + atomic_long_unchecked_t thinint_inbound;
36250 + atomic_long_unchecked_t thinint_inbound_loop;
36251 + atomic_long_unchecked_t thinint_inbound_loop2;
36252
36253 /* signal adapter calls */
36254 - atomic_long_t siga_out;
36255 - atomic_long_t siga_in;
36256 - atomic_long_t siga_sync;
36257 + atomic_long_unchecked_t siga_out;
36258 + atomic_long_unchecked_t siga_in;
36259 + atomic_long_unchecked_t siga_sync;
36260
36261 /* misc */
36262 - atomic_long_t inbound_handler;
36263 - atomic_long_t outbound_handler;
36264 - atomic_long_t fast_requeue;
36265 - atomic_long_t outbound_target_full;
36266 + atomic_long_unchecked_t inbound_handler;
36267 + atomic_long_unchecked_t outbound_handler;
36268 + atomic_long_unchecked_t fast_requeue;
36269 + atomic_long_unchecked_t outbound_target_full;
36270
36271 /* for debugging */
36272 - atomic_long_t debug_tl_out_timer;
36273 - atomic_long_t debug_stop_polling;
36274 - atomic_long_t debug_eqbs_all;
36275 - atomic_long_t debug_eqbs_incomplete;
36276 - atomic_long_t debug_sqbs_all;
36277 - atomic_long_t debug_sqbs_incomplete;
36278 + atomic_long_unchecked_t debug_tl_out_timer;
36279 + atomic_long_unchecked_t debug_stop_polling;
36280 + atomic_long_unchecked_t debug_eqbs_all;
36281 + atomic_long_unchecked_t debug_eqbs_incomplete;
36282 + atomic_long_unchecked_t debug_sqbs_all;
36283 + atomic_long_unchecked_t debug_sqbs_incomplete;
36284 };
36285
36286 extern struct qdio_perf_stats perf_stats;
36287 extern int qdio_performance_stats;
36288
36289 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
36290 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36291 {
36292 if (qdio_performance_stats)
36293 - atomic_long_inc(count);
36294 + atomic_long_inc_unchecked(count);
36295 }
36296
36297 int qdio_setup_perf_stats(void);
36298 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36299 --- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36300 +++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36301 @@ -471,7 +471,7 @@ struct adapter_ops
36302 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36303 /* Administrative operations */
36304 int (*adapter_comm)(struct aac_dev * dev, int comm);
36305 -};
36306 +} __no_const;
36307
36308 /*
36309 * Define which interrupt handler needs to be installed
36310 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36311 --- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36312 +++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36313 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36314 u32 actual_fibsize64, actual_fibsize = 0;
36315 int i;
36316
36317 + pax_track_stack();
36318
36319 if (dev->in_reset) {
36320 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36321 diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36322 --- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36323 +++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36324 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36325 flash_error_table[i].reason);
36326 }
36327
36328 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36329 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36330 asd_show_update_bios, asd_store_update_bios);
36331
36332 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36333 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36334 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36335 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36336 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
36337 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36338 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36339 u32 *nvecs, u32 *maxvec);
36340 -};
36341 +} __no_const;
36342 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36343
36344 struct bfa_iocfc_s {
36345 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36346 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36347 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36348 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36349 bfa_ioc_disable_cbfn_t disable_cbfn;
36350 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36351 bfa_ioc_reset_cbfn_t reset_cbfn;
36352 -};
36353 +} __no_const;
36354
36355 /**
36356 * Heartbeat failure notification queue element.
36357 diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36358 --- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36359 +++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36360 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36361 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36362 *PrototypeHostAdapter)
36363 {
36364 + pax_track_stack();
36365 +
36366 /*
36367 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36368 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36369 diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36370 --- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36371 +++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36372 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36373 dma_addr_t addr;
36374 ulong flags = 0;
36375
36376 + pax_track_stack();
36377 +
36378 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36379 // get user msg size in u32s
36380 if(get_user(size, &user_msg[0])){
36381 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36382 s32 rcode;
36383 dma_addr_t addr;
36384
36385 + pax_track_stack();
36386 +
36387 memset(msg, 0 , sizeof(msg));
36388 len = scsi_bufflen(cmd);
36389 direction = 0x00000000;
36390 diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36391 --- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36392 +++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36393 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36394 struct hostdata *ha;
36395 char name[16];
36396
36397 + pax_track_stack();
36398 +
36399 sprintf(name, "%s%d", driver_name, j);
36400
36401 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36402 diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36403 --- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36404 +++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36405 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36406 size_t rlen;
36407 size_t dlen;
36408
36409 + pax_track_stack();
36410 +
36411 fiph = (struct fip_header *)skb->data;
36412 sub = fiph->fip_subcode;
36413 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36414 diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36415 --- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36416 +++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36417 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36418 /* Start local port initiatialization */
36419
36420 lp->link_up = 0;
36421 - lp->tt = fnic_transport_template;
36422 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36423
36424 lp->max_retry_count = fnic->config.flogi_retries;
36425 lp->max_rport_retry_count = fnic->config.plogi_retries;
36426 diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36427 --- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36428 +++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36429 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36430 ulong flags;
36431 gdth_ha_str *ha;
36432
36433 + pax_track_stack();
36434 +
36435 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36436 return -EFAULT;
36437 ha = gdth_find_ha(ldrv.ionode);
36438 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36439 gdth_ha_str *ha;
36440 int rval;
36441
36442 + pax_track_stack();
36443 +
36444 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36445 res.number >= MAX_HDRIVES)
36446 return -EFAULT;
36447 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36448 gdth_ha_str *ha;
36449 int rval;
36450
36451 + pax_track_stack();
36452 +
36453 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36454 return -EFAULT;
36455 ha = gdth_find_ha(gen.ionode);
36456 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36457 int i;
36458 gdth_cmd_str gdtcmd;
36459 char cmnd[MAX_COMMAND_SIZE];
36460 +
36461 + pax_track_stack();
36462 +
36463 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36464
36465 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36466 diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36467 --- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36468 +++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36469 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36470 ulong64 paddr;
36471
36472 char cmnd[MAX_COMMAND_SIZE];
36473 +
36474 + pax_track_stack();
36475 +
36476 memset(cmnd, 0xff, 12);
36477 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36478
36479 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36480 gdth_hget_str *phg;
36481 char cmnd[MAX_COMMAND_SIZE];
36482
36483 + pax_track_stack();
36484 +
36485 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36486 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36487 if (!gdtcmd || !estr)
36488 diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36489 --- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36490 +++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36491 @@ -40,7 +40,7 @@
36492 #include "scsi_logging.h"
36493
36494
36495 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36496 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36497
36498
36499 static void scsi_host_cls_release(struct device *dev)
36500 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36501 * subtract one because we increment first then return, but we need to
36502 * know what the next host number was before increment
36503 */
36504 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36505 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36506 shost->dma_channel = 0xff;
36507
36508 /* These three are default values which can be overridden */
36509 diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36510 --- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36511 +++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36512 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36513 return true;
36514 }
36515
36516 -static struct ata_port_operations ipr_sata_ops = {
36517 +static const struct ata_port_operations ipr_sata_ops = {
36518 .phy_reset = ipr_ata_phy_reset,
36519 .hardreset = ipr_sata_reset,
36520 .post_internal_cmd = ipr_ata_post_internal,
36521 diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36522 --- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36523 +++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36524 @@ -1027,7 +1027,7 @@ typedef struct {
36525 int (*intr)(struct ips_ha *);
36526 void (*enableint)(struct ips_ha *);
36527 uint32_t (*statupd)(struct ips_ha *);
36528 -} ips_hw_func_t;
36529 +} __no_const ips_hw_func_t;
36530
36531 typedef struct ips_ha {
36532 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36533 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c
36534 --- linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
36535 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
36536 @@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
36537 struct fc_disc *disc;
36538
36539 if (!lport->tt.disc_start)
36540 - lport->tt.disc_start = fc_disc_start;
36541 + *(void **)&lport->tt.disc_start = fc_disc_start;
36542
36543 if (!lport->tt.disc_stop)
36544 - lport->tt.disc_stop = fc_disc_stop;
36545 + *(void **)&lport->tt.disc_stop = fc_disc_stop;
36546
36547 if (!lport->tt.disc_stop_final)
36548 - lport->tt.disc_stop_final = fc_disc_stop_final;
36549 + *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
36550
36551 if (!lport->tt.disc_recv_req)
36552 - lport->tt.disc_recv_req = fc_disc_recv_req;
36553 + *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
36554
36555 disc = &lport->disc;
36556 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
36557 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c
36558 --- linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
36559 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
36560 @@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
36561 int fc_elsct_init(struct fc_lport *lport)
36562 {
36563 if (!lport->tt.elsct_send)
36564 - lport->tt.elsct_send = fc_elsct_send;
36565 + *(void **)&lport->tt.elsct_send = fc_elsct_send;
36566
36567 return 0;
36568 }
36569 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36570 --- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36571 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
36572 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
36573 * all together if not used XXX
36574 */
36575 struct {
36576 - atomic_t no_free_exch;
36577 - atomic_t no_free_exch_xid;
36578 - atomic_t xid_not_found;
36579 - atomic_t xid_busy;
36580 - atomic_t seq_not_found;
36581 - atomic_t non_bls_resp;
36582 + atomic_unchecked_t no_free_exch;
36583 + atomic_unchecked_t no_free_exch_xid;
36584 + atomic_unchecked_t xid_not_found;
36585 + atomic_unchecked_t xid_busy;
36586 + atomic_unchecked_t seq_not_found;
36587 + atomic_unchecked_t non_bls_resp;
36588 } stats;
36589 };
36590 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36591 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36592 /* allocate memory for exchange */
36593 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36594 if (!ep) {
36595 - atomic_inc(&mp->stats.no_free_exch);
36596 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36597 goto out;
36598 }
36599 memset(ep, 0, sizeof(*ep));
36600 @@ -557,7 +557,7 @@ out:
36601 return ep;
36602 err:
36603 spin_unlock_bh(&pool->lock);
36604 - atomic_inc(&mp->stats.no_free_exch_xid);
36605 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36606 mempool_free(ep, mp->ep_pool);
36607 return NULL;
36608 }
36609 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36610 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36611 ep = fc_exch_find(mp, xid);
36612 if (!ep) {
36613 - atomic_inc(&mp->stats.xid_not_found);
36614 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36615 reject = FC_RJT_OX_ID;
36616 goto out;
36617 }
36618 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36619 ep = fc_exch_find(mp, xid);
36620 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36621 if (ep) {
36622 - atomic_inc(&mp->stats.xid_busy);
36623 + atomic_inc_unchecked(&mp->stats.xid_busy);
36624 reject = FC_RJT_RX_ID;
36625 goto rel;
36626 }
36627 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36628 }
36629 xid = ep->xid; /* get our XID */
36630 } else if (!ep) {
36631 - atomic_inc(&mp->stats.xid_not_found);
36632 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36633 reject = FC_RJT_RX_ID; /* XID not found */
36634 goto out;
36635 }
36636 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36637 } else {
36638 sp = &ep->seq;
36639 if (sp->id != fh->fh_seq_id) {
36640 - atomic_inc(&mp->stats.seq_not_found);
36641 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36642 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36643 goto rel;
36644 }
36645 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36646
36647 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36648 if (!ep) {
36649 - atomic_inc(&mp->stats.xid_not_found);
36650 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36651 goto out;
36652 }
36653 if (ep->esb_stat & ESB_ST_COMPLETE) {
36654 - atomic_inc(&mp->stats.xid_not_found);
36655 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36656 goto out;
36657 }
36658 if (ep->rxid == FC_XID_UNKNOWN)
36659 ep->rxid = ntohs(fh->fh_rx_id);
36660 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36661 - atomic_inc(&mp->stats.xid_not_found);
36662 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36663 goto rel;
36664 }
36665 if (ep->did != ntoh24(fh->fh_s_id) &&
36666 ep->did != FC_FID_FLOGI) {
36667 - atomic_inc(&mp->stats.xid_not_found);
36668 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36669 goto rel;
36670 }
36671 sof = fr_sof(fp);
36672 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36673 } else {
36674 sp = &ep->seq;
36675 if (sp->id != fh->fh_seq_id) {
36676 - atomic_inc(&mp->stats.seq_not_found);
36677 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36678 goto rel;
36679 }
36680 }
36681 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36682 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36683
36684 if (!sp)
36685 - atomic_inc(&mp->stats.xid_not_found);
36686 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36687 else
36688 - atomic_inc(&mp->stats.non_bls_resp);
36689 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36690
36691 fc_frame_free(fp);
36692 }
36693 @@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
36694 int fc_exch_init(struct fc_lport *lp)
36695 {
36696 if (!lp->tt.seq_start_next)
36697 - lp->tt.seq_start_next = fc_seq_start_next;
36698 + *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
36699
36700 if (!lp->tt.exch_seq_send)
36701 - lp->tt.exch_seq_send = fc_exch_seq_send;
36702 + *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
36703
36704 if (!lp->tt.seq_send)
36705 - lp->tt.seq_send = fc_seq_send;
36706 + *(void **)&lp->tt.seq_send = fc_seq_send;
36707
36708 if (!lp->tt.seq_els_rsp_send)
36709 - lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36710 + *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36711
36712 if (!lp->tt.exch_done)
36713 - lp->tt.exch_done = fc_exch_done;
36714 + *(void **)&lp->tt.exch_done = fc_exch_done;
36715
36716 if (!lp->tt.exch_mgr_reset)
36717 - lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36718 + *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36719
36720 if (!lp->tt.seq_exch_abort)
36721 - lp->tt.seq_exch_abort = fc_seq_exch_abort;
36722 + *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
36723
36724 /*
36725 * Initialize fc_cpu_mask and fc_cpu_order. The
36726 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c
36727 --- linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
36728 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
36729 @@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
36730 struct fc_fcp_internal *si;
36731
36732 if (!lp->tt.fcp_cmd_send)
36733 - lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36734 + *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36735
36736 if (!lp->tt.fcp_cleanup)
36737 - lp->tt.fcp_cleanup = fc_fcp_cleanup;
36738 + *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
36739
36740 if (!lp->tt.fcp_abort_io)
36741 - lp->tt.fcp_abort_io = fc_fcp_abort_io;
36742 + *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
36743
36744 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
36745 if (!si)
36746 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c
36747 --- linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
36748 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
36749 @@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
36750 mutex_lock(&lport->lp_mutex);
36751 lport->state = LPORT_ST_DISABLED;
36752 lport->link_up = 0;
36753 - lport->tt.frame_send = fc_frame_drop;
36754 + *(void **)&lport->tt.frame_send = fc_frame_drop;
36755 mutex_unlock(&lport->lp_mutex);
36756
36757 lport->tt.fcp_abort_io(lport);
36758 @@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
36759 int fc_lport_init(struct fc_lport *lport)
36760 {
36761 if (!lport->tt.lport_recv)
36762 - lport->tt.lport_recv = fc_lport_recv_req;
36763 + *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
36764
36765 if (!lport->tt.lport_reset)
36766 - lport->tt.lport_reset = fc_lport_reset;
36767 + *(void **)&lport->tt.lport_reset = fc_lport_reset;
36768
36769 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
36770 fc_host_node_name(lport->host) = lport->wwnn;
36771 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c
36772 --- linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
36773 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
36774 @@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
36775 int fc_rport_init(struct fc_lport *lport)
36776 {
36777 if (!lport->tt.rport_lookup)
36778 - lport->tt.rport_lookup = fc_rport_lookup;
36779 + *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
36780
36781 if (!lport->tt.rport_create)
36782 - lport->tt.rport_create = fc_rport_create;
36783 + *(void **)&lport->tt.rport_create = fc_rport_create;
36784
36785 if (!lport->tt.rport_login)
36786 - lport->tt.rport_login = fc_rport_login;
36787 + *(void **)&lport->tt.rport_login = fc_rport_login;
36788
36789 if (!lport->tt.rport_logoff)
36790 - lport->tt.rport_logoff = fc_rport_logoff;
36791 + *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
36792
36793 if (!lport->tt.rport_recv_req)
36794 - lport->tt.rport_recv_req = fc_rport_recv_req;
36795 + *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
36796
36797 if (!lport->tt.rport_flush_queue)
36798 - lport->tt.rport_flush_queue = fc_rport_flush_queue;
36799 + *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
36800
36801 if (!lport->tt.rport_destroy)
36802 - lport->tt.rport_destroy = fc_rport_destroy;
36803 + *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
36804
36805 return 0;
36806 }
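The libfc hunks above (fc_disc, fc_elsct, fc_exch, fc_fcp, fc_lport, fc_rport) rewrite the default-handler assignments as *(void **)&lport->tt.xxx = ... . With the function-pointer members of the transport template constified by the plugin, a plain assignment no longer compiles; writing through the member's address as a void ** keeps the one-time initialisation working. A hedged, generic sketch, not from the patch itself, with the hypothetical names struct template and default_handler:

    struct template {
            int (* const handler)(void *arg);       /* constified function pointer */
    };

    static int default_handler(void *arg) { return 0; }

    static void template_init(struct template *t)
    {
            if (!t->handler)
                    *(void **)&t->handler = default_handler;  /* write past the const qualifier */
    }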
36807 diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36808 --- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36809 +++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36810 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36811 }
36812 }
36813
36814 -static struct ata_port_operations sas_sata_ops = {
36815 +static const struct ata_port_operations sas_sata_ops = {
36816 .phy_reset = sas_ata_phy_reset,
36817 .post_internal_cmd = sas_ata_post_internal,
36818 .qc_defer = ata_std_qc_defer,
36819 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36820 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36821 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36822 @@ -124,7 +124,7 @@ struct lpfc_debug {
36823 int len;
36824 };
36825
36826 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36827 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36828 static unsigned long lpfc_debugfs_start_time = 0L;
36829
36830 /**
36831 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36832 lpfc_debugfs_enable = 0;
36833
36834 len = 0;
36835 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36836 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36837 (lpfc_debugfs_max_disc_trc - 1);
36838 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36839 dtp = vport->disc_trc + i;
36840 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36841 lpfc_debugfs_enable = 0;
36842
36843 len = 0;
36844 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36845 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36846 (lpfc_debugfs_max_slow_ring_trc - 1);
36847 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36848 dtp = phba->slow_ring_trc + i;
36849 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36850 uint32_t *ptr;
36851 char buffer[1024];
36852
36853 + pax_track_stack();
36854 +
36855 off = 0;
36856 spin_lock_irq(&phba->hbalock);
36857
36858 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36859 !vport || !vport->disc_trc)
36860 return;
36861
36862 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36863 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36864 (lpfc_debugfs_max_disc_trc - 1);
36865 dtp = vport->disc_trc + index;
36866 dtp->fmt = fmt;
36867 dtp->data1 = data1;
36868 dtp->data2 = data2;
36869 dtp->data3 = data3;
36870 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36871 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36872 dtp->jif = jiffies;
36873 #endif
36874 return;
36875 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36876 !phba || !phba->slow_ring_trc)
36877 return;
36878
36879 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36880 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36881 (lpfc_debugfs_max_slow_ring_trc - 1);
36882 dtp = phba->slow_ring_trc + index;
36883 dtp->fmt = fmt;
36884 dtp->data1 = data1;
36885 dtp->data2 = data2;
36886 dtp->data3 = data3;
36887 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36888 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36889 dtp->jif = jiffies;
36890 #endif
36891 return;
36892 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36893 "slow_ring buffer\n");
36894 goto debug_failed;
36895 }
36896 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36897 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36898 memset(phba->slow_ring_trc, 0,
36899 (sizeof(struct lpfc_debugfs_trc) *
36900 lpfc_debugfs_max_slow_ring_trc));
36901 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36902 "buffer\n");
36903 goto debug_failed;
36904 }
36905 - atomic_set(&vport->disc_trc_cnt, 0);
36906 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36907
36908 snprintf(name, sizeof(name), "discovery_trace");
36909 vport->debug_disc_trc =
36910 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36911 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36912 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36913 @@ -400,7 +400,7 @@ struct lpfc_vport {
36914 struct dentry *debug_nodelist;
36915 struct dentry *vport_debugfs_root;
36916 struct lpfc_debugfs_trc *disc_trc;
36917 - atomic_t disc_trc_cnt;
36918 + atomic_unchecked_t disc_trc_cnt;
36919 #endif
36920 uint8_t stat_data_enabled;
36921 uint8_t stat_data_blocked;
36922 @@ -725,8 +725,8 @@ struct lpfc_hba {
36923 struct timer_list fabric_block_timer;
36924 unsigned long bit_flags;
36925 #define FABRIC_COMANDS_BLOCKED 0
36926 - atomic_t num_rsrc_err;
36927 - atomic_t num_cmd_success;
36928 + atomic_unchecked_t num_rsrc_err;
36929 + atomic_unchecked_t num_cmd_success;
36930 unsigned long last_rsrc_error_time;
36931 unsigned long last_ramp_down_time;
36932 unsigned long last_ramp_up_time;
36933 @@ -740,7 +740,7 @@ struct lpfc_hba {
36934 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36935 struct dentry *debug_slow_ring_trc;
36936 struct lpfc_debugfs_trc *slow_ring_trc;
36937 - atomic_t slow_ring_trc_cnt;
36938 + atomic_unchecked_t slow_ring_trc_cnt;
36939 #endif
36940
36941 /* Used for deferred freeing of ELS data buffers */
36942 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36943 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36944 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36945 @@ -8021,8 +8021,10 @@ lpfc_init(void)
36946 printk(LPFC_COPYRIGHT "\n");
36947
36948 if (lpfc_enable_npiv) {
36949 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36950 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36951 + pax_open_kernel();
36952 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36953 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36954 + pax_close_kernel();
36955 }
36956 lpfc_transport_template =
36957 fc_attach_transport(&lpfc_transport_functions);
36958 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36959 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36960 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36961 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36962 uint32_t evt_posted;
36963
36964 spin_lock_irqsave(&phba->hbalock, flags);
36965 - atomic_inc(&phba->num_rsrc_err);
36966 + atomic_inc_unchecked(&phba->num_rsrc_err);
36967 phba->last_rsrc_error_time = jiffies;
36968
36969 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36970 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36971 unsigned long flags;
36972 struct lpfc_hba *phba = vport->phba;
36973 uint32_t evt_posted;
36974 - atomic_inc(&phba->num_cmd_success);
36975 + atomic_inc_unchecked(&phba->num_cmd_success);
36976
36977 if (vport->cfg_lun_queue_depth <= queue_depth)
36978 return;
36979 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36980 int i;
36981 struct lpfc_rport_data *rdata;
36982
36983 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36984 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36985 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36986 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36987
36988 vports = lpfc_create_vport_work_array(phba);
36989 if (vports != NULL)
36990 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36991 }
36992 }
36993 lpfc_destroy_vport_work_array(phba, vports);
36994 - atomic_set(&phba->num_rsrc_err, 0);
36995 - atomic_set(&phba->num_cmd_success, 0);
36996 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36997 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36998 }
36999
37000 /**
37001 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37002 }
37003 }
37004 lpfc_destroy_vport_work_array(phba, vports);
37005 - atomic_set(&phba->num_rsrc_err, 0);
37006 - atomic_set(&phba->num_cmd_success, 0);
37007 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37008 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37009 }
37010
37011 /**
37012 diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
37013 --- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
37014 +++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
37015 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37016 int rval;
37017 int i;
37018
37019 + pax_track_stack();
37020 +
37021 // Allocate memory for the base list of scb for management module.
37022 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37023
37024 diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
37025 --- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
37026 +++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
37027 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37028 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37029 int ret;
37030
37031 + pax_track_stack();
37032 +
37033 or = osd_start_request(od, GFP_KERNEL);
37034 if (!or)
37035 return -ENOMEM;
37036 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
37037 --- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
37038 +++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
37039 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37040 res->scsi_dev = scsi_dev;
37041 scsi_dev->hostdata = res;
37042 res->change_detected = 0;
37043 - atomic_set(&res->read_failures, 0);
37044 - atomic_set(&res->write_failures, 0);
37045 + atomic_set_unchecked(&res->read_failures, 0);
37046 + atomic_set_unchecked(&res->write_failures, 0);
37047 rc = 0;
37048 }
37049 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37050 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37051
37052 /* If this was a SCSI read/write command keep count of errors */
37053 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37054 - atomic_inc(&res->read_failures);
37055 + atomic_inc_unchecked(&res->read_failures);
37056 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37057 - atomic_inc(&res->write_failures);
37058 + atomic_inc_unchecked(&res->write_failures);
37059
37060 if (!RES_IS_GSCSI(res->cfg_entry) &&
37061 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37062 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37063
37064 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37065 /* add resources only after host is added into system */
37066 - if (!atomic_read(&pinstance->expose_resources))
37067 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37068 return;
37069
37070 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37071 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37072 init_waitqueue_head(&pinstance->reset_wait_q);
37073
37074 atomic_set(&pinstance->outstanding_cmds, 0);
37075 - atomic_set(&pinstance->expose_resources, 0);
37076 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37077
37078 INIT_LIST_HEAD(&pinstance->free_res_q);
37079 INIT_LIST_HEAD(&pinstance->used_res_q);
37080 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37081 /* Schedule worker thread to handle CCN and take care of adding and
37082 * removing devices to OS
37083 */
37084 - atomic_set(&pinstance->expose_resources, 1);
37085 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37086 schedule_work(&pinstance->worker_q);
37087 return rc;
37088
37089 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
37090 --- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37091 +++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37092 @@ -690,7 +690,7 @@ struct pmcraid_instance {
37093 atomic_t outstanding_cmds;
37094
37095 /* should add/delete resources to mid-layer now ?*/
37096 - atomic_t expose_resources;
37097 + atomic_unchecked_t expose_resources;
37098
37099 /* Tasklet to handle deferred processing */
37100 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37101 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37102 struct list_head queue; /* link to "to be exposed" resources */
37103 struct pmcraid_config_table_entry cfg_entry;
37104 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37105 - atomic_t read_failures; /* count of failed READ commands */
37106 - atomic_t write_failures; /* count of failed WRITE commands */
37107 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37108 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37109
37110 /* To indicate add/delete/modify during CCN */
37111 u8 change_detected;
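The lpfc and pmcraid hunks above convert counters that only feed statistics or trace sequence numbers (disc_trc_cnt, num_rsrc_err, read_failures, write_failures and so on) from atomic_t to atomic_unchecked_t, and switch the call sites to the matching *_unchecked accessors. The split is not explained in the patch itself; my assumption is that PaX instruments the ordinary atomic operations with reference-count overflow detection, so counters that are allowed to wrap are moved to an unchecked variant that skips the check. The toy program below only illustrates that division of labour; checked_counter_t, unchecked_counter_t and the helpers are invented for the example and are not the real PaX implementation.

#include <limits.h>
#include <stdio.h>

/* Invented stand-ins: a "checked" counter that refuses to overflow
 * (think refcounts) and an "unchecked" one that may simply wrap
 * (think read_failures or a trace sequence number). */
typedef struct { long v; } checked_counter_t;
typedef struct { long v; } unchecked_counter_t;

static void checked_inc(checked_counter_t *c)
{
	if (c->v == LONG_MAX) {
		fprintf(stderr, "counter overflow caught\n");
		return;
	}
	c->v++;
}

static void unchecked_inc(unchecked_counter_t *c)
{
	c->v++;	/* wrapping is harmless for a statistic */
}

int main(void)
{
	checked_counter_t refs = { 0 };
	unchecked_counter_t read_failures = { 0 };

	checked_inc(&refs);
	unchecked_inc(&read_failures);
	printf("refs=%ld read_failures=%ld\n", refs.v, read_failures.v);
	return 0;
}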
37112 diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
37113 --- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37114 +++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37115 @@ -2089,7 +2089,7 @@ struct isp_operations {
37116
37117 int (*get_flash_version) (struct scsi_qla_host *, void *);
37118 int (*start_scsi) (srb_t *);
37119 -};
37120 +} __no_const;
37121
37122 /* MSI-X Support *************************************************************/
37123
37124 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
37125 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37126 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37127 @@ -240,7 +240,7 @@ struct ddb_entry {
37128 atomic_t retry_relogin_timer; /* Min Time between relogins
37129 * (4000 only) */
37130 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37131 - atomic_t relogin_retry_count; /* Num of times relogin has been
37132 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37133 * retried */
37134
37135 uint16_t port;
37136 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
37137 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37138 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37139 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37140 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37141 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37142 atomic_set(&ddb_entry->relogin_timer, 0);
37143 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37144 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37145 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37146 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37147 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37148 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37149 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37150 atomic_set(&ddb_entry->port_down_timer,
37151 ha->port_down_retry_count);
37152 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37153 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37154 atomic_set(&ddb_entry->relogin_timer, 0);
37155 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37156 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37157 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
37158 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37159 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37160 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37161 ddb_entry->fw_ddb_device_state ==
37162 DDB_DS_SESSION_FAILED) {
37163 /* Reset retry relogin timer */
37164 - atomic_inc(&ddb_entry->relogin_retry_count);
37165 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37166 DEBUG2(printk("scsi%ld: index[%d] relogin"
37167 " timed out-retrying"
37168 " relogin (%d)\n",
37169 ha->host_no,
37170 ddb_entry->fw_ddb_index,
37171 - atomic_read(&ddb_entry->
37172 + atomic_read_unchecked(&ddb_entry->
37173 relogin_retry_count))
37174 );
37175 start_dpc++;
37176 diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
37177 --- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37178 +++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37179 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37180 unsigned long timeout;
37181 int rtn = 0;
37182
37183 - atomic_inc(&cmd->device->iorequest_cnt);
37184 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37185
37186 /* check if the device is still usable */
37187 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37188 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
37189 --- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37190 +++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37191 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37192 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37193 unsigned char *cmd = (unsigned char *)scp->cmnd;
37194
37195 + pax_track_stack();
37196 +
37197 if ((errsts = check_readiness(scp, 1, devip)))
37198 return errsts;
37199 memset(arr, 0, sizeof(arr));
37200 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37201 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37202 unsigned char *cmd = (unsigned char *)scp->cmnd;
37203
37204 + pax_track_stack();
37205 +
37206 if ((errsts = check_readiness(scp, 1, devip)))
37207 return errsts;
37208 memset(arr, 0, sizeof(arr));
37209 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
37210 --- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37211 +++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37212 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37213
37214 scsi_init_cmd_errh(cmd);
37215 cmd->result = DID_NO_CONNECT << 16;
37216 - atomic_inc(&cmd->device->iorequest_cnt);
37217 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37218
37219 /*
37220 * SCSI request completion path will do scsi_device_unbusy(),
37221 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37222 */
37223 cmd->serial_number = 0;
37224
37225 - atomic_inc(&cmd->device->iodone_cnt);
37226 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37227 if (cmd->result)
37228 - atomic_inc(&cmd->device->ioerr_cnt);
37229 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37230
37231 disposition = scsi_decide_disposition(cmd);
37232 if (disposition != SUCCESS &&
37233 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
37234 --- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37235 +++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37236 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37237 char *buf) \
37238 { \
37239 struct scsi_device *sdev = to_scsi_device(dev); \
37240 - unsigned long long count = atomic_read(&sdev->field); \
37241 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37242 return snprintf(buf, 20, "0x%llx\n", count); \
37243 } \
37244 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37245 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
37246 --- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37247 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37248 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37249 * Netlink Infrastructure
37250 */
37251
37252 -static atomic_t fc_event_seq;
37253 +static atomic_unchecked_t fc_event_seq;
37254
37255 /**
37256 * fc_get_event_number - Obtain the next sequential FC event number
37257 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37258 u32
37259 fc_get_event_number(void)
37260 {
37261 - return atomic_add_return(1, &fc_event_seq);
37262 + return atomic_add_return_unchecked(1, &fc_event_seq);
37263 }
37264 EXPORT_SYMBOL(fc_get_event_number);
37265
37266 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37267 {
37268 int error;
37269
37270 - atomic_set(&fc_event_seq, 0);
37271 + atomic_set_unchecked(&fc_event_seq, 0);
37272
37273 error = transport_class_register(&fc_host_class);
37274 if (error)
37275 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
37276 --- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37277 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37278 @@ -81,7 +81,7 @@ struct iscsi_internal {
37279 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37280 };
37281
37282 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37283 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37284 static struct workqueue_struct *iscsi_eh_timer_workq;
37285
37286 /*
37287 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37288 int err;
37289
37290 ihost = shost->shost_data;
37291 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37292 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37293
37294 if (id == ISCSI_MAX_TARGET) {
37295 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37296 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37297 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37298 ISCSI_TRANSPORT_VERSION);
37299
37300 - atomic_set(&iscsi_session_nr, 0);
37301 + atomic_set_unchecked(&iscsi_session_nr, 0);
37302
37303 err = class_register(&iscsi_transport_class);
37304 if (err)
37305 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
37306 --- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37307 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37308 @@ -33,7 +33,7 @@
37309 #include "scsi_transport_srp_internal.h"
37310
37311 struct srp_host_attrs {
37312 - atomic_t next_port_id;
37313 + atomic_unchecked_t next_port_id;
37314 };
37315 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37316
37317 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37318 struct Scsi_Host *shost = dev_to_shost(dev);
37319 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37320
37321 - atomic_set(&srp_host->next_port_id, 0);
37322 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37323 return 0;
37324 }
37325
37326 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37327 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37328 rport->roles = ids->roles;
37329
37330 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37331 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37332 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37333
37334 transport_setup_device(&rport->dev);
37335 diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
37336 --- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37337 +++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37338 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37339 const struct file_operations * fops;
37340 };
37341
37342 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37343 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37344 {"allow_dio", &adio_fops},
37345 {"debug", &debug_fops},
37346 {"def_reserved_size", &dressz_fops},
37347 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
37348 {
37349 int k, mask;
37350 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37351 - struct sg_proc_leaf * leaf;
37352 + const struct sg_proc_leaf * leaf;
37353
37354 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37355 if (!sg_proc_sgp)
37356 diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
37357 --- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37358 +++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37359 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37360 int do_iounmap = 0;
37361 int do_disable_device = 1;
37362
37363 + pax_track_stack();
37364 +
37365 memset(&sym_dev, 0, sizeof(sym_dev));
37366 memset(&nvram, 0, sizeof(nvram));
37367 sym_dev.pdev = pdev;
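Several functions patched in this stretch (lpfc_debugfs_dumpHBASlim_data, megaraid_cmm_register, _osd_print_system_info, the scsi_debug mode/log sense handlers and sym2_probe above) gain a pax_track_stack() call immediately after their local declarations; each of them keeps a large buffer or structure on the stack. I take this to be a hook for PaX's kernel stack usage tracking, whose real definition lives elsewhere in the patch and is not reproduced here. The fragment below shows only the call-site pattern, with pax_track_stack declared as an empty placeholder and the buffer size invented, so the example compiles on its own.

#include <stdio.h>
#include <string.h>

/* Placeholder only: the real pax_track_stack() comes from the PaX
 * portion of the patch and performs actual stack accounting. */
static inline void pax_track_stack(void) { }

#define EXAMPLE_BUF_SZ 1024	/* invented, mirroring "char buffer[1024]" */

static unsigned long example_dump(char *out, unsigned long len)
{
	char buffer[EXAMPLE_BUF_SZ];	/* large on-stack buffer */

	pax_track_stack();	/* inserted right after the declarations,
				 * as the patch does in the hunks above */

	memset(buffer, 'x', sizeof(buffer));
	if (len > sizeof(buffer))
		len = sizeof(buffer);
	memcpy(out, buffer, len);
	return len;
}

int main(void)
{
	char out[16];

	printf("copied %lu bytes\n", example_dump(out, sizeof(out)));
	return 0;
}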
37368 diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37369 --- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37370 +++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37371 @@ -18,7 +18,7 @@
37372
37373 #define MAX_CONFIG_LEN 40
37374
37375 -static struct kgdb_io kgdboc_io_ops;
37376 +static const struct kgdb_io kgdboc_io_ops;
37377
37378 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37379 static int configured = -1;
37380 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37381 module_put(THIS_MODULE);
37382 }
37383
37384 -static struct kgdb_io kgdboc_io_ops = {
37385 +static const struct kgdb_io kgdboc_io_ops = {
37386 .name = "kgdboc",
37387 .read_char = kgdboc_get_char,
37388 .write_char = kgdboc_put_char,
37389 diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37390 --- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37391 +++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37392 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37393 EXPORT_SYMBOL_GPL(spi_sync);
37394
37395 /* portable code must never pass more than 32 bytes */
37396 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37397 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37398
37399 static u8 *buf;
37400
37401 diff -urNp linux-2.6.32.45/drivers/ssb/driver_gige.c linux-2.6.32.45/drivers/ssb/driver_gige.c
37402 --- linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37403 +++ linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37404 @@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37405 dev->pci_controller.io_resource = &dev->io_resource;
37406 dev->pci_controller.mem_resource = &dev->mem_resource;
37407 dev->pci_controller.io_map_base = 0x800;
37408 - dev->pci_ops.read = ssb_gige_pci_read_config;
37409 - dev->pci_ops.write = ssb_gige_pci_write_config;
37410 + *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37411 + *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37412
37413 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37414 dev->io_resource.start = 0x800;
37415 diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37416 --- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37417 +++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37418 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37419 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37420 }
37421
37422 -static struct vm_operations_struct binder_vm_ops = {
37423 +static const struct vm_operations_struct binder_vm_ops = {
37424 .open = binder_vma_open,
37425 .close = binder_vma_close,
37426 };
37427 diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37428 --- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37429 +++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37430 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37431 return VM_FAULT_NOPAGE;
37432 }
37433
37434 -static struct vm_operations_struct b3dfg_vm_ops = {
37435 +static const struct vm_operations_struct b3dfg_vm_ops = {
37436 .fault = b3dfg_vma_fault,
37437 };
37438
37439 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37440 return r;
37441 }
37442
37443 -static struct file_operations b3dfg_fops = {
37444 +static const struct file_operations b3dfg_fops = {
37445 .owner = THIS_MODULE,
37446 .open = b3dfg_open,
37447 .release = b3dfg_release,
37448 diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37449 --- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37450 +++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37451 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37452 mutex_unlock(&dev->mutex);
37453 }
37454
37455 -static struct vm_operations_struct comedi_vm_ops = {
37456 +static const struct vm_operations_struct comedi_vm_ops = {
37457 .close = comedi_unmap,
37458 };
37459
37460 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37461 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37462 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37463 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37464 static dev_t adsp_devno;
37465 static struct class *adsp_class;
37466
37467 -static struct file_operations adsp_fops = {
37468 +static const struct file_operations adsp_fops = {
37469 .owner = THIS_MODULE,
37470 .open = adsp_open,
37471 .unlocked_ioctl = adsp_ioctl,
37472 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37473 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37474 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37475 @@ -1022,7 +1022,7 @@ done:
37476 return rc;
37477 }
37478
37479 -static struct file_operations audio_aac_fops = {
37480 +static const struct file_operations audio_aac_fops = {
37481 .owner = THIS_MODULE,
37482 .open = audio_open,
37483 .release = audio_release,
37484 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37485 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37486 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37487 @@ -833,7 +833,7 @@ done:
37488 return rc;
37489 }
37490
37491 -static struct file_operations audio_amrnb_fops = {
37492 +static const struct file_operations audio_amrnb_fops = {
37493 .owner = THIS_MODULE,
37494 .open = audamrnb_open,
37495 .release = audamrnb_release,
37496 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37497 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37498 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37499 @@ -805,7 +805,7 @@ dma_fail:
37500 return rc;
37501 }
37502
37503 -static struct file_operations audio_evrc_fops = {
37504 +static const struct file_operations audio_evrc_fops = {
37505 .owner = THIS_MODULE,
37506 .open = audevrc_open,
37507 .release = audevrc_release,
37508 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37509 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37510 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37511 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37512 return 0;
37513 }
37514
37515 -static struct file_operations audio_fops = {
37516 +static const struct file_operations audio_fops = {
37517 .owner = THIS_MODULE,
37518 .open = audio_in_open,
37519 .release = audio_in_release,
37520 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
37521 .unlocked_ioctl = audio_in_ioctl,
37522 };
37523
37524 -static struct file_operations audpre_fops = {
37525 +static const struct file_operations audpre_fops = {
37526 .owner = THIS_MODULE,
37527 .open = audpre_open,
37528 .unlocked_ioctl = audpre_ioctl,
37529 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37530 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37531 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37532 @@ -941,7 +941,7 @@ done:
37533 return rc;
37534 }
37535
37536 -static struct file_operations audio_mp3_fops = {
37537 +static const struct file_operations audio_mp3_fops = {
37538 .owner = THIS_MODULE,
37539 .open = audio_open,
37540 .release = audio_release,
37541 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37542 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37543 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37544 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37545 return 0;
37546 }
37547
37548 -static struct file_operations audio_fops = {
37549 +static const struct file_operations audio_fops = {
37550 .owner = THIS_MODULE,
37551 .open = audio_open,
37552 .release = audio_release,
37553 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
37554 .unlocked_ioctl = audio_ioctl,
37555 };
37556
37557 -static struct file_operations audpp_fops = {
37558 +static const struct file_operations audpp_fops = {
37559 .owner = THIS_MODULE,
37560 .open = audpp_open,
37561 .unlocked_ioctl = audpp_ioctl,
37562 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37563 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37564 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37565 @@ -816,7 +816,7 @@ err:
37566 return rc;
37567 }
37568
37569 -static struct file_operations audio_qcelp_fops = {
37570 +static const struct file_operations audio_qcelp_fops = {
37571 .owner = THIS_MODULE,
37572 .open = audqcelp_open,
37573 .release = audqcelp_release,
37574 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37575 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37576 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37577 @@ -242,7 +242,7 @@ err:
37578 return rc;
37579 }
37580
37581 -static struct file_operations snd_fops = {
37582 +static const struct file_operations snd_fops = {
37583 .owner = THIS_MODULE,
37584 .open = snd_open,
37585 .release = snd_release,
37586 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37587 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37588 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37589 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37590 return 0;
37591 }
37592
37593 -static struct file_operations qmi_fops = {
37594 +static const struct file_operations qmi_fops = {
37595 .owner = THIS_MODULE,
37596 .read = qmi_read,
37597 .write = qmi_write,
37598 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37599 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37600 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37601 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37602 return rc;
37603 }
37604
37605 -static struct file_operations rpcrouter_server_fops = {
37606 +static const struct file_operations rpcrouter_server_fops = {
37607 .owner = THIS_MODULE,
37608 .open = rpcrouter_open,
37609 .release = rpcrouter_release,
37610 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37611 .unlocked_ioctl = rpcrouter_ioctl,
37612 };
37613
37614 -static struct file_operations rpcrouter_router_fops = {
37615 +static const struct file_operations rpcrouter_router_fops = {
37616 .owner = THIS_MODULE,
37617 .open = rpcrouter_open,
37618 .release = rpcrouter_release,
37619 diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37620 --- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37621 +++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37622 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37623 return 0;
37624 }
37625
37626 -static struct block_device_operations dst_blk_ops = {
37627 +static const struct block_device_operations dst_blk_ops = {
37628 .open = dst_bdev_open,
37629 .release = dst_bdev_release,
37630 .owner = THIS_MODULE,
37631 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37632 n->size = ctl->size;
37633
37634 atomic_set(&n->refcnt, 1);
37635 - atomic_long_set(&n->gen, 0);
37636 + atomic_long_set_unchecked(&n->gen, 0);
37637 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37638
37639 err = dst_node_sysfs_init(n);
37640 diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37641 --- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37642 +++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37643 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37644 t->error = 0;
37645 t->retries = 0;
37646 atomic_set(&t->refcnt, 1);
37647 - t->gen = atomic_long_inc_return(&n->gen);
37648 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
37649
37650 t->enc = bio_data_dir(bio);
37651 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37652 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37653 --- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37654 +++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37655 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37656 struct net_device_stats *stats = &etdev->net_stats;
37657
37658 if (pMpTcb->Flags & fMP_DEST_BROAD)
37659 - atomic_inc(&etdev->Stats.brdcstxmt);
37660 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37661 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37662 - atomic_inc(&etdev->Stats.multixmt);
37663 + atomic_inc_unchecked(&etdev->Stats.multixmt);
37664 else
37665 - atomic_inc(&etdev->Stats.unixmt);
37666 + atomic_inc_unchecked(&etdev->Stats.unixmt);
37667
37668 if (pMpTcb->Packet) {
37669 stats->tx_bytes += pMpTcb->Packet->len;
37670 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37671 --- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37672 +++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37673 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37674 * operations
37675 */
37676 u32 unircv; /* # multicast packets received */
37677 - atomic_t unixmt; /* # multicast packets for Tx */
37678 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37679 u32 multircv; /* # multicast packets received */
37680 - atomic_t multixmt; /* # multicast packets for Tx */
37681 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37682 u32 brdcstrcv; /* # broadcast packets received */
37683 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
37684 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37685 u32 norcvbuf; /* # Rx packets discarded */
37686 u32 noxmtbuf; /* # Tx packets discarded */
37687
37688 diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37689 --- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37690 +++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37691 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37692 return 0;
37693 }
37694
37695 -static struct vm_operations_struct go7007_vm_ops = {
37696 +static const struct vm_operations_struct go7007_vm_ops = {
37697 .open = go7007_vm_open,
37698 .close = go7007_vm_close,
37699 .fault = go7007_vm_fault,
37700 diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37701 --- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37702 +++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37703 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37704 /* The one and only one */
37705 static struct blkvsc_driver_context g_blkvsc_drv;
37706
37707 -static struct block_device_operations block_ops = {
37708 +static const struct block_device_operations block_ops = {
37709 .owner = THIS_MODULE,
37710 .open = blkvsc_open,
37711 .release = blkvsc_release,
37712 diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37713 --- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37714 +++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37715 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37716
37717 DPRINT_ENTER(VMBUS);
37718
37719 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37720 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
37721 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37722 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37723
37724 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37725 ASSERT(msgInfo != NULL);
37726 diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37727 --- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37728 +++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37729 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37730 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37731 u32 outputAddressHi = outputAddress >> 32;
37732 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37733 - volatile void *hypercallPage = gHvContext.HypercallPage;
37734 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37735
37736 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37737 Control, Input, Output);
37738 diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37739 --- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37740 +++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37741 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37742 to_device_context(root_device_obj);
37743 struct device_context *child_device_ctx =
37744 to_device_context(child_device_obj);
37745 - static atomic_t device_num = ATOMIC_INIT(0);
37746 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37747
37748 DPRINT_ENTER(VMBUS_DRV);
37749
37750 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37751
37752 /* Set the device name. Otherwise, device_register() will fail. */
37753 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37754 - atomic_inc_return(&device_num));
37755 + atomic_inc_return_unchecked(&device_num));
37756
37757 /* The new device belongs to this bus */
37758 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37759 diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37760 --- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37761 +++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37762 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37763 struct VMBUS_CONNECTION {
37764 enum VMBUS_CONNECT_STATE ConnectState;
37765
37766 - atomic_t NextGpadlHandle;
37767 + atomic_unchecked_t NextGpadlHandle;
37768
37769 /*
37770 * Represents channel interrupts. Each bit position represents a
37771 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37772 --- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37773 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37774 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37775 * since the RX tasklet also increments it.
37776 */
37777 #ifdef CONFIG_64BIT
37778 - atomic64_add(rx_status.dropped_packets,
37779 - (atomic64_t *)&priv->stats.rx_dropped);
37780 + atomic64_add_unchecked(rx_status.dropped_packets,
37781 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37782 #else
37783 - atomic_add(rx_status.dropped_packets,
37784 - (atomic_t *)&priv->stats.rx_dropped);
37785 + atomic_add_unchecked(rx_status.dropped_packets,
37786 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37787 #endif
37788 }
37789
37790 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37791 --- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37792 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37793 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37794 /* Increment RX stats for virtual ports */
37795 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37796 #ifdef CONFIG_64BIT
37797 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37798 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37799 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37800 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37801 #else
37802 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37803 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37804 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37805 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37806 #endif
37807 }
37808 netif_receive_skb(skb);
37809 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37810 dev->name);
37811 */
37812 #ifdef CONFIG_64BIT
37813 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37814 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37815 #else
37816 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37817 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37818 #endif
37819 dev_kfree_skb_irq(skb);
37820 }
37821 diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37822 --- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37823 +++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37824 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37825 return 0;
37826 }
37827
37828 -static struct file_operations lcd_fops = {
37829 +static const struct file_operations lcd_fops = {
37830 .write = lcd_write,
37831 .open = lcd_open,
37832 .release = lcd_release,
37833 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37834 return 0;
37835 }
37836
37837 -static struct file_operations keypad_fops = {
37838 +static const struct file_operations keypad_fops = {
37839 .read = keypad_read, /* read */
37840 .open = keypad_open, /* open */
37841 .release = keypad_release, /* close */
37842 diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37843 --- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37844 +++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37845 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37846 ATA_BMDMA_SHT(DRV_NAME),
37847 };
37848
37849 -static struct ata_port_operations phison_ops = {
37850 +static const struct ata_port_operations phison_ops = {
37851 .inherits = &ata_bmdma_port_ops,
37852 .prereset = phison_pre_reset,
37853 };
37854 diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37855 --- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37856 +++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37857 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37858 return 0;
37859 }
37860
37861 -static struct file_operations poch_fops = {
37862 +static const struct file_operations poch_fops = {
37863 .owner = THIS_MODULE,
37864 .open = poch_open,
37865 .release = poch_release,
37866 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37867 --- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37868 +++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37869 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37870 mutex_init(&psb->mcache_lock);
37871 psb->mcache_root = RB_ROOT;
37872 psb->mcache_timeout = msecs_to_jiffies(5000);
37873 - atomic_long_set(&psb->mcache_gen, 0);
37874 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37875
37876 psb->trans_max_pages = 100;
37877
37878 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37879 INIT_LIST_HEAD(&psb->crypto_ready_list);
37880 INIT_LIST_HEAD(&psb->crypto_active_list);
37881
37882 - atomic_set(&psb->trans_gen, 1);
37883 + atomic_set_unchecked(&psb->trans_gen, 1);
37884 atomic_long_set(&psb->total_inodes, 0);
37885
37886 mutex_init(&psb->state_lock);
37887 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37888 --- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37889 +++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37890 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37891 m->data = data;
37892 m->start = start;
37893 m->size = size;
37894 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37895 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37896
37897 mutex_lock(&psb->mcache_lock);
37898 err = pohmelfs_mcache_insert(psb, m);
37899 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37900 --- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37901 +++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37902 @@ -570,14 +570,14 @@ struct pohmelfs_config;
37903 struct pohmelfs_sb {
37904 struct rb_root mcache_root;
37905 struct mutex mcache_lock;
37906 - atomic_long_t mcache_gen;
37907 + atomic_long_unchecked_t mcache_gen;
37908 unsigned long mcache_timeout;
37909
37910 unsigned int idx;
37911
37912 unsigned int trans_retries;
37913
37914 - atomic_t trans_gen;
37915 + atomic_unchecked_t trans_gen;
37916
37917 unsigned int crypto_attached_size;
37918 unsigned int crypto_align_size;
37919 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37920 --- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37921 +++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37922 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37923 int err;
37924 struct netfs_cmd *cmd = t->iovec.iov_base;
37925
37926 - t->gen = atomic_inc_return(&psb->trans_gen);
37927 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37928
37929 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37930 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37931 diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37932 --- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37933 +++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37934 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37935 static dev_t sep_devno;
37936
37937 /* the files operations structure of the driver */
37938 -static struct file_operations sep_file_operations = {
37939 +static const struct file_operations sep_file_operations = {
37940 .owner = THIS_MODULE,
37941 .ioctl = sep_ioctl,
37942 .poll = sep_poll,
37943 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37944 --- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37945 +++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37946 @@ -92,7 +92,7 @@ struct vhci_hcd {
37947 unsigned resuming:1;
37948 unsigned long re_timeout;
37949
37950 - atomic_t seqnum;
37951 + atomic_unchecked_t seqnum;
37952
37953 /*
37954 * NOTE:
37955 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37956 --- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37957 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37958 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37959 return;
37960 }
37961
37962 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37963 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37964 if (priv->seqnum == 0xffff)
37965 usbip_uinfo("seqnum max\n");
37966
37967 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37968 return -ENOMEM;
37969 }
37970
37971 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37972 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37973 if (unlink->seqnum == 0xffff)
37974 usbip_uinfo("seqnum max\n");
37975
37976 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37977 vdev->rhport = rhport;
37978 }
37979
37980 - atomic_set(&vhci->seqnum, 0);
37981 + atomic_set_unchecked(&vhci->seqnum, 0);
37982 spin_lock_init(&vhci->lock);
37983
37984
37985 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
37986 --- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37987 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37988 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37989 usbip_uerr("cannot find a urb of seqnum %u\n",
37990 pdu->base.seqnum);
37991 usbip_uinfo("max seqnum %d\n",
37992 - atomic_read(&the_controller->seqnum));
37993 + atomic_read_unchecked(&the_controller->seqnum));
37994 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37995 return;
37996 }
37997 diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
37998 --- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37999 +++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
38000 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38001 static int __init vme_user_probe(struct device *, int, int);
38002 static int __exit vme_user_remove(struct device *, int, int);
38003
38004 -static struct file_operations vme_user_fops = {
38005 +static const struct file_operations vme_user_fops = {
38006 .open = vme_user_open,
38007 .release = vme_user_release,
38008 .read = vme_user_read,
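Most of the remaining staging-driver hunks in this section make a single, uniform change: statically defined operations tables (file_operations, vm_operations_struct, block_device_operations, ata_port_operations, the kgdboc kgdb_io table, sysfs_ops and the sg proc leaf array) become const. These tables are filled in once at definition time and only ever passed by address, so qualifying them const lets them live in read-only data and turns any stray runtime write into a build error or a fault. The earlier qla_def.h hunk goes the other way and tags isp_operations with __no_const, presumably to exempt a table that is assigned at runtime from the same treatment. Below is a minimal before/after sketch; demo_file_operations is a reduced, invented stand-in for the kernel structure, not its real definition.

#include <stddef.h>

/* Reduced, invented stand-in for struct file_operations. */
struct demo_file_operations {
	int (*open)(void *inode, void *file);
	int (*release)(void *inode, void *file);
};

static int demo_open(void *inode, void *file)
{
	(void)inode; (void)file;
	return 0;
}

static int demo_release(void *inode, void *file)
{
	(void)inode; (void)file;
	return 0;
}

/* Before: "static struct demo_file_operations demo_fops = { ... };"
 * lands in writable .data.  After, as in the hunks above, the const
 * qualifier lets the table be placed in .rodata. */
static const struct demo_file_operations demo_fops = {
	.open    = demo_open,
	.release = demo_release,
};

int main(void)
{
	return demo_fops.open(NULL, NULL) | demo_fops.release(NULL, NULL);
}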
38009 diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
38010 --- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
38011 +++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
38012 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38013 bool mContinue;
38014 char *pIn, *pOut;
38015
38016 + pax_track_stack();
38017 +
38018 if (!SCI_Prepare(j))
38019 return 0;
38020
38021 diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
38022 --- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
38023 +++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
38024 @@ -23,6 +23,7 @@
38025 #include <linux/string.h>
38026 #include <linux/kobject.h>
38027 #include <linux/uio_driver.h>
38028 +#include <asm/local.h>
38029
38030 #define UIO_MAX_DEVICES 255
38031
38032 @@ -30,10 +31,10 @@ struct uio_device {
38033 struct module *owner;
38034 struct device *dev;
38035 int minor;
38036 - atomic_t event;
38037 + atomic_unchecked_t event;
38038 struct fasync_struct *async_queue;
38039 wait_queue_head_t wait;
38040 - int vma_count;
38041 + local_t vma_count;
38042 struct uio_info *info;
38043 struct kobject *map_dir;
38044 struct kobject *portio_dir;
38045 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38046 return entry->show(mem, buf);
38047 }
38048
38049 -static struct sysfs_ops map_sysfs_ops = {
38050 +static const struct sysfs_ops map_sysfs_ops = {
38051 .show = map_type_show,
38052 };
38053
38054 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38055 return entry->show(port, buf);
38056 }
38057
38058 -static struct sysfs_ops portio_sysfs_ops = {
38059 +static const struct sysfs_ops portio_sysfs_ops = {
38060 .show = portio_type_show,
38061 };
38062
38063 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38064 struct uio_device *idev = dev_get_drvdata(dev);
38065 if (idev)
38066 return sprintf(buf, "%u\n",
38067 - (unsigned int)atomic_read(&idev->event));
38068 + (unsigned int)atomic_read_unchecked(&idev->event));
38069 else
38070 return -ENODEV;
38071 }
38072 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38073 {
38074 struct uio_device *idev = info->uio_dev;
38075
38076 - atomic_inc(&idev->event);
38077 + atomic_inc_unchecked(&idev->event);
38078 wake_up_interruptible(&idev->wait);
38079 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38080 }
38081 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38082 }
38083
38084 listener->dev = idev;
38085 - listener->event_count = atomic_read(&idev->event);
38086 + listener->event_count = atomic_read_unchecked(&idev->event);
38087 filep->private_data = listener;
38088
38089 if (idev->info->open) {
38090 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38091 return -EIO;
38092
38093 poll_wait(filep, &idev->wait, wait);
38094 - if (listener->event_count != atomic_read(&idev->event))
38095 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38096 return POLLIN | POLLRDNORM;
38097 return 0;
38098 }
38099 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38100 do {
38101 set_current_state(TASK_INTERRUPTIBLE);
38102
38103 - event_count = atomic_read(&idev->event);
38104 + event_count = atomic_read_unchecked(&idev->event);
38105 if (event_count != listener->event_count) {
38106 if (copy_to_user(buf, &event_count, count))
38107 retval = -EFAULT;
38108 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38109 static void uio_vma_open(struct vm_area_struct *vma)
38110 {
38111 struct uio_device *idev = vma->vm_private_data;
38112 - idev->vma_count++;
38113 + local_inc(&idev->vma_count);
38114 }
38115
38116 static void uio_vma_close(struct vm_area_struct *vma)
38117 {
38118 struct uio_device *idev = vma->vm_private_data;
38119 - idev->vma_count--;
38120 + local_dec(&idev->vma_count);
38121 }
38122
38123 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38124 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
38125 idev->owner = owner;
38126 idev->info = info;
38127 init_waitqueue_head(&idev->wait);
38128 - atomic_set(&idev->event, 0);
38129 + atomic_set_unchecked(&idev->event, 0);
38130
38131 ret = uio_get_minor(idev);
38132 if (ret)
38133 diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
38134 --- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38135 +++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38136 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38137 if (printk_ratelimit())
38138 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38139 __func__, vpi, vci);
38140 - atomic_inc(&vcc->stats->rx_err);
38141 + atomic_inc_unchecked(&vcc->stats->rx_err);
38142 return;
38143 }
38144
38145 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38146 if (length > ATM_MAX_AAL5_PDU) {
38147 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38148 __func__, length, vcc);
38149 - atomic_inc(&vcc->stats->rx_err);
38150 + atomic_inc_unchecked(&vcc->stats->rx_err);
38151 goto out;
38152 }
38153
38154 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38155 if (sarb->len < pdu_length) {
38156 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38157 __func__, pdu_length, sarb->len, vcc);
38158 - atomic_inc(&vcc->stats->rx_err);
38159 + atomic_inc_unchecked(&vcc->stats->rx_err);
38160 goto out;
38161 }
38162
38163 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38164 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38165 __func__, vcc);
38166 - atomic_inc(&vcc->stats->rx_err);
38167 + atomic_inc_unchecked(&vcc->stats->rx_err);
38168 goto out;
38169 }
38170
38171 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38172 if (printk_ratelimit())
38173 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38174 __func__, length);
38175 - atomic_inc(&vcc->stats->rx_drop);
38176 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38177 goto out;
38178 }
38179
38180 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38181
38182 vcc->push(vcc, skb);
38183
38184 - atomic_inc(&vcc->stats->rx);
38185 + atomic_inc_unchecked(&vcc->stats->rx);
38186 out:
38187 skb_trim(sarb, 0);
38188 }
38189 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38190 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38191
38192 usbatm_pop(vcc, skb);
38193 - atomic_inc(&vcc->stats->tx);
38194 + atomic_inc_unchecked(&vcc->stats->tx);
38195
38196 skb = skb_dequeue(&instance->sndqueue);
38197 }
38198 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38199 if (!left--)
38200 return sprintf(page,
38201 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38202 - atomic_read(&atm_dev->stats.aal5.tx),
38203 - atomic_read(&atm_dev->stats.aal5.tx_err),
38204 - atomic_read(&atm_dev->stats.aal5.rx),
38205 - atomic_read(&atm_dev->stats.aal5.rx_err),
38206 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38207 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38208 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38209 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38210 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38211 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38212
38213 if (!left--) {
38214 if (instance->disconnected)
38215 diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
38216 --- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38217 +++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38218 @@ -314,7 +314,7 @@ static ssize_t wdm_write
38219 if (r < 0)
38220 goto outnp;
38221
38222 - if (!file->f_flags && O_NONBLOCK)
38223 + if (!(file->f_flags & O_NONBLOCK))
38224 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38225 &desc->flags));
38226 else
38227 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
38228 --- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38229 +++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38230 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38231
38232 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38233
38234 -struct usb_mon_operations *mon_ops;
38235 +const struct usb_mon_operations *mon_ops;
38236
38237 /*
38238 * The registration is unlocked.
38239 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38240 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38241 */
38242
38243 -int usb_mon_register (struct usb_mon_operations *ops)
38244 +int usb_mon_register (const struct usb_mon_operations *ops)
38245 {
38246
38247 if (mon_ops)
38248 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
38249 --- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38250 +++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38251 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38252 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38253
38254 struct usb_mon_operations {
38255 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38256 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38257 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38258 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38259 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38260 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38261 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38262 };
38263
38264 -extern struct usb_mon_operations *mon_ops;
38265 +extern const struct usb_mon_operations *mon_ops;
38266
38267 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38268 {
38269 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38270 (*mon_ops->urb_complete)(bus, urb, status);
38271 }
38272
38273 -int usb_mon_register(struct usb_mon_operations *ops);
38274 +int usb_mon_register(const struct usb_mon_operations *ops);
38275 void usb_mon_deregister(void);
38276
38277 #else
38278 diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
38279 --- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38280 +++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38281 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38282 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38283 if (buf) {
38284 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38285 - if (len > 0) {
38286 - smallbuf = kmalloc(++len, GFP_NOIO);
38287 + if (len++ > 0) {
38288 + smallbuf = kmalloc(len, GFP_NOIO);
38289 if (!smallbuf)
38290 return buf;
38291 memcpy(smallbuf, buf, len);
38292 diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
38293 --- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38294 +++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38295 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38296 return pdata->msgdata[1];
38297 }
38298
38299 -static struct backlight_ops appledisplay_bl_data = {
38300 +static const struct backlight_ops appledisplay_bl_data = {
38301 .get_brightness = appledisplay_bl_get_brightness,
38302 .update_status = appledisplay_bl_update_status,
38303 };
38304 diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
38305 --- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38306 +++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38307 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38308 /*
38309 * Ops
38310 */
38311 -static struct usb_mon_operations mon_ops_0 = {
38312 +static const struct usb_mon_operations mon_ops_0 = {
38313 .urb_submit = mon_submit,
38314 .urb_submit_error = mon_submit_error,
38315 .urb_complete = mon_complete,
38316 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
38317 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38318 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38319 @@ -192,7 +192,7 @@ struct wahc {
38320 struct list_head xfer_delayed_list;
38321 spinlock_t xfer_list_lock;
38322 struct work_struct xfer_work;
38323 - atomic_t xfer_id_count;
38324 + atomic_unchecked_t xfer_id_count;
38325 };
38326
38327
38328 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38329 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38330 spin_lock_init(&wa->xfer_list_lock);
38331 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38332 - atomic_set(&wa->xfer_id_count, 1);
38333 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38334 }
38335
38336 /**
38337 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
38338 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38339 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38340 @@ -293,7 +293,7 @@ out:
38341 */
38342 static void wa_xfer_id_init(struct wa_xfer *xfer)
38343 {
38344 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38345 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38346 }
38347
38348 /*
38349 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38350 --- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38351 +++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38352 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38353 size_t len = skb->len;
38354 size_t used;
38355 ssize_t result;
38356 - struct wlp_nonce enonce, rnonce;
38357 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38358 enum wlp_assc_error assc_err;
38359 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38360 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38361 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38362 --- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38363 +++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38364 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38365 return ret;
38366 }
38367
38368 -static
38369 -struct sysfs_ops wss_sysfs_ops = {
38370 +static const struct sysfs_ops wss_sysfs_ops = {
38371 .show = wlp_wss_attr_show,
38372 .store = wlp_wss_attr_store,
38373 };
38374 diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38375 --- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38376 +++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38377 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38378 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38379 }
38380
38381 -static struct backlight_ops atmel_lcdc_bl_ops = {
38382 +static const struct backlight_ops atmel_lcdc_bl_ops = {
38383 .update_status = atmel_bl_update_status,
38384 .get_brightness = atmel_bl_get_brightness,
38385 };
38386 diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38387 --- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38388 +++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38389 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38390 return bd->props.brightness;
38391 }
38392
38393 -static struct backlight_ops aty128_bl_data = {
38394 +static const struct backlight_ops aty128_bl_data = {
38395 .get_brightness = aty128_bl_get_brightness,
38396 .update_status = aty128_bl_update_status,
38397 };
38398 diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38399 --- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38400 +++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38401 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38402 return bd->props.brightness;
38403 }
38404
38405 -static struct backlight_ops aty_bl_data = {
38406 +static const struct backlight_ops aty_bl_data = {
38407 .get_brightness = aty_bl_get_brightness,
38408 .update_status = aty_bl_update_status,
38409 };
38410 diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38411 --- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38412 +++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38413 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38414 return bd->props.brightness;
38415 }
38416
38417 -static struct backlight_ops radeon_bl_data = {
38418 +static const struct backlight_ops radeon_bl_data = {
38419 .get_brightness = radeon_bl_get_brightness,
38420 .update_status = radeon_bl_update_status,
38421 };
38422 diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38423 --- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38424 +++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38425 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38426 return error ? data->current_brightness : reg_val;
38427 }
38428
38429 -static struct backlight_ops adp5520_bl_ops = {
38430 +static const struct backlight_ops adp5520_bl_ops = {
38431 .update_status = adp5520_bl_update_status,
38432 .get_brightness = adp5520_bl_get_brightness,
38433 };
38434 diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38435 --- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38436 +++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38437 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38438 return 1;
38439 }
38440
38441 -static struct backlight_ops adx_backlight_ops = {
38442 +static const struct backlight_ops adx_backlight_ops = {
38443 .options = 0,
38444 .update_status = adx_backlight_update_status,
38445 .get_brightness = adx_backlight_get_brightness,
38446 diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38447 --- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38448 +++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38449 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38450 return pwm_channel_enable(&pwmbl->pwmc);
38451 }
38452
38453 -static struct backlight_ops atmel_pwm_bl_ops = {
38454 +static const struct backlight_ops atmel_pwm_bl_ops = {
38455 .get_brightness = atmel_pwm_bl_get_intensity,
38456 .update_status = atmel_pwm_bl_set_intensity,
38457 };
38458 diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38459 --- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38460 +++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38461 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38462 * ERR_PTR() or a pointer to the newly allocated device.
38463 */
38464 struct backlight_device *backlight_device_register(const char *name,
38465 - struct device *parent, void *devdata, struct backlight_ops *ops)
38466 + struct device *parent, void *devdata, const struct backlight_ops *ops)
38467 {
38468 struct backlight_device *new_bd;
38469 int rc;
38470 diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38471 --- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38472 +++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38473 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38474 }
38475 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38476
38477 -static struct backlight_ops corgi_bl_ops = {
38478 +static const struct backlight_ops corgi_bl_ops = {
38479 .get_brightness = corgi_bl_get_intensity,
38480 .update_status = corgi_bl_update_status,
38481 };
38482 diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38483 --- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38484 +++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38485 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38486 return intensity;
38487 }
38488
38489 -static struct backlight_ops cr_backlight_ops = {
38490 +static const struct backlight_ops cr_backlight_ops = {
38491 .get_brightness = cr_backlight_get_intensity,
38492 .update_status = cr_backlight_set_intensity,
38493 };
38494 diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38495 --- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38496 +++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38497 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38498 return data->current_brightness;
38499 }
38500
38501 -static struct backlight_ops da903x_backlight_ops = {
38502 +static const struct backlight_ops da903x_backlight_ops = {
38503 .update_status = da903x_backlight_update_status,
38504 .get_brightness = da903x_backlight_get_brightness,
38505 };
38506 diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38507 --- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38508 +++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38509 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38510 }
38511 EXPORT_SYMBOL(corgibl_limit_intensity);
38512
38513 -static struct backlight_ops genericbl_ops = {
38514 +static const struct backlight_ops genericbl_ops = {
38515 .options = BL_CORE_SUSPENDRESUME,
38516 .get_brightness = genericbl_get_intensity,
38517 .update_status = genericbl_send_intensity,
38518 diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38519 --- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38520 +++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38521 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38522 return current_intensity;
38523 }
38524
38525 -static struct backlight_ops hp680bl_ops = {
38526 +static const struct backlight_ops hp680bl_ops = {
38527 .get_brightness = hp680bl_get_intensity,
38528 .update_status = hp680bl_set_intensity,
38529 };
38530 diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38531 --- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38532 +++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38533 @@ -93,7 +93,7 @@ out:
38534 return ret;
38535 }
38536
38537 -static struct backlight_ops jornada_bl_ops = {
38538 +static const struct backlight_ops jornada_bl_ops = {
38539 .get_brightness = jornada_bl_get_brightness,
38540 .update_status = jornada_bl_update_status,
38541 .options = BL_CORE_SUSPENDRESUME,
38542 diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38543 --- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38544 +++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38545 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38546 return kb3886bl_intensity;
38547 }
38548
38549 -static struct backlight_ops kb3886bl_ops = {
38550 +static const struct backlight_ops kb3886bl_ops = {
38551 .get_brightness = kb3886bl_get_intensity,
38552 .update_status = kb3886bl_send_intensity,
38553 };
38554 diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38555 --- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38556 +++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38557 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38558 return current_intensity;
38559 }
38560
38561 -static struct backlight_ops locomobl_data = {
38562 +static const struct backlight_ops locomobl_data = {
38563 .get_brightness = locomolcd_get_intensity,
38564 .update_status = locomolcd_set_intensity,
38565 };
38566 diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38567 --- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38568 +++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38569 @@ -33,7 +33,7 @@ struct dmi_match_data {
38570 unsigned long iostart;
38571 unsigned long iolen;
38572 /* Backlight operations structure. */
38573 - struct backlight_ops backlight_ops;
38574 + const struct backlight_ops backlight_ops;
38575 };
38576
38577 /* Module parameters. */
38578 diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38579 --- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38580 +++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38581 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38582 return bl->current_intensity;
38583 }
38584
38585 -static struct backlight_ops omapbl_ops = {
38586 +static const struct backlight_ops omapbl_ops = {
38587 .get_brightness = omapbl_get_intensity,
38588 .update_status = omapbl_update_status,
38589 };
38590 diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38591 --- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38592 +++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38593 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38594 return intensity - HW_LEVEL_MIN;
38595 }
38596
38597 -static struct backlight_ops progearbl_ops = {
38598 +static const struct backlight_ops progearbl_ops = {
38599 .get_brightness = progearbl_get_intensity,
38600 .update_status = progearbl_set_intensity,
38601 };
38602 diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38603 --- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38604 +++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38605 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38606 return bl->props.brightness;
38607 }
38608
38609 -static struct backlight_ops pwm_backlight_ops = {
38610 +static const struct backlight_ops pwm_backlight_ops = {
38611 .update_status = pwm_backlight_update_status,
38612 .get_brightness = pwm_backlight_get_brightness,
38613 };
38614 diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38615 --- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38616 +++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38617 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38618 return props->brightness;
38619 }
38620
38621 -static struct backlight_ops bl_ops = {
38622 +static const struct backlight_ops bl_ops = {
38623 .get_brightness = tosa_bl_get_brightness,
38624 .update_status = tosa_bl_update_status,
38625 };
38626 diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38627 --- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38628 +++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38629 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38630 return data->current_brightness;
38631 }
38632
38633 -static struct backlight_ops wm831x_backlight_ops = {
38634 +static const struct backlight_ops wm831x_backlight_ops = {
38635 .options = BL_CORE_SUSPENDRESUME,
38636 .update_status = wm831x_backlight_update_status,
38637 .get_brightness = wm831x_backlight_get_brightness,
38638 diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38639 --- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38640 +++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38641 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38642 return 0;
38643 }
38644
38645 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38646 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38647 .get_brightness = bl_get_brightness,
38648 };
38649
38650 diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38651 --- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38652 +++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38653 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38654 return 0;
38655 }
38656
38657 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38658 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38659 .get_brightness = bl_get_brightness,
38660 };
38661
38662 diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38663 --- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38664 +++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38665 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38666 rc = -ENODEV;
38667 goto out;
38668 }
38669 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38670 - !info->fbops->fb_setcmap)) {
38671 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38672 rc = -EINVAL;
38673 goto out1;
38674 }
38675 diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38676 --- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38677 +++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38678 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38679 image->dx += image->width + 8;
38680 }
38681 } else if (rotate == FB_ROTATE_UD) {
38682 - for (x = 0; x < num && image->dx >= 0; x++) {
38683 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38684 info->fbops->fb_imageblit(info, image);
38685 image->dx -= image->width + 8;
38686 }
38687 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38688 image->dy += image->height + 8;
38689 }
38690 } else if (rotate == FB_ROTATE_CCW) {
38691 - for (x = 0; x < num && image->dy >= 0; x++) {
38692 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38693 info->fbops->fb_imageblit(info, image);
38694 image->dy -= image->height + 8;
38695 }
38696 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38697 int flags = info->flags;
38698 int ret = 0;
38699
38700 + pax_track_stack();
38701 +
38702 if (var->activate & FB_ACTIVATE_INV_MODE) {
38703 struct fb_videomode mode1, mode2;
38704
38705 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38706 void __user *argp = (void __user *)arg;
38707 long ret = 0;
38708
38709 + pax_track_stack();
38710 +
38711 switch (cmd) {
38712 case FBIOGET_VSCREENINFO:
38713 if (!lock_fb_info(info))
38714 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38715 return -EFAULT;
38716 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38717 return -EINVAL;
38718 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38719 + if (con2fb.framebuffer >= FB_MAX)
38720 return -EINVAL;
38721 if (!registered_fb[con2fb.framebuffer])
38722 request_module("fb%d", con2fb.framebuffer);
38723 diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38724 --- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38725 +++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38726 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38727 }
38728 }
38729 printk("ringbuffer lockup!!!\n");
38730 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38731 i810_report_error(mmio);
38732 par->dev_flags |= LOCKUP;
38733 info->pixmap.scan_align = 1;
38734 diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38735 --- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38736 +++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38737 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38738 return bd->props.brightness;
38739 }
38740
38741 -static struct backlight_ops nvidia_bl_ops = {
38742 +static const struct backlight_ops nvidia_bl_ops = {
38743 .get_brightness = nvidia_bl_get_brightness,
38744 .update_status = nvidia_bl_update_status,
38745 };
38746 diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38747 --- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38748 +++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38749 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38750 return bd->props.brightness;
38751 }
38752
38753 -static struct backlight_ops riva_bl_ops = {
38754 +static const struct backlight_ops riva_bl_ops = {
38755 .get_brightness = riva_bl_get_brightness,
38756 .update_status = riva_bl_update_status,
38757 };
38758 diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38759 --- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38760 +++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38761 @@ -18,6 +18,7 @@
38762 #include <linux/fb.h>
38763 #include <linux/io.h>
38764 #include <linux/mutex.h>
38765 +#include <linux/moduleloader.h>
38766 #include <video/edid.h>
38767 #include <video/uvesafb.h>
38768 #ifdef CONFIG_X86
38769 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38770 NULL,
38771 };
38772
38773 - return call_usermodehelper(v86d_path, argv, envp, 1);
38774 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38775 }
38776
38777 /*
38778 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38779 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38780 par->pmi_setpal = par->ypan = 0;
38781 } else {
38782 +
38783 +#ifdef CONFIG_PAX_KERNEXEC
38784 +#ifdef CONFIG_MODULES
38785 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38786 +#endif
38787 + if (!par->pmi_code) {
38788 + par->pmi_setpal = par->ypan = 0;
38789 + return 0;
38790 + }
38791 +#endif
38792 +
38793 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38794 + task->t.regs.edi);
38795 +
38796 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38797 + pax_open_kernel();
38798 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38799 + pax_close_kernel();
38800 +
38801 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38802 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38803 +#else
38804 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38805 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38806 +#endif
38807 +
38808 printk(KERN_INFO "uvesafb: protected mode interface info at "
38809 "%04x:%04x\n",
38810 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38811 @@ -1799,6 +1822,11 @@ out:
38812 if (par->vbe_modes)
38813 kfree(par->vbe_modes);
38814
38815 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38816 + if (par->pmi_code)
38817 + module_free_exec(NULL, par->pmi_code);
38818 +#endif
38819 +
38820 framebuffer_release(info);
38821 return err;
38822 }
38823 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38824 kfree(par->vbe_state_orig);
38825 if (par->vbe_state_saved)
38826 kfree(par->vbe_state_saved);
38827 +
38828 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38829 + if (par->pmi_code)
38830 + module_free_exec(NULL, par->pmi_code);
38831 +#endif
38832 +
38833 }
38834
38835 framebuffer_release(info);
38836 diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38837 --- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38838 +++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38839 @@ -9,6 +9,7 @@
38840 */
38841
38842 #include <linux/module.h>
38843 +#include <linux/moduleloader.h>
38844 #include <linux/kernel.h>
38845 #include <linux/errno.h>
38846 #include <linux/string.h>
38847 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38848 static int vram_total __initdata; /* Set total amount of memory */
38849 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38850 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38851 -static void (*pmi_start)(void) __read_mostly;
38852 -static void (*pmi_pal) (void) __read_mostly;
38853 +static void (*pmi_start)(void) __read_only;
38854 +static void (*pmi_pal) (void) __read_only;
38855 static int depth __read_mostly;
38856 static int vga_compat __read_mostly;
38857 /* --------------------------------------------------------------------- */
38858 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38859 unsigned int size_vmode;
38860 unsigned int size_remap;
38861 unsigned int size_total;
38862 + void *pmi_code = NULL;
38863
38864 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38865 return -ENODEV;
38866 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38867 size_remap = size_total;
38868 vesafb_fix.smem_len = size_remap;
38869
38870 -#ifndef __i386__
38871 - screen_info.vesapm_seg = 0;
38872 -#endif
38873 -
38874 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38875 printk(KERN_WARNING
38876 "vesafb: cannot reserve video memory at 0x%lx\n",
38877 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38878 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38879 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38880
38881 +#ifdef __i386__
38882 +
38883 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38884 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
38885 + if (!pmi_code)
38886 +#elif !defined(CONFIG_PAX_KERNEXEC)
38887 + if (0)
38888 +#endif
38889 +
38890 +#endif
38891 + screen_info.vesapm_seg = 0;
38892 +
38893 if (screen_info.vesapm_seg) {
38894 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38895 - screen_info.vesapm_seg,screen_info.vesapm_off);
38896 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38897 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38898 }
38899
38900 if (screen_info.vesapm_seg < 0xc000)
38901 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38902
38903 if (ypan || pmi_setpal) {
38904 unsigned short *pmi_base;
38905 +
38906 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38907 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38908 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38909 +
38910 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38911 + pax_open_kernel();
38912 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38913 +#else
38914 + pmi_code = pmi_base;
38915 +#endif
38916 +
38917 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38918 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38919 +
38920 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38921 + pmi_start = ktva_ktla(pmi_start);
38922 + pmi_pal = ktva_ktla(pmi_pal);
38923 + pax_close_kernel();
38924 +#endif
38925 +
38926 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38927 if (pmi_base[3]) {
38928 printk(KERN_INFO "vesafb: pmi: ports = ");
38929 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38930 info->node, info->fix.id);
38931 return 0;
38932 err:
38933 +
38934 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38935 + module_free_exec(NULL, pmi_code);
38936 +#endif
38937 +
38938 if (info->screen_base)
38939 iounmap(info->screen_base);
38940 framebuffer_release(info);
38941 diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38942 --- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38943 +++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38944 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38945 return 0;
38946 }
38947
38948 -static struct sysfs_ops hyp_sysfs_ops = {
38949 +static const struct sysfs_ops hyp_sysfs_ops = {
38950 .show = hyp_sysfs_show,
38951 .store = hyp_sysfs_store,
38952 };
38953 diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38954 --- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38955 +++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38956 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38957 static void
38958 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38959 {
38960 - char *s = nd_get_link(nd);
38961 + const char *s = nd_get_link(nd);
38962
38963 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38964 IS_ERR(s) ? "<error>" : s);
38965 diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38966 --- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38967 +++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38968 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38969 size += sizeof(struct io_event) * nr_events;
38970 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38971
38972 - if (nr_pages < 0)
38973 + if (nr_pages <= 0)
38974 return -EINVAL;
38975
38976 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38977 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38978 struct aio_timeout to;
38979 int retry = 0;
38980
38981 + pax_track_stack();
38982 +
38983 /* needed to zero any padding within an entry (there shouldn't be
38984 * any, but C is fun!
38985 */
38986 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38987 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38988 {
38989 ssize_t ret;
38990 + struct iovec iovstack;
38991
38992 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38993 kiocb->ki_nbytes, 1,
38994 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38995 + &iovstack, &kiocb->ki_iovec);
38996 if (ret < 0)
38997 goto out;
38998
38999 + if (kiocb->ki_iovec == &iovstack) {
39000 + kiocb->ki_inline_vec = iovstack;
39001 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39002 + }
39003 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39004 kiocb->ki_cur_seg = 0;
39005 /* ki_nbytes/left now reflect bytes instead of segs */
39006 diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
39007 --- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
39008 +++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
39009 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
39010 unsigned long limit;
39011
39012 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
39013 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39014 if (limit != RLIM_INFINITY && offset > limit)
39015 goto out_sig;
39016 if (offset > inode->i_sb->s_maxbytes)
39017 diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
39018 --- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
39019 +++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
39020 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
39021 set_bit(n,sbi->symlink_bitmap);
39022 sl = &sbi->symlink[n];
39023 sl->len = strlen(symname);
39024 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
39025 + slsize = sl->len+1;
39026 + sl->data = kmalloc(slsize, GFP_KERNEL);
39027 if (!sl->data) {
39028 clear_bit(n,sbi->symlink_bitmap);
39029 unlock_kernel();
39030 diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
39031 --- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
39032 +++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
39033 @@ -15,7 +15,7 @@
39034 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39035 {
39036 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39037 - nd_set_link(nd, (char *)ino->u.symlink);
39038 + nd_set_link(nd, ino->u.symlink);
39039 return NULL;
39040 }
39041
39042 diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
39043 --- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39044 +++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39045 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39046 {
39047 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39048 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39049 - char *link = nd_get_link(nd);
39050 + const char *link = nd_get_link(nd);
39051 if (!IS_ERR(link))
39052 kfree(link);
39053 }
39054 diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
39055 --- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39056 +++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39057 @@ -16,6 +16,7 @@
39058 #include <linux/string.h>
39059 #include <linux/fs.h>
39060 #include <linux/file.h>
39061 +#include <linux/security.h>
39062 #include <linux/stat.h>
39063 #include <linux/fcntl.h>
39064 #include <linux/ptrace.h>
39065 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39066 #endif
39067 # define START_STACK(u) (u.start_stack)
39068
39069 + memset(&dump, 0, sizeof(dump));
39070 +
39071 fs = get_fs();
39072 set_fs(KERNEL_DS);
39073 has_dumped = 1;
39074 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39075
39076 /* If the size of the dump file exceeds the rlimit, then see what would happen
39077 if we wrote the stack, but not the data area. */
39078 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39079 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39080 dump.u_dsize = 0;
39081
39082 /* Make sure we have enough room to write the stack and data areas. */
39083 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39084 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39085 dump.u_ssize = 0;
39086
39087 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39088 dump_size = dump.u_ssize << PAGE_SHIFT;
39089 DUMP_WRITE(dump_start,dump_size);
39090 }
39091 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
39092 - set_fs(KERNEL_DS);
39093 - DUMP_WRITE(current,sizeof(*current));
39094 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39095 end_coredump:
39096 set_fs(fs);
39097 return has_dumped;
39098 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39099 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39100 if (rlim >= RLIM_INFINITY)
39101 rlim = ~0;
39102 +
39103 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39104 if (ex.a_data + ex.a_bss > rlim)
39105 return -ENOMEM;
39106
39107 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39108 install_exec_creds(bprm);
39109 current->flags &= ~PF_FORKNOEXEC;
39110
39111 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39112 + current->mm->pax_flags = 0UL;
39113 +#endif
39114 +
39115 +#ifdef CONFIG_PAX_PAGEEXEC
39116 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39117 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39118 +
39119 +#ifdef CONFIG_PAX_EMUTRAMP
39120 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39121 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39122 +#endif
39123 +
39124 +#ifdef CONFIG_PAX_MPROTECT
39125 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39126 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39127 +#endif
39128 +
39129 + }
39130 +#endif
39131 +
39132 if (N_MAGIC(ex) == OMAGIC) {
39133 unsigned long text_addr, map_size;
39134 loff_t pos;
39135 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39136
39137 down_write(&current->mm->mmap_sem);
39138 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39139 - PROT_READ | PROT_WRITE | PROT_EXEC,
39140 + PROT_READ | PROT_WRITE,
39141 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39142 fd_offset + ex.a_text);
39143 up_write(&current->mm->mmap_sem);
39144 diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
39145 --- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39146 +++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39147 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39148 #define elf_core_dump NULL
39149 #endif
39150
39151 +#ifdef CONFIG_PAX_MPROTECT
39152 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39153 +#endif
39154 +
39155 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39156 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39157 #else
39158 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39159 .load_binary = load_elf_binary,
39160 .load_shlib = load_elf_library,
39161 .core_dump = elf_core_dump,
39162 +
39163 +#ifdef CONFIG_PAX_MPROTECT
39164 + .handle_mprotect= elf_handle_mprotect,
39165 +#endif
39166 +
39167 .min_coredump = ELF_EXEC_PAGESIZE,
39168 .hasvdso = 1
39169 };
39170 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39171
39172 static int set_brk(unsigned long start, unsigned long end)
39173 {
39174 + unsigned long e = end;
39175 +
39176 start = ELF_PAGEALIGN(start);
39177 end = ELF_PAGEALIGN(end);
39178 if (end > start) {
39179 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39180 if (BAD_ADDR(addr))
39181 return addr;
39182 }
39183 - current->mm->start_brk = current->mm->brk = end;
39184 + current->mm->start_brk = current->mm->brk = e;
39185 return 0;
39186 }
39187
39188 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39189 elf_addr_t __user *u_rand_bytes;
39190 const char *k_platform = ELF_PLATFORM;
39191 const char *k_base_platform = ELF_BASE_PLATFORM;
39192 - unsigned char k_rand_bytes[16];
39193 + u32 k_rand_bytes[4];
39194 int items;
39195 elf_addr_t *elf_info;
39196 int ei_index = 0;
39197 const struct cred *cred = current_cred();
39198 struct vm_area_struct *vma;
39199 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39200 +
39201 + pax_track_stack();
39202
39203 /*
39204 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39205 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39206 * Generate 16 random bytes for userspace PRNG seeding.
39207 */
39208 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39209 - u_rand_bytes = (elf_addr_t __user *)
39210 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39211 + srandom32(k_rand_bytes[0] ^ random32());
39212 + srandom32(k_rand_bytes[1] ^ random32());
39213 + srandom32(k_rand_bytes[2] ^ random32());
39214 + srandom32(k_rand_bytes[3] ^ random32());
39215 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39216 + u_rand_bytes = (elf_addr_t __user *) p;
39217 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39218 return -EFAULT;
39219
39220 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39221 return -EFAULT;
39222 current->mm->env_end = p;
39223
39224 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39225 +
39226 /* Put the elf_info on the stack in the right place. */
39227 sp = (elf_addr_t __user *)envp + 1;
39228 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39229 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39230 return -EFAULT;
39231 return 0;
39232 }
39233 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39234 {
39235 struct elf_phdr *elf_phdata;
39236 struct elf_phdr *eppnt;
39237 - unsigned long load_addr = 0;
39238 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39239 int load_addr_set = 0;
39240 unsigned long last_bss = 0, elf_bss = 0;
39241 - unsigned long error = ~0UL;
39242 + unsigned long error = -EINVAL;
39243 unsigned long total_size;
39244 int retval, i, size;
39245
39246 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39247 goto out_close;
39248 }
39249
39250 +#ifdef CONFIG_PAX_SEGMEXEC
39251 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39252 + pax_task_size = SEGMEXEC_TASK_SIZE;
39253 +#endif
39254 +
39255 eppnt = elf_phdata;
39256 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39257 if (eppnt->p_type == PT_LOAD) {
39258 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39259 k = load_addr + eppnt->p_vaddr;
39260 if (BAD_ADDR(k) ||
39261 eppnt->p_filesz > eppnt->p_memsz ||
39262 - eppnt->p_memsz > TASK_SIZE ||
39263 - TASK_SIZE - eppnt->p_memsz < k) {
39264 + eppnt->p_memsz > pax_task_size ||
39265 + pax_task_size - eppnt->p_memsz < k) {
39266 error = -ENOMEM;
39267 goto out_close;
39268 }
39269 @@ -532,6 +557,194 @@ out:
39270 return error;
39271 }
39272
39273 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39274 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39275 +{
39276 + unsigned long pax_flags = 0UL;
39277 +
39278 +#ifdef CONFIG_PAX_PAGEEXEC
39279 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39280 + pax_flags |= MF_PAX_PAGEEXEC;
39281 +#endif
39282 +
39283 +#ifdef CONFIG_PAX_SEGMEXEC
39284 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39285 + pax_flags |= MF_PAX_SEGMEXEC;
39286 +#endif
39287 +
39288 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39289 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39290 + if (nx_enabled)
39291 + pax_flags &= ~MF_PAX_SEGMEXEC;
39292 + else
39293 + pax_flags &= ~MF_PAX_PAGEEXEC;
39294 + }
39295 +#endif
39296 +
39297 +#ifdef CONFIG_PAX_EMUTRAMP
39298 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39299 + pax_flags |= MF_PAX_EMUTRAMP;
39300 +#endif
39301 +
39302 +#ifdef CONFIG_PAX_MPROTECT
39303 + if (elf_phdata->p_flags & PF_MPROTECT)
39304 + pax_flags |= MF_PAX_MPROTECT;
39305 +#endif
39306 +
39307 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39308 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39309 + pax_flags |= MF_PAX_RANDMMAP;
39310 +#endif
39311 +
39312 + return pax_flags;
39313 +}
39314 +#endif
39315 +
39316 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39317 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39318 +{
39319 + unsigned long pax_flags = 0UL;
39320 +
39321 +#ifdef CONFIG_PAX_PAGEEXEC
39322 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39323 + pax_flags |= MF_PAX_PAGEEXEC;
39324 +#endif
39325 +
39326 +#ifdef CONFIG_PAX_SEGMEXEC
39327 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39328 + pax_flags |= MF_PAX_SEGMEXEC;
39329 +#endif
39330 +
39331 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39332 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39333 + if (nx_enabled)
39334 + pax_flags &= ~MF_PAX_SEGMEXEC;
39335 + else
39336 + pax_flags &= ~MF_PAX_PAGEEXEC;
39337 + }
39338 +#endif
39339 +
39340 +#ifdef CONFIG_PAX_EMUTRAMP
39341 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39342 + pax_flags |= MF_PAX_EMUTRAMP;
39343 +#endif
39344 +
39345 +#ifdef CONFIG_PAX_MPROTECT
39346 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39347 + pax_flags |= MF_PAX_MPROTECT;
39348 +#endif
39349 +
39350 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39351 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39352 + pax_flags |= MF_PAX_RANDMMAP;
39353 +#endif
39354 +
39355 + return pax_flags;
39356 +}
39357 +#endif
39358 +
39359 +#ifdef CONFIG_PAX_EI_PAX
39360 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39361 +{
39362 + unsigned long pax_flags = 0UL;
39363 +
39364 +#ifdef CONFIG_PAX_PAGEEXEC
39365 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39366 + pax_flags |= MF_PAX_PAGEEXEC;
39367 +#endif
39368 +
39369 +#ifdef CONFIG_PAX_SEGMEXEC
39370 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39371 + pax_flags |= MF_PAX_SEGMEXEC;
39372 +#endif
39373 +
39374 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39375 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39376 + if (nx_enabled)
39377 + pax_flags &= ~MF_PAX_SEGMEXEC;
39378 + else
39379 + pax_flags &= ~MF_PAX_PAGEEXEC;
39380 + }
39381 +#endif
39382 +
39383 +#ifdef CONFIG_PAX_EMUTRAMP
39384 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39385 + pax_flags |= MF_PAX_EMUTRAMP;
39386 +#endif
39387 +
39388 +#ifdef CONFIG_PAX_MPROTECT
39389 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39390 + pax_flags |= MF_PAX_MPROTECT;
39391 +#endif
39392 +
39393 +#ifdef CONFIG_PAX_ASLR
39394 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39395 + pax_flags |= MF_PAX_RANDMMAP;
39396 +#endif
39397 +
39398 + return pax_flags;
39399 +}
39400 +#endif
39401 +
39402 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39403 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39404 +{
39405 + unsigned long pax_flags = 0UL;
39406 +
39407 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39408 + unsigned long i;
39409 + int found_flags = 0;
39410 +#endif
39411 +
39412 +#ifdef CONFIG_PAX_EI_PAX
39413 + pax_flags = pax_parse_ei_pax(elf_ex);
39414 +#endif
39415 +
39416 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39417 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39418 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39419 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39420 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39421 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39422 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39423 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39424 + return -EINVAL;
39425 +
39426 +#ifdef CONFIG_PAX_SOFTMODE
39427 + if (pax_softmode)
39428 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39429 + else
39430 +#endif
39431 +
39432 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39433 + found_flags = 1;
39434 + break;
39435 + }
39436 +#endif
39437 +
39438 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39439 + if (found_flags == 0) {
39440 + struct elf_phdr phdr;
39441 + memset(&phdr, 0, sizeof(phdr));
39442 + phdr.p_flags = PF_NOEMUTRAMP;
39443 +#ifdef CONFIG_PAX_SOFTMODE
39444 + if (pax_softmode)
39445 + pax_flags = pax_parse_softmode(&phdr);
39446 + else
39447 +#endif
39448 + pax_flags = pax_parse_hardmode(&phdr);
39449 + }
39450 +#endif
39451 +
39452 +
39453 + if (0 > pax_check_flags(&pax_flags))
39454 + return -EINVAL;
39455 +
39456 + current->mm->pax_flags = pax_flags;
39457 + return 0;
39458 +}
39459 +#endif
39460 +
39461 /*
39462 * These are the functions used to load ELF style executables and shared
39463 * libraries. There is no binary dependent code anywhere else.
39464 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39465 {
39466 unsigned int random_variable = 0;
39467
39468 +#ifdef CONFIG_PAX_RANDUSTACK
39469 + if (randomize_va_space)
39470 + return stack_top - current->mm->delta_stack;
39471 +#endif
39472 +
39473 if ((current->flags & PF_RANDOMIZE) &&
39474 !(current->personality & ADDR_NO_RANDOMIZE)) {
39475 random_variable = get_random_int() & STACK_RND_MASK;
39476 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39477 unsigned long load_addr = 0, load_bias = 0;
39478 int load_addr_set = 0;
39479 char * elf_interpreter = NULL;
39480 - unsigned long error;
39481 + unsigned long error = 0;
39482 struct elf_phdr *elf_ppnt, *elf_phdata;
39483 unsigned long elf_bss, elf_brk;
39484 int retval, i;
39485 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39486 unsigned long start_code, end_code, start_data, end_data;
39487 unsigned long reloc_func_desc = 0;
39488 int executable_stack = EXSTACK_DEFAULT;
39489 - unsigned long def_flags = 0;
39490 struct {
39491 struct elfhdr elf_ex;
39492 struct elfhdr interp_elf_ex;
39493 } *loc;
39494 + unsigned long pax_task_size = TASK_SIZE;
39495
39496 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39497 if (!loc) {
39498 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39499
39500 /* OK, This is the point of no return */
39501 current->flags &= ~PF_FORKNOEXEC;
39502 - current->mm->def_flags = def_flags;
39503 +
39504 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39505 + current->mm->pax_flags = 0UL;
39506 +#endif
39507 +
39508 +#ifdef CONFIG_PAX_DLRESOLVE
39509 + current->mm->call_dl_resolve = 0UL;
39510 +#endif
39511 +
39512 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39513 + current->mm->call_syscall = 0UL;
39514 +#endif
39515 +
39516 +#ifdef CONFIG_PAX_ASLR
39517 + current->mm->delta_mmap = 0UL;
39518 + current->mm->delta_stack = 0UL;
39519 +#endif
39520 +
39521 + current->mm->def_flags = 0;
39522 +
39523 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39524 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39525 + send_sig(SIGKILL, current, 0);
39526 + goto out_free_dentry;
39527 + }
39528 +#endif
39529 +
39530 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39531 + pax_set_initial_flags(bprm);
39532 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39533 + if (pax_set_initial_flags_func)
39534 + (pax_set_initial_flags_func)(bprm);
39535 +#endif
39536 +
39537 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39538 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39539 + current->mm->context.user_cs_limit = PAGE_SIZE;
39540 + current->mm->def_flags |= VM_PAGEEXEC;
39541 + }
39542 +#endif
39543 +
39544 +#ifdef CONFIG_PAX_SEGMEXEC
39545 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39546 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39547 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39548 + pax_task_size = SEGMEXEC_TASK_SIZE;
39549 + }
39550 +#endif
39551 +
39552 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39553 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39554 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39555 + put_cpu();
39556 + }
39557 +#endif
39558
39559 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39560 may depend on the personality. */
39561 SET_PERSONALITY(loc->elf_ex);
39562 +
39563 +#ifdef CONFIG_PAX_ASLR
39564 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39565 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39566 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39567 + }
39568 +#endif
39569 +
39570 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39571 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39572 + executable_stack = EXSTACK_DISABLE_X;
39573 + current->personality &= ~READ_IMPLIES_EXEC;
39574 + } else
39575 +#endif
39576 +
39577 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39578 current->personality |= READ_IMPLIES_EXEC;
39579
39580 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39581 #else
39582 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39583 #endif
39584 +
39585 +#ifdef CONFIG_PAX_RANDMMAP
39586 + /* PaX: randomize base address at the default exe base if requested */
39587 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39588 +#ifdef CONFIG_SPARC64
39589 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39590 +#else
39591 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39592 +#endif
39593 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39594 + elf_flags |= MAP_FIXED;
39595 + }
39596 +#endif
39597 +
39598 }
39599
39600 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39601 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39602 * allowed task size. Note that p_filesz must always be
39603 * <= p_memsz so it is only necessary to check p_memsz.
39604 */
39605 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39606 - elf_ppnt->p_memsz > TASK_SIZE ||
39607 - TASK_SIZE - elf_ppnt->p_memsz < k) {
39608 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39609 + elf_ppnt->p_memsz > pax_task_size ||
39610 + pax_task_size - elf_ppnt->p_memsz < k) {
39611 /* set_brk can never work. Avoid overflows. */
39612 send_sig(SIGKILL, current, 0);
39613 retval = -EINVAL;
39614 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39615 start_data += load_bias;
39616 end_data += load_bias;
39617
39618 +#ifdef CONFIG_PAX_RANDMMAP
39619 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39620 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39621 +#endif
39622 +
39623 /* Calling set_brk effectively mmaps the pages that we need
39624 * for the bss and break sections. We must do this before
39625 * mapping in the interpreter, to make sure it doesn't wind
39626 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39627 goto out_free_dentry;
39628 }
39629 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39630 - send_sig(SIGSEGV, current, 0);
39631 - retval = -EFAULT; /* Nobody gets to see this, but.. */
39632 - goto out_free_dentry;
39633 + /*
39634 + * This bss-zeroing can fail if the ELF
39635 + * file specifies odd protections. So
39636 + * we don't check the return value
39637 + */
39638 }
39639
39640 if (elf_interpreter) {
39641 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39642 unsigned long n = off;
39643 if (n > PAGE_SIZE)
39644 n = PAGE_SIZE;
39645 - if (!dump_write(file, buf, n))
39646 + if (!dump_write(file, buf, n)) {
39647 + free_page((unsigned long)buf);
39648 return 0;
39649 + }
39650 off -= n;
39651 }
39652 free_page((unsigned long)buf);
39653 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39654 * Decide what to dump of a segment, part, all or none.
39655 */
39656 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39657 - unsigned long mm_flags)
39658 + unsigned long mm_flags, long signr)
39659 {
39660 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39661
39662 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39663 if (vma->vm_file == NULL)
39664 return 0;
39665
39666 - if (FILTER(MAPPED_PRIVATE))
39667 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39668 goto whole;
39669
39670 /*
39671 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39672 #undef DUMP_WRITE
39673
39674 #define DUMP_WRITE(addr, nr) \
39675 + do { \
39676 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39677 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39678 - goto end_coredump;
39679 + goto end_coredump; \
39680 + } while (0);
39681
39682 static void fill_elf_header(struct elfhdr *elf, int segs,
39683 u16 machine, u32 flags, u8 osabi)
39684 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39685 {
39686 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39687 int i = 0;
39688 - do
39689 + do {
39690 i += 2;
39691 - while (auxv[i - 2] != AT_NULL);
39692 + } while (auxv[i - 2] != AT_NULL);
39693 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39694 }
39695
39696 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39697 phdr.p_offset = offset;
39698 phdr.p_vaddr = vma->vm_start;
39699 phdr.p_paddr = 0;
39700 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
39701 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39702 phdr.p_memsz = vma->vm_end - vma->vm_start;
39703 offset += phdr.p_filesz;
39704 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39705 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39706 unsigned long addr;
39707 unsigned long end;
39708
39709 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
39710 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39711
39712 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39713 struct page *page;
39714 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39715 page = get_dump_page(addr);
39716 if (page) {
39717 void *kaddr = kmap(page);
39718 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39719 stop = ((size += PAGE_SIZE) > limit) ||
39720 !dump_write(file, kaddr, PAGE_SIZE);
39721 kunmap(page);
39722 @@ -2042,6 +2356,97 @@ out:
39723
39724 #endif /* USE_ELF_CORE_DUMP */
39725
39726 +#ifdef CONFIG_PAX_MPROTECT
39727 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
39728 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39729 + * we'll remove VM_MAYWRITE for good on RELRO segments.
39730 + *
39731 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39732 + * basis because we want to allow the common case and not the special ones.
39733 + */
39734 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39735 +{
39736 + struct elfhdr elf_h;
39737 + struct elf_phdr elf_p;
39738 + unsigned long i;
39739 + unsigned long oldflags;
39740 + bool is_textrel_rw, is_textrel_rx, is_relro;
39741 +
39742 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39743 + return;
39744 +
39745 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39746 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39747 +
39748 +#ifdef CONFIG_PAX_ELFRELOCS
39749 + /* possible TEXTREL */
39750 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39751 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39752 +#else
39753 + is_textrel_rw = false;
39754 + is_textrel_rx = false;
39755 +#endif
39756 +
39757 + /* possible RELRO */
39758 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39759 +
39760 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39761 + return;
39762 +
39763 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39764 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39765 +
39766 +#ifdef CONFIG_PAX_ETEXECRELOCS
39767 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39768 +#else
39769 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39770 +#endif
39771 +
39772 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39773 + !elf_check_arch(&elf_h) ||
39774 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39775 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39776 + return;
39777 +
39778 + for (i = 0UL; i < elf_h.e_phnum; i++) {
39779 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39780 + return;
39781 + switch (elf_p.p_type) {
39782 + case PT_DYNAMIC:
39783 + if (!is_textrel_rw && !is_textrel_rx)
39784 + continue;
39785 + i = 0UL;
39786 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39787 + elf_dyn dyn;
39788 +
39789 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39790 + return;
39791 + if (dyn.d_tag == DT_NULL)
39792 + return;
39793 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39794 + gr_log_textrel(vma);
39795 + if (is_textrel_rw)
39796 + vma->vm_flags |= VM_MAYWRITE;
39797 + else
39798 + /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39799 + vma->vm_flags &= ~VM_MAYWRITE;
39800 + return;
39801 + }
39802 + i++;
39803 + }
39804 + return;
39805 +
39806 + case PT_GNU_RELRO:
39807 + if (!is_relro)
39808 + continue;
39809 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39810 + vma->vm_flags &= ~VM_MAYWRITE;
39811 + return;
39812 + }
39813 + }
39814 +}
39815 +#endif
39816 +
39817 static int __init init_elf_binfmt(void)
39818 {
39819 return register_binfmt(&elf_format);
39820 diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39821 --- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39822 +++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39823 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39824 realdatastart = (unsigned long) -ENOMEM;
39825 printk("Unable to allocate RAM for process data, errno %d\n",
39826 (int)-realdatastart);
39827 + down_write(&current->mm->mmap_sem);
39828 do_munmap(current->mm, textpos, text_len);
39829 + up_write(&current->mm->mmap_sem);
39830 ret = realdatastart;
39831 goto err;
39832 }
39833 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39834 }
39835 if (IS_ERR_VALUE(result)) {
39836 printk("Unable to read data+bss, errno %d\n", (int)-result);
39837 + down_write(&current->mm->mmap_sem);
39838 do_munmap(current->mm, textpos, text_len);
39839 do_munmap(current->mm, realdatastart, data_len + extra);
39840 + up_write(&current->mm->mmap_sem);
39841 ret = result;
39842 goto err;
39843 }
39844 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39845 }
39846 if (IS_ERR_VALUE(result)) {
39847 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39848 + down_write(&current->mm->mmap_sem);
39849 do_munmap(current->mm, textpos, text_len + data_len + extra +
39850 MAX_SHARED_LIBS * sizeof(unsigned long));
39851 + up_write(&current->mm->mmap_sem);
39852 ret = result;
39853 goto err;
39854 }
39855 diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39856 --- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39857 +++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39858 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39859
39860 i = 0;
39861 while (i < bio_slab_nr) {
39862 - struct bio_slab *bslab = &bio_slabs[i];
39863 + bslab = &bio_slabs[i];
39864
39865 if (!bslab->slab && entry == -1)
39866 entry = i;
39867 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39868 const int read = bio_data_dir(bio) == READ;
39869 struct bio_map_data *bmd = bio->bi_private;
39870 int i;
39871 - char *p = bmd->sgvecs[0].iov_base;
39872 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
39873
39874 __bio_for_each_segment(bvec, bio, i, 0) {
39875 char *addr = page_address(bvec->bv_page);
39876 diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39877 --- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39878 +++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39879 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39880 else if (bdev->bd_contains == bdev)
39881 res = 0; /* is a whole device which isn't held */
39882
39883 - else if (bdev->bd_contains->bd_holder == bd_claim)
39884 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39885 res = 0; /* is a partition of a device that is being partitioned */
39886 else if (bdev->bd_contains->bd_holder != NULL)
39887 res = -EBUSY; /* is a partition of a held device */
39888 diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39889 --- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39890 +++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39891 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39892 free_extent_buffer(buf);
39893 add_root_to_dirty_list(root);
39894 } else {
39895 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39896 - parent_start = parent->start;
39897 - else
39898 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39899 + if (parent)
39900 + parent_start = parent->start;
39901 + else
39902 + parent_start = 0;
39903 + } else
39904 parent_start = 0;
39905
39906 WARN_ON(trans->transid != btrfs_header_generation(parent));
39907 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39908
39909 ret = 0;
39910 if (slot == 0) {
39911 - struct btrfs_disk_key disk_key;
39912 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39913 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39914 }
39915 diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39916 --- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39917 +++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39918 @@ -39,7 +39,7 @@
39919 #include "tree-log.h"
39920 #include "free-space-cache.h"
39921
39922 -static struct extent_io_ops btree_extent_io_ops;
39923 +static const struct extent_io_ops btree_extent_io_ops;
39924 static void end_workqueue_fn(struct btrfs_work *work);
39925 static void free_fs_root(struct btrfs_root *root);
39926
39927 @@ -2607,7 +2607,7 @@ out:
39928 return 0;
39929 }
39930
39931 -static struct extent_io_ops btree_extent_io_ops = {
39932 +static const struct extent_io_ops btree_extent_io_ops = {
39933 .write_cache_pages_lock_hook = btree_lock_page_hook,
39934 .readpage_end_io_hook = btree_readpage_end_io_hook,
39935 .submit_bio_hook = btree_submit_bio_hook,
39936 diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39937 --- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39938 +++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39939 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39940 struct bio *bio, int mirror_num,
39941 unsigned long bio_flags);
39942 struct extent_io_ops {
39943 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39944 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39945 u64 start, u64 end, int *page_started,
39946 unsigned long *nr_written);
39947 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39948 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39949 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39950 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39951 extent_submit_bio_hook_t *submit_bio_hook;
39952 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
39953 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39954 size_t size, struct bio *bio,
39955 unsigned long bio_flags);
39956 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39957 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39958 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39959 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39960 u64 start, u64 end,
39961 struct extent_state *state);
39962 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39963 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39964 u64 start, u64 end,
39965 struct extent_state *state);
39966 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39967 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39968 struct extent_state *state);
39969 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39970 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39971 struct extent_state *state, int uptodate);
39972 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39973 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39974 unsigned long old, unsigned long bits);
39975 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39976 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39977 unsigned long bits);
39978 - int (*merge_extent_hook)(struct inode *inode,
39979 + int (* const merge_extent_hook)(struct inode *inode,
39980 struct extent_state *new,
39981 struct extent_state *other);
39982 - int (*split_extent_hook)(struct inode *inode,
39983 + int (* const split_extent_hook)(struct inode *inode,
39984 struct extent_state *orig, u64 split);
39985 - int (*write_cache_pages_lock_hook)(struct page *page);
39986 + int (* const write_cache_pages_lock_hook)(struct page *page);
39987 };
39988
39989 struct extent_io_tree {
39990 @@ -88,7 +88,7 @@ struct extent_io_tree {
39991 u64 dirty_bytes;
39992 spinlock_t lock;
39993 spinlock_t buffer_lock;
39994 - struct extent_io_ops *ops;
39995 + const struct extent_io_ops *ops;
39996 };
39997
39998 struct extent_state {
39999 diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
40000 --- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
40001 +++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
40002 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
40003 u64 group_start = group->key.objectid;
40004 new_extents = kmalloc(sizeof(*new_extents),
40005 GFP_NOFS);
40006 + if (!new_extents) {
40007 + ret = -ENOMEM;
40008 + goto out;
40009 + }
40010 nr_extents = 1;
40011 ret = get_new_locations(reloc_inode,
40012 extent_key,
40013 diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
40014 --- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
40015 +++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
40016 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
40017
40018 while(1) {
40019 if (entry->bytes < bytes || entry->offset < min_start) {
40020 - struct rb_node *node;
40021 -
40022 node = rb_next(&entry->offset_index);
40023 if (!node)
40024 break;
40025 @@ -1226,7 +1224,7 @@ again:
40026 */
40027 while (entry->bitmap || found_bitmap ||
40028 (!entry->bitmap && entry->bytes < min_bytes)) {
40029 - struct rb_node *node = rb_next(&entry->offset_index);
40030 + node = rb_next(&entry->offset_index);
40031
40032 if (entry->bitmap && entry->bytes > bytes + empty_size) {
40033 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40034 diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
40035 --- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40036 +++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40037 @@ -63,7 +63,7 @@ static const struct inode_operations btr
40038 static const struct address_space_operations btrfs_aops;
40039 static const struct address_space_operations btrfs_symlink_aops;
40040 static const struct file_operations btrfs_dir_file_operations;
40041 -static struct extent_io_ops btrfs_extent_io_ops;
40042 +static const struct extent_io_ops btrfs_extent_io_ops;
40043
40044 static struct kmem_cache *btrfs_inode_cachep;
40045 struct kmem_cache *btrfs_trans_handle_cachep;
40046 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40047 1, 0, NULL, GFP_NOFS);
40048 while (start < end) {
40049 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40050 + BUG_ON(!async_cow);
40051 async_cow->inode = inode;
40052 async_cow->root = root;
40053 async_cow->locked_page = locked_page;
40054 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40055 inline_size = btrfs_file_extent_inline_item_len(leaf,
40056 btrfs_item_nr(leaf, path->slots[0]));
40057 tmp = kmalloc(inline_size, GFP_NOFS);
40058 + if (!tmp)
40059 + return -ENOMEM;
40060 ptr = btrfs_file_extent_inline_start(item);
40061
40062 read_extent_buffer(leaf, tmp, ptr, inline_size);
40063 @@ -5410,7 +5413,7 @@ fail:
40064 return -ENOMEM;
40065 }
40066
40067 -static int btrfs_getattr(struct vfsmount *mnt,
40068 +int btrfs_getattr(struct vfsmount *mnt,
40069 struct dentry *dentry, struct kstat *stat)
40070 {
40071 struct inode *inode = dentry->d_inode;
40072 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40073 return 0;
40074 }
40075
40076 +EXPORT_SYMBOL(btrfs_getattr);
40077 +
40078 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40079 +{
40080 + return BTRFS_I(inode)->root->anon_super.s_dev;
40081 +}
40082 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40083 +
40084 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40085 struct inode *new_dir, struct dentry *new_dentry)
40086 {
40087 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40088 .fsync = btrfs_sync_file,
40089 };
40090
40091 -static struct extent_io_ops btrfs_extent_io_ops = {
40092 +static const struct extent_io_ops btrfs_extent_io_ops = {
40093 .fill_delalloc = run_delalloc_range,
40094 .submit_bio_hook = btrfs_submit_bio_hook,
40095 .merge_bio_hook = btrfs_merge_bio_hook,
40096 diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
40097 --- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40098 +++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40099 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40100 }
40101 spin_unlock(&rc->reloc_root_tree.lock);
40102
40103 - BUG_ON((struct btrfs_root *)node->data != root);
40104 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40105
40106 if (!del) {
40107 spin_lock(&rc->reloc_root_tree.lock);
40108 diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
40109 --- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40110 +++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40111 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40112 complete(&root->kobj_unregister);
40113 }
40114
40115 -static struct sysfs_ops btrfs_super_attr_ops = {
40116 +static const struct sysfs_ops btrfs_super_attr_ops = {
40117 .show = btrfs_super_attr_show,
40118 .store = btrfs_super_attr_store,
40119 };
40120
40121 -static struct sysfs_ops btrfs_root_attr_ops = {
40122 +static const struct sysfs_ops btrfs_root_attr_ops = {
40123 .show = btrfs_root_attr_show,
40124 .store = btrfs_root_attr_store,
40125 };
40126 diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
40127 --- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40128 +++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40129 @@ -25,6 +25,7 @@
40130 #include <linux/percpu.h>
40131 #include <linux/slab.h>
40132 #include <linux/capability.h>
40133 +#include <linux/security.h>
40134 #include <linux/blkdev.h>
40135 #include <linux/file.h>
40136 #include <linux/quotaops.h>
40137 diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
40138 --- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40139 +++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40140 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40141 args);
40142
40143 /* start by checking things over */
40144 - ASSERT(cache->fstop_percent >= 0 &&
40145 - cache->fstop_percent < cache->fcull_percent &&
40146 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40147 cache->fcull_percent < cache->frun_percent &&
40148 cache->frun_percent < 100);
40149
40150 - ASSERT(cache->bstop_percent >= 0 &&
40151 - cache->bstop_percent < cache->bcull_percent &&
40152 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40153 cache->bcull_percent < cache->brun_percent &&
40154 cache->brun_percent < 100);
40155
40156 diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
40157 --- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40158 +++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40159 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40160 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40161 return -EIO;
40162
40163 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40164 + if (datalen > PAGE_SIZE - 1)
40165 return -EOPNOTSUPP;
40166
40167 /* drag the command string into the kernel so we can parse it */
40168 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40169 if (args[0] != '%' || args[1] != '\0')
40170 return -EINVAL;
40171
40172 - if (fstop < 0 || fstop >= cache->fcull_percent)
40173 + if (fstop >= cache->fcull_percent)
40174 return cachefiles_daemon_range_error(cache, args);
40175
40176 cache->fstop_percent = fstop;
40177 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40178 if (args[0] != '%' || args[1] != '\0')
40179 return -EINVAL;
40180
40181 - if (bstop < 0 || bstop >= cache->bcull_percent)
40182 + if (bstop >= cache->bcull_percent)
40183 return cachefiles_daemon_range_error(cache, args);
40184
40185 cache->bstop_percent = bstop;
40186 diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
40187 --- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40188 +++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40189 @@ -56,7 +56,7 @@ struct cachefiles_cache {
40190 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40191 struct rb_root active_nodes; /* active nodes (can't be culled) */
40192 rwlock_t active_lock; /* lock for active_nodes */
40193 - atomic_t gravecounter; /* graveyard uniquifier */
40194 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40195 unsigned frun_percent; /* when to stop culling (% files) */
40196 unsigned fcull_percent; /* when to start culling (% files) */
40197 unsigned fstop_percent; /* when to stop allocating (% files) */
40198 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40199 * proc.c
40200 */
40201 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40202 -extern atomic_t cachefiles_lookup_histogram[HZ];
40203 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40204 -extern atomic_t cachefiles_create_histogram[HZ];
40205 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40206 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40207 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40208
40209 extern int __init cachefiles_proc_init(void);
40210 extern void cachefiles_proc_cleanup(void);
40211 static inline
40212 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40213 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40214 {
40215 unsigned long jif = jiffies - start_jif;
40216 if (jif >= HZ)
40217 jif = HZ - 1;
40218 - atomic_inc(&histogram[jif]);
40219 + atomic_inc_unchecked(&histogram[jif]);
40220 }
40221
40222 #else
40223 diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
40224 --- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40225 +++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40226 @@ -250,7 +250,7 @@ try_again:
40227 /* first step is to make up a grave dentry in the graveyard */
40228 sprintf(nbuffer, "%08x%08x",
40229 (uint32_t) get_seconds(),
40230 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40231 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40232
40233 /* do the multiway lock magic */
40234 trap = lock_rename(cache->graveyard, dir);
40235 diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
40236 --- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40237 +++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40238 @@ -14,9 +14,9 @@
40239 #include <linux/seq_file.h>
40240 #include "internal.h"
40241
40242 -atomic_t cachefiles_lookup_histogram[HZ];
40243 -atomic_t cachefiles_mkdir_histogram[HZ];
40244 -atomic_t cachefiles_create_histogram[HZ];
40245 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40246 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40247 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40248
40249 /*
40250 * display the latency histogram
40251 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40252 return 0;
40253 default:
40254 index = (unsigned long) v - 3;
40255 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40256 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40257 - z = atomic_read(&cachefiles_create_histogram[index]);
40258 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40259 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40260 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40261 if (x == 0 && y == 0 && z == 0)
40262 return 0;
40263
40264 diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
40265 --- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40266 +++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40267 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40268 old_fs = get_fs();
40269 set_fs(KERNEL_DS);
40270 ret = file->f_op->write(
40271 - file, (const void __user *) data, len, &pos);
40272 + file, (__force const void __user *) data, len, &pos);
40273 set_fs(old_fs);
40274 kunmap(page);
40275 if (ret != len)
40276 diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
40277 --- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40278 +++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40279 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40280 tcon = list_entry(tmp3,
40281 struct cifsTconInfo,
40282 tcon_list);
40283 - atomic_set(&tcon->num_smbs_sent, 0);
40284 - atomic_set(&tcon->num_writes, 0);
40285 - atomic_set(&tcon->num_reads, 0);
40286 - atomic_set(&tcon->num_oplock_brks, 0);
40287 - atomic_set(&tcon->num_opens, 0);
40288 - atomic_set(&tcon->num_posixopens, 0);
40289 - atomic_set(&tcon->num_posixmkdirs, 0);
40290 - atomic_set(&tcon->num_closes, 0);
40291 - atomic_set(&tcon->num_deletes, 0);
40292 - atomic_set(&tcon->num_mkdirs, 0);
40293 - atomic_set(&tcon->num_rmdirs, 0);
40294 - atomic_set(&tcon->num_renames, 0);
40295 - atomic_set(&tcon->num_t2renames, 0);
40296 - atomic_set(&tcon->num_ffirst, 0);
40297 - atomic_set(&tcon->num_fnext, 0);
40298 - atomic_set(&tcon->num_fclose, 0);
40299 - atomic_set(&tcon->num_hardlinks, 0);
40300 - atomic_set(&tcon->num_symlinks, 0);
40301 - atomic_set(&tcon->num_locks, 0);
40302 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40303 + atomic_set_unchecked(&tcon->num_writes, 0);
40304 + atomic_set_unchecked(&tcon->num_reads, 0);
40305 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40306 + atomic_set_unchecked(&tcon->num_opens, 0);
40307 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40308 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40309 + atomic_set_unchecked(&tcon->num_closes, 0);
40310 + atomic_set_unchecked(&tcon->num_deletes, 0);
40311 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40312 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40313 + atomic_set_unchecked(&tcon->num_renames, 0);
40314 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40315 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40316 + atomic_set_unchecked(&tcon->num_fnext, 0);
40317 + atomic_set_unchecked(&tcon->num_fclose, 0);
40318 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40319 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40320 + atomic_set_unchecked(&tcon->num_locks, 0);
40321 }
40322 }
40323 }
40324 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40325 if (tcon->need_reconnect)
40326 seq_puts(m, "\tDISCONNECTED ");
40327 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40328 - atomic_read(&tcon->num_smbs_sent),
40329 - atomic_read(&tcon->num_oplock_brks));
40330 + atomic_read_unchecked(&tcon->num_smbs_sent),
40331 + atomic_read_unchecked(&tcon->num_oplock_brks));
40332 seq_printf(m, "\nReads: %d Bytes: %lld",
40333 - atomic_read(&tcon->num_reads),
40334 + atomic_read_unchecked(&tcon->num_reads),
40335 (long long)(tcon->bytes_read));
40336 seq_printf(m, "\nWrites: %d Bytes: %lld",
40337 - atomic_read(&tcon->num_writes),
40338 + atomic_read_unchecked(&tcon->num_writes),
40339 (long long)(tcon->bytes_written));
40340 seq_printf(m, "\nFlushes: %d",
40341 - atomic_read(&tcon->num_flushes));
40342 + atomic_read_unchecked(&tcon->num_flushes));
40343 seq_printf(m, "\nLocks: %d HardLinks: %d "
40344 "Symlinks: %d",
40345 - atomic_read(&tcon->num_locks),
40346 - atomic_read(&tcon->num_hardlinks),
40347 - atomic_read(&tcon->num_symlinks));
40348 + atomic_read_unchecked(&tcon->num_locks),
40349 + atomic_read_unchecked(&tcon->num_hardlinks),
40350 + atomic_read_unchecked(&tcon->num_symlinks));
40351 seq_printf(m, "\nOpens: %d Closes: %d "
40352 "Deletes: %d",
40353 - atomic_read(&tcon->num_opens),
40354 - atomic_read(&tcon->num_closes),
40355 - atomic_read(&tcon->num_deletes));
40356 + atomic_read_unchecked(&tcon->num_opens),
40357 + atomic_read_unchecked(&tcon->num_closes),
40358 + atomic_read_unchecked(&tcon->num_deletes));
40359 seq_printf(m, "\nPosix Opens: %d "
40360 "Posix Mkdirs: %d",
40361 - atomic_read(&tcon->num_posixopens),
40362 - atomic_read(&tcon->num_posixmkdirs));
40363 + atomic_read_unchecked(&tcon->num_posixopens),
40364 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40365 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40366 - atomic_read(&tcon->num_mkdirs),
40367 - atomic_read(&tcon->num_rmdirs));
40368 + atomic_read_unchecked(&tcon->num_mkdirs),
40369 + atomic_read_unchecked(&tcon->num_rmdirs));
40370 seq_printf(m, "\nRenames: %d T2 Renames %d",
40371 - atomic_read(&tcon->num_renames),
40372 - atomic_read(&tcon->num_t2renames));
40373 + atomic_read_unchecked(&tcon->num_renames),
40374 + atomic_read_unchecked(&tcon->num_t2renames));
40375 seq_printf(m, "\nFindFirst: %d FNext %d "
40376 "FClose %d",
40377 - atomic_read(&tcon->num_ffirst),
40378 - atomic_read(&tcon->num_fnext),
40379 - atomic_read(&tcon->num_fclose));
40380 + atomic_read_unchecked(&tcon->num_ffirst),
40381 + atomic_read_unchecked(&tcon->num_fnext),
40382 + atomic_read_unchecked(&tcon->num_fclose));
40383 }
40384 }
40385 }
40386 diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40387 --- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40388 +++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40389 @@ -252,28 +252,28 @@ struct cifsTconInfo {
40390 __u16 Flags; /* optional support bits */
40391 enum statusEnum tidStatus;
40392 #ifdef CONFIG_CIFS_STATS
40393 - atomic_t num_smbs_sent;
40394 - atomic_t num_writes;
40395 - atomic_t num_reads;
40396 - atomic_t num_flushes;
40397 - atomic_t num_oplock_brks;
40398 - atomic_t num_opens;
40399 - atomic_t num_closes;
40400 - atomic_t num_deletes;
40401 - atomic_t num_mkdirs;
40402 - atomic_t num_posixopens;
40403 - atomic_t num_posixmkdirs;
40404 - atomic_t num_rmdirs;
40405 - atomic_t num_renames;
40406 - atomic_t num_t2renames;
40407 - atomic_t num_ffirst;
40408 - atomic_t num_fnext;
40409 - atomic_t num_fclose;
40410 - atomic_t num_hardlinks;
40411 - atomic_t num_symlinks;
40412 - atomic_t num_locks;
40413 - atomic_t num_acl_get;
40414 - atomic_t num_acl_set;
40415 + atomic_unchecked_t num_smbs_sent;
40416 + atomic_unchecked_t num_writes;
40417 + atomic_unchecked_t num_reads;
40418 + atomic_unchecked_t num_flushes;
40419 + atomic_unchecked_t num_oplock_brks;
40420 + atomic_unchecked_t num_opens;
40421 + atomic_unchecked_t num_closes;
40422 + atomic_unchecked_t num_deletes;
40423 + atomic_unchecked_t num_mkdirs;
40424 + atomic_unchecked_t num_posixopens;
40425 + atomic_unchecked_t num_posixmkdirs;
40426 + atomic_unchecked_t num_rmdirs;
40427 + atomic_unchecked_t num_renames;
40428 + atomic_unchecked_t num_t2renames;
40429 + atomic_unchecked_t num_ffirst;
40430 + atomic_unchecked_t num_fnext;
40431 + atomic_unchecked_t num_fclose;
40432 + atomic_unchecked_t num_hardlinks;
40433 + atomic_unchecked_t num_symlinks;
40434 + atomic_unchecked_t num_locks;
40435 + atomic_unchecked_t num_acl_get;
40436 + atomic_unchecked_t num_acl_set;
40437 #ifdef CONFIG_CIFS_STATS2
40438 unsigned long long time_writes;
40439 unsigned long long time_reads;
40440 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40441 }
40442
40443 #ifdef CONFIG_CIFS_STATS
40444 -#define cifs_stats_inc atomic_inc
40445 +#define cifs_stats_inc atomic_inc_unchecked
40446
40447 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40448 unsigned int bytes)
40449 diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40450 --- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40451 +++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40452 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40453
40454 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40455 {
40456 - char *p = nd_get_link(nd);
40457 + const char *p = nd_get_link(nd);
40458 if (!IS_ERR(p))
40459 kfree(p);
40460 }
40461 diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40462 --- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40463 +++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40464 @@ -24,14 +24,14 @@
40465 #include <linux/coda_fs_i.h>
40466 #include <linux/coda_cache.h>
40467
40468 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40469 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40470
40471 /* replace or extend an acl cache hit */
40472 void coda_cache_enter(struct inode *inode, int mask)
40473 {
40474 struct coda_inode_info *cii = ITOC(inode);
40475
40476 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40477 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40478 if (cii->c_uid != current_fsuid()) {
40479 cii->c_uid = current_fsuid();
40480 cii->c_cached_perm = mask;
40481 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40482 void coda_cache_clear_inode(struct inode *inode)
40483 {
40484 struct coda_inode_info *cii = ITOC(inode);
40485 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40486 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40487 }
40488
40489 /* remove all acl caches */
40490 void coda_cache_clear_all(struct super_block *sb)
40491 {
40492 - atomic_inc(&permission_epoch);
40493 + atomic_inc_unchecked(&permission_epoch);
40494 }
40495
40496
40497 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40498
40499 hit = (mask & cii->c_cached_perm) == mask &&
40500 cii->c_uid == current_fsuid() &&
40501 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40502 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40503
40504 return hit;
40505 }
40506 diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40507 --- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40508 +++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40509 @@ -29,10 +29,12 @@
40510 #undef elfhdr
40511 #undef elf_phdr
40512 #undef elf_note
40513 +#undef elf_dyn
40514 #undef elf_addr_t
40515 #define elfhdr elf32_hdr
40516 #define elf_phdr elf32_phdr
40517 #define elf_note elf32_note
40518 +#define elf_dyn Elf32_Dyn
40519 #define elf_addr_t Elf32_Addr
40520
40521 /*
40522 diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40523 --- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40524 +++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40525 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40526
40527 struct compat_readdir_callback {
40528 struct compat_old_linux_dirent __user *dirent;
40529 + struct file * file;
40530 int result;
40531 };
40532
40533 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40534 buf->result = -EOVERFLOW;
40535 return -EOVERFLOW;
40536 }
40537 +
40538 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40539 + return 0;
40540 +
40541 buf->result++;
40542 dirent = buf->dirent;
40543 if (!access_ok(VERIFY_WRITE, dirent,
40544 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40545
40546 buf.result = 0;
40547 buf.dirent = dirent;
40548 + buf.file = file;
40549
40550 error = vfs_readdir(file, compat_fillonedir, &buf);
40551 if (buf.result)
40552 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
40553 struct compat_getdents_callback {
40554 struct compat_linux_dirent __user *current_dir;
40555 struct compat_linux_dirent __user *previous;
40556 + struct file * file;
40557 int count;
40558 int error;
40559 };
40560 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40561 buf->error = -EOVERFLOW;
40562 return -EOVERFLOW;
40563 }
40564 +
40565 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40566 + return 0;
40567 +
40568 dirent = buf->previous;
40569 if (dirent) {
40570 if (__put_user(offset, &dirent->d_off))
40571 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40572 buf.previous = NULL;
40573 buf.count = count;
40574 buf.error = 0;
40575 + buf.file = file;
40576
40577 error = vfs_readdir(file, compat_filldir, &buf);
40578 if (error >= 0)
40579 @@ -987,6 +999,7 @@ out:
40580 struct compat_getdents_callback64 {
40581 struct linux_dirent64 __user *current_dir;
40582 struct linux_dirent64 __user *previous;
40583 + struct file * file;
40584 int count;
40585 int error;
40586 };
40587 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40588 buf->error = -EINVAL; /* only used if we fail.. */
40589 if (reclen > buf->count)
40590 return -EINVAL;
40591 +
40592 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40593 + return 0;
40594 +
40595 dirent = buf->previous;
40596
40597 if (dirent) {
40598 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40599 buf.previous = NULL;
40600 buf.count = count;
40601 buf.error = 0;
40602 + buf.file = file;
40603
40604 error = vfs_readdir(file, compat_filldir64, &buf);
40605 if (error >= 0)
40606 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40607 * verify all the pointers
40608 */
40609 ret = -EINVAL;
40610 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40611 + if (nr_segs > UIO_MAXIOV)
40612 goto out;
40613 if (!file->f_op)
40614 goto out;
40615 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40616 compat_uptr_t __user *envp,
40617 struct pt_regs * regs)
40618 {
40619 +#ifdef CONFIG_GRKERNSEC
40620 + struct file *old_exec_file;
40621 + struct acl_subject_label *old_acl;
40622 + struct rlimit old_rlim[RLIM_NLIMITS];
40623 +#endif
40624 struct linux_binprm *bprm;
40625 struct file *file;
40626 struct files_struct *displaced;
40627 bool clear_in_exec;
40628 int retval;
40629 + const struct cred *cred = current_cred();
40630 +
40631 + /*
40632 + * We move the actual failure in case of RLIMIT_NPROC excess from
40633 + * set*uid() to execve() because too many poorly written programs
40634 + * don't check setuid() return code. Here we additionally recheck
40635 + * whether NPROC limit is still exceeded.
40636 + */
40637 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40638 +
40639 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40640 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40641 + retval = -EAGAIN;
40642 + goto out_ret;
40643 + }
40644 +
40645 + /* We're below the limit (still or again), so we don't want to make
40646 + * further execve() calls fail. */
40647 + current->flags &= ~PF_NPROC_EXCEEDED;
40648
40649 retval = unshare_files(&displaced);
40650 if (retval)
40651 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40652 bprm->filename = filename;
40653 bprm->interp = filename;
40654
40655 + if (gr_process_user_ban()) {
40656 + retval = -EPERM;
40657 + goto out_file;
40658 + }
40659 +
40660 + retval = -EACCES;
40661 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40662 + goto out_file;
40663 +
40664 retval = bprm_mm_init(bprm);
40665 if (retval)
40666 goto out_file;
40667 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40668 if (retval < 0)
40669 goto out;
40670
40671 + if (!gr_tpe_allow(file)) {
40672 + retval = -EACCES;
40673 + goto out;
40674 + }
40675 +
40676 + if (gr_check_crash_exec(file)) {
40677 + retval = -EACCES;
40678 + goto out;
40679 + }
40680 +
40681 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40682 +
40683 + gr_handle_exec_args_compat(bprm, argv);
40684 +
40685 +#ifdef CONFIG_GRKERNSEC
40686 + old_acl = current->acl;
40687 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40688 + old_exec_file = current->exec_file;
40689 + get_file(file);
40690 + current->exec_file = file;
40691 +#endif
40692 +
40693 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40694 + bprm->unsafe & LSM_UNSAFE_SHARE);
40695 + if (retval < 0)
40696 + goto out_fail;
40697 +
40698 retval = search_binary_handler(bprm, regs);
40699 if (retval < 0)
40700 - goto out;
40701 + goto out_fail;
40702 +#ifdef CONFIG_GRKERNSEC
40703 + if (old_exec_file)
40704 + fput(old_exec_file);
40705 +#endif
40706
40707 /* execve succeeded */
40708 current->fs->in_exec = 0;
40709 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40710 put_files_struct(displaced);
40711 return retval;
40712
40713 +out_fail:
40714 +#ifdef CONFIG_GRKERNSEC
40715 + current->acl = old_acl;
40716 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40717 + fput(current->exec_file);
40718 + current->exec_file = old_exec_file;
40719 +#endif
40720 +
40721 out:
40722 if (bprm->mm) {
40723 acct_arg_size(bprm, 0);
40724 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40725 struct fdtable *fdt;
40726 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40727
40728 + pax_track_stack();
40729 +
40730 if (n < 0)
40731 goto out_nofds;
40732
40733 diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40734 --- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40735 +++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40736 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40737 up = (struct compat_video_spu_palette __user *) arg;
40738 err = get_user(palp, &up->palette);
40739 err |= get_user(length, &up->length);
40740 + if (err)
40741 + return -EFAULT;
40742
40743 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40744 err = put_user(compat_ptr(palp), &up_native->palette);
40745 diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40746 --- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40747 +++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40748 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40749 }
40750 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40751 struct configfs_dirent *next;
40752 - const char * name;
40753 + const unsigned char * name;
40754 + char d_name[sizeof(next->s_dentry->d_iname)];
40755 int len;
40756
40757 next = list_entry(p, struct configfs_dirent,
40758 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40759 continue;
40760
40761 name = configfs_get_name(next);
40762 - len = strlen(name);
40763 + if (next->s_dentry && name == next->s_dentry->d_iname) {
40764 + len = next->s_dentry->d_name.len;
40765 + memcpy(d_name, name, len);
40766 + name = d_name;
40767 + } else
40768 + len = strlen(name);
40769 if (next->s_dentry)
40770 ino = next->s_dentry->d_inode->i_ino;
40771 else
40772 diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40773 --- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40774 +++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40775 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40776
40777 static struct kmem_cache *dentry_cache __read_mostly;
40778
40779 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40780 -
40781 /*
40782 * This is the single most critical data structure when it comes
40783 * to the dcache: the hashtable for lookups. Somebody should try
40784 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40785 mempages -= reserve;
40786
40787 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40788 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40789 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40790
40791 dcache_init();
40792 inode_init();
40793 diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40794 --- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40795 +++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40796 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40797 kfree(ls);
40798 }
40799
40800 -static struct sysfs_ops dlm_attr_ops = {
40801 +static const struct sysfs_ops dlm_attr_ops = {
40802 .show = dlm_attr_show,
40803 .store = dlm_attr_store,
40804 };
40805 diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40806 --- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40807 +++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40808 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40809 old_fs = get_fs();
40810 set_fs(get_ds());
40811 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40812 - (char __user *)lower_buf,
40813 + (__force char __user *)lower_buf,
40814 lower_bufsiz);
40815 set_fs(old_fs);
40816 if (rc < 0)
40817 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40818 }
40819 old_fs = get_fs();
40820 set_fs(get_ds());
40821 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40822 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40823 set_fs(old_fs);
40824 if (rc < 0)
40825 goto out_free;
40826 diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40827 --- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40828 +++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40829 @@ -56,12 +56,24 @@
40830 #include <linux/fsnotify.h>
40831 #include <linux/fs_struct.h>
40832 #include <linux/pipe_fs_i.h>
40833 +#include <linux/random.h>
40834 +#include <linux/seq_file.h>
40835 +
40836 +#ifdef CONFIG_PAX_REFCOUNT
40837 +#include <linux/kallsyms.h>
40838 +#include <linux/kdebug.h>
40839 +#endif
40840
40841 #include <asm/uaccess.h>
40842 #include <asm/mmu_context.h>
40843 #include <asm/tlb.h>
40844 #include "internal.h"
40845
40846 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40847 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40848 +EXPORT_SYMBOL(pax_set_initial_flags_func);
40849 +#endif
40850 +
40851 int core_uses_pid;
40852 char core_pattern[CORENAME_MAX_SIZE] = "core";
40853 unsigned int core_pipe_limit;
40854 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40855 goto out;
40856
40857 file = do_filp_open(AT_FDCWD, tmp,
40858 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40859 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40860 MAY_READ | MAY_EXEC | MAY_OPEN);
40861 putname(tmp);
40862 error = PTR_ERR(file);
40863 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40864 int write)
40865 {
40866 struct page *page;
40867 - int ret;
40868
40869 -#ifdef CONFIG_STACK_GROWSUP
40870 - if (write) {
40871 - ret = expand_stack_downwards(bprm->vma, pos);
40872 - if (ret < 0)
40873 - return NULL;
40874 - }
40875 -#endif
40876 - ret = get_user_pages(current, bprm->mm, pos,
40877 - 1, write, 1, &page, NULL);
40878 - if (ret <= 0)
40879 + if (0 > expand_stack_downwards(bprm->vma, pos))
40880 + return NULL;
40881 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40882 return NULL;
40883
40884 if (write) {
40885 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40886 vma->vm_end = STACK_TOP_MAX;
40887 vma->vm_start = vma->vm_end - PAGE_SIZE;
40888 vma->vm_flags = VM_STACK_FLAGS;
40889 +
40890 +#ifdef CONFIG_PAX_SEGMEXEC
40891 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40892 +#endif
40893 +
40894 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40895
40896 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40897 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40898 mm->stack_vm = mm->total_vm = 1;
40899 up_write(&mm->mmap_sem);
40900 bprm->p = vma->vm_end - sizeof(void *);
40901 +
40902 +#ifdef CONFIG_PAX_RANDUSTACK
40903 + if (randomize_va_space)
40904 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40905 +#endif
40906 +
40907 return 0;
40908 err:
40909 up_write(&mm->mmap_sem);
40910 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40911 int r;
40912 mm_segment_t oldfs = get_fs();
40913 set_fs(KERNEL_DS);
40914 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
40915 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40916 set_fs(oldfs);
40917 return r;
40918 }
40919 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40920 unsigned long new_end = old_end - shift;
40921 struct mmu_gather *tlb;
40922
40923 - BUG_ON(new_start > new_end);
40924 + if (new_start >= new_end || new_start < mmap_min_addr)
40925 + return -ENOMEM;
40926
40927 /*
40928 * ensure there are no vmas between where we want to go
40929 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40930 if (vma != find_vma(mm, new_start))
40931 return -EFAULT;
40932
40933 +#ifdef CONFIG_PAX_SEGMEXEC
40934 + BUG_ON(pax_find_mirror_vma(vma));
40935 +#endif
40936 +
40937 /*
40938 * cover the whole range: [new_start, old_end)
40939 */
40940 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40941 stack_top = arch_align_stack(stack_top);
40942 stack_top = PAGE_ALIGN(stack_top);
40943
40944 - if (unlikely(stack_top < mmap_min_addr) ||
40945 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40946 - return -ENOMEM;
40947 -
40948 stack_shift = vma->vm_end - stack_top;
40949
40950 bprm->p -= stack_shift;
40951 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40952 bprm->exec -= stack_shift;
40953
40954 down_write(&mm->mmap_sem);
40955 +
40956 + /* Move stack pages down in memory. */
40957 + if (stack_shift) {
40958 + ret = shift_arg_pages(vma, stack_shift);
40959 + if (ret)
40960 + goto out_unlock;
40961 + }
40962 +
40963 vm_flags = VM_STACK_FLAGS;
40964
40965 /*
40966 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40967 vm_flags &= ~VM_EXEC;
40968 vm_flags |= mm->def_flags;
40969
40970 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40971 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40972 + vm_flags &= ~VM_EXEC;
40973 +
40974 +#ifdef CONFIG_PAX_MPROTECT
40975 + if (mm->pax_flags & MF_PAX_MPROTECT)
40976 + vm_flags &= ~VM_MAYEXEC;
40977 +#endif
40978 +
40979 + }
40980 +#endif
40981 +
40982 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40983 vm_flags);
40984 if (ret)
40985 goto out_unlock;
40986 BUG_ON(prev != vma);
40987
40988 - /* Move stack pages down in memory. */
40989 - if (stack_shift) {
40990 - ret = shift_arg_pages(vma, stack_shift);
40991 - if (ret)
40992 - goto out_unlock;
40993 - }
40994 -
40995 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40996 stack_size = vma->vm_end - vma->vm_start;
40997 /*
40998 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40999 int err;
41000
41001 file = do_filp_open(AT_FDCWD, name,
41002 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41003 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41004 MAY_EXEC | MAY_OPEN);
41005 if (IS_ERR(file))
41006 goto out;
41007 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
41008 old_fs = get_fs();
41009 set_fs(get_ds());
41010 /* The cast to a user pointer is valid due to the set_fs() */
41011 - result = vfs_read(file, (void __user *)addr, count, &pos);
41012 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
41013 set_fs(old_fs);
41014 return result;
41015 }
41016 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
41017 }
41018 rcu_read_unlock();
41019
41020 - if (p->fs->users > n_fs) {
41021 + if (atomic_read(&p->fs->users) > n_fs) {
41022 bprm->unsafe |= LSM_UNSAFE_SHARE;
41023 } else {
41024 res = -EAGAIN;
41025 @@ -1347,11 +1376,35 @@ int do_execve(char * filename,
41026 char __user *__user *envp,
41027 struct pt_regs * regs)
41028 {
41029 +#ifdef CONFIG_GRKERNSEC
41030 + struct file *old_exec_file;
41031 + struct acl_subject_label *old_acl;
41032 + struct rlimit old_rlim[RLIM_NLIMITS];
41033 +#endif
41034 struct linux_binprm *bprm;
41035 struct file *file;
41036 struct files_struct *displaced;
41037 bool clear_in_exec;
41038 int retval;
41039 + const struct cred *cred = current_cred();
41040 +
41041 + /*
41042 + * We move the actual failure in case of RLIMIT_NPROC excess from
41043 + * set*uid() to execve() because too many poorly written programs
41044 + * don't check setuid() return code. Here we additionally recheck
41045 + * whether NPROC limit is still exceeded.
41046 + */
41047 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41048 +
41049 + if ((current->flags & PF_NPROC_EXCEEDED) &&
41050 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
41051 + retval = -EAGAIN;
41052 + goto out_ret;
41053 + }
41054 +
41055 + /* We're below the limit (still or again), so we don't want to make
41056 + * further execve() calls fail. */
41057 + current->flags &= ~PF_NPROC_EXCEEDED;
41058
41059 retval = unshare_files(&displaced);
41060 if (retval)
41061 @@ -1383,6 +1436,16 @@ int do_execve(char * filename,
41062 bprm->filename = filename;
41063 bprm->interp = filename;
41064
41065 + if (gr_process_user_ban()) {
41066 + retval = -EPERM;
41067 + goto out_file;
41068 + }
41069 +
41070 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41071 + retval = -EACCES;
41072 + goto out_file;
41073 + }
41074 +
41075 retval = bprm_mm_init(bprm);
41076 if (retval)
41077 goto out_file;
41078 @@ -1412,10 +1475,41 @@ int do_execve(char * filename,
41079 if (retval < 0)
41080 goto out;
41081
41082 + if (!gr_tpe_allow(file)) {
41083 + retval = -EACCES;
41084 + goto out;
41085 + }
41086 +
41087 + if (gr_check_crash_exec(file)) {
41088 + retval = -EACCES;
41089 + goto out;
41090 + }
41091 +
41092 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41093 +
41094 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41095 +
41096 +#ifdef CONFIG_GRKERNSEC
41097 + old_acl = current->acl;
41098 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41099 + old_exec_file = current->exec_file;
41100 + get_file(file);
41101 + current->exec_file = file;
41102 +#endif
41103 +
41104 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41105 + bprm->unsafe & LSM_UNSAFE_SHARE);
41106 + if (retval < 0)
41107 + goto out_fail;
41108 +
41109 current->flags &= ~PF_KTHREAD;
41110 retval = search_binary_handler(bprm,regs);
41111 if (retval < 0)
41112 - goto out;
41113 + goto out_fail;
41114 +#ifdef CONFIG_GRKERNSEC
41115 + if (old_exec_file)
41116 + fput(old_exec_file);
41117 +#endif
41118
41119 /* execve succeeded */
41120 current->fs->in_exec = 0;
41121 @@ -1426,6 +1520,14 @@ int do_execve(char * filename,
41122 put_files_struct(displaced);
41123 return retval;
41124
41125 +out_fail:
41126 +#ifdef CONFIG_GRKERNSEC
41127 + current->acl = old_acl;
41128 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41129 + fput(current->exec_file);
41130 + current->exec_file = old_exec_file;
41131 +#endif
41132 +
41133 out:
41134 if (bprm->mm) {
41135 acct_arg_size(bprm, 0);
41136 @@ -1591,6 +1693,220 @@ out:
41137 return ispipe;
41138 }
41139
41140 +int pax_check_flags(unsigned long *flags)
41141 +{
41142 + int retval = 0;
41143 +
41144 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41145 + if (*flags & MF_PAX_SEGMEXEC)
41146 + {
41147 + *flags &= ~MF_PAX_SEGMEXEC;
41148 + retval = -EINVAL;
41149 + }
41150 +#endif
41151 +
41152 + if ((*flags & MF_PAX_PAGEEXEC)
41153 +
41154 +#ifdef CONFIG_PAX_PAGEEXEC
41155 + && (*flags & MF_PAX_SEGMEXEC)
41156 +#endif
41157 +
41158 + )
41159 + {
41160 + *flags &= ~MF_PAX_PAGEEXEC;
41161 + retval = -EINVAL;
41162 + }
41163 +
41164 + if ((*flags & MF_PAX_MPROTECT)
41165 +
41166 +#ifdef CONFIG_PAX_MPROTECT
41167 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41168 +#endif
41169 +
41170 + )
41171 + {
41172 + *flags &= ~MF_PAX_MPROTECT;
41173 + retval = -EINVAL;
41174 + }
41175 +
41176 + if ((*flags & MF_PAX_EMUTRAMP)
41177 +
41178 +#ifdef CONFIG_PAX_EMUTRAMP
41179 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41180 +#endif
41181 +
41182 + )
41183 + {
41184 + *flags &= ~MF_PAX_EMUTRAMP;
41185 + retval = -EINVAL;
41186 + }
41187 +
41188 + return retval;
41189 +}
41190 +
41191 +EXPORT_SYMBOL(pax_check_flags);
41192 +
41193 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41194 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41195 +{
41196 + struct task_struct *tsk = current;
41197 + struct mm_struct *mm = current->mm;
41198 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41199 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41200 + char *path_exec = NULL;
41201 + char *path_fault = NULL;
41202 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41203 +
41204 + if (buffer_exec && buffer_fault) {
41205 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41206 +
41207 + down_read(&mm->mmap_sem);
41208 + vma = mm->mmap;
41209 + while (vma && (!vma_exec || !vma_fault)) {
41210 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41211 + vma_exec = vma;
41212 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41213 + vma_fault = vma;
41214 + vma = vma->vm_next;
41215 + }
41216 + if (vma_exec) {
41217 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41218 + if (IS_ERR(path_exec))
41219 + path_exec = "<path too long>";
41220 + else {
41221 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41222 + if (path_exec) {
41223 + *path_exec = 0;
41224 + path_exec = buffer_exec;
41225 + } else
41226 + path_exec = "<path too long>";
41227 + }
41228 + }
41229 + if (vma_fault) {
41230 + start = vma_fault->vm_start;
41231 + end = vma_fault->vm_end;
41232 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41233 + if (vma_fault->vm_file) {
41234 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41235 + if (IS_ERR(path_fault))
41236 + path_fault = "<path too long>";
41237 + else {
41238 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41239 + if (path_fault) {
41240 + *path_fault = 0;
41241 + path_fault = buffer_fault;
41242 + } else
41243 + path_fault = "<path too long>";
41244 + }
41245 + } else
41246 + path_fault = "<anonymous mapping>";
41247 + }
41248 + up_read(&mm->mmap_sem);
41249 + }
41250 + if (tsk->signal->curr_ip)
41251 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41252 + else
41253 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41254 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41255 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41256 + task_uid(tsk), task_euid(tsk), pc, sp);
41257 + free_page((unsigned long)buffer_exec);
41258 + free_page((unsigned long)buffer_fault);
41259 + pax_report_insns(pc, sp);
41260 + do_coredump(SIGKILL, SIGKILL, regs);
41261 +}
41262 +#endif
41263 +
41264 +#ifdef CONFIG_PAX_REFCOUNT
41265 +void pax_report_refcount_overflow(struct pt_regs *regs)
41266 +{
41267 + if (current->signal->curr_ip)
41268 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41269 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41270 + else
41271 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41272 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41273 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41274 + show_regs(regs);
41275 + force_sig_specific(SIGKILL, current);
41276 +}
41277 +#endif
41278 +
41279 +#ifdef CONFIG_PAX_USERCOPY
41280 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41281 +int object_is_on_stack(const void *obj, unsigned long len)
41282 +{
41283 + const void * const stack = task_stack_page(current);
41284 + const void * const stackend = stack + THREAD_SIZE;
41285 +
41286 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41287 + const void *frame = NULL;
41288 + const void *oldframe;
41289 +#endif
41290 +
41291 + if (obj + len < obj)
41292 + return -1;
41293 +
41294 + if (obj + len <= stack || stackend <= obj)
41295 + return 0;
41296 +
41297 + if (obj < stack || stackend < obj + len)
41298 + return -1;
41299 +
41300 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41301 + oldframe = __builtin_frame_address(1);
41302 + if (oldframe)
41303 + frame = __builtin_frame_address(2);
41304 + /*
41305 + low ----------------------------------------------> high
41306 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41307 + ^----------------^
41308 + allow copies only within here
41309 + */
41310 + while (stack <= frame && frame < stackend) {
41311 + /* if obj + len extends past the last frame, this
41312 + check won't pass and the next frame will be 0,
41313 + causing us to bail out and correctly report
41314 + the copy as invalid
41315 + */
41316 + if (obj + len <= frame)
41317 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41318 + oldframe = frame;
41319 + frame = *(const void * const *)frame;
41320 + }
41321 + return -1;
41322 +#else
41323 + return 1;
41324 +#endif
41325 +}
41326 +
41327 +
41328 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41329 +{
41330 + if (current->signal->curr_ip)
41331 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41332 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41333 + else
41334 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41335 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41336 +
41337 + dump_stack();
41338 + gr_handle_kernel_exploit();
41339 + do_group_exit(SIGKILL);
41340 +}
41341 +#endif
41342 +
41343 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41344 +void pax_track_stack(void)
41345 +{
41346 + unsigned long sp = (unsigned long)&sp;
41347 + if (sp < current_thread_info()->lowest_stack &&
41348 + sp > (unsigned long)task_stack_page(current))
41349 + current_thread_info()->lowest_stack = sp;
41350 +}
41351 +EXPORT_SYMBOL(pax_track_stack);
41352 +#endif
41353 +
41354 static int zap_process(struct task_struct *start)
41355 {
41356 struct task_struct *t;
41357 @@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41358 pipe = file->f_path.dentry->d_inode->i_pipe;
41359
41360 pipe_lock(pipe);
41361 - pipe->readers++;
41362 - pipe->writers--;
41363 + atomic_inc(&pipe->readers);
41364 + atomic_dec(&pipe->writers);
41365
41366 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41367 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41368 wake_up_interruptible_sync(&pipe->wait);
41369 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41370 pipe_wait(pipe);
41371 }
41372
41373 - pipe->readers--;
41374 - pipe->writers++;
41375 + atomic_dec(&pipe->readers);
41376 + atomic_inc(&pipe->writers);
41377 pipe_unlock(pipe);
41378
41379 }
41380 @@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41381 char **helper_argv = NULL;
41382 int helper_argc = 0;
41383 int dump_count = 0;
41384 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41385 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41386
41387 audit_core_dumps(signr);
41388
41389 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41390 + gr_handle_brute_attach(current, mm->flags);
41391 +
41392 binfmt = mm->binfmt;
41393 if (!binfmt || !binfmt->core_dump)
41394 goto fail;
41395 @@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41396 */
41397 clear_thread_flag(TIF_SIGPENDING);
41398
41399 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41400 +
41401 /*
41402 * lock_kernel() because format_corename() is controlled by sysctl, which
41403 * uses lock_kernel()
41404 @@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41405 goto fail_unlock;
41406 }
41407
41408 - dump_count = atomic_inc_return(&core_dump_count);
41409 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41410 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41411 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41412 task_tgid_vnr(current), current->comm);
41413 @@ -1972,7 +2293,7 @@ close_fail:
41414 filp_close(file, NULL);
41415 fail_dropcount:
41416 if (dump_count)
41417 - atomic_dec(&core_dump_count);
41418 + atomic_dec_unchecked(&core_dump_count);
41419 fail_unlock:
41420 if (helper_argv)
41421 argv_free(helper_argv);
41422 diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41423 --- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41424 +++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41425 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41426
41427 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41428 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41429 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41430 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41431 sbi->s_resuid != current_fsuid() &&
41432 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41433 return 0;
41434 diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41435 --- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41436 +++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41437 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41438
41439 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41440 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41441 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41442 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41443 sbi->s_resuid != current_fsuid() &&
41444 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41445 return 0;
41446 diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41447 --- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41448 +++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41449 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41450 /* Hm, nope. Are (enough) root reserved blocks available? */
41451 if (sbi->s_resuid == current_fsuid() ||
41452 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41453 - capable(CAP_SYS_RESOURCE)) {
41454 + capable_nolog(CAP_SYS_RESOURCE)) {
41455 if (free_blocks >= (nblocks + dirty_blocks))
41456 return 1;
41457 }
41458 diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41459 --- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41460 +++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41461 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41462
41463 /* stats for buddy allocator */
41464 spinlock_t s_mb_pa_lock;
41465 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41466 - atomic_t s_bal_success; /* we found long enough chunks */
41467 - atomic_t s_bal_allocated; /* in blocks */
41468 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41469 - atomic_t s_bal_goals; /* goal hits */
41470 - atomic_t s_bal_breaks; /* too long searches */
41471 - atomic_t s_bal_2orders; /* 2^order hits */
41472 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41473 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41474 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41475 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41476 + atomic_unchecked_t s_bal_goals; /* goal hits */
41477 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41478 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41479 spinlock_t s_bal_lock;
41480 unsigned long s_mb_buddies_generated;
41481 unsigned long long s_mb_generation_time;
41482 - atomic_t s_mb_lost_chunks;
41483 - atomic_t s_mb_preallocated;
41484 - atomic_t s_mb_discarded;
41485 + atomic_unchecked_t s_mb_lost_chunks;
41486 + atomic_unchecked_t s_mb_preallocated;
41487 + atomic_unchecked_t s_mb_discarded;
41488 atomic_t s_lock_busy;
41489
41490 /* locality groups */
41491 diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41492 --- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41493 +++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41494 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41495 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41496
41497 if (EXT4_SB(sb)->s_mb_stats)
41498 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41499 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41500
41501 break;
41502 }
41503 @@ -2131,7 +2131,7 @@ repeat:
41504 ac->ac_status = AC_STATUS_CONTINUE;
41505 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41506 cr = 3;
41507 - atomic_inc(&sbi->s_mb_lost_chunks);
41508 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41509 goto repeat;
41510 }
41511 }
41512 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41513 ext4_grpblk_t counters[16];
41514 } sg;
41515
41516 + pax_track_stack();
41517 +
41518 group--;
41519 if (group == 0)
41520 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41521 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41522 if (sbi->s_mb_stats) {
41523 printk(KERN_INFO
41524 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41525 - atomic_read(&sbi->s_bal_allocated),
41526 - atomic_read(&sbi->s_bal_reqs),
41527 - atomic_read(&sbi->s_bal_success));
41528 + atomic_read_unchecked(&sbi->s_bal_allocated),
41529 + atomic_read_unchecked(&sbi->s_bal_reqs),
41530 + atomic_read_unchecked(&sbi->s_bal_success));
41531 printk(KERN_INFO
41532 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41533 "%u 2^N hits, %u breaks, %u lost\n",
41534 - atomic_read(&sbi->s_bal_ex_scanned),
41535 - atomic_read(&sbi->s_bal_goals),
41536 - atomic_read(&sbi->s_bal_2orders),
41537 - atomic_read(&sbi->s_bal_breaks),
41538 - atomic_read(&sbi->s_mb_lost_chunks));
41539 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41540 + atomic_read_unchecked(&sbi->s_bal_goals),
41541 + atomic_read_unchecked(&sbi->s_bal_2orders),
41542 + atomic_read_unchecked(&sbi->s_bal_breaks),
41543 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41544 printk(KERN_INFO
41545 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41546 sbi->s_mb_buddies_generated++,
41547 sbi->s_mb_generation_time);
41548 printk(KERN_INFO
41549 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41550 - atomic_read(&sbi->s_mb_preallocated),
41551 - atomic_read(&sbi->s_mb_discarded));
41552 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41553 + atomic_read_unchecked(&sbi->s_mb_discarded));
41554 }
41555
41556 free_percpu(sbi->s_locality_groups);
41557 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41558 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41559
41560 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41561 - atomic_inc(&sbi->s_bal_reqs);
41562 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41563 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41564 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41565 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41566 - atomic_inc(&sbi->s_bal_success);
41567 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41568 + atomic_inc_unchecked(&sbi->s_bal_success);
41569 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41570 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41571 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41572 - atomic_inc(&sbi->s_bal_goals);
41573 + atomic_inc_unchecked(&sbi->s_bal_goals);
41574 if (ac->ac_found > sbi->s_mb_max_to_scan)
41575 - atomic_inc(&sbi->s_bal_breaks);
41576 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41577 }
41578
41579 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41580 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41581 trace_ext4_mb_new_inode_pa(ac, pa);
41582
41583 ext4_mb_use_inode_pa(ac, pa);
41584 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41585 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41586
41587 ei = EXT4_I(ac->ac_inode);
41588 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41589 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41590 trace_ext4_mb_new_group_pa(ac, pa);
41591
41592 ext4_mb_use_group_pa(ac, pa);
41593 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41594 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41595
41596 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41597 lg = ac->ac_lg;
41598 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41599 * from the bitmap and continue.
41600 */
41601 }
41602 - atomic_add(free, &sbi->s_mb_discarded);
41603 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
41604
41605 return err;
41606 }
41607 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41608 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41609 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41610 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41611 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41612 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41613
41614 if (ac) {
41615 ac->ac_sb = sb;
41616 diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41617 --- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41618 +++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41619 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41620 }
41621
41622
41623 -static struct sysfs_ops ext4_attr_ops = {
41624 +static const struct sysfs_ops ext4_attr_ops = {
41625 .show = ext4_attr_show,
41626 .store = ext4_attr_store,
41627 };
41628 diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41629 --- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41630 +++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41631 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41632 if (err)
41633 return err;
41634
41635 + if (gr_handle_chroot_fowner(pid, type))
41636 + return -ENOENT;
41637 + if (gr_check_protected_task_fowner(pid, type))
41638 + return -EACCES;
41639 +
41640 f_modown(filp, pid, type, force);
41641 return 0;
41642 }
41643 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41644 switch (cmd) {
41645 case F_DUPFD:
41646 case F_DUPFD_CLOEXEC:
41647 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41648 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41649 break;
41650 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41651 diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41652 --- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41653 +++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41654 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41655 */
41656 filp->f_op = &read_pipefifo_fops;
41657 pipe->r_counter++;
41658 - if (pipe->readers++ == 0)
41659 + if (atomic_inc_return(&pipe->readers) == 1)
41660 wake_up_partner(inode);
41661
41662 - if (!pipe->writers) {
41663 + if (!atomic_read(&pipe->writers)) {
41664 if ((filp->f_flags & O_NONBLOCK)) {
41665 /* suppress POLLHUP until we have
41666 * seen a writer */
41667 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41668 * errno=ENXIO when there is no process reading the FIFO.
41669 */
41670 ret = -ENXIO;
41671 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41672 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41673 goto err;
41674
41675 filp->f_op = &write_pipefifo_fops;
41676 pipe->w_counter++;
41677 - if (!pipe->writers++)
41678 + if (atomic_inc_return(&pipe->writers) == 1)
41679 wake_up_partner(inode);
41680
41681 - if (!pipe->readers) {
41682 + if (!atomic_read(&pipe->readers)) {
41683 wait_for_partner(inode, &pipe->r_counter);
41684 if (signal_pending(current))
41685 goto err_wr;
41686 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41687 */
41688 filp->f_op = &rdwr_pipefifo_fops;
41689
41690 - pipe->readers++;
41691 - pipe->writers++;
41692 + atomic_inc(&pipe->readers);
41693 + atomic_inc(&pipe->writers);
41694 pipe->r_counter++;
41695 pipe->w_counter++;
41696 - if (pipe->readers == 1 || pipe->writers == 1)
41697 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41698 wake_up_partner(inode);
41699 break;
41700
41701 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41702 return 0;
41703
41704 err_rd:
41705 - if (!--pipe->readers)
41706 + if (atomic_dec_and_test(&pipe->readers))
41707 wake_up_interruptible(&pipe->wait);
41708 ret = -ERESTARTSYS;
41709 goto err;
41710
41711 err_wr:
41712 - if (!--pipe->writers)
41713 + if (atomic_dec_and_test(&pipe->writers))
41714 wake_up_interruptible(&pipe->wait);
41715 ret = -ERESTARTSYS;
41716 goto err;
41717
41718 err:
41719 - if (!pipe->readers && !pipe->writers)
41720 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41721 free_pipe_info(inode);
41722
41723 err_nocleanup:
41724 diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41725 --- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41726 +++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41727 @@ -14,6 +14,7 @@
41728 #include <linux/slab.h>
41729 #include <linux/vmalloc.h>
41730 #include <linux/file.h>
41731 +#include <linux/security.h>
41732 #include <linux/fdtable.h>
41733 #include <linux/bitops.h>
41734 #include <linux/interrupt.h>
41735 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41736 * N.B. For clone tasks sharing a files structure, this test
41737 * will limit the total number of files that can be opened.
41738 */
41739 +
41740 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41741 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41742 return -EMFILE;
41743
41744 diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41745 --- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41746 +++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41747 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41748 int len = dot ? dot - name : strlen(name);
41749
41750 fs = __get_fs_type(name, len);
41751 +
41752 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
41753 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41754 +#else
41755 if (!fs && (request_module("%.*s", len, name) == 0))
41756 +#endif
41757 fs = __get_fs_type(name, len);
41758
41759 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41760 diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41761 --- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41762 +++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41763 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41764 parent ? (char *) parent->def->name : "<no-parent>",
41765 def->name, netfs_data);
41766
41767 - fscache_stat(&fscache_n_acquires);
41768 + fscache_stat_unchecked(&fscache_n_acquires);
41769
41770 /* if there's no parent cookie, then we don't create one here either */
41771 if (!parent) {
41772 - fscache_stat(&fscache_n_acquires_null);
41773 + fscache_stat_unchecked(&fscache_n_acquires_null);
41774 _leave(" [no parent]");
41775 return NULL;
41776 }
41777 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41778 /* allocate and initialise a cookie */
41779 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41780 if (!cookie) {
41781 - fscache_stat(&fscache_n_acquires_oom);
41782 + fscache_stat_unchecked(&fscache_n_acquires_oom);
41783 _leave(" [ENOMEM]");
41784 return NULL;
41785 }
41786 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41787
41788 switch (cookie->def->type) {
41789 case FSCACHE_COOKIE_TYPE_INDEX:
41790 - fscache_stat(&fscache_n_cookie_index);
41791 + fscache_stat_unchecked(&fscache_n_cookie_index);
41792 break;
41793 case FSCACHE_COOKIE_TYPE_DATAFILE:
41794 - fscache_stat(&fscache_n_cookie_data);
41795 + fscache_stat_unchecked(&fscache_n_cookie_data);
41796 break;
41797 default:
41798 - fscache_stat(&fscache_n_cookie_special);
41799 + fscache_stat_unchecked(&fscache_n_cookie_special);
41800 break;
41801 }
41802
41803 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41804 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41805 atomic_dec(&parent->n_children);
41806 __fscache_cookie_put(cookie);
41807 - fscache_stat(&fscache_n_acquires_nobufs);
41808 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41809 _leave(" = NULL");
41810 return NULL;
41811 }
41812 }
41813
41814 - fscache_stat(&fscache_n_acquires_ok);
41815 + fscache_stat_unchecked(&fscache_n_acquires_ok);
41816 _leave(" = %p", cookie);
41817 return cookie;
41818 }
41819 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41820 cache = fscache_select_cache_for_object(cookie->parent);
41821 if (!cache) {
41822 up_read(&fscache_addremove_sem);
41823 - fscache_stat(&fscache_n_acquires_no_cache);
41824 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41825 _leave(" = -ENOMEDIUM [no cache]");
41826 return -ENOMEDIUM;
41827 }
41828 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41829 object = cache->ops->alloc_object(cache, cookie);
41830 fscache_stat_d(&fscache_n_cop_alloc_object);
41831 if (IS_ERR(object)) {
41832 - fscache_stat(&fscache_n_object_no_alloc);
41833 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
41834 ret = PTR_ERR(object);
41835 goto error;
41836 }
41837
41838 - fscache_stat(&fscache_n_object_alloc);
41839 + fscache_stat_unchecked(&fscache_n_object_alloc);
41840
41841 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41842
41843 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41844 struct fscache_object *object;
41845 struct hlist_node *_p;
41846
41847 - fscache_stat(&fscache_n_updates);
41848 + fscache_stat_unchecked(&fscache_n_updates);
41849
41850 if (!cookie) {
41851 - fscache_stat(&fscache_n_updates_null);
41852 + fscache_stat_unchecked(&fscache_n_updates_null);
41853 _leave(" [no cookie]");
41854 return;
41855 }
41856 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41857 struct fscache_object *object;
41858 unsigned long event;
41859
41860 - fscache_stat(&fscache_n_relinquishes);
41861 + fscache_stat_unchecked(&fscache_n_relinquishes);
41862 if (retire)
41863 - fscache_stat(&fscache_n_relinquishes_retire);
41864 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41865
41866 if (!cookie) {
41867 - fscache_stat(&fscache_n_relinquishes_null);
41868 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
41869 _leave(" [no cookie]");
41870 return;
41871 }
41872 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41873
41874 /* wait for the cookie to finish being instantiated (or to fail) */
41875 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41876 - fscache_stat(&fscache_n_relinquishes_waitcrt);
41877 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41878 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41879 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41880 }
41881 diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41882 --- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41883 +++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41884 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41885 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41886 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41887
41888 -extern atomic_t fscache_n_op_pend;
41889 -extern atomic_t fscache_n_op_run;
41890 -extern atomic_t fscache_n_op_enqueue;
41891 -extern atomic_t fscache_n_op_deferred_release;
41892 -extern atomic_t fscache_n_op_release;
41893 -extern atomic_t fscache_n_op_gc;
41894 -extern atomic_t fscache_n_op_cancelled;
41895 -extern atomic_t fscache_n_op_rejected;
41896 -
41897 -extern atomic_t fscache_n_attr_changed;
41898 -extern atomic_t fscache_n_attr_changed_ok;
41899 -extern atomic_t fscache_n_attr_changed_nobufs;
41900 -extern atomic_t fscache_n_attr_changed_nomem;
41901 -extern atomic_t fscache_n_attr_changed_calls;
41902 -
41903 -extern atomic_t fscache_n_allocs;
41904 -extern atomic_t fscache_n_allocs_ok;
41905 -extern atomic_t fscache_n_allocs_wait;
41906 -extern atomic_t fscache_n_allocs_nobufs;
41907 -extern atomic_t fscache_n_allocs_intr;
41908 -extern atomic_t fscache_n_allocs_object_dead;
41909 -extern atomic_t fscache_n_alloc_ops;
41910 -extern atomic_t fscache_n_alloc_op_waits;
41911 -
41912 -extern atomic_t fscache_n_retrievals;
41913 -extern atomic_t fscache_n_retrievals_ok;
41914 -extern atomic_t fscache_n_retrievals_wait;
41915 -extern atomic_t fscache_n_retrievals_nodata;
41916 -extern atomic_t fscache_n_retrievals_nobufs;
41917 -extern atomic_t fscache_n_retrievals_intr;
41918 -extern atomic_t fscache_n_retrievals_nomem;
41919 -extern atomic_t fscache_n_retrievals_object_dead;
41920 -extern atomic_t fscache_n_retrieval_ops;
41921 -extern atomic_t fscache_n_retrieval_op_waits;
41922 -
41923 -extern atomic_t fscache_n_stores;
41924 -extern atomic_t fscache_n_stores_ok;
41925 -extern atomic_t fscache_n_stores_again;
41926 -extern atomic_t fscache_n_stores_nobufs;
41927 -extern atomic_t fscache_n_stores_oom;
41928 -extern atomic_t fscache_n_store_ops;
41929 -extern atomic_t fscache_n_store_calls;
41930 -extern atomic_t fscache_n_store_pages;
41931 -extern atomic_t fscache_n_store_radix_deletes;
41932 -extern atomic_t fscache_n_store_pages_over_limit;
41933 -
41934 -extern atomic_t fscache_n_store_vmscan_not_storing;
41935 -extern atomic_t fscache_n_store_vmscan_gone;
41936 -extern atomic_t fscache_n_store_vmscan_busy;
41937 -extern atomic_t fscache_n_store_vmscan_cancelled;
41938 -
41939 -extern atomic_t fscache_n_marks;
41940 -extern atomic_t fscache_n_uncaches;
41941 -
41942 -extern atomic_t fscache_n_acquires;
41943 -extern atomic_t fscache_n_acquires_null;
41944 -extern atomic_t fscache_n_acquires_no_cache;
41945 -extern atomic_t fscache_n_acquires_ok;
41946 -extern atomic_t fscache_n_acquires_nobufs;
41947 -extern atomic_t fscache_n_acquires_oom;
41948 -
41949 -extern atomic_t fscache_n_updates;
41950 -extern atomic_t fscache_n_updates_null;
41951 -extern atomic_t fscache_n_updates_run;
41952 -
41953 -extern atomic_t fscache_n_relinquishes;
41954 -extern atomic_t fscache_n_relinquishes_null;
41955 -extern atomic_t fscache_n_relinquishes_waitcrt;
41956 -extern atomic_t fscache_n_relinquishes_retire;
41957 -
41958 -extern atomic_t fscache_n_cookie_index;
41959 -extern atomic_t fscache_n_cookie_data;
41960 -extern atomic_t fscache_n_cookie_special;
41961 -
41962 -extern atomic_t fscache_n_object_alloc;
41963 -extern atomic_t fscache_n_object_no_alloc;
41964 -extern atomic_t fscache_n_object_lookups;
41965 -extern atomic_t fscache_n_object_lookups_negative;
41966 -extern atomic_t fscache_n_object_lookups_positive;
41967 -extern atomic_t fscache_n_object_lookups_timed_out;
41968 -extern atomic_t fscache_n_object_created;
41969 -extern atomic_t fscache_n_object_avail;
41970 -extern atomic_t fscache_n_object_dead;
41971 -
41972 -extern atomic_t fscache_n_checkaux_none;
41973 -extern atomic_t fscache_n_checkaux_okay;
41974 -extern atomic_t fscache_n_checkaux_update;
41975 -extern atomic_t fscache_n_checkaux_obsolete;
41976 +extern atomic_unchecked_t fscache_n_op_pend;
41977 +extern atomic_unchecked_t fscache_n_op_run;
41978 +extern atomic_unchecked_t fscache_n_op_enqueue;
41979 +extern atomic_unchecked_t fscache_n_op_deferred_release;
41980 +extern atomic_unchecked_t fscache_n_op_release;
41981 +extern atomic_unchecked_t fscache_n_op_gc;
41982 +extern atomic_unchecked_t fscache_n_op_cancelled;
41983 +extern atomic_unchecked_t fscache_n_op_rejected;
41984 +
41985 +extern atomic_unchecked_t fscache_n_attr_changed;
41986 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
41987 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41988 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41989 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
41990 +
41991 +extern atomic_unchecked_t fscache_n_allocs;
41992 +extern atomic_unchecked_t fscache_n_allocs_ok;
41993 +extern atomic_unchecked_t fscache_n_allocs_wait;
41994 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
41995 +extern atomic_unchecked_t fscache_n_allocs_intr;
41996 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
41997 +extern atomic_unchecked_t fscache_n_alloc_ops;
41998 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
41999 +
42000 +extern atomic_unchecked_t fscache_n_retrievals;
42001 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42002 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42003 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42004 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42005 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42006 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42007 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42008 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42009 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42010 +
42011 +extern atomic_unchecked_t fscache_n_stores;
42012 +extern atomic_unchecked_t fscache_n_stores_ok;
42013 +extern atomic_unchecked_t fscache_n_stores_again;
42014 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42015 +extern atomic_unchecked_t fscache_n_stores_oom;
42016 +extern atomic_unchecked_t fscache_n_store_ops;
42017 +extern atomic_unchecked_t fscache_n_store_calls;
42018 +extern atomic_unchecked_t fscache_n_store_pages;
42019 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42020 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42021 +
42022 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42023 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42024 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42025 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42026 +
42027 +extern atomic_unchecked_t fscache_n_marks;
42028 +extern atomic_unchecked_t fscache_n_uncaches;
42029 +
42030 +extern atomic_unchecked_t fscache_n_acquires;
42031 +extern atomic_unchecked_t fscache_n_acquires_null;
42032 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42033 +extern atomic_unchecked_t fscache_n_acquires_ok;
42034 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42035 +extern atomic_unchecked_t fscache_n_acquires_oom;
42036 +
42037 +extern atomic_unchecked_t fscache_n_updates;
42038 +extern atomic_unchecked_t fscache_n_updates_null;
42039 +extern atomic_unchecked_t fscache_n_updates_run;
42040 +
42041 +extern atomic_unchecked_t fscache_n_relinquishes;
42042 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42043 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42044 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42045 +
42046 +extern atomic_unchecked_t fscache_n_cookie_index;
42047 +extern atomic_unchecked_t fscache_n_cookie_data;
42048 +extern atomic_unchecked_t fscache_n_cookie_special;
42049 +
42050 +extern atomic_unchecked_t fscache_n_object_alloc;
42051 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42052 +extern atomic_unchecked_t fscache_n_object_lookups;
42053 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42054 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42055 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42056 +extern atomic_unchecked_t fscache_n_object_created;
42057 +extern atomic_unchecked_t fscache_n_object_avail;
42058 +extern atomic_unchecked_t fscache_n_object_dead;
42059 +
42060 +extern atomic_unchecked_t fscache_n_checkaux_none;
42061 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42062 +extern atomic_unchecked_t fscache_n_checkaux_update;
42063 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42064
42065 extern atomic_t fscache_n_cop_alloc_object;
42066 extern atomic_t fscache_n_cop_lookup_object;
42067 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42068 atomic_inc(stat);
42069 }
42070
42071 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42072 +{
42073 + atomic_inc_unchecked(stat);
42074 +}
42075 +
42076 static inline void fscache_stat_d(atomic_t *stat)
42077 {
42078 atomic_dec(stat);
42079 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
42080
42081 #define __fscache_stat(stat) (NULL)
42082 #define fscache_stat(stat) do {} while (0)
42083 +#define fscache_stat_unchecked(stat) do {} while (0)
42084 #define fscache_stat_d(stat) do {} while (0)
42085 #endif
42086
42087 diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
42088 --- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42089 +++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42090 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
42091 /* update the object metadata on disk */
42092 case FSCACHE_OBJECT_UPDATING:
42093 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42094 - fscache_stat(&fscache_n_updates_run);
42095 + fscache_stat_unchecked(&fscache_n_updates_run);
42096 fscache_stat(&fscache_n_cop_update_object);
42097 object->cache->ops->update_object(object);
42098 fscache_stat_d(&fscache_n_cop_update_object);
42099 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
42100 spin_lock(&object->lock);
42101 object->state = FSCACHE_OBJECT_DEAD;
42102 spin_unlock(&object->lock);
42103 - fscache_stat(&fscache_n_object_dead);
42104 + fscache_stat_unchecked(&fscache_n_object_dead);
42105 goto terminal_transit;
42106
42107 /* handle the parent cache of this object being withdrawn from
42108 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
42109 spin_lock(&object->lock);
42110 object->state = FSCACHE_OBJECT_DEAD;
42111 spin_unlock(&object->lock);
42112 - fscache_stat(&fscache_n_object_dead);
42113 + fscache_stat_unchecked(&fscache_n_object_dead);
42114 goto terminal_transit;
42115
42116 /* complain about the object being woken up once it is
42117 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42118 parent->cookie->def->name, cookie->def->name,
42119 object->cache->tag->name);
42120
42121 - fscache_stat(&fscache_n_object_lookups);
42122 + fscache_stat_unchecked(&fscache_n_object_lookups);
42123 fscache_stat(&fscache_n_cop_lookup_object);
42124 ret = object->cache->ops->lookup_object(object);
42125 fscache_stat_d(&fscache_n_cop_lookup_object);
42126 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42127 if (ret == -ETIMEDOUT) {
42128 /* probably stuck behind another object, so move this one to
42129 * the back of the queue */
42130 - fscache_stat(&fscache_n_object_lookups_timed_out);
42131 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42132 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42133 }
42134
42135 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42136
42137 spin_lock(&object->lock);
42138 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42139 - fscache_stat(&fscache_n_object_lookups_negative);
42140 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42141
42142 /* transit here to allow write requests to begin stacking up
42143 * and read requests to begin returning ENODATA */
42144 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42145 * result, in which case there may be data available */
42146 spin_lock(&object->lock);
42147 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42148 - fscache_stat(&fscache_n_object_lookups_positive);
42149 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42150
42151 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42152
42153 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42154 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42155 } else {
42156 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42157 - fscache_stat(&fscache_n_object_created);
42158 + fscache_stat_unchecked(&fscache_n_object_created);
42159
42160 object->state = FSCACHE_OBJECT_AVAILABLE;
42161 spin_unlock(&object->lock);
42162 @@ -633,7 +633,7 @@ static void fscache_object_available(str
42163 fscache_enqueue_dependents(object);
42164
42165 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42166 - fscache_stat(&fscache_n_object_avail);
42167 + fscache_stat_unchecked(&fscache_n_object_avail);
42168
42169 _leave("");
42170 }
42171 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42172 enum fscache_checkaux result;
42173
42174 if (!object->cookie->def->check_aux) {
42175 - fscache_stat(&fscache_n_checkaux_none);
42176 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42177 return FSCACHE_CHECKAUX_OKAY;
42178 }
42179
42180 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42181 switch (result) {
42182 /* entry okay as is */
42183 case FSCACHE_CHECKAUX_OKAY:
42184 - fscache_stat(&fscache_n_checkaux_okay);
42185 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42186 break;
42187
42188 /* entry requires update */
42189 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42190 - fscache_stat(&fscache_n_checkaux_update);
42191 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42192 break;
42193
42194 /* entry requires deletion */
42195 case FSCACHE_CHECKAUX_OBSOLETE:
42196 - fscache_stat(&fscache_n_checkaux_obsolete);
42197 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42198 break;
42199
42200 default:
42201 diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
42202 --- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42203 +++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42204 @@ -16,7 +16,7 @@
42205 #include <linux/seq_file.h>
42206 #include "internal.h"
42207
42208 -atomic_t fscache_op_debug_id;
42209 +atomic_unchecked_t fscache_op_debug_id;
42210 EXPORT_SYMBOL(fscache_op_debug_id);
42211
42212 /**
42213 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42214 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42215 ASSERTCMP(atomic_read(&op->usage), >, 0);
42216
42217 - fscache_stat(&fscache_n_op_enqueue);
42218 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42219 switch (op->flags & FSCACHE_OP_TYPE) {
42220 case FSCACHE_OP_FAST:
42221 _debug("queue fast");
42222 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42223 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42224 if (op->processor)
42225 fscache_enqueue_operation(op);
42226 - fscache_stat(&fscache_n_op_run);
42227 + fscache_stat_unchecked(&fscache_n_op_run);
42228 }
42229
42230 /*
42231 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42232 if (object->n_ops > 0) {
42233 atomic_inc(&op->usage);
42234 list_add_tail(&op->pend_link, &object->pending_ops);
42235 - fscache_stat(&fscache_n_op_pend);
42236 + fscache_stat_unchecked(&fscache_n_op_pend);
42237 } else if (!list_empty(&object->pending_ops)) {
42238 atomic_inc(&op->usage);
42239 list_add_tail(&op->pend_link, &object->pending_ops);
42240 - fscache_stat(&fscache_n_op_pend);
42241 + fscache_stat_unchecked(&fscache_n_op_pend);
42242 fscache_start_operations(object);
42243 } else {
42244 ASSERTCMP(object->n_in_progress, ==, 0);
42245 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42246 object->n_exclusive++; /* reads and writes must wait */
42247 atomic_inc(&op->usage);
42248 list_add_tail(&op->pend_link, &object->pending_ops);
42249 - fscache_stat(&fscache_n_op_pend);
42250 + fscache_stat_unchecked(&fscache_n_op_pend);
42251 ret = 0;
42252 } else {
42253 /* not allowed to submit ops in any other state */
42254 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42255 if (object->n_exclusive > 0) {
42256 atomic_inc(&op->usage);
42257 list_add_tail(&op->pend_link, &object->pending_ops);
42258 - fscache_stat(&fscache_n_op_pend);
42259 + fscache_stat_unchecked(&fscache_n_op_pend);
42260 } else if (!list_empty(&object->pending_ops)) {
42261 atomic_inc(&op->usage);
42262 list_add_tail(&op->pend_link, &object->pending_ops);
42263 - fscache_stat(&fscache_n_op_pend);
42264 + fscache_stat_unchecked(&fscache_n_op_pend);
42265 fscache_start_operations(object);
42266 } else {
42267 ASSERTCMP(object->n_exclusive, ==, 0);
42268 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42269 object->n_ops++;
42270 atomic_inc(&op->usage);
42271 list_add_tail(&op->pend_link, &object->pending_ops);
42272 - fscache_stat(&fscache_n_op_pend);
42273 + fscache_stat_unchecked(&fscache_n_op_pend);
42274 ret = 0;
42275 } else if (object->state == FSCACHE_OBJECT_DYING ||
42276 object->state == FSCACHE_OBJECT_LC_DYING ||
42277 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42278 - fscache_stat(&fscache_n_op_rejected);
42279 + fscache_stat_unchecked(&fscache_n_op_rejected);
42280 ret = -ENOBUFS;
42281 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42282 fscache_report_unexpected_submission(object, op, ostate);
42283 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42284
42285 ret = -EBUSY;
42286 if (!list_empty(&op->pend_link)) {
42287 - fscache_stat(&fscache_n_op_cancelled);
42288 + fscache_stat_unchecked(&fscache_n_op_cancelled);
42289 list_del_init(&op->pend_link);
42290 object->n_ops--;
42291 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42292 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42293 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42294 BUG();
42295
42296 - fscache_stat(&fscache_n_op_release);
42297 + fscache_stat_unchecked(&fscache_n_op_release);
42298
42299 if (op->release) {
42300 op->release(op);
42301 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42302 * lock, and defer it otherwise */
42303 if (!spin_trylock(&object->lock)) {
42304 _debug("defer put");
42305 - fscache_stat(&fscache_n_op_deferred_release);
42306 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
42307
42308 cache = object->cache;
42309 spin_lock(&cache->op_gc_list_lock);
42310 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42311
42312 _debug("GC DEFERRED REL OBJ%x OP%x",
42313 object->debug_id, op->debug_id);
42314 - fscache_stat(&fscache_n_op_gc);
42315 + fscache_stat_unchecked(&fscache_n_op_gc);
42316
42317 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42318
42319 diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42320 --- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42321 +++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42322 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42323 val = radix_tree_lookup(&cookie->stores, page->index);
42324 if (!val) {
42325 rcu_read_unlock();
42326 - fscache_stat(&fscache_n_store_vmscan_not_storing);
42327 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42328 __fscache_uncache_page(cookie, page);
42329 return true;
42330 }
42331 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42332 spin_unlock(&cookie->stores_lock);
42333
42334 if (xpage) {
42335 - fscache_stat(&fscache_n_store_vmscan_cancelled);
42336 - fscache_stat(&fscache_n_store_radix_deletes);
42337 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42338 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42339 ASSERTCMP(xpage, ==, page);
42340 } else {
42341 - fscache_stat(&fscache_n_store_vmscan_gone);
42342 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42343 }
42344
42345 wake_up_bit(&cookie->flags, 0);
42346 @@ -106,7 +106,7 @@ page_busy:
42347 /* we might want to wait here, but that could deadlock the allocator as
42348 * the slow-work threads writing to the cache may all end up sleeping
42349 * on memory allocation */
42350 - fscache_stat(&fscache_n_store_vmscan_busy);
42351 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42352 return false;
42353 }
42354 EXPORT_SYMBOL(__fscache_maybe_release_page);
42355 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42356 FSCACHE_COOKIE_STORING_TAG);
42357 if (!radix_tree_tag_get(&cookie->stores, page->index,
42358 FSCACHE_COOKIE_PENDING_TAG)) {
42359 - fscache_stat(&fscache_n_store_radix_deletes);
42360 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42361 xpage = radix_tree_delete(&cookie->stores, page->index);
42362 }
42363 spin_unlock(&cookie->stores_lock);
42364 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42365
42366 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42367
42368 - fscache_stat(&fscache_n_attr_changed_calls);
42369 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42370
42371 if (fscache_object_is_active(object)) {
42372 fscache_set_op_state(op, "CallFS");
42373 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42374
42375 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42376
42377 - fscache_stat(&fscache_n_attr_changed);
42378 + fscache_stat_unchecked(&fscache_n_attr_changed);
42379
42380 op = kzalloc(sizeof(*op), GFP_KERNEL);
42381 if (!op) {
42382 - fscache_stat(&fscache_n_attr_changed_nomem);
42383 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42384 _leave(" = -ENOMEM");
42385 return -ENOMEM;
42386 }
42387 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42388 if (fscache_submit_exclusive_op(object, op) < 0)
42389 goto nobufs;
42390 spin_unlock(&cookie->lock);
42391 - fscache_stat(&fscache_n_attr_changed_ok);
42392 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42393 fscache_put_operation(op);
42394 _leave(" = 0");
42395 return 0;
42396 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42397 nobufs:
42398 spin_unlock(&cookie->lock);
42399 kfree(op);
42400 - fscache_stat(&fscache_n_attr_changed_nobufs);
42401 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42402 _leave(" = %d", -ENOBUFS);
42403 return -ENOBUFS;
42404 }
42405 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42406 /* allocate a retrieval operation and attempt to submit it */
42407 op = kzalloc(sizeof(*op), GFP_NOIO);
42408 if (!op) {
42409 - fscache_stat(&fscache_n_retrievals_nomem);
42410 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42411 return NULL;
42412 }
42413
42414 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42415 return 0;
42416 }
42417
42418 - fscache_stat(&fscache_n_retrievals_wait);
42419 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42420
42421 jif = jiffies;
42422 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42423 fscache_wait_bit_interruptible,
42424 TASK_INTERRUPTIBLE) != 0) {
42425 - fscache_stat(&fscache_n_retrievals_intr);
42426 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42427 _leave(" = -ERESTARTSYS");
42428 return -ERESTARTSYS;
42429 }
42430 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42431 */
42432 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42433 struct fscache_retrieval *op,
42434 - atomic_t *stat_op_waits,
42435 - atomic_t *stat_object_dead)
42436 + atomic_unchecked_t *stat_op_waits,
42437 + atomic_unchecked_t *stat_object_dead)
42438 {
42439 int ret;
42440
42441 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42442 goto check_if_dead;
42443
42444 _debug(">>> WT");
42445 - fscache_stat(stat_op_waits);
42446 + fscache_stat_unchecked(stat_op_waits);
42447 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42448 fscache_wait_bit_interruptible,
42449 TASK_INTERRUPTIBLE) < 0) {
42450 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42451
42452 check_if_dead:
42453 if (unlikely(fscache_object_is_dead(object))) {
42454 - fscache_stat(stat_object_dead);
42455 + fscache_stat_unchecked(stat_object_dead);
42456 return -ENOBUFS;
42457 }
42458 return 0;
42459 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42460
42461 _enter("%p,%p,,,", cookie, page);
42462
42463 - fscache_stat(&fscache_n_retrievals);
42464 + fscache_stat_unchecked(&fscache_n_retrievals);
42465
42466 if (hlist_empty(&cookie->backing_objects))
42467 goto nobufs;
42468 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42469 goto nobufs_unlock;
42470 spin_unlock(&cookie->lock);
42471
42472 - fscache_stat(&fscache_n_retrieval_ops);
42473 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42474
42475 /* pin the netfs read context in case we need to do the actual netfs
42476 * read because we've encountered a cache read failure */
42477 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42478
42479 error:
42480 if (ret == -ENOMEM)
42481 - fscache_stat(&fscache_n_retrievals_nomem);
42482 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42483 else if (ret == -ERESTARTSYS)
42484 - fscache_stat(&fscache_n_retrievals_intr);
42485 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42486 else if (ret == -ENODATA)
42487 - fscache_stat(&fscache_n_retrievals_nodata);
42488 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42489 else if (ret < 0)
42490 - fscache_stat(&fscache_n_retrievals_nobufs);
42491 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42492 else
42493 - fscache_stat(&fscache_n_retrievals_ok);
42494 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42495
42496 fscache_put_retrieval(op);
42497 _leave(" = %d", ret);
42498 @@ -453,7 +453,7 @@ nobufs_unlock:
42499 spin_unlock(&cookie->lock);
42500 kfree(op);
42501 nobufs:
42502 - fscache_stat(&fscache_n_retrievals_nobufs);
42503 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42504 _leave(" = -ENOBUFS");
42505 return -ENOBUFS;
42506 }
42507 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42508
42509 _enter("%p,,%d,,,", cookie, *nr_pages);
42510
42511 - fscache_stat(&fscache_n_retrievals);
42512 + fscache_stat_unchecked(&fscache_n_retrievals);
42513
42514 if (hlist_empty(&cookie->backing_objects))
42515 goto nobufs;
42516 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42517 goto nobufs_unlock;
42518 spin_unlock(&cookie->lock);
42519
42520 - fscache_stat(&fscache_n_retrieval_ops);
42521 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42522
42523 /* pin the netfs read context in case we need to do the actual netfs
42524 * read because we've encountered a cache read failure */
42525 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42526
42527 error:
42528 if (ret == -ENOMEM)
42529 - fscache_stat(&fscache_n_retrievals_nomem);
42530 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42531 else if (ret == -ERESTARTSYS)
42532 - fscache_stat(&fscache_n_retrievals_intr);
42533 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42534 else if (ret == -ENODATA)
42535 - fscache_stat(&fscache_n_retrievals_nodata);
42536 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42537 else if (ret < 0)
42538 - fscache_stat(&fscache_n_retrievals_nobufs);
42539 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42540 else
42541 - fscache_stat(&fscache_n_retrievals_ok);
42542 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42543
42544 fscache_put_retrieval(op);
42545 _leave(" = %d", ret);
42546 @@ -570,7 +570,7 @@ nobufs_unlock:
42547 spin_unlock(&cookie->lock);
42548 kfree(op);
42549 nobufs:
42550 - fscache_stat(&fscache_n_retrievals_nobufs);
42551 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42552 _leave(" = -ENOBUFS");
42553 return -ENOBUFS;
42554 }
42555 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42556
42557 _enter("%p,%p,,,", cookie, page);
42558
42559 - fscache_stat(&fscache_n_allocs);
42560 + fscache_stat_unchecked(&fscache_n_allocs);
42561
42562 if (hlist_empty(&cookie->backing_objects))
42563 goto nobufs;
42564 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42565 goto nobufs_unlock;
42566 spin_unlock(&cookie->lock);
42567
42568 - fscache_stat(&fscache_n_alloc_ops);
42569 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42570
42571 ret = fscache_wait_for_retrieval_activation(
42572 object, op,
42573 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42574
42575 error:
42576 if (ret == -ERESTARTSYS)
42577 - fscache_stat(&fscache_n_allocs_intr);
42578 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42579 else if (ret < 0)
42580 - fscache_stat(&fscache_n_allocs_nobufs);
42581 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42582 else
42583 - fscache_stat(&fscache_n_allocs_ok);
42584 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42585
42586 fscache_put_retrieval(op);
42587 _leave(" = %d", ret);
42588 @@ -651,7 +651,7 @@ nobufs_unlock:
42589 spin_unlock(&cookie->lock);
42590 kfree(op);
42591 nobufs:
42592 - fscache_stat(&fscache_n_allocs_nobufs);
42593 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42594 _leave(" = -ENOBUFS");
42595 return -ENOBUFS;
42596 }
42597 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42598
42599 spin_lock(&cookie->stores_lock);
42600
42601 - fscache_stat(&fscache_n_store_calls);
42602 + fscache_stat_unchecked(&fscache_n_store_calls);
42603
42604 /* find a page to store */
42605 page = NULL;
42606 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42607 page = results[0];
42608 _debug("gang %d [%lx]", n, page->index);
42609 if (page->index > op->store_limit) {
42610 - fscache_stat(&fscache_n_store_pages_over_limit);
42611 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42612 goto superseded;
42613 }
42614
42615 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42616
42617 if (page) {
42618 fscache_set_op_state(&op->op, "Store");
42619 - fscache_stat(&fscache_n_store_pages);
42620 + fscache_stat_unchecked(&fscache_n_store_pages);
42621 fscache_stat(&fscache_n_cop_write_page);
42622 ret = object->cache->ops->write_page(op, page);
42623 fscache_stat_d(&fscache_n_cop_write_page);
42624 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42625 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42626 ASSERT(PageFsCache(page));
42627
42628 - fscache_stat(&fscache_n_stores);
42629 + fscache_stat_unchecked(&fscache_n_stores);
42630
42631 op = kzalloc(sizeof(*op), GFP_NOIO);
42632 if (!op)
42633 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42634 spin_unlock(&cookie->stores_lock);
42635 spin_unlock(&object->lock);
42636
42637 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42638 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42639 op->store_limit = object->store_limit;
42640
42641 if (fscache_submit_op(object, &op->op) < 0)
42642 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42643
42644 spin_unlock(&cookie->lock);
42645 radix_tree_preload_end();
42646 - fscache_stat(&fscache_n_store_ops);
42647 - fscache_stat(&fscache_n_stores_ok);
42648 + fscache_stat_unchecked(&fscache_n_store_ops);
42649 + fscache_stat_unchecked(&fscache_n_stores_ok);
42650
42651 /* the slow work queue now carries its own ref on the object */
42652 fscache_put_operation(&op->op);
42653 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42654 return 0;
42655
42656 already_queued:
42657 - fscache_stat(&fscache_n_stores_again);
42658 + fscache_stat_unchecked(&fscache_n_stores_again);
42659 already_pending:
42660 spin_unlock(&cookie->stores_lock);
42661 spin_unlock(&object->lock);
42662 spin_unlock(&cookie->lock);
42663 radix_tree_preload_end();
42664 kfree(op);
42665 - fscache_stat(&fscache_n_stores_ok);
42666 + fscache_stat_unchecked(&fscache_n_stores_ok);
42667 _leave(" = 0");
42668 return 0;
42669
42670 @@ -886,14 +886,14 @@ nobufs:
42671 spin_unlock(&cookie->lock);
42672 radix_tree_preload_end();
42673 kfree(op);
42674 - fscache_stat(&fscache_n_stores_nobufs);
42675 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
42676 _leave(" = -ENOBUFS");
42677 return -ENOBUFS;
42678
42679 nomem_free:
42680 kfree(op);
42681 nomem:
42682 - fscache_stat(&fscache_n_stores_oom);
42683 + fscache_stat_unchecked(&fscache_n_stores_oom);
42684 _leave(" = -ENOMEM");
42685 return -ENOMEM;
42686 }
42687 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42688 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42689 ASSERTCMP(page, !=, NULL);
42690
42691 - fscache_stat(&fscache_n_uncaches);
42692 + fscache_stat_unchecked(&fscache_n_uncaches);
42693
42694 /* cache withdrawal may beat us to it */
42695 if (!PageFsCache(page))
42696 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42697 unsigned long loop;
42698
42699 #ifdef CONFIG_FSCACHE_STATS
42700 - atomic_add(pagevec->nr, &fscache_n_marks);
42701 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42702 #endif
42703
42704 for (loop = 0; loop < pagevec->nr; loop++) {
42705 diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42706 --- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42707 +++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42708 @@ -18,95 +18,95 @@
42709 /*
42710 * operation counters
42711 */
42712 -atomic_t fscache_n_op_pend;
42713 -atomic_t fscache_n_op_run;
42714 -atomic_t fscache_n_op_enqueue;
42715 -atomic_t fscache_n_op_requeue;
42716 -atomic_t fscache_n_op_deferred_release;
42717 -atomic_t fscache_n_op_release;
42718 -atomic_t fscache_n_op_gc;
42719 -atomic_t fscache_n_op_cancelled;
42720 -atomic_t fscache_n_op_rejected;
42721 -
42722 -atomic_t fscache_n_attr_changed;
42723 -atomic_t fscache_n_attr_changed_ok;
42724 -atomic_t fscache_n_attr_changed_nobufs;
42725 -atomic_t fscache_n_attr_changed_nomem;
42726 -atomic_t fscache_n_attr_changed_calls;
42727 -
42728 -atomic_t fscache_n_allocs;
42729 -atomic_t fscache_n_allocs_ok;
42730 -atomic_t fscache_n_allocs_wait;
42731 -atomic_t fscache_n_allocs_nobufs;
42732 -atomic_t fscache_n_allocs_intr;
42733 -atomic_t fscache_n_allocs_object_dead;
42734 -atomic_t fscache_n_alloc_ops;
42735 -atomic_t fscache_n_alloc_op_waits;
42736 -
42737 -atomic_t fscache_n_retrievals;
42738 -atomic_t fscache_n_retrievals_ok;
42739 -atomic_t fscache_n_retrievals_wait;
42740 -atomic_t fscache_n_retrievals_nodata;
42741 -atomic_t fscache_n_retrievals_nobufs;
42742 -atomic_t fscache_n_retrievals_intr;
42743 -atomic_t fscache_n_retrievals_nomem;
42744 -atomic_t fscache_n_retrievals_object_dead;
42745 -atomic_t fscache_n_retrieval_ops;
42746 -atomic_t fscache_n_retrieval_op_waits;
42747 -
42748 -atomic_t fscache_n_stores;
42749 -atomic_t fscache_n_stores_ok;
42750 -atomic_t fscache_n_stores_again;
42751 -atomic_t fscache_n_stores_nobufs;
42752 -atomic_t fscache_n_stores_oom;
42753 -atomic_t fscache_n_store_ops;
42754 -atomic_t fscache_n_store_calls;
42755 -atomic_t fscache_n_store_pages;
42756 -atomic_t fscache_n_store_radix_deletes;
42757 -atomic_t fscache_n_store_pages_over_limit;
42758 -
42759 -atomic_t fscache_n_store_vmscan_not_storing;
42760 -atomic_t fscache_n_store_vmscan_gone;
42761 -atomic_t fscache_n_store_vmscan_busy;
42762 -atomic_t fscache_n_store_vmscan_cancelled;
42763 -
42764 -atomic_t fscache_n_marks;
42765 -atomic_t fscache_n_uncaches;
42766 -
42767 -atomic_t fscache_n_acquires;
42768 -atomic_t fscache_n_acquires_null;
42769 -atomic_t fscache_n_acquires_no_cache;
42770 -atomic_t fscache_n_acquires_ok;
42771 -atomic_t fscache_n_acquires_nobufs;
42772 -atomic_t fscache_n_acquires_oom;
42773 -
42774 -atomic_t fscache_n_updates;
42775 -atomic_t fscache_n_updates_null;
42776 -atomic_t fscache_n_updates_run;
42777 -
42778 -atomic_t fscache_n_relinquishes;
42779 -atomic_t fscache_n_relinquishes_null;
42780 -atomic_t fscache_n_relinquishes_waitcrt;
42781 -atomic_t fscache_n_relinquishes_retire;
42782 -
42783 -atomic_t fscache_n_cookie_index;
42784 -atomic_t fscache_n_cookie_data;
42785 -atomic_t fscache_n_cookie_special;
42786 -
42787 -atomic_t fscache_n_object_alloc;
42788 -atomic_t fscache_n_object_no_alloc;
42789 -atomic_t fscache_n_object_lookups;
42790 -atomic_t fscache_n_object_lookups_negative;
42791 -atomic_t fscache_n_object_lookups_positive;
42792 -atomic_t fscache_n_object_lookups_timed_out;
42793 -atomic_t fscache_n_object_created;
42794 -atomic_t fscache_n_object_avail;
42795 -atomic_t fscache_n_object_dead;
42796 -
42797 -atomic_t fscache_n_checkaux_none;
42798 -atomic_t fscache_n_checkaux_okay;
42799 -atomic_t fscache_n_checkaux_update;
42800 -atomic_t fscache_n_checkaux_obsolete;
42801 +atomic_unchecked_t fscache_n_op_pend;
42802 +atomic_unchecked_t fscache_n_op_run;
42803 +atomic_unchecked_t fscache_n_op_enqueue;
42804 +atomic_unchecked_t fscache_n_op_requeue;
42805 +atomic_unchecked_t fscache_n_op_deferred_release;
42806 +atomic_unchecked_t fscache_n_op_release;
42807 +atomic_unchecked_t fscache_n_op_gc;
42808 +atomic_unchecked_t fscache_n_op_cancelled;
42809 +atomic_unchecked_t fscache_n_op_rejected;
42810 +
42811 +atomic_unchecked_t fscache_n_attr_changed;
42812 +atomic_unchecked_t fscache_n_attr_changed_ok;
42813 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
42814 +atomic_unchecked_t fscache_n_attr_changed_nomem;
42815 +atomic_unchecked_t fscache_n_attr_changed_calls;
42816 +
42817 +atomic_unchecked_t fscache_n_allocs;
42818 +atomic_unchecked_t fscache_n_allocs_ok;
42819 +atomic_unchecked_t fscache_n_allocs_wait;
42820 +atomic_unchecked_t fscache_n_allocs_nobufs;
42821 +atomic_unchecked_t fscache_n_allocs_intr;
42822 +atomic_unchecked_t fscache_n_allocs_object_dead;
42823 +atomic_unchecked_t fscache_n_alloc_ops;
42824 +atomic_unchecked_t fscache_n_alloc_op_waits;
42825 +
42826 +atomic_unchecked_t fscache_n_retrievals;
42827 +atomic_unchecked_t fscache_n_retrievals_ok;
42828 +atomic_unchecked_t fscache_n_retrievals_wait;
42829 +atomic_unchecked_t fscache_n_retrievals_nodata;
42830 +atomic_unchecked_t fscache_n_retrievals_nobufs;
42831 +atomic_unchecked_t fscache_n_retrievals_intr;
42832 +atomic_unchecked_t fscache_n_retrievals_nomem;
42833 +atomic_unchecked_t fscache_n_retrievals_object_dead;
42834 +atomic_unchecked_t fscache_n_retrieval_ops;
42835 +atomic_unchecked_t fscache_n_retrieval_op_waits;
42836 +
42837 +atomic_unchecked_t fscache_n_stores;
42838 +atomic_unchecked_t fscache_n_stores_ok;
42839 +atomic_unchecked_t fscache_n_stores_again;
42840 +atomic_unchecked_t fscache_n_stores_nobufs;
42841 +atomic_unchecked_t fscache_n_stores_oom;
42842 +atomic_unchecked_t fscache_n_store_ops;
42843 +atomic_unchecked_t fscache_n_store_calls;
42844 +atomic_unchecked_t fscache_n_store_pages;
42845 +atomic_unchecked_t fscache_n_store_radix_deletes;
42846 +atomic_unchecked_t fscache_n_store_pages_over_limit;
42847 +
42848 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42849 +atomic_unchecked_t fscache_n_store_vmscan_gone;
42850 +atomic_unchecked_t fscache_n_store_vmscan_busy;
42851 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42852 +
42853 +atomic_unchecked_t fscache_n_marks;
42854 +atomic_unchecked_t fscache_n_uncaches;
42855 +
42856 +atomic_unchecked_t fscache_n_acquires;
42857 +atomic_unchecked_t fscache_n_acquires_null;
42858 +atomic_unchecked_t fscache_n_acquires_no_cache;
42859 +atomic_unchecked_t fscache_n_acquires_ok;
42860 +atomic_unchecked_t fscache_n_acquires_nobufs;
42861 +atomic_unchecked_t fscache_n_acquires_oom;
42862 +
42863 +atomic_unchecked_t fscache_n_updates;
42864 +atomic_unchecked_t fscache_n_updates_null;
42865 +atomic_unchecked_t fscache_n_updates_run;
42866 +
42867 +atomic_unchecked_t fscache_n_relinquishes;
42868 +atomic_unchecked_t fscache_n_relinquishes_null;
42869 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42870 +atomic_unchecked_t fscache_n_relinquishes_retire;
42871 +
42872 +atomic_unchecked_t fscache_n_cookie_index;
42873 +atomic_unchecked_t fscache_n_cookie_data;
42874 +atomic_unchecked_t fscache_n_cookie_special;
42875 +
42876 +atomic_unchecked_t fscache_n_object_alloc;
42877 +atomic_unchecked_t fscache_n_object_no_alloc;
42878 +atomic_unchecked_t fscache_n_object_lookups;
42879 +atomic_unchecked_t fscache_n_object_lookups_negative;
42880 +atomic_unchecked_t fscache_n_object_lookups_positive;
42881 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
42882 +atomic_unchecked_t fscache_n_object_created;
42883 +atomic_unchecked_t fscache_n_object_avail;
42884 +atomic_unchecked_t fscache_n_object_dead;
42885 +
42886 +atomic_unchecked_t fscache_n_checkaux_none;
42887 +atomic_unchecked_t fscache_n_checkaux_okay;
42888 +atomic_unchecked_t fscache_n_checkaux_update;
42889 +atomic_unchecked_t fscache_n_checkaux_obsolete;
42890
42891 atomic_t fscache_n_cop_alloc_object;
42892 atomic_t fscache_n_cop_lookup_object;
42893 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42894 seq_puts(m, "FS-Cache statistics\n");
42895
42896 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42897 - atomic_read(&fscache_n_cookie_index),
42898 - atomic_read(&fscache_n_cookie_data),
42899 - atomic_read(&fscache_n_cookie_special));
42900 + atomic_read_unchecked(&fscache_n_cookie_index),
42901 + atomic_read_unchecked(&fscache_n_cookie_data),
42902 + atomic_read_unchecked(&fscache_n_cookie_special));
42903
42904 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42905 - atomic_read(&fscache_n_object_alloc),
42906 - atomic_read(&fscache_n_object_no_alloc),
42907 - atomic_read(&fscache_n_object_avail),
42908 - atomic_read(&fscache_n_object_dead));
42909 + atomic_read_unchecked(&fscache_n_object_alloc),
42910 + atomic_read_unchecked(&fscache_n_object_no_alloc),
42911 + atomic_read_unchecked(&fscache_n_object_avail),
42912 + atomic_read_unchecked(&fscache_n_object_dead));
42913 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42914 - atomic_read(&fscache_n_checkaux_none),
42915 - atomic_read(&fscache_n_checkaux_okay),
42916 - atomic_read(&fscache_n_checkaux_update),
42917 - atomic_read(&fscache_n_checkaux_obsolete));
42918 + atomic_read_unchecked(&fscache_n_checkaux_none),
42919 + atomic_read_unchecked(&fscache_n_checkaux_okay),
42920 + atomic_read_unchecked(&fscache_n_checkaux_update),
42921 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42922
42923 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42924 - atomic_read(&fscache_n_marks),
42925 - atomic_read(&fscache_n_uncaches));
42926 + atomic_read_unchecked(&fscache_n_marks),
42927 + atomic_read_unchecked(&fscache_n_uncaches));
42928
42929 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42930 " oom=%u\n",
42931 - atomic_read(&fscache_n_acquires),
42932 - atomic_read(&fscache_n_acquires_null),
42933 - atomic_read(&fscache_n_acquires_no_cache),
42934 - atomic_read(&fscache_n_acquires_ok),
42935 - atomic_read(&fscache_n_acquires_nobufs),
42936 - atomic_read(&fscache_n_acquires_oom));
42937 + atomic_read_unchecked(&fscache_n_acquires),
42938 + atomic_read_unchecked(&fscache_n_acquires_null),
42939 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
42940 + atomic_read_unchecked(&fscache_n_acquires_ok),
42941 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
42942 + atomic_read_unchecked(&fscache_n_acquires_oom));
42943
42944 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42945 - atomic_read(&fscache_n_object_lookups),
42946 - atomic_read(&fscache_n_object_lookups_negative),
42947 - atomic_read(&fscache_n_object_lookups_positive),
42948 - atomic_read(&fscache_n_object_lookups_timed_out),
42949 - atomic_read(&fscache_n_object_created));
42950 + atomic_read_unchecked(&fscache_n_object_lookups),
42951 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
42952 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
42953 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42954 + atomic_read_unchecked(&fscache_n_object_created));
42955
42956 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42957 - atomic_read(&fscache_n_updates),
42958 - atomic_read(&fscache_n_updates_null),
42959 - atomic_read(&fscache_n_updates_run));
42960 + atomic_read_unchecked(&fscache_n_updates),
42961 + atomic_read_unchecked(&fscache_n_updates_null),
42962 + atomic_read_unchecked(&fscache_n_updates_run));
42963
42964 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42965 - atomic_read(&fscache_n_relinquishes),
42966 - atomic_read(&fscache_n_relinquishes_null),
42967 - atomic_read(&fscache_n_relinquishes_waitcrt),
42968 - atomic_read(&fscache_n_relinquishes_retire));
42969 + atomic_read_unchecked(&fscache_n_relinquishes),
42970 + atomic_read_unchecked(&fscache_n_relinquishes_null),
42971 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42972 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
42973
42974 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42975 - atomic_read(&fscache_n_attr_changed),
42976 - atomic_read(&fscache_n_attr_changed_ok),
42977 - atomic_read(&fscache_n_attr_changed_nobufs),
42978 - atomic_read(&fscache_n_attr_changed_nomem),
42979 - atomic_read(&fscache_n_attr_changed_calls));
42980 + atomic_read_unchecked(&fscache_n_attr_changed),
42981 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
42982 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42983 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42984 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
42985
42986 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42987 - atomic_read(&fscache_n_allocs),
42988 - atomic_read(&fscache_n_allocs_ok),
42989 - atomic_read(&fscache_n_allocs_wait),
42990 - atomic_read(&fscache_n_allocs_nobufs),
42991 - atomic_read(&fscache_n_allocs_intr));
42992 + atomic_read_unchecked(&fscache_n_allocs),
42993 + atomic_read_unchecked(&fscache_n_allocs_ok),
42994 + atomic_read_unchecked(&fscache_n_allocs_wait),
42995 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
42996 + atomic_read_unchecked(&fscache_n_allocs_intr));
42997 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42998 - atomic_read(&fscache_n_alloc_ops),
42999 - atomic_read(&fscache_n_alloc_op_waits),
43000 - atomic_read(&fscache_n_allocs_object_dead));
43001 + atomic_read_unchecked(&fscache_n_alloc_ops),
43002 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43003 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43004
43005 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43006 " int=%u oom=%u\n",
43007 - atomic_read(&fscache_n_retrievals),
43008 - atomic_read(&fscache_n_retrievals_ok),
43009 - atomic_read(&fscache_n_retrievals_wait),
43010 - atomic_read(&fscache_n_retrievals_nodata),
43011 - atomic_read(&fscache_n_retrievals_nobufs),
43012 - atomic_read(&fscache_n_retrievals_intr),
43013 - atomic_read(&fscache_n_retrievals_nomem));
43014 + atomic_read_unchecked(&fscache_n_retrievals),
43015 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43016 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43017 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43018 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43019 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43020 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43021 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43022 - atomic_read(&fscache_n_retrieval_ops),
43023 - atomic_read(&fscache_n_retrieval_op_waits),
43024 - atomic_read(&fscache_n_retrievals_object_dead));
43025 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43026 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43027 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43028
43029 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43030 - atomic_read(&fscache_n_stores),
43031 - atomic_read(&fscache_n_stores_ok),
43032 - atomic_read(&fscache_n_stores_again),
43033 - atomic_read(&fscache_n_stores_nobufs),
43034 - atomic_read(&fscache_n_stores_oom));
43035 + atomic_read_unchecked(&fscache_n_stores),
43036 + atomic_read_unchecked(&fscache_n_stores_ok),
43037 + atomic_read_unchecked(&fscache_n_stores_again),
43038 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43039 + atomic_read_unchecked(&fscache_n_stores_oom));
43040 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43041 - atomic_read(&fscache_n_store_ops),
43042 - atomic_read(&fscache_n_store_calls),
43043 - atomic_read(&fscache_n_store_pages),
43044 - atomic_read(&fscache_n_store_radix_deletes),
43045 - atomic_read(&fscache_n_store_pages_over_limit));
43046 + atomic_read_unchecked(&fscache_n_store_ops),
43047 + atomic_read_unchecked(&fscache_n_store_calls),
43048 + atomic_read_unchecked(&fscache_n_store_pages),
43049 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43050 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43051
43052 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43053 - atomic_read(&fscache_n_store_vmscan_not_storing),
43054 - atomic_read(&fscache_n_store_vmscan_gone),
43055 - atomic_read(&fscache_n_store_vmscan_busy),
43056 - atomic_read(&fscache_n_store_vmscan_cancelled));
43057 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43058 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43059 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43060 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43061
43062 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43063 - atomic_read(&fscache_n_op_pend),
43064 - atomic_read(&fscache_n_op_run),
43065 - atomic_read(&fscache_n_op_enqueue),
43066 - atomic_read(&fscache_n_op_cancelled),
43067 - atomic_read(&fscache_n_op_rejected));
43068 + atomic_read_unchecked(&fscache_n_op_pend),
43069 + atomic_read_unchecked(&fscache_n_op_run),
43070 + atomic_read_unchecked(&fscache_n_op_enqueue),
43071 + atomic_read_unchecked(&fscache_n_op_cancelled),
43072 + atomic_read_unchecked(&fscache_n_op_rejected));
43073 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43074 - atomic_read(&fscache_n_op_deferred_release),
43075 - atomic_read(&fscache_n_op_release),
43076 - atomic_read(&fscache_n_op_gc));
43077 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43078 + atomic_read_unchecked(&fscache_n_op_release),
43079 + atomic_read_unchecked(&fscache_n_op_gc));
43080
43081 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43082 atomic_read(&fscache_n_cop_alloc_object),
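
[Editor's note on the pattern above] The fs/fscache hunks ending here are one mechanical substitution: every statistics counter that only feeds the /proc/fs/fscache/stats output is retyped from atomic_t to PaX's atomic_unchecked_t, and its users switch to fscache_stat_unchecked()/atomic_read_unchecked(), so that PaX's atomic overflow protection is not applied to counters whose wraparound is harmless (the fscache_n_cop_* counters stay checked). A rough user-space sketch of the split follows; the type and helper names mirror the patch, but this toy implementation is illustrative only, not the PaX one.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative model of the unchecked counter flavour: same cheap atomic
 * increment, but deliberately exempt from overflow detection. */
typedef struct { atomic_uint counter; } atomic_unchecked_t;

static void fscache_stat_unchecked(atomic_unchecked_t *stat)
{
        /* benign statistic: wraparound is acceptable, so no overflow check */
        atomic_fetch_add_explicit(&stat->counter, 1u, memory_order_relaxed);
}

static unsigned int atomic_read_unchecked(atomic_unchecked_t *stat)
{
        return atomic_load_explicit(&stat->counter, memory_order_relaxed);
}

static atomic_unchecked_t fscache_n_retrievals;

int main(void)
{
        fscache_stat_unchecked(&fscache_n_retrievals);
        printf("Retrvls: n=%u\n", atomic_read_unchecked(&fscache_n_retrievals));
        return 0;
}

Both flavours share the same fast atomic operations; the checked variant additionally detects reference-count style overflow, which is exactly why pure statistics are opted out here.
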
43083 diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
43084 --- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43085 +++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43086 @@ -4,6 +4,7 @@
43087 #include <linux/path.h>
43088 #include <linux/slab.h>
43089 #include <linux/fs_struct.h>
43090 +#include <linux/grsecurity.h>
43091
43092 /*
43093 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43094 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43095 old_root = fs->root;
43096 fs->root = *path;
43097 path_get(path);
43098 + gr_set_chroot_entries(current, path);
43099 write_unlock(&fs->lock);
43100 if (old_root.dentry)
43101 path_put(&old_root);
43102 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43103 && fs->root.mnt == old_root->mnt) {
43104 path_get(new_root);
43105 fs->root = *new_root;
43106 + gr_set_chroot_entries(p, new_root);
43107 count++;
43108 }
43109 if (fs->pwd.dentry == old_root->dentry
43110 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43111 task_lock(tsk);
43112 write_lock(&fs->lock);
43113 tsk->fs = NULL;
43114 - kill = !--fs->users;
43115 + gr_clear_chroot_entries(tsk);
43116 + kill = !atomic_dec_return(&fs->users);
43117 write_unlock(&fs->lock);
43118 task_unlock(tsk);
43119 if (kill)
43120 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43121 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43122 /* We don't need to lock fs - think why ;-) */
43123 if (fs) {
43124 - fs->users = 1;
43125 + atomic_set(&fs->users, 1);
43126 fs->in_exec = 0;
43127 rwlock_init(&fs->lock);
43128 fs->umask = old->umask;
43129 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43130
43131 task_lock(current);
43132 write_lock(&fs->lock);
43133 - kill = !--fs->users;
43134 + kill = !atomic_dec_return(&fs->users);
43135 current->fs = new_fs;
43136 + gr_set_chroot_entries(current, &new_fs->root);
43137 write_unlock(&fs->lock);
43138 task_unlock(current);
43139
43140 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43141
43142 /* to be mentioned only in INIT_TASK */
43143 struct fs_struct init_fs = {
43144 - .users = 1,
43145 + .users = ATOMIC_INIT(1),
43146 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43147 .umask = 0022,
43148 };
43149 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43150 task_lock(current);
43151
43152 write_lock(&init_fs.lock);
43153 - init_fs.users++;
43154 + atomic_inc(&init_fs.users);
43155 write_unlock(&init_fs.lock);
43156
43157 write_lock(&fs->lock);
43158 current->fs = &init_fs;
43159 - kill = !--fs->users;
43160 + gr_set_chroot_entries(current, &current->fs->root);
43161 + kill = !atomic_dec_return(&fs->users);
43162 write_unlock(&fs->lock);
43163
43164 task_unlock(current);
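
[Editor's note on the pattern above] The fs_struct hunk does two things: it adds grsecurity's chroot bookkeeping hooks (gr_set_chroot_entries()/gr_clear_chroot_entries()) wherever a task's root changes, and it converts fs->users from a plain integer manipulated under fs->lock into an atomic_t, so the "last user frees" decision becomes kill = !atomic_dec_return(&fs->users). A minimal user-space sketch of that second pattern follows; fs_struct_like, fs_get() and fs_put() are illustrative names, not kernel APIs, and malloc()/free() stand in for the slab allocator.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_like {
        atomic_int users;
};

static struct fs_struct_like *fs_alloc(void)
{
        struct fs_struct_like *fs = malloc(sizeof(*fs));

        if (fs)
                atomic_init(&fs->users, 1);     /* .users = ATOMIC_INIT(1) in the patch */
        return fs;
}

static void fs_get(struct fs_struct_like *fs)
{
        atomic_fetch_add(&fs->users, 1);
}

static void fs_put(struct fs_struct_like *fs)
{
        /* mirrors kill = !atomic_dec_return(&fs->users) in the patch:
         * atomic_fetch_sub() returning 1 means we dropped the last reference */
        int kill = (atomic_fetch_sub(&fs->users, 1) == 1);

        if (kill)
                free(fs);
}

int main(void)
{
        struct fs_struct_like *fs = fs_alloc();

        if (!fs)
                return 1;
        fs_get(fs);     /* a second user */
        fs_put(fs);     /* 2 -> 1: structure survives */
        fs_put(fs);     /* 1 -> 0: last user tears it down */
        puts("done");
        return 0;
}
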
43165 diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
43166 --- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43167 +++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43168 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
43169 INIT_LIST_HEAD(&cuse_conntbl[i]);
43170
43171 /* inherit and extend fuse_dev_operations */
43172 - cuse_channel_fops = fuse_dev_operations;
43173 - cuse_channel_fops.owner = THIS_MODULE;
43174 - cuse_channel_fops.open = cuse_channel_open;
43175 - cuse_channel_fops.release = cuse_channel_release;
43176 + pax_open_kernel();
43177 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43178 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43179 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43180 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43181 + pax_close_kernel();
43182
43183 cuse_class = class_create(THIS_MODULE, "cuse");
43184 if (IS_ERR(cuse_class))
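
[Editor's note on the pattern above] In cuse_init() the patch stops assigning to cuse_channel_fops member-by-member and instead copies fuse_dev_operations wholesale, overriding individual members through *(void **) casts between pax_open_kernel() and pax_close_kernel(). Under PaX constification the ops structure is write-protected at runtime, so that window is what allows the one-time initialisation. The sketch below only models the shape of the initialisation: the ops struct, the callbacks, and the no-op pax_*_kernel_stub() helpers are all illustrative, and unlike the kernel case the target struct here is writable, so plain assignment replaces the casts used in the patch.

#include <stdio.h>
#include <string.h>

struct ops {
        int (*open)(void);
        int (*release)(void);
};

static int base_open(void)    { puts("base open");    return 0; }
static int base_release(void) { puts("base release"); return 0; }
static int cuse_open(void)    { puts("cuse open");    return 0; }

static const struct ops fuse_dev_ops_like = { base_open, base_release };
static struct ops cuse_channel_ops_like;    /* would be write-protected under PaX */

static void pax_open_kernel_stub(void)  { /* would lift write protection */ }
static void pax_close_kernel_stub(void) { /* would restore write protection */ }

int main(void)
{
        pax_open_kernel_stub();
        /* inherit the template, then override selected members */
        memcpy(&cuse_channel_ops_like, &fuse_dev_ops_like, sizeof(fuse_dev_ops_like));
        cuse_channel_ops_like.open = cuse_open;
        pax_close_kernel_stub();

        cuse_channel_ops_like.open();      /* prints "cuse open" */
        cuse_channel_ops_like.release();   /* prints "base release" */
        return 0;
}
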
43185 diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
43186 --- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43187 +++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43188 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43189 {
43190 struct fuse_notify_inval_entry_out outarg;
43191 int err = -EINVAL;
43192 - char buf[FUSE_NAME_MAX+1];
43193 + char *buf = NULL;
43194 struct qstr name;
43195
43196 if (size < sizeof(outarg))
43197 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43198 if (outarg.namelen > FUSE_NAME_MAX)
43199 goto err;
43200
43201 + err = -ENOMEM;
43202 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43203 + if (!buf)
43204 + goto err;
43205 +
43206 name.name = buf;
43207 name.len = outarg.namelen;
43208 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43209 @@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43210
43211 down_read(&fc->killsb);
43212 err = -ENOENT;
43213 - if (!fc->sb)
43214 - goto err_unlock;
43215 -
43216 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43217 -
43218 -err_unlock:
43219 + if (fc->sb)
43220 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43221 up_read(&fc->killsb);
43222 + kfree(buf);
43223 return err;
43224
43225 err:
43226 fuse_copy_finish(cs);
43227 + kfree(buf);
43228 return err;
43229 }
43230
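
[Editor's note on the pattern above] The fuse_notify_inval_entry() change replaces a FUSE_NAME_MAX+1 byte array on the kernel stack with a kmalloc()'d buffer, moves the -ENOMEM check ahead of the copy, and makes sure the buffer is freed on both the success and error paths. A small user-space sketch of the same shape follows; handle_name() and NAME_MAX_LEN are made-up names for illustration, and malloc()/free() stand in for kmalloc()/kfree().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LEN 1024                /* stands in for FUSE_NAME_MAX */

static int handle_name(const char *src, size_t len)
{
        char *buf = NULL;                /* heap buffer instead of a large stack array */
        int err;

        err = -22;                       /* -EINVAL */
        if (len > NAME_MAX_LEN)
                goto out;

        err = -12;                       /* -ENOMEM */
        buf = malloc(NAME_MAX_LEN + 1);
        if (!buf)
                goto out;

        memcpy(buf, src, len);
        buf[len] = '\0';
        printf("invalidate entry \"%s\"\n", buf);
        err = 0;
out:
        free(buf);                       /* free(NULL) is a no-op, so one exit path suffices */
        return err;
}

int main(void)
{
        return handle_name("example", 7) ? 1 : 0;
}
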
43231 diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
43232 --- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43233 +++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43234 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43235 return link;
43236 }
43237
43238 -static void free_link(char *link)
43239 +static void free_link(const char *link)
43240 {
43241 if (!IS_ERR(link))
43242 free_page((unsigned long) link);
43243 diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
43244 --- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43245 +++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43246 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43247 unsigned int x;
43248 int error;
43249
43250 + pax_track_stack();
43251 +
43252 if (ndentry->d_inode) {
43253 nip = GFS2_I(ndentry->d_inode);
43254 if (ip == nip)
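
[Editor's note on the pattern above] Starting with gfs2_rename(), a long run of hunks inserts pax_track_stack() at the top of functions with unusually large stack frames; as far as this pattern goes, the call lets PaX's stack tracking (PAX_MEMORY_STACKLEAK) record how deep the kernel stack has grown so the used region can be scrubbed later. The toy below only models the "remember the deepest stack address" idea and assumes a downward-growing stack; track_stack() and lowest_stack are illustrative names, not the PaX implementation.

#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_stack = UINTPTR_MAX;

static void track_stack(void)
{
        char marker;                          /* its address approximates the stack pointer */
        uintptr_t sp = (uintptr_t)&marker;

        if (sp < lowest_stack)                /* assumption: stack grows downwards */
                lowest_stack = sp;
}

static void function_with_big_frame(void)
{
        char big[4096];                       /* the kind of frame the patch annotates */

        track_stack();
        big[0] = 0;
        (void)big;
}

int main(void)
{
        track_stack();
        function_with_big_frame();
        printf("deepest stack address seen: %#lx\n", (unsigned long)lowest_stack);
        return 0;
}
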
43255 diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
43256 --- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43257 +++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43258 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43259 return a->store ? a->store(sdp, buf, len) : len;
43260 }
43261
43262 -static struct sysfs_ops gfs2_attr_ops = {
43263 +static const struct sysfs_ops gfs2_attr_ops = {
43264 .show = gfs2_attr_show,
43265 .store = gfs2_attr_store,
43266 };
43267 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43268 return 0;
43269 }
43270
43271 -static struct kset_uevent_ops gfs2_uevent_ops = {
43272 +static const struct kset_uevent_ops gfs2_uevent_ops = {
43273 .uevent = gfs2_uevent,
43274 };
43275
43276 diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
43277 --- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43278 +++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43279 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43280 int err;
43281 u16 type;
43282
43283 + pax_track_stack();
43284 +
43285 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43286 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43287 if (err)
43288 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43289 int entry_size;
43290 int err;
43291
43292 + pax_track_stack();
43293 +
43294 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43295 sb = dir->i_sb;
43296 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43297 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43298 int entry_size, type;
43299 int err = 0;
43300
43301 + pax_track_stack();
43302 +
43303 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43304 dst_dir->i_ino, dst_name->name);
43305 sb = src_dir->i_sb;
43306 diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43307 --- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43308 +++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43309 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43310 struct hfsplus_readdir_data *rd;
43311 u16 type;
43312
43313 + pax_track_stack();
43314 +
43315 if (filp->f_pos >= inode->i_size)
43316 return 0;
43317
43318 diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43319 --- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43320 +++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43321 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43322 int res = 0;
43323 u16 type;
43324
43325 + pax_track_stack();
43326 +
43327 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43328
43329 HFSPLUS_I(inode).dev = 0;
43330 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43331 struct hfs_find_data fd;
43332 hfsplus_cat_entry entry;
43333
43334 + pax_track_stack();
43335 +
43336 if (HFSPLUS_IS_RSRC(inode))
43337 main_inode = HFSPLUS_I(inode).rsrc_inode;
43338
43339 diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43340 --- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43341 +++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43342 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43343 struct hfsplus_cat_file *file;
43344 int res;
43345
43346 + pax_track_stack();
43347 +
43348 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43349 return -EOPNOTSUPP;
43350
43351 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43352 struct hfsplus_cat_file *file;
43353 ssize_t res = 0;
43354
43355 + pax_track_stack();
43356 +
43357 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43358 return -EOPNOTSUPP;
43359
43360 diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43361 --- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43362 +++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43363 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43364 struct nls_table *nls = NULL;
43365 int err = -EINVAL;
43366
43367 + pax_track_stack();
43368 +
43369 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43370 if (!sbi)
43371 return -ENOMEM;
43372 diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43373 --- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43374 +++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43375 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43376 .kill_sb = kill_litter_super,
43377 };
43378
43379 -static struct vfsmount *hugetlbfs_vfsmount;
43380 +struct vfsmount *hugetlbfs_vfsmount;
43381
43382 static int can_do_hugetlb_shm(void)
43383 {
43384 diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43385 --- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43386 +++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43387 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43388 u64 phys, u64 len, u32 flags)
43389 {
43390 struct fiemap_extent extent;
43391 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
43392 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43393
43394 /* only count the extents */
43395 if (fieinfo->fi_extents_max == 0) {
43396 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43397
43398 fieinfo.fi_flags = fiemap.fm_flags;
43399 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43400 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43401 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43402
43403 if (fiemap.fm_extent_count != 0 &&
43404 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43405 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43406 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43407 fiemap.fm_flags = fieinfo.fi_flags;
43408 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43409 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43410 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43411 error = -EFAULT;
43412
43413 return error;
43414 diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43415 --- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43416 +++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43417 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43418 tid_t this_tid;
43419 int result;
43420
43421 + pax_track_stack();
43422 +
43423 jbd_debug(1, "Start checkpoint\n");
43424
43425 /*
43426 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43427 --- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43428 +++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43429 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43430 int outpos = 0;
43431 int pos=0;
43432
43433 + pax_track_stack();
43434 +
43435 memset(positions,0,sizeof(positions));
43436
43437 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43438 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43439 int outpos = 0;
43440 int pos=0;
43441
43442 + pax_track_stack();
43443 +
43444 memset(positions,0,sizeof(positions));
43445
43446 while (outpos<destlen) {
43447 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43448 --- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43449 +++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43450 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43451 int ret;
43452 uint32_t mysrclen, mydstlen;
43453
43454 + pax_track_stack();
43455 +
43456 mysrclen = *sourcelen;
43457 mydstlen = *dstlen - 8;
43458
43459 diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43460 --- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43461 +++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43462 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43463 struct jffs2_unknown_node marker = {
43464 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43465 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43466 - .totlen = cpu_to_je32(c->cleanmarker_size)
43467 + .totlen = cpu_to_je32(c->cleanmarker_size),
43468 + .hdr_crc = cpu_to_je32(0)
43469 };
43470
43471 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43472 diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43473 --- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43474 +++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43475 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43476 {
43477 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43478 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43479 - .totlen = constant_cpu_to_je32(8)
43480 + .totlen = constant_cpu_to_je32(8),
43481 + .hdr_crc = constant_cpu_to_je32(0)
43482 };
43483
43484 /*
43485 diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43486 --- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43487 +++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43488 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43489
43490 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43491
43492 + pax_track_stack();
43493 +
43494 /* Phase.1 : Merge same xref */
43495 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43496 xref_tmphash[i] = NULL;
43497 diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43498 --- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43499 +++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43500 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43501
43502 jfs_inode_cachep =
43503 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43504 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43505 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43506 init_once);
43507 if (jfs_inode_cachep == NULL)
43508 return -ENOMEM;
43509 diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43510 --- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43511 +++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43512 @@ -86,7 +86,7 @@ config HAVE_AOUT
43513
43514 config BINFMT_AOUT
43515 tristate "Kernel support for a.out and ECOFF binaries"
43516 - depends on HAVE_AOUT
43517 + depends on HAVE_AOUT && BROKEN
43518 ---help---
43519 A.out (Assembler.OUTput) is a set of formats for libraries and
43520 executables used in the earliest versions of UNIX. Linux used
43521 diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43522 --- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43523 +++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43524 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43525
43526 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43527 struct dentry *next;
43528 + char d_name[sizeof(next->d_iname)];
43529 + const unsigned char *name;
43530 +
43531 next = list_entry(p, struct dentry, d_u.d_child);
43532 if (d_unhashed(next) || !next->d_inode)
43533 continue;
43534
43535 spin_unlock(&dcache_lock);
43536 - if (filldir(dirent, next->d_name.name,
43537 + name = next->d_name.name;
43538 + if (name == next->d_iname) {
43539 + memcpy(d_name, name, next->d_name.len);
43540 + name = d_name;
43541 + }
43542 + if (filldir(dirent, name,
43543 next->d_name.len, filp->f_pos,
43544 next->d_inode->i_ino,
43545 dt_type(next->d_inode)) < 0)
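
[Editor's note on the pattern above] The dcache_readdir() hunk copies a dentry's inline short name (d_iname) into a local buffer while dcache_lock is still held, and only then calls filldir() with the copy, since the name stored inside the dentry can be rewritten by a concurrent rename once the lock is dropped. A user-space sketch of that "snapshot under the lock, use after unlock" pattern follows; struct entry and emit_name() are illustrative, a pthread mutex stands in for dcache_lock, and the example builds with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct entry {
        pthread_mutex_t lock;
        char name[32];                   /* plays the role of dentry->d_iname */
};

static void emit_name(struct entry *e)
{
        char snapshot[sizeof(e->name)];

        pthread_mutex_lock(&e->lock);
        memcpy(snapshot, e->name, sizeof(snapshot));   /* copy while protected */
        pthread_mutex_unlock(&e->lock);

        /* safe: e->name is never referenced after the lock is dropped */
        printf("entry: %s\n", snapshot);
}

int main(void)
{
        struct entry e = { PTHREAD_MUTEX_INITIALIZER, "example" };

        emit_name(&e);
        return 0;
}
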
43546 diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43547 --- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43548 +++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43549 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43550 /*
43551 * Cookie counter for NLM requests
43552 */
43553 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43554 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43555
43556 void nlmclnt_next_cookie(struct nlm_cookie *c)
43557 {
43558 - u32 cookie = atomic_inc_return(&nlm_cookie);
43559 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43560
43561 memcpy(c->data, &cookie, 4);
43562 c->len=4;
43563 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43564 struct nlm_rqst reqst, *req;
43565 int status;
43566
43567 + pax_track_stack();
43568 +
43569 req = &reqst;
43570 memset(req, 0, sizeof(*req));
43571 locks_init_lock(&req->a_args.lock.fl);
43572 diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43573 --- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43574 +++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43575 @@ -43,7 +43,7 @@
43576
43577 static struct svc_program nlmsvc_program;
43578
43579 -struct nlmsvc_binding * nlmsvc_ops;
43580 +const struct nlmsvc_binding * nlmsvc_ops;
43581 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43582
43583 static DEFINE_MUTEX(nlmsvc_mutex);
43584 diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43585 --- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43586 +++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43587 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43588
43589 static struct kmem_cache *filelock_cache __read_mostly;
43590
43591 +static void locks_init_lock_always(struct file_lock *fl)
43592 +{
43593 + fl->fl_next = NULL;
43594 + fl->fl_fasync = NULL;
43595 + fl->fl_owner = NULL;
43596 + fl->fl_pid = 0;
43597 + fl->fl_nspid = NULL;
43598 + fl->fl_file = NULL;
43599 + fl->fl_flags = 0;
43600 + fl->fl_type = 0;
43601 + fl->fl_start = fl->fl_end = 0;
43602 +}
43603 +
43604 /* Allocate an empty lock structure. */
43605 static struct file_lock *locks_alloc_lock(void)
43606 {
43607 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43608 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43609 +
43610 + if (fl)
43611 + locks_init_lock_always(fl);
43612 +
43613 + return fl;
43614 }
43615
43616 void locks_release_private(struct file_lock *fl)
43617 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43618 INIT_LIST_HEAD(&fl->fl_link);
43619 INIT_LIST_HEAD(&fl->fl_block);
43620 init_waitqueue_head(&fl->fl_wait);
43621 - fl->fl_next = NULL;
43622 - fl->fl_fasync = NULL;
43623 - fl->fl_owner = NULL;
43624 - fl->fl_pid = 0;
43625 - fl->fl_nspid = NULL;
43626 - fl->fl_file = NULL;
43627 - fl->fl_flags = 0;
43628 - fl->fl_type = 0;
43629 - fl->fl_start = fl->fl_end = 0;
43630 fl->fl_ops = NULL;
43631 fl->fl_lmops = NULL;
43632 + locks_init_lock_always(fl);
43633 }
43634
43635 EXPORT_SYMBOL(locks_init_lock);
43636 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43637 return;
43638
43639 if (filp->f_op && filp->f_op->flock) {
43640 - struct file_lock fl = {
43641 + struct file_lock flock = {
43642 .fl_pid = current->tgid,
43643 .fl_file = filp,
43644 .fl_flags = FL_FLOCK,
43645 .fl_type = F_UNLCK,
43646 .fl_end = OFFSET_MAX,
43647 };
43648 - filp->f_op->flock(filp, F_SETLKW, &fl);
43649 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
43650 - fl.fl_ops->fl_release_private(&fl);
43651 + filp->f_op->flock(filp, F_SETLKW, &flock);
43652 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
43653 + flock.fl_ops->fl_release_private(&flock);
43654 }
43655
43656 lock_kernel();
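
[Editor's note on the pattern above] The fs/locks.c hunk factors the per-field zeroing out of locks_init_lock() into locks_init_lock_always() and calls it from locks_alloc_lock() as well, so a lock freshly returned by the slab allocator never carries stale field values (it also renames the on-stack lock in locks_remove_flock() to avoid shadowing). A compact sketch of the "shared init helper called from the allocator" shape follows, with illustrative names (flock_like, lock_init_always(), lock_alloc() are not kernel APIs).

#include <stdio.h>
#include <stdlib.h>

struct flock_like {
        void *next;
        int   pid;
        int   type;
        long  start, end;
};

static void lock_init_always(struct flock_like *fl)
{
        /* the fields every code path must see zeroed, allocator included */
        fl->next = NULL;
        fl->pid = 0;
        fl->type = 0;
        fl->start = fl->end = 0;
}

static struct flock_like *lock_alloc(void)
{
        struct flock_like *fl = malloc(sizeof(*fl));

        if (fl)
                lock_init_always(fl);    /* never hand out uninitialized fields */
        return fl;
}

int main(void)
{
        struct flock_like *fl = lock_alloc();

        if (!fl)
                return 1;
        printf("pid=%d type=%d start=%ld\n", fl->pid, fl->type, fl->start);
        free(fl);
        return 0;
}
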
43657 diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43658 --- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43659 +++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43660 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43661 if (!cache)
43662 goto fail;
43663 cache->c_name = name;
43664 - cache->c_op.free = NULL;
43665 + *(void **)&cache->c_op.free = NULL;
43666 if (cache_op)
43667 - cache->c_op.free = cache_op->free;
43668 + *(void **)&cache->c_op.free = cache_op->free;
43669 atomic_set(&cache->c_entry_count, 0);
43670 cache->c_bucket_bits = bucket_bits;
43671 #ifdef MB_CACHE_INDEXES_COUNT
43672 diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43673 --- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43674 +++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43675 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43676 return ret;
43677
43678 /*
43679 - * Read/write DACs are always overridable.
43680 - * Executable DACs are overridable if at least one exec bit is set.
43681 - */
43682 - if (!(mask & MAY_EXEC) || execute_ok(inode))
43683 - if (capable(CAP_DAC_OVERRIDE))
43684 - return 0;
43685 -
43686 - /*
43687 * Searching includes executable on directories, else just read.
43688 */
43689 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43690 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43691 if (capable(CAP_DAC_READ_SEARCH))
43692 return 0;
43693
43694 + /*
43695 + * Read/write DACs are always overridable.
43696 + * Executable DACs are overridable if at least one exec bit is set.
43697 + */
43698 + if (!(mask & MAY_EXEC) || execute_ok(inode))
43699 + if (capable(CAP_DAC_OVERRIDE))
43700 + return 0;
43701 +
43702 return -EACCES;
43703 }
43704
43705 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43706 if (!ret)
43707 goto ok;
43708
43709 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43710 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43711 + capable(CAP_DAC_OVERRIDE))
43712 goto ok;
43713
43714 return ret;
43715 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43716 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43717 error = PTR_ERR(cookie);
43718 if (!IS_ERR(cookie)) {
43719 - char *s = nd_get_link(nd);
43720 + const char *s = nd_get_link(nd);
43721 error = 0;
43722 if (s)
43723 error = __vfs_follow_link(nd, s);
43724 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43725 err = security_inode_follow_link(path->dentry, nd);
43726 if (err)
43727 goto loop;
43728 +
43729 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43730 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43731 + err = -EACCES;
43732 + goto loop;
43733 + }
43734 +
43735 current->link_count++;
43736 current->total_link_count++;
43737 nd->depth++;
43738 @@ -1016,11 +1024,18 @@ return_reval:
43739 break;
43740 }
43741 return_base:
43742 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43743 + path_put(&nd->path);
43744 + return -ENOENT;
43745 + }
43746 return 0;
43747 out_dput:
43748 path_put_conditional(&next, nd);
43749 break;
43750 }
43751 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43752 + err = -ENOENT;
43753 +
43754 path_put(&nd->path);
43755 return_err:
43756 return err;
43757 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43758 int retval = path_init(dfd, name, flags, nd);
43759 if (!retval)
43760 retval = path_walk(name, nd);
43761 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43762 - nd->path.dentry->d_inode))
43763 - audit_inode(name, nd->path.dentry);
43764 +
43765 + if (likely(!retval)) {
43766 + if (nd->path.dentry && nd->path.dentry->d_inode) {
43767 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43768 + retval = -ENOENT;
43769 + if (!audit_dummy_context())
43770 + audit_inode(name, nd->path.dentry);
43771 + }
43772 + }
43773 if (nd->root.mnt) {
43774 path_put(&nd->root);
43775 nd->root.mnt = NULL;
43776 }
43777 +
43778 return retval;
43779 }
43780
43781 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43782 if (error)
43783 goto err_out;
43784
43785 +
43786 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43787 + error = -EPERM;
43788 + goto err_out;
43789 + }
43790 + if (gr_handle_rawio(inode)) {
43791 + error = -EPERM;
43792 + goto err_out;
43793 + }
43794 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43795 + error = -EACCES;
43796 + goto err_out;
43797 + }
43798 +
43799 if (flag & O_TRUNC) {
43800 error = get_write_access(inode);
43801 if (error)
43802 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43803 int error;
43804 struct dentry *dir = nd->path.dentry;
43805
43806 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43807 + error = -EACCES;
43808 + goto out_unlock;
43809 + }
43810 +
43811 if (!IS_POSIXACL(dir->d_inode))
43812 mode &= ~current_umask();
43813 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43814 if (error)
43815 goto out_unlock;
43816 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43817 + if (!error)
43818 + gr_handle_create(path->dentry, nd->path.mnt);
43819 out_unlock:
43820 mutex_unlock(&dir->d_inode->i_mutex);
43821 dput(nd->path.dentry);
43822 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43823 &nd, flag);
43824 if (error)
43825 return ERR_PTR(error);
43826 +
43827 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43828 + error = -EPERM;
43829 + goto exit;
43830 + }
43831 +
43832 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43833 + error = -EPERM;
43834 + goto exit;
43835 + }
43836 +
43837 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43838 + error = -EACCES;
43839 + goto exit;
43840 + }
43841 +
43842 goto ok;
43843 }
43844
43845 @@ -1795,6 +1854,14 @@ do_last:
43846 /*
43847 * It already exists.
43848 */
43849 +
43850 + /* only check if O_CREAT is specified, all other checks need
43851 + to go into may_open */
43852 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43853 + error = -EACCES;
43854 + goto exit_mutex_unlock;
43855 + }
43856 +
43857 mutex_unlock(&dir->d_inode->i_mutex);
43858 audit_inode(pathname, path.dentry);
43859
43860 @@ -1887,6 +1954,13 @@ do_link:
43861 error = security_inode_follow_link(path.dentry, &nd);
43862 if (error)
43863 goto exit_dput;
43864 +
43865 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43866 + path.dentry, nd.path.mnt)) {
43867 + error = -EACCES;
43868 + goto exit_dput;
43869 + }
43870 +
43871 error = __do_follow_link(&path, &nd);
43872 if (error) {
43873 /* Does someone understand code flow here? Or it is only
43874 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43875 error = may_mknod(mode);
43876 if (error)
43877 goto out_dput;
43878 +
43879 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43880 + error = -EPERM;
43881 + goto out_dput;
43882 + }
43883 +
43884 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43885 + error = -EACCES;
43886 + goto out_dput;
43887 + }
43888 +
43889 error = mnt_want_write(nd.path.mnt);
43890 if (error)
43891 goto out_dput;
43892 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43893 }
43894 out_drop_write:
43895 mnt_drop_write(nd.path.mnt);
43896 +
43897 + if (!error)
43898 + gr_handle_create(dentry, nd.path.mnt);
43899 out_dput:
43900 dput(dentry);
43901 out_unlock:
43902 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43903 if (IS_ERR(dentry))
43904 goto out_unlock;
43905
43906 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43907 + error = -EACCES;
43908 + goto out_dput;
43909 + }
43910 +
43911 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43912 mode &= ~current_umask();
43913 error = mnt_want_write(nd.path.mnt);
43914 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43915 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43916 out_drop_write:
43917 mnt_drop_write(nd.path.mnt);
43918 +
43919 + if (!error)
43920 + gr_handle_create(dentry, nd.path.mnt);
43921 +
43922 out_dput:
43923 dput(dentry);
43924 out_unlock:
43925 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43926 char * name;
43927 struct dentry *dentry;
43928 struct nameidata nd;
43929 + ino_t saved_ino = 0;
43930 + dev_t saved_dev = 0;
43931
43932 error = user_path_parent(dfd, pathname, &nd, &name);
43933 if (error)
43934 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43935 error = PTR_ERR(dentry);
43936 if (IS_ERR(dentry))
43937 goto exit2;
43938 +
43939 + if (dentry->d_inode != NULL) {
43940 + if (dentry->d_inode->i_nlink <= 1) {
43941 + saved_ino = dentry->d_inode->i_ino;
43942 + saved_dev = gr_get_dev_from_dentry(dentry);
43943 + }
43944 +
43945 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43946 + error = -EACCES;
43947 + goto exit3;
43948 + }
43949 + }
43950 +
43951 error = mnt_want_write(nd.path.mnt);
43952 if (error)
43953 goto exit3;
43954 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43955 if (error)
43956 goto exit4;
43957 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43958 + if (!error && (saved_dev || saved_ino))
43959 + gr_handle_delete(saved_ino, saved_dev);
43960 exit4:
43961 mnt_drop_write(nd.path.mnt);
43962 exit3:
43963 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43964 struct dentry *dentry;
43965 struct nameidata nd;
43966 struct inode *inode = NULL;
43967 + ino_t saved_ino = 0;
43968 + dev_t saved_dev = 0;
43969
43970 error = user_path_parent(dfd, pathname, &nd, &name);
43971 if (error)
43972 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43973 if (nd.last.name[nd.last.len])
43974 goto slashes;
43975 inode = dentry->d_inode;
43976 - if (inode)
43977 + if (inode) {
43978 + if (inode->i_nlink <= 1) {
43979 + saved_ino = inode->i_ino;
43980 + saved_dev = gr_get_dev_from_dentry(dentry);
43981 + }
43982 +
43983 atomic_inc(&inode->i_count);
43984 +
43985 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43986 + error = -EACCES;
43987 + goto exit2;
43988 + }
43989 + }
43990 error = mnt_want_write(nd.path.mnt);
43991 if (error)
43992 goto exit2;
43993 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43994 if (error)
43995 goto exit3;
43996 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43997 + if (!error && (saved_ino || saved_dev))
43998 + gr_handle_delete(saved_ino, saved_dev);
43999 exit3:
44000 mnt_drop_write(nd.path.mnt);
44001 exit2:
44002 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44003 if (IS_ERR(dentry))
44004 goto out_unlock;
44005
44006 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44007 + error = -EACCES;
44008 + goto out_dput;
44009 + }
44010 +
44011 error = mnt_want_write(nd.path.mnt);
44012 if (error)
44013 goto out_dput;
44014 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44015 if (error)
44016 goto out_drop_write;
44017 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44018 + if (!error)
44019 + gr_handle_create(dentry, nd.path.mnt);
44020 out_drop_write:
44021 mnt_drop_write(nd.path.mnt);
44022 out_dput:
44023 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44024 error = PTR_ERR(new_dentry);
44025 if (IS_ERR(new_dentry))
44026 goto out_unlock;
44027 +
44028 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44029 + old_path.dentry->d_inode,
44030 + old_path.dentry->d_inode->i_mode, to)) {
44031 + error = -EACCES;
44032 + goto out_dput;
44033 + }
44034 +
44035 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44036 + old_path.dentry, old_path.mnt, to)) {
44037 + error = -EACCES;
44038 + goto out_dput;
44039 + }
44040 +
44041 error = mnt_want_write(nd.path.mnt);
44042 if (error)
44043 goto out_dput;
44044 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44045 if (error)
44046 goto out_drop_write;
44047 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44048 + if (!error)
44049 + gr_handle_create(new_dentry, nd.path.mnt);
44050 out_drop_write:
44051 mnt_drop_write(nd.path.mnt);
44052 out_dput:
44053 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44054 char *to;
44055 int error;
44056
44057 + pax_track_stack();
44058 +
44059 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44060 if (error)
44061 goto exit;
44062 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44063 if (new_dentry == trap)
44064 goto exit5;
44065
44066 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44067 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44068 + to);
44069 + if (error)
44070 + goto exit5;
44071 +
44072 error = mnt_want_write(oldnd.path.mnt);
44073 if (error)
44074 goto exit5;
44075 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44076 goto exit6;
44077 error = vfs_rename(old_dir->d_inode, old_dentry,
44078 new_dir->d_inode, new_dentry);
44079 + if (!error)
44080 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44081 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44082 exit6:
44083 mnt_drop_write(oldnd.path.mnt);
44084 exit5:
44085 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44086
44087 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44088 {
44089 + char tmpbuf[64];
44090 + const char *newlink;
44091 int len;
44092
44093 len = PTR_ERR(link);
44094 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44095 len = strlen(link);
44096 if (len > (unsigned) buflen)
44097 len = buflen;
44098 - if (copy_to_user(buffer, link, len))
44099 +
44100 + if (len < sizeof(tmpbuf)) {
44101 + memcpy(tmpbuf, link, len);
44102 + newlink = tmpbuf;
44103 + } else
44104 + newlink = link;
44105 +
44106 + if (copy_to_user(buffer, newlink, len))
44107 len = -EFAULT;
44108 out:
44109 return len;
44110 diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
44111 --- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44112 +++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44113 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44114 if (!(sb->s_flags & MS_RDONLY))
44115 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44116 up_write(&sb->s_umount);
44117 +
44118 + gr_log_remount(mnt->mnt_devname, retval);
44119 +
44120 return retval;
44121 }
44122
44123 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44124 security_sb_umount_busy(mnt);
44125 up_write(&namespace_sem);
44126 release_mounts(&umount_list);
44127 +
44128 + gr_log_unmount(mnt->mnt_devname, retval);
44129 +
44130 return retval;
44131 }
44132
44133 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44134 if (retval)
44135 goto dput_out;
44136
44137 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44138 + retval = -EPERM;
44139 + goto dput_out;
44140 + }
44141 +
44142 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44143 + retval = -EPERM;
44144 + goto dput_out;
44145 + }
44146 +
44147 if (flags & MS_REMOUNT)
44148 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44149 data_page);
44150 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44151 dev_name, data_page);
44152 dput_out:
44153 path_put(&path);
44154 +
44155 + gr_log_mount(dev_name, dir_name, retval);
44156 +
44157 return retval;
44158 }
44159
44160 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44161 goto out1;
44162 }
44163
44164 + if (gr_handle_chroot_pivot()) {
44165 + error = -EPERM;
44166 + path_put(&old);
44167 + goto out1;
44168 + }
44169 +
44170 read_lock(&current->fs->lock);
44171 root = current->fs->root;
44172 path_get(&current->fs->root);
44173 diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
44174 --- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44175 +++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44176 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44177 int res, val = 0, len;
44178 __u8 __name[NCP_MAXPATHLEN + 1];
44179
44180 + pax_track_stack();
44181 +
44182 parent = dget_parent(dentry);
44183 dir = parent->d_inode;
44184
44185 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44186 int error, res, len;
44187 __u8 __name[NCP_MAXPATHLEN + 1];
44188
44189 + pax_track_stack();
44190 +
44191 lock_kernel();
44192 error = -EIO;
44193 if (!ncp_conn_valid(server))
44194 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44195 int error, result, len;
44196 int opmode;
44197 __u8 __name[NCP_MAXPATHLEN + 1];
44198 -
44199 +
44200 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44201 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44202
44203 + pax_track_stack();
44204 +
44205 error = -EIO;
44206 lock_kernel();
44207 if (!ncp_conn_valid(server))
44208 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44209 int error, len;
44210 __u8 __name[NCP_MAXPATHLEN + 1];
44211
44212 + pax_track_stack();
44213 +
44214 DPRINTK("ncp_mkdir: making %s/%s\n",
44215 dentry->d_parent->d_name.name, dentry->d_name.name);
44216
44217 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44218 if (!ncp_conn_valid(server))
44219 goto out;
44220
44221 + pax_track_stack();
44222 +
44223 ncp_age_dentry(server, dentry);
44224 len = sizeof(__name);
44225 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44226 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44227 int old_len, new_len;
44228 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44229
44230 + pax_track_stack();
44231 +
44232 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44233 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44234 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44235 diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
44236 --- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44237 +++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44238 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44239 #endif
44240 struct ncp_entry_info finfo;
44241
44242 + pax_track_stack();
44243 +
44244 data.wdog_pid = NULL;
44245 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44246 if (!server)
44247 diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
44248 --- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44249 +++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44250 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44251 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44252 nfsi->attrtimeo_timestamp = jiffies;
44253
44254 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44255 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44256 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44257 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44258 else
44259 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44260 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44261 }
44262
44263 -static atomic_long_t nfs_attr_generation_counter;
44264 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44265
44266 static unsigned long nfs_read_attr_generation_counter(void)
44267 {
44268 - return atomic_long_read(&nfs_attr_generation_counter);
44269 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44270 }
44271
44272 unsigned long nfs_inc_attr_generation_counter(void)
44273 {
44274 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44275 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44276 }
44277
44278 void nfs_fattr_init(struct nfs_fattr *fattr)
44279 diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
44280 --- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44281 +++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44282 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44283 fput(filp);
44284 }
44285
44286 -static struct nlmsvc_binding nfsd_nlm_ops = {
44287 +static const struct nlmsvc_binding nfsd_nlm_ops = {
44288 .fopen = nlm_fopen, /* open file for locking */
44289 .fclose = nlm_fclose, /* close file */
44290 };
44291 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44292 --- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44293 +++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44294 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44295 unsigned int cmd;
44296 int err;
44297
44298 + pax_track_stack();
44299 +
44300 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44301 (long long) lock->lk_offset,
44302 (long long) lock->lk_length);
44303 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44304 --- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44305 +++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44306 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44307 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44308 u32 minorversion = resp->cstate.minorversion;
44309
44310 + pax_track_stack();
44311 +
44312 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44313 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44314 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44315 diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44316 --- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44317 +++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44318 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44319 } else {
44320 oldfs = get_fs();
44321 set_fs(KERNEL_DS);
44322 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44323 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44324 set_fs(oldfs);
44325 }
44326
44327 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44328
44329 /* Write the data. */
44330 oldfs = get_fs(); set_fs(KERNEL_DS);
44331 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44332 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44333 set_fs(oldfs);
44334 if (host_err < 0)
44335 goto out_nfserr;
44336 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44337 */
44338
44339 oldfs = get_fs(); set_fs(KERNEL_DS);
44340 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44341 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44342 set_fs(oldfs);
44343
44344 if (host_err < 0)
44345 diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44346 --- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44347 +++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44348 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44349 unsigned int cmd, void __user *argp)
44350 {
44351 struct nilfs_argv argv[5];
44352 - const static size_t argsz[5] = {
44353 + static const size_t argsz[5] = {
44354 sizeof(struct nilfs_vdesc),
44355 sizeof(struct nilfs_period),
44356 sizeof(__u64),
44357 diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44358 --- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44359 +++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44360 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44361 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44362 }
44363
44364 -static struct fsnotify_ops dnotify_fsnotify_ops = {
44365 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
44366 .handle_event = dnotify_handle_event,
44367 .should_send_event = dnotify_should_send_event,
44368 .free_group_priv = NULL,
44369 diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44370 --- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44371 +++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44372 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44373 * get set to 0 so it will never get 'freed'
44374 */
44375 static struct fsnotify_event q_overflow_event;
44376 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44377 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44378
44379 /**
44380 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44381 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44382 */
44383 u32 fsnotify_get_cookie(void)
44384 {
44385 - return atomic_inc_return(&fsnotify_sync_cookie);
44386 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44387 }
44388 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44389
44390 diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44391 --- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44392 +++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44393 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
44394 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44395 ~(s64)(ndir->itype.index.block_size - 1)));
44396 /* Bounds checks. */
44397 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44398 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44399 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44400 "inode 0x%lx or driver bug.", vdir->i_ino);
44401 goto err_out;
44402 diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44403 --- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44404 +++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44405 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44406 #endif /* NTFS_RW */
44407 };
44408
44409 -const struct file_operations ntfs_empty_file_ops = {};
44410 +const struct file_operations ntfs_empty_file_ops __read_only;
44411
44412 -const struct inode_operations ntfs_empty_inode_ops = {};
44413 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44414 diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44415 --- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44416 +++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44417 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44418 return mlog_mask_store(mlog_attr->mask, buf, count);
44419 }
44420
44421 -static struct sysfs_ops mlog_attr_ops = {
44422 +static const struct sysfs_ops mlog_attr_ops = {
44423 .show = mlog_show,
44424 .store = mlog_store,
44425 };
44426 diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44427 --- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44428 +++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44429 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44430 goto bail;
44431 }
44432
44433 - atomic_inc(&osb->alloc_stats.moves);
44434 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44435
44436 status = 0;
44437 bail:
44438 diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44439 --- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44440 +++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44441 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44442 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44443 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44444
44445 + pax_track_stack();
44446 +
44447 /* At some point it might be nice to break this function up a
44448 * bit. */
44449
44450 diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44451 --- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44452 +++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44453 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
44454
44455 struct ocfs2_alloc_stats
44456 {
44457 - atomic_t moves;
44458 - atomic_t local_data;
44459 - atomic_t bitmap_data;
44460 - atomic_t bg_allocs;
44461 - atomic_t bg_extends;
44462 + atomic_unchecked_t moves;
44463 + atomic_unchecked_t local_data;
44464 + atomic_unchecked_t bitmap_data;
44465 + atomic_unchecked_t bg_allocs;
44466 + atomic_unchecked_t bg_extends;
44467 };
44468
44469 enum ocfs2_local_alloc_state
44470 diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44471 --- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44472 +++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44473 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44474 mlog_errno(status);
44475 goto bail;
44476 }
44477 - atomic_inc(&osb->alloc_stats.bg_extends);
44478 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44479
44480 /* You should never ask for this much metadata */
44481 BUG_ON(bits_wanted >
44482 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44483 mlog_errno(status);
44484 goto bail;
44485 }
44486 - atomic_inc(&osb->alloc_stats.bg_allocs);
44487 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44488
44489 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44490 ac->ac_bits_given += (*num_bits);
44491 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44492 mlog_errno(status);
44493 goto bail;
44494 }
44495 - atomic_inc(&osb->alloc_stats.bg_allocs);
44496 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44497
44498 BUG_ON(num_bits != 1);
44499
44500 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44501 cluster_start,
44502 num_clusters);
44503 if (!status)
44504 - atomic_inc(&osb->alloc_stats.local_data);
44505 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44506 } else {
44507 if (min_clusters > (osb->bitmap_cpg - 1)) {
44508 /* The only paths asking for contiguousness
44509 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44510 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44511 bg_blkno,
44512 bg_bit_off);
44513 - atomic_inc(&osb->alloc_stats.bitmap_data);
44514 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44515 }
44516 }
44517 if (status < 0) {
44518 diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44519 --- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44520 +++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44521 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44522 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44523 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44524 "Stats",
44525 - atomic_read(&osb->alloc_stats.bitmap_data),
44526 - atomic_read(&osb->alloc_stats.local_data),
44527 - atomic_read(&osb->alloc_stats.bg_allocs),
44528 - atomic_read(&osb->alloc_stats.moves),
44529 - atomic_read(&osb->alloc_stats.bg_extends));
44530 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44531 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44532 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44533 + atomic_read_unchecked(&osb->alloc_stats.moves),
44534 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44535
44536 out += snprintf(buf + out, len - out,
44537 "%10s => State: %u Descriptor: %llu Size: %u bits "
44538 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44539 spin_lock_init(&osb->osb_xattr_lock);
44540 ocfs2_init_inode_steal_slot(osb);
44541
44542 - atomic_set(&osb->alloc_stats.moves, 0);
44543 - atomic_set(&osb->alloc_stats.local_data, 0);
44544 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44545 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44546 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44547 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44548 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44549 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44550 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44551 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44552
44553 /* Copy the blockcheck stats from the superblock probe */
44554 osb->osb_ecc_stats = *stats;
44555 diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44556 --- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44557 +++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44558 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44559 error = locks_verify_truncate(inode, NULL, length);
44560 if (!error)
44561 error = security_path_truncate(&path, length, 0);
44562 +
44563 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44564 + error = -EACCES;
44565 +
44566 if (!error) {
44567 vfs_dq_init(inode);
44568 error = do_truncate(path.dentry, length, 0, NULL);
44569 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44570 if (__mnt_is_readonly(path.mnt))
44571 res = -EROFS;
44572
44573 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44574 + res = -EACCES;
44575 +
44576 out_path_release:
44577 path_put(&path);
44578 out:
44579 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44580 if (error)
44581 goto dput_and_out;
44582
44583 + gr_log_chdir(path.dentry, path.mnt);
44584 +
44585 set_fs_pwd(current->fs, &path);
44586
44587 dput_and_out:
44588 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44589 goto out_putf;
44590
44591 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44592 +
44593 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44594 + error = -EPERM;
44595 +
44596 + if (!error)
44597 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44598 +
44599 if (!error)
44600 set_fs_pwd(current->fs, &file->f_path);
44601 out_putf:
44602 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44603 if (!capable(CAP_SYS_CHROOT))
44604 goto dput_and_out;
44605
44606 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44607 + goto dput_and_out;
44608 +
44609 + if (gr_handle_chroot_caps(&path)) {
44610 + error = -ENOMEM;
44611 + goto dput_and_out;
44612 + }
44613 +
44614 set_fs_root(current->fs, &path);
44615 +
44616 + gr_handle_chroot_chdir(&path);
44617 +
44618 error = 0;
44619 dput_and_out:
44620 path_put(&path);
44621 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44622 err = mnt_want_write_file(file);
44623 if (err)
44624 goto out_putf;
44625 +
44626 mutex_lock(&inode->i_mutex);
44627 +
44628 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44629 + err = -EACCES;
44630 + goto out_unlock;
44631 + }
44632 +
44633 if (mode == (mode_t) -1)
44634 mode = inode->i_mode;
44635 +
44636 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44637 + err = -EPERM;
44638 + goto out_unlock;
44639 + }
44640 +
44641 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44642 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44643 err = notify_change(dentry, &newattrs);
44644 +
44645 +out_unlock:
44646 mutex_unlock(&inode->i_mutex);
44647 mnt_drop_write(file->f_path.mnt);
44648 out_putf:
44649 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44650 error = mnt_want_write(path.mnt);
44651 if (error)
44652 goto dput_and_out;
44653 +
44654 mutex_lock(&inode->i_mutex);
44655 +
44656 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44657 + error = -EACCES;
44658 + goto out_unlock;
44659 + }
44660 +
44661 if (mode == (mode_t) -1)
44662 mode = inode->i_mode;
44663 +
44664 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44665 + error = -EACCES;
44666 + goto out_unlock;
44667 + }
44668 +
44669 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44670 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44671 error = notify_change(path.dentry, &newattrs);
44672 +
44673 +out_unlock:
44674 mutex_unlock(&inode->i_mutex);
44675 mnt_drop_write(path.mnt);
44676 dput_and_out:
44677 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44678 return sys_fchmodat(AT_FDCWD, filename, mode);
44679 }
44680
44681 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44682 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44683 {
44684 struct inode *inode = dentry->d_inode;
44685 int error;
44686 struct iattr newattrs;
44687
44688 + if (!gr_acl_handle_chown(dentry, mnt))
44689 + return -EACCES;
44690 +
44691 newattrs.ia_valid = ATTR_CTIME;
44692 if (user != (uid_t) -1) {
44693 newattrs.ia_valid |= ATTR_UID;
44694 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44695 error = mnt_want_write(path.mnt);
44696 if (error)
44697 goto out_release;
44698 - error = chown_common(path.dentry, user, group);
44699 + error = chown_common(path.dentry, user, group, path.mnt);
44700 mnt_drop_write(path.mnt);
44701 out_release:
44702 path_put(&path);
44703 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44704 error = mnt_want_write(path.mnt);
44705 if (error)
44706 goto out_release;
44707 - error = chown_common(path.dentry, user, group);
44708 + error = chown_common(path.dentry, user, group, path.mnt);
44709 mnt_drop_write(path.mnt);
44710 out_release:
44711 path_put(&path);
44712 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44713 error = mnt_want_write(path.mnt);
44714 if (error)
44715 goto out_release;
44716 - error = chown_common(path.dentry, user, group);
44717 + error = chown_common(path.dentry, user, group, path.mnt);
44718 mnt_drop_write(path.mnt);
44719 out_release:
44720 path_put(&path);
44721 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44722 goto out_fput;
44723 dentry = file->f_path.dentry;
44724 audit_inode(NULL, dentry);
44725 - error = chown_common(dentry, user, group);
44726 + error = chown_common(dentry, user, group, file->f_path.mnt);
44727 mnt_drop_write(file->f_path.mnt);
44728 out_fput:
44729 fput(file);
44730 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44731 if (!IS_ERR(tmp)) {
44732 fd = get_unused_fd_flags(flags);
44733 if (fd >= 0) {
44734 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44735 + struct file *f;
44736 + /* don't allow to be set by userland */
44737 + flags &= ~FMODE_GREXEC;
44738 + f = do_filp_open(dfd, tmp, flags, mode, 0);
44739 if (IS_ERR(f)) {
44740 put_unused_fd(fd);
44741 fd = PTR_ERR(f);
44742 diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44743 --- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44744 +++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44745 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44746 ldm_error ("A VBLK claims to have %d parts.", num);
44747 return false;
44748 }
44749 +
44750 if (rec >= num) {
44751 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44752 return false;
44753 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44754 goto found;
44755 }
44756
44757 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44758 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44759 if (!f) {
44760 ldm_crit ("Out of memory.");
44761 return false;
44762 diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44763 --- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44764 +++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44765 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44766 return 0; /* not a MacOS disk */
44767 }
44768 blocks_in_map = be32_to_cpu(part->map_count);
44769 + printk(" [mac]");
44770 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44771 put_dev_sector(sect);
44772 return 0;
44773 }
44774 - printk(" [mac]");
44775 for (slot = 1; slot <= blocks_in_map; ++slot) {
44776 int pos = slot * secsize;
44777 put_dev_sector(sect);
44778 diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44779 --- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44780 +++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44781 @@ -401,9 +401,9 @@ redo:
44782 }
44783 if (bufs) /* More to do? */
44784 continue;
44785 - if (!pipe->writers)
44786 + if (!atomic_read(&pipe->writers))
44787 break;
44788 - if (!pipe->waiting_writers) {
44789 + if (!atomic_read(&pipe->waiting_writers)) {
44790 /* syscall merging: Usually we must not sleep
44791 * if O_NONBLOCK is set, or if we got some data.
44792 * But if a writer sleeps in kernel space, then
44793 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44794 mutex_lock(&inode->i_mutex);
44795 pipe = inode->i_pipe;
44796
44797 - if (!pipe->readers) {
44798 + if (!atomic_read(&pipe->readers)) {
44799 send_sig(SIGPIPE, current, 0);
44800 ret = -EPIPE;
44801 goto out;
44802 @@ -511,7 +511,7 @@ redo1:
44803 for (;;) {
44804 int bufs;
44805
44806 - if (!pipe->readers) {
44807 + if (!atomic_read(&pipe->readers)) {
44808 send_sig(SIGPIPE, current, 0);
44809 if (!ret)
44810 ret = -EPIPE;
44811 @@ -597,9 +597,9 @@ redo2:
44812 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44813 do_wakeup = 0;
44814 }
44815 - pipe->waiting_writers++;
44816 + atomic_inc(&pipe->waiting_writers);
44817 pipe_wait(pipe);
44818 - pipe->waiting_writers--;
44819 + atomic_dec(&pipe->waiting_writers);
44820 }
44821 out:
44822 mutex_unlock(&inode->i_mutex);
44823 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44824 mask = 0;
44825 if (filp->f_mode & FMODE_READ) {
44826 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44827 - if (!pipe->writers && filp->f_version != pipe->w_counter)
44828 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44829 mask |= POLLHUP;
44830 }
44831
44832 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44833 * Most Unices do not set POLLERR for FIFOs but on Linux they
44834 * behave exactly like pipes for poll().
44835 */
44836 - if (!pipe->readers)
44837 + if (!atomic_read(&pipe->readers))
44838 mask |= POLLERR;
44839 }
44840
44841 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44842
44843 mutex_lock(&inode->i_mutex);
44844 pipe = inode->i_pipe;
44845 - pipe->readers -= decr;
44846 - pipe->writers -= decw;
44847 + atomic_sub(decr, &pipe->readers);
44848 + atomic_sub(decw, &pipe->writers);
44849
44850 - if (!pipe->readers && !pipe->writers) {
44851 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44852 free_pipe_info(inode);
44853 } else {
44854 wake_up_interruptible_sync(&pipe->wait);
44855 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44856
44857 if (inode->i_pipe) {
44858 ret = 0;
44859 - inode->i_pipe->readers++;
44860 + atomic_inc(&inode->i_pipe->readers);
44861 }
44862
44863 mutex_unlock(&inode->i_mutex);
44864 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44865
44866 if (inode->i_pipe) {
44867 ret = 0;
44868 - inode->i_pipe->writers++;
44869 + atomic_inc(&inode->i_pipe->writers);
44870 }
44871
44872 mutex_unlock(&inode->i_mutex);
44873 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44874 if (inode->i_pipe) {
44875 ret = 0;
44876 if (filp->f_mode & FMODE_READ)
44877 - inode->i_pipe->readers++;
44878 + atomic_inc(&inode->i_pipe->readers);
44879 if (filp->f_mode & FMODE_WRITE)
44880 - inode->i_pipe->writers++;
44881 + atomic_inc(&inode->i_pipe->writers);
44882 }
44883
44884 mutex_unlock(&inode->i_mutex);
44885 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44886 inode->i_pipe = NULL;
44887 }
44888
44889 -static struct vfsmount *pipe_mnt __read_mostly;
44890 +struct vfsmount *pipe_mnt __read_mostly;
44891 static int pipefs_delete_dentry(struct dentry *dentry)
44892 {
44893 /*
44894 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44895 goto fail_iput;
44896 inode->i_pipe = pipe;
44897
44898 - pipe->readers = pipe->writers = 1;
44899 + atomic_set(&pipe->readers, 1);
44900 + atomic_set(&pipe->writers, 1);
44901 inode->i_fop = &rdwr_pipefifo_fops;
44902
44903 /*
44904 diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44905 --- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44906 +++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44907 @@ -60,6 +60,7 @@
44908 #include <linux/tty.h>
44909 #include <linux/string.h>
44910 #include <linux/mman.h>
44911 +#include <linux/grsecurity.h>
44912 #include <linux/proc_fs.h>
44913 #include <linux/ioport.h>
44914 #include <linux/uaccess.h>
44915 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
44916 p->nivcsw);
44917 }
44918
44919 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44920 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
44921 +{
44922 + if (p->mm)
44923 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44924 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44925 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44926 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44927 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44928 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44929 + else
44930 + seq_printf(m, "PaX:\t-----\n");
44931 +}
44932 +#endif
44933 +
44934 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44935 struct pid *pid, struct task_struct *task)
44936 {
44937 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44938 task_cap(m, task);
44939 cpuset_task_status_allowed(m, task);
44940 task_context_switch_counts(m, task);
44941 +
44942 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44943 + task_pax(m, task);
44944 +#endif
44945 +
44946 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44947 + task_grsec_rbac(m, task);
44948 +#endif
44949 +
44950 return 0;
44951 }
44952
44953 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44954 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44955 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44956 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44957 +#endif
44958 +
44959 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44960 struct pid *pid, struct task_struct *task, int whole)
44961 {
44962 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44963 cputime_t cutime, cstime, utime, stime;
44964 cputime_t cgtime, gtime;
44965 unsigned long rsslim = 0;
44966 - char tcomm[sizeof(task->comm)];
44967 + char tcomm[sizeof(task->comm)] = { 0 };
44968 unsigned long flags;
44969
44970 + pax_track_stack();
44971 +
44972 state = *get_task_state(task);
44973 vsize = eip = esp = 0;
44974 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44975 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44976 gtime = task_gtime(task);
44977 }
44978
44979 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44980 + if (PAX_RAND_FLAGS(mm)) {
44981 + eip = 0;
44982 + esp = 0;
44983 + wchan = 0;
44984 + }
44985 +#endif
44986 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44987 + wchan = 0;
44988 + eip =0;
44989 + esp =0;
44990 +#endif
44991 +
44992 /* scale priority and nice values from timeslices to -20..20 */
44993 /* to make it look like a "normal" Unix priority/nice value */
44994 priority = task_prio(task);
44995 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44996 vsize,
44997 mm ? get_mm_rss(mm) : 0,
44998 rsslim,
44999 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45000 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45001 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45002 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45003 +#else
45004 mm ? (permitted ? mm->start_code : 1) : 0,
45005 mm ? (permitted ? mm->end_code : 1) : 0,
45006 (permitted && mm) ? mm->start_stack : 0,
45007 +#endif
45008 esp,
45009 eip,
45010 /* The signal information here is obsolete.
45011 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
45012
45013 return 0;
45014 }
45015 +
45016 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45017 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45018 +{
45019 + u32 curr_ip = 0;
45020 + unsigned long flags;
45021 +
45022 + if (lock_task_sighand(task, &flags)) {
45023 + curr_ip = task->signal->curr_ip;
45024 + unlock_task_sighand(task, &flags);
45025 + }
45026 +
45027 + return sprintf(buffer, "%pI4\n", &curr_ip);
45028 +}
45029 +#endif
45030 diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
45031 --- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
45032 +++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
45033 @@ -102,6 +102,22 @@ struct pid_entry {
45034 union proc_op op;
45035 };
45036
45037 +struct getdents_callback {
45038 + struct linux_dirent __user * current_dir;
45039 + struct linux_dirent __user * previous;
45040 + struct file * file;
45041 + int count;
45042 + int error;
45043 +};
45044 +
45045 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45046 + loff_t offset, u64 ino, unsigned int d_type)
45047 +{
45048 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45049 + buf->error = -EINVAL;
45050 + return 0;
45051 +}
45052 +
45053 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45054 .name = (NAME), \
45055 .len = sizeof(NAME) - 1, \
45056 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45057 if (task == current)
45058 return 0;
45059
45060 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45061 + return -EPERM;
45062 +
45063 /*
45064 * If current is actively ptrace'ing, and would also be
45065 * permitted to freshly attach with ptrace now, permit it.
45066 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45067 if (!mm->arg_end)
45068 goto out_mm; /* Shh! No looking before we're done */
45069
45070 + if (gr_acl_handle_procpidmem(task))
45071 + goto out_mm;
45072 +
45073 len = mm->arg_end - mm->arg_start;
45074
45075 if (len > PAGE_SIZE)
45076 @@ -287,12 +309,28 @@ out:
45077 return res;
45078 }
45079
45080 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45081 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45082 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45083 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45084 +#endif
45085 +
45086 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45087 {
45088 int res = 0;
45089 struct mm_struct *mm = get_task_mm(task);
45090 if (mm) {
45091 unsigned int nwords = 0;
45092 +
45093 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45094 + /* allow if we're currently ptracing this task */
45095 + if (PAX_RAND_FLAGS(mm) &&
45096 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45097 + mmput(mm);
45098 + return res;
45099 + }
45100 +#endif
45101 +
45102 do {
45103 nwords += 2;
45104 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45105 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45106 }
45107
45108
45109 -#ifdef CONFIG_KALLSYMS
45110 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45111 /*
45112 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45113 * Returns the resolved symbol. If that fails, simply return the address.
45114 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45115 }
45116 #endif /* CONFIG_KALLSYMS */
45117
45118 -#ifdef CONFIG_STACKTRACE
45119 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45120
45121 #define MAX_STACK_TRACE_DEPTH 64
45122
45123 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45124 return count;
45125 }
45126
45127 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45128 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45129 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45130 {
45131 long nr;
45132 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45133 /************************************************************************/
45134
45135 /* permission checks */
45136 -static int proc_fd_access_allowed(struct inode *inode)
45137 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45138 {
45139 struct task_struct *task;
45140 int allowed = 0;
45141 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45142 */
45143 task = get_proc_task(inode);
45144 if (task) {
45145 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45146 + if (log)
45147 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45148 + else
45149 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45150 put_task_struct(task);
45151 }
45152 return allowed;
45153 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45154 if (!task)
45155 goto out_no_task;
45156
45157 + if (gr_acl_handle_procpidmem(task))
45158 + goto out;
45159 +
45160 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45161 goto out;
45162
45163 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45164 path_put(&nd->path);
45165
45166 /* Are we allowed to snoop on the tasks file descriptors? */
45167 - if (!proc_fd_access_allowed(inode))
45168 + if (!proc_fd_access_allowed(inode,0))
45169 goto out;
45170
45171 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45172 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45173 struct path path;
45174
45175 /* Are we allowed to snoop on the tasks file descriptors? */
45176 - if (!proc_fd_access_allowed(inode))
45177 - goto out;
45178 + /* logging this is needed for learning on chromium to work properly,
45179 + but we don't want to flood the logs from 'ps' which does a readlink
45180 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45181 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45182 + */
45183 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45184 + if (!proc_fd_access_allowed(inode,0))
45185 + goto out;
45186 + } else {
45187 + if (!proc_fd_access_allowed(inode,1))
45188 + goto out;
45189 + }
45190
45191 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45192 if (error)
45193 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45194 rcu_read_lock();
45195 cred = __task_cred(task);
45196 inode->i_uid = cred->euid;
45197 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45198 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45199 +#else
45200 inode->i_gid = cred->egid;
45201 +#endif
45202 rcu_read_unlock();
45203 }
45204 security_task_to_inode(task, inode);
45205 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45206 struct inode *inode = dentry->d_inode;
45207 struct task_struct *task;
45208 const struct cred *cred;
45209 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45210 + const struct cred *tmpcred = current_cred();
45211 +#endif
45212
45213 generic_fillattr(inode, stat);
45214
45215 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45216 stat->uid = 0;
45217 stat->gid = 0;
45218 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45219 +
45220 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45221 + rcu_read_unlock();
45222 + return -ENOENT;
45223 + }
45224 +
45225 if (task) {
45226 + cred = __task_cred(task);
45227 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45228 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45229 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45230 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45231 +#endif
45232 + ) {
45233 +#endif
45234 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45235 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45236 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45237 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45238 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45239 +#endif
45240 task_dumpable(task)) {
45241 - cred = __task_cred(task);
45242 stat->uid = cred->euid;
45243 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45244 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45245 +#else
45246 stat->gid = cred->egid;
45247 +#endif
45248 }
45249 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45250 + } else {
45251 + rcu_read_unlock();
45252 + return -ENOENT;
45253 + }
45254 +#endif
45255 }
45256 rcu_read_unlock();
45257 return 0;
45258 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45259
45260 if (task) {
45261 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45262 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45263 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45264 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45265 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45266 +#endif
45267 task_dumpable(task)) {
45268 rcu_read_lock();
45269 cred = __task_cred(task);
45270 inode->i_uid = cred->euid;
45271 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45272 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45273 +#else
45274 inode->i_gid = cred->egid;
45275 +#endif
45276 rcu_read_unlock();
45277 } else {
45278 inode->i_uid = 0;
45279 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45280 int fd = proc_fd(inode);
45281
45282 if (task) {
45283 - files = get_files_struct(task);
45284 + if (!gr_acl_handle_procpidmem(task))
45285 + files = get_files_struct(task);
45286 put_task_struct(task);
45287 }
45288 if (files) {
45289 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
45290 static int proc_fd_permission(struct inode *inode, int mask)
45291 {
45292 int rv;
45293 + struct task_struct *task;
45294
45295 rv = generic_permission(inode, mask, NULL);
45296 - if (rv == 0)
45297 - return 0;
45298 +
45299 if (task_pid(current) == proc_pid(inode))
45300 rv = 0;
45301 +
45302 + task = get_proc_task(inode);
45303 + if (task == NULL)
45304 + return rv;
45305 +
45306 + if (gr_acl_handle_procpidmem(task))
45307 + rv = -EACCES;
45308 +
45309 + put_task_struct(task);
45310 +
45311 return rv;
45312 }
45313
45314 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45315 if (!task)
45316 goto out_no_task;
45317
45318 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45319 + goto out;
45320 +
45321 /*
45322 * Yes, it does not scale. And it should not. Don't add
45323 * new entries into /proc/<tgid>/ without very good reasons.
45324 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45325 if (!task)
45326 goto out_no_task;
45327
45328 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45329 + goto out;
45330 +
45331 ret = 0;
45332 i = filp->f_pos;
45333 switch (i) {
45334 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45335 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45336 void *cookie)
45337 {
45338 - char *s = nd_get_link(nd);
45339 + const char *s = nd_get_link(nd);
45340 if (!IS_ERR(s))
45341 __putname(s);
45342 }
45343 @@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45344 #ifdef CONFIG_SCHED_DEBUG
45345 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45346 #endif
45347 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45348 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45349 INF("syscall", S_IRUSR, proc_pid_syscall),
45350 #endif
45351 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45352 @@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45353 #ifdef CONFIG_SECURITY
45354 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45355 #endif
45356 -#ifdef CONFIG_KALLSYMS
45357 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45358 INF("wchan", S_IRUGO, proc_pid_wchan),
45359 #endif
45360 -#ifdef CONFIG_STACKTRACE
45361 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45362 ONE("stack", S_IRUSR, proc_pid_stack),
45363 #endif
45364 #ifdef CONFIG_SCHEDSTATS
45365 @@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45366 #ifdef CONFIG_TASK_IO_ACCOUNTING
45367 INF("io", S_IRUSR, proc_tgid_io_accounting),
45368 #endif
45369 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45370 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45371 +#endif
45372 };
45373
45374 static int proc_tgid_base_readdir(struct file * filp,
45375 @@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45376 if (!inode)
45377 goto out;
45378
45379 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45380 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45381 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45382 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45383 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45384 +#else
45385 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45386 +#endif
45387 inode->i_op = &proc_tgid_base_inode_operations;
45388 inode->i_fop = &proc_tgid_base_operations;
45389 inode->i_flags|=S_IMMUTABLE;
45390 @@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45391 if (!task)
45392 goto out;
45393
45394 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45395 + goto out_put_task;
45396 +
45397 result = proc_pid_instantiate(dir, dentry, task, NULL);
45398 +out_put_task:
45399 put_task_struct(task);
45400 out:
45401 return result;
45402 @@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45403 {
45404 unsigned int nr;
45405 struct task_struct *reaper;
45406 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45407 + const struct cred *tmpcred = current_cred();
45408 + const struct cred *itercred;
45409 +#endif
45410 + filldir_t __filldir = filldir;
45411 struct tgid_iter iter;
45412 struct pid_namespace *ns;
45413
45414 @@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45415 for (iter = next_tgid(ns, iter);
45416 iter.task;
45417 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45418 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45419 + rcu_read_lock();
45420 + itercred = __task_cred(iter.task);
45421 +#endif
45422 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45423 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45424 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45425 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45426 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45427 +#endif
45428 + )
45429 +#endif
45430 + )
45431 + __filldir = &gr_fake_filldir;
45432 + else
45433 + __filldir = filldir;
45434 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45435 + rcu_read_unlock();
45436 +#endif
45437 filp->f_pos = iter.tgid + TGID_OFFSET;
45438 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45439 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45440 put_task_struct(iter.task);
45441 goto out;
45442 }
45443 @@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45444 #ifdef CONFIG_SCHED_DEBUG
45445 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45446 #endif
45447 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45448 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45449 INF("syscall", S_IRUSR, proc_pid_syscall),
45450 #endif
45451 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45452 @@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45453 #ifdef CONFIG_SECURITY
45454 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45455 #endif
45456 -#ifdef CONFIG_KALLSYMS
45457 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45458 INF("wchan", S_IRUGO, proc_pid_wchan),
45459 #endif
45460 -#ifdef CONFIG_STACKTRACE
45461 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45462 ONE("stack", S_IRUSR, proc_pid_stack),
45463 #endif
45464 #ifdef CONFIG_SCHEDSTATS
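
The fs/proc/base.c hunks above gate every way a task can be seen through /proc: getattr, revalidate, lookup and readdir all bail out for chrooted or hidden tasks, and proc_pid_readdir swaps the real filldir callback for gr_fake_filldir whenever the viewer's uid/group does not entitle it to the entry, so the PID never appears in the listing at all. Below is a minimal user-space sketch of that callback-swapping idea, not the kernel code: readdir(3) stands in for the tgid iterator and may_see() is an invented placeholder for the uid/group policy.

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Callback type modelled loosely on the kernel's filldir_t. */
typedef int (*fill_t)(void *buf, const char *name);

/* Real callback: emit the entry. */
static int real_fill(void *buf, const char *name)
{
	(void)buf;
	return printf("%s\n", name) < 0 ? -1 : 0;
}

/* Fake callback: report success but emit nothing, so the entry vanishes. */
static int fake_fill(void *buf, const char *name)
{
	(void)buf;
	(void)name;
	return 0;
}

/* Invented policy: hide all-numeric names (the per-PID directories). */
static int may_see(const char *name)
{
	return strspn(name, "0123456789") != strlen(name);
}

int main(void)
{
	DIR *d = opendir("/proc");
	struct dirent *de;

	if (!d)
		return 1;
	while ((de = readdir(d)) != NULL) {
		/* Pick the callback per entry, as __filldir is picked in the hunk. */
		fill_t fill = may_see(de->d_name) ? real_fill : fake_fill;
		if (fill(NULL, de->d_name) < 0)
			break;
	}
	closedir(d);
	return 0;
}
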
45465 diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45466 --- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45467 +++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45468 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45469
45470 static int __init proc_cmdline_init(void)
45471 {
45472 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45473 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45474 +#else
45475 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45476 +#endif
45477 return 0;
45478 }
45479 module_init(proc_cmdline_init);
45480 diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45481 --- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45482 +++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45483 @@ -64,7 +64,11 @@ static const struct file_operations proc
45484
45485 static int __init proc_devices_init(void)
45486 {
45487 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45488 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45489 +#else
45490 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45491 +#endif
45492 return 0;
45493 }
45494 module_init(proc_devices_init);
45495 diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45496 --- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45497 +++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45498 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45499 if (de->mode) {
45500 inode->i_mode = de->mode;
45501 inode->i_uid = de->uid;
45502 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45503 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45504 +#else
45505 inode->i_gid = de->gid;
45506 +#endif
45507 }
45508 if (de->size)
45509 inode->i_size = de->size;
45510 diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45511 --- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45512 +++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45513 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45514 struct pid *pid, struct task_struct *task);
45515 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45516 struct pid *pid, struct task_struct *task);
45517 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45518 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45519 +#endif
45520 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45521
45522 extern const struct file_operations proc_maps_operations;
45523 diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45524 --- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45525 +++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45526 @@ -30,12 +30,12 @@ config PROC_FS
45527
45528 config PROC_KCORE
45529 bool "/proc/kcore support" if !ARM
45530 - depends on PROC_FS && MMU
45531 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45532
45533 config PROC_VMCORE
45534 bool "/proc/vmcore support (EXPERIMENTAL)"
45535 - depends on PROC_FS && CRASH_DUMP
45536 - default y
45537 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45538 + default n
45539 help
45540 Exports the dump image of crashed kernel in ELF format.
45541
45542 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45543 limited in memory.
45544
45545 config PROC_PAGE_MONITOR
45546 - default y
45547 - depends on PROC_FS && MMU
45548 + default n
45549 + depends on PROC_FS && MMU && !GRKERNSEC
45550 bool "Enable /proc page monitoring" if EMBEDDED
45551 help
45552 Various /proc files exist to monitor process memory utilization:
45553 diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45554 --- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45555 +++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45556 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45557 off_t offset = 0;
45558 struct kcore_list *m;
45559
45560 + pax_track_stack();
45561 +
45562 /* setup ELF header */
45563 elf = (struct elfhdr *) bufp;
45564 bufp += sizeof(struct elfhdr);
45565 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45566 * the addresses in the elf_phdr on our list.
45567 */
45568 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45569 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45570 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45571 + if (tsz > buflen)
45572 tsz = buflen;
45573 -
45574 +
45575 while (buflen) {
45576 struct kcore_list *m;
45577
45578 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45579 kfree(elf_buf);
45580 } else {
45581 if (kern_addr_valid(start)) {
45582 - unsigned long n;
45583 + char *elf_buf;
45584 + mm_segment_t oldfs;
45585
45586 - n = copy_to_user(buffer, (char *)start, tsz);
45587 - /*
45588 - * We cannot distingush between fault on source
45589 - * and fault on destination. When this happens
45590 - * we clear too and hope it will trigger the
45591 - * EFAULT again.
45592 - */
45593 - if (n) {
45594 - if (clear_user(buffer + tsz - n,
45595 - n))
45596 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45597 + if (!elf_buf)
45598 + return -ENOMEM;
45599 + oldfs = get_fs();
45600 + set_fs(KERNEL_DS);
45601 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45602 + set_fs(oldfs);
45603 + if (copy_to_user(buffer, elf_buf, tsz)) {
45604 + kfree(elf_buf);
45605 return -EFAULT;
45606 + }
45607 }
45608 + set_fs(oldfs);
45609 + kfree(elf_buf);
45610 } else {
45611 if (clear_user(buffer, tsz))
45612 return -EFAULT;
45613 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45614
45615 static int open_kcore(struct inode *inode, struct file *filp)
45616 {
45617 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45618 + return -EPERM;
45619 +#endif
45620 if (!capable(CAP_SYS_RAWIO))
45621 return -EPERM;
45622 if (kcore_need_update)
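
The read_kcore change above stops copying straight from a kernel virtual address to the user buffer; it stages the data in a kmalloc'd bounce buffer under set_fs(KERNEL_DS) and only then does the copy_to_user, so a fault while reading the source is reported instead of being papered over with clear_user. A rough user-space analogue of the bounce-buffer step follows; copy_via_bounce and safe_read are illustrative names, not kernel interfaces.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/*
 * Copy len bytes to dst from a source that may fault. Staging through a
 * private buffer keeps the two failure cases apart: a source fault is
 * reported as -EFAULT and dst is never half-written.
 */
static int copy_via_bounce(void *dst, const void *src, size_t len,
			   int (*safe_read)(void *to, const void *from, size_t n))
{
	void *bounce = malloc(len);
	int err = 0;

	if (!bounce)
		return -ENOMEM;
	if (safe_read(bounce, src, len))
		err = -EFAULT;		/* the source faulted; nothing reached dst */
	else
		memcpy(dst, bounce, len);
	free(bounce);
	return err;
}
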
45623 diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45624 --- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45625 +++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45626 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45627 unsigned long pages[NR_LRU_LISTS];
45628 int lru;
45629
45630 + pax_track_stack();
45631 +
45632 /*
45633 * display in kilobytes.
45634 */
45635 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45636 vmi.used >> 10,
45637 vmi.largest_chunk >> 10
45638 #ifdef CONFIG_MEMORY_FAILURE
45639 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45640 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45641 #endif
45642 );
45643
45644 diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45645 --- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45646 +++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45647 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45648 if (len < 1)
45649 len = 1;
45650 seq_printf(m, "%*c", len, ' ');
45651 - seq_path(m, &file->f_path, "");
45652 + seq_path(m, &file->f_path, "\n\\");
45653 }
45654
45655 seq_putc(m, '\n');
45656 diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45657 --- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45658 +++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45659 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45660 struct task_struct *task;
45661 struct nsproxy *ns;
45662 struct net *net = NULL;
45663 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45664 + const struct cred *cred = current_cred();
45665 +#endif
45666 +
45667 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45668 + if (cred->fsuid)
45669 + return net;
45670 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45671 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45672 + return net;
45673 +#endif
45674
45675 rcu_read_lock();
45676 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45677 diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45678 --- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45679 +++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45680 @@ -7,6 +7,8 @@
45681 #include <linux/security.h>
45682 #include "internal.h"
45683
45684 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45685 +
45686 static const struct dentry_operations proc_sys_dentry_operations;
45687 static const struct file_operations proc_sys_file_operations;
45688 static const struct inode_operations proc_sys_inode_operations;
45689 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45690 if (!p)
45691 goto out;
45692
45693 + if (gr_handle_sysctl(p, MAY_EXEC))
45694 + goto out;
45695 +
45696 err = ERR_PTR(-ENOMEM);
45697 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45698 if (h)
45699 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45700 if (*pos < file->f_pos)
45701 continue;
45702
45703 + if (gr_handle_sysctl(table, 0))
45704 + continue;
45705 +
45706 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45707 if (res)
45708 return res;
45709 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45710 if (IS_ERR(head))
45711 return PTR_ERR(head);
45712
45713 + if (table && gr_handle_sysctl(table, MAY_EXEC))
45714 + return -ENOENT;
45715 +
45716 generic_fillattr(inode, stat);
45717 if (table)
45718 stat->mode = (stat->mode & S_IFMT) | table->mode;
45719 diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45720 --- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45721 +++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45722 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
45723 #ifdef CONFIG_PROC_DEVICETREE
45724 proc_device_tree_init();
45725 #endif
45726 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45727 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45728 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45729 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45730 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45731 +#endif
45732 +#else
45733 proc_mkdir("bus", NULL);
45734 +#endif
45735 proc_sys_init();
45736 }
45737
45738 diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45739 --- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45740 +++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45741 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45742 "VmStk:\t%8lu kB\n"
45743 "VmExe:\t%8lu kB\n"
45744 "VmLib:\t%8lu kB\n"
45745 - "VmPTE:\t%8lu kB\n",
45746 - hiwater_vm << (PAGE_SHIFT-10),
45747 + "VmPTE:\t%8lu kB\n"
45748 +
45749 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45750 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45751 +#endif
45752 +
45753 + ,hiwater_vm << (PAGE_SHIFT-10),
45754 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45755 mm->locked_vm << (PAGE_SHIFT-10),
45756 hiwater_rss << (PAGE_SHIFT-10),
45757 total_rss << (PAGE_SHIFT-10),
45758 data << (PAGE_SHIFT-10),
45759 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45760 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45761 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45762 +
45763 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45764 + , mm->context.user_cs_base, mm->context.user_cs_limit
45765 +#endif
45766 +
45767 + );
45768 }
45769
45770 unsigned long task_vsize(struct mm_struct *mm)
45771 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45772 struct proc_maps_private *priv = m->private;
45773 struct vm_area_struct *vma = v;
45774
45775 - vma_stop(priv, vma);
45776 + if (!IS_ERR(vma))
45777 + vma_stop(priv, vma);
45778 if (priv->task)
45779 put_task_struct(priv->task);
45780 }
45781 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45782 return ret;
45783 }
45784
45785 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45786 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45787 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45788 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45789 +#endif
45790 +
45791 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45792 {
45793 struct mm_struct *mm = vma->vm_mm;
45794 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45795 int flags = vma->vm_flags;
45796 unsigned long ino = 0;
45797 unsigned long long pgoff = 0;
45798 - unsigned long start;
45799 dev_t dev = 0;
45800 int len;
45801
45802 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45803 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45804 }
45805
45806 - /* We don't show the stack guard page in /proc/maps */
45807 - start = vma->vm_start;
45808 - if (vma->vm_flags & VM_GROWSDOWN)
45809 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45810 - start += PAGE_SIZE;
45811 -
45812 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45813 - start,
45814 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45815 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45816 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45817 +#else
45818 + vma->vm_start,
45819 vma->vm_end,
45820 +#endif
45821 flags & VM_READ ? 'r' : '-',
45822 flags & VM_WRITE ? 'w' : '-',
45823 flags & VM_EXEC ? 'x' : '-',
45824 flags & VM_MAYSHARE ? 's' : 'p',
45825 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45826 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45827 +#else
45828 pgoff,
45829 +#endif
45830 MAJOR(dev), MINOR(dev), ino, &len);
45831
45832 /*
45833 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45834 */
45835 if (file) {
45836 pad_len_spaces(m, len);
45837 - seq_path(m, &file->f_path, "\n");
45838 + seq_path(m, &file->f_path, "\n\\");
45839 } else {
45840 const char *name = arch_vma_name(vma);
45841 if (!name) {
45842 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45843 if (vma->vm_start <= mm->brk &&
45844 vma->vm_end >= mm->start_brk) {
45845 name = "[heap]";
45846 - } else if (vma->vm_start <= mm->start_stack &&
45847 - vma->vm_end >= mm->start_stack) {
45848 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45849 + (vma->vm_start <= mm->start_stack &&
45850 + vma->vm_end >= mm->start_stack)) {
45851 name = "[stack]";
45852 }
45853 } else {
45854 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45855 };
45856
45857 memset(&mss, 0, sizeof mss);
45858 - mss.vma = vma;
45859 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45860 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45861 +
45862 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45863 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45864 +#endif
45865 + mss.vma = vma;
45866 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45867 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45868 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45869 + }
45870 +#endif
45871
45872 show_map_vma(m, vma);
45873
45874 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45875 "Swap: %8lu kB\n"
45876 "KernelPageSize: %8lu kB\n"
45877 "MMUPageSize: %8lu kB\n",
45878 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45879 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45880 +#else
45881 (vma->vm_end - vma->vm_start) >> 10,
45882 +#endif
45883 mss.resident >> 10,
45884 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45885 mss.shared_clean >> 10,
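
Two defensive tweaks run through the task_mmu.c hunks: with CONFIG_GRKERNSEC_PROC_MEMMAP the start/end/offset of another user's randomized mappings are printed as 0, so /proc/<pid>/maps cannot be used to defeat ASLR, and seq_path() is told to escape '\n' and '\\', so a mapping backed by a maliciously named file cannot inject extra lines into the output. The toy formatter below shows both ideas; the mask flag and the textual escaping are simplified stand-ins for PAX_RAND_FLAGS() and seq_path()'s own escaping.

#include <stdio.h>

/* Print one maps-style line; when mask is set, the layout is not disclosed. */
static void print_region(unsigned long start, unsigned long end,
			 const char *perms, const char *name, int mask)
{
	printf("%08lx-%08lx %s ", mask ? 0UL : start, mask ? 0UL : end, perms);

	/* Escape newline and backslash so a hostile name cannot forge lines. */
	for (; *name; name++) {
		if (*name == '\n')
			fputs("\\n", stdout);
		else if (*name == '\\')
			fputs("\\\\", stdout);
		else
			putchar(*name);
	}
	putchar('\n');
}

int main(void)
{
	print_region(0x08048000UL, 0x08049000UL, "r-xp", "/bin/true", 0);
	print_region(0xb7700000UL, 0xb7721000UL, "rw-p", "evil\nname", 1);
	return 0;
}
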
45886 diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45887 --- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45888 +++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45889 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45890 else
45891 bytes += kobjsize(mm);
45892
45893 - if (current->fs && current->fs->users > 1)
45894 + if (current->fs && atomic_read(&current->fs->users) > 1)
45895 sbytes += kobjsize(current->fs);
45896 else
45897 bytes += kobjsize(current->fs);
45898 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45899 if (len < 1)
45900 len = 1;
45901 seq_printf(m, "%*c", len, ' ');
45902 - seq_path(m, &file->f_path, "");
45903 + seq_path(m, &file->f_path, "\n\\");
45904 }
45905
45906 seq_putc(m, '\n');
45907 diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45908 --- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45909 +++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45910 @@ -16,6 +16,7 @@
45911 #include <linux/security.h>
45912 #include <linux/syscalls.h>
45913 #include <linux/unistd.h>
45914 +#include <linux/namei.h>
45915
45916 #include <asm/uaccess.h>
45917
45918 @@ -67,6 +68,7 @@ struct old_linux_dirent {
45919
45920 struct readdir_callback {
45921 struct old_linux_dirent __user * dirent;
45922 + struct file * file;
45923 int result;
45924 };
45925
45926 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45927 buf->result = -EOVERFLOW;
45928 return -EOVERFLOW;
45929 }
45930 +
45931 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45932 + return 0;
45933 +
45934 buf->result++;
45935 dirent = buf->dirent;
45936 if (!access_ok(VERIFY_WRITE, dirent,
45937 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45938
45939 buf.result = 0;
45940 buf.dirent = dirent;
45941 + buf.file = file;
45942
45943 error = vfs_readdir(file, fillonedir, &buf);
45944 if (buf.result)
45945 @@ -142,6 +149,7 @@ struct linux_dirent {
45946 struct getdents_callback {
45947 struct linux_dirent __user * current_dir;
45948 struct linux_dirent __user * previous;
45949 + struct file * file;
45950 int count;
45951 int error;
45952 };
45953 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45954 buf->error = -EOVERFLOW;
45955 return -EOVERFLOW;
45956 }
45957 +
45958 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45959 + return 0;
45960 +
45961 dirent = buf->previous;
45962 if (dirent) {
45963 if (__put_user(offset, &dirent->d_off))
45964 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45965 buf.previous = NULL;
45966 buf.count = count;
45967 buf.error = 0;
45968 + buf.file = file;
45969
45970 error = vfs_readdir(file, filldir, &buf);
45971 if (error >= 0)
45972 @@ -228,6 +241,7 @@ out:
45973 struct getdents_callback64 {
45974 struct linux_dirent64 __user * current_dir;
45975 struct linux_dirent64 __user * previous;
45976 + struct file *file;
45977 int count;
45978 int error;
45979 };
45980 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45981 buf->error = -EINVAL; /* only used if we fail.. */
45982 if (reclen > buf->count)
45983 return -EINVAL;
45984 +
45985 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45986 + return 0;
45987 +
45988 dirent = buf->previous;
45989 if (dirent) {
45990 if (__put_user(offset, &dirent->d_off))
45991 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45992
45993 buf.current_dir = dirent;
45994 buf.previous = NULL;
45995 + buf.file = file;
45996 buf.count = count;
45997 buf.error = 0;
45998
45999 diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
46000 --- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
46001 +++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
46002 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46003 struct reiserfs_dir_entry de;
46004 int ret = 0;
46005
46006 + pax_track_stack();
46007 +
46008 reiserfs_write_lock(inode->i_sb);
46009
46010 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46011 diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
46012 --- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
46013 +++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
46014 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
46015 return;
46016 }
46017
46018 - atomic_inc(&(fs_generation(tb->tb_sb)));
46019 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46020 do_balance_starts(tb);
46021
46022 /* balance leaf returns 0 except if combining L R and S into
46023 diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
46024 --- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
46025 +++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
46026 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
46027 vi->vi_index, vi->vi_type, vi->vi_ih);
46028 }
46029
46030 -static struct item_operations stat_data_ops = {
46031 +static const struct item_operations stat_data_ops = {
46032 .bytes_number = sd_bytes_number,
46033 .decrement_key = sd_decrement_key,
46034 .is_left_mergeable = sd_is_left_mergeable,
46035 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46036 vi->vi_index, vi->vi_type, vi->vi_ih);
46037 }
46038
46039 -static struct item_operations direct_ops = {
46040 +static const struct item_operations direct_ops = {
46041 .bytes_number = direct_bytes_number,
46042 .decrement_key = direct_decrement_key,
46043 .is_left_mergeable = direct_is_left_mergeable,
46044 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46045 vi->vi_index, vi->vi_type, vi->vi_ih);
46046 }
46047
46048 -static struct item_operations indirect_ops = {
46049 +static const struct item_operations indirect_ops = {
46050 .bytes_number = indirect_bytes_number,
46051 .decrement_key = indirect_decrement_key,
46052 .is_left_mergeable = indirect_is_left_mergeable,
46053 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46054 printk("\n");
46055 }
46056
46057 -static struct item_operations direntry_ops = {
46058 +static const struct item_operations direntry_ops = {
46059 .bytes_number = direntry_bytes_number,
46060 .decrement_key = direntry_decrement_key,
46061 .is_left_mergeable = direntry_is_left_mergeable,
46062 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46063 "Invalid item type observed, run fsck ASAP");
46064 }
46065
46066 -static struct item_operations errcatch_ops = {
46067 +static const struct item_operations errcatch_ops = {
46068 errcatch_bytes_number,
46069 errcatch_decrement_key,
46070 errcatch_is_left_mergeable,
46071 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46072 #error Item types must use disk-format assigned values.
46073 #endif
46074
46075 -struct item_operations *item_ops[TYPE_ANY + 1] = {
46076 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46077 &stat_data_ops,
46078 &indirect_ops,
46079 &direct_ops,
46080 diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
46081 --- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46082 +++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46083 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46084 struct buffer_head *bh;
46085 int i, j;
46086
46087 + pax_track_stack();
46088 +
46089 bh = __getblk(dev, block, bufsize);
46090 if (buffer_uptodate(bh))
46091 return (bh);
46092 diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
46093 --- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46094 +++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46095 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46096 unsigned long savelink = 1;
46097 struct timespec ctime;
46098
46099 + pax_track_stack();
46100 +
46101 /* three balancings: (1) old name removal, (2) new name insertion
46102 and (3) maybe "save" link insertion
46103 stat data updates: (1) old directory,
46104 diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
46105 --- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46106 +++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46107 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46108 "SMALL_TAILS " : "NO_TAILS ",
46109 replay_only(sb) ? "REPLAY_ONLY " : "",
46110 convert_reiserfs(sb) ? "CONV " : "",
46111 - atomic_read(&r->s_generation_counter),
46112 + atomic_read_unchecked(&r->s_generation_counter),
46113 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46114 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46115 SF(s_good_search_by_key_reada), SF(s_bmaps),
46116 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46117 struct journal_params *jp = &rs->s_v1.s_journal;
46118 char b[BDEVNAME_SIZE];
46119
46120 + pax_track_stack();
46121 +
46122 seq_printf(m, /* on-disk fields */
46123 "jp_journal_1st_block: \t%i\n"
46124 "jp_journal_dev: \t%s[%x]\n"
46125 diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
46126 --- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46127 +++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46128 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46129 int iter = 0;
46130 #endif
46131
46132 + pax_track_stack();
46133 +
46134 BUG_ON(!th->t_trans_id);
46135
46136 init_tb_struct(th, &s_del_balance, sb, path,
46137 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46138 int retval;
46139 int quota_cut_bytes = 0;
46140
46141 + pax_track_stack();
46142 +
46143 BUG_ON(!th->t_trans_id);
46144
46145 le_key2cpu_key(&cpu_key, key);
46146 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46147 int quota_cut_bytes;
46148 loff_t tail_pos = 0;
46149
46150 + pax_track_stack();
46151 +
46152 BUG_ON(!th->t_trans_id);
46153
46154 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46155 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46156 int retval;
46157 int fs_gen;
46158
46159 + pax_track_stack();
46160 +
46161 BUG_ON(!th->t_trans_id);
46162
46163 fs_gen = get_generation(inode->i_sb);
46164 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46165 int fs_gen = 0;
46166 int quota_bytes = 0;
46167
46168 + pax_track_stack();
46169 +
46170 BUG_ON(!th->t_trans_id);
46171
46172 if (inode) { /* Do we count quotas for item? */
46173 diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
46174 --- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46175 +++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46176 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46177 {.option_name = NULL}
46178 };
46179
46180 + pax_track_stack();
46181 +
46182 *blocks = 0;
46183 if (!options || !*options)
46184 /* use default configuration: create tails, journaling on, no
46185 diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
46186 --- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46187 +++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46188 @@ -20,6 +20,7 @@
46189 #include <linux/module.h>
46190 #include <linux/slab.h>
46191 #include <linux/poll.h>
46192 +#include <linux/security.h>
46193 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46194 #include <linux/file.h>
46195 #include <linux/fdtable.h>
46196 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46197 int retval, i, timed_out = 0;
46198 unsigned long slack = 0;
46199
46200 + pax_track_stack();
46201 +
46202 rcu_read_lock();
46203 retval = max_select_fd(n, fds);
46204 rcu_read_unlock();
46205 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46206 /* Allocate small arguments on the stack to save memory and be faster */
46207 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46208
46209 + pax_track_stack();
46210 +
46211 ret = -EINVAL;
46212 if (n < 0)
46213 goto out_nofds;
46214 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46215 struct poll_list *walk = head;
46216 unsigned long todo = nfds;
46217
46218 + pax_track_stack();
46219 +
46220 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46221 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46222 return -EINVAL;
46223
46224 diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
46225 --- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46226 +++ linux-2.6.32.45/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46227 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46228 return 0;
46229 }
46230 if (!m->buf) {
46231 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46232 + m->size = PAGE_SIZE;
46233 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46234 if (!m->buf)
46235 return -ENOMEM;
46236 }
46237 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46238 Eoverflow:
46239 m->op->stop(m, p);
46240 kfree(m->buf);
46241 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46242 + m->size <<= 1;
46243 + m->buf = kmalloc(m->size, GFP_KERNEL);
46244 return !m->buf ? -ENOMEM : -EAGAIN;
46245 }
46246
46247 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46248 m->version = file->f_version;
46249 /* grab buffer if we didn't have one */
46250 if (!m->buf) {
46251 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46252 + m->size = PAGE_SIZE;
46253 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46254 if (!m->buf)
46255 goto Enomem;
46256 }
46257 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46258 goto Fill;
46259 m->op->stop(m, p);
46260 kfree(m->buf);
46261 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46262 + m->size <<= 1;
46263 + m->buf = kmalloc(m->size, GFP_KERNEL);
46264 if (!m->buf)
46265 goto Enomem;
46266 m->count = 0;
46267 @@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46268 int res = -ENOMEM;
46269
46270 if (op) {
46271 - op->start = single_start;
46272 - op->next = single_next;
46273 - op->stop = single_stop;
46274 - op->show = show;
46275 + *(void **)&op->start = single_start;
46276 + *(void **)&op->next = single_next;
46277 + *(void **)&op->stop = single_stop;
46278 + *(void **)&op->show = show;
46279 res = seq_open(file, op);
46280 if (!res)
46281 ((struct seq_file *)file->private_data)->private = data;
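
The seq_file.c hunks make two mechanical changes: buffer growth no longer hides the size update inside the kmalloc() argument (m->buf = kmalloc(m->size <<= 1, GFP_KERNEL) becomes two statements), and the single_open() writes into the now const-qualified seq_operations go through a *(void **)& cast. The first pattern in isolation, with placeholder names and error handling elided, shows why pulling the side effect out of the argument list is easier to audit:

#include <stdlib.h>

static char *grow(char *buf, size_t *size)
{
	/*
	 * Before: buf = realloc(buf, *size <<= 1);
	 * The doubling is a side effect buried in an argument; swapping the
	 * allocator later can silently drop it.
	 */
	*size <<= 1;			/* the state change is explicit ...   */
	return realloc(buf, *size);	/* ... and the allocation is a plain call */
}
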
46282 diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
46283 --- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46284 +++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46285 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46286
46287 out:
46288 if (server->local_nls != NULL && server->remote_nls != NULL)
46289 - server->ops->convert = convert_cp;
46290 + *(void **)&server->ops->convert = convert_cp;
46291 else
46292 - server->ops->convert = convert_memcpy;
46293 + *(void **)&server->ops->convert = convert_memcpy;
46294
46295 smb_unlock_server(server);
46296 return n;
46297 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46298
46299 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46300 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46301 - server->ops->getattr = smb_proc_getattr_core;
46302 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
46303 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46304 - server->ops->getattr = smb_proc_getattr_ff;
46305 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46306 }
46307
46308 /* Decode server capabilities */
46309 @@ -3439,7 +3439,7 @@ out:
46310 static void
46311 install_ops(struct smb_ops *dst, struct smb_ops *src)
46312 {
46313 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46314 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46315 }
46316
46317 /* < LANMAN2 */
46318 diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46319 --- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46320 +++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46321 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46322
46323 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46324 {
46325 - char *s = nd_get_link(nd);
46326 + const char *s = nd_get_link(nd);
46327 if (!IS_ERR(s))
46328 __putname(s);
46329 }
46330 diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46331 --- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46332 +++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46333 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46334 pipe_lock(pipe);
46335
46336 for (;;) {
46337 - if (!pipe->readers) {
46338 + if (!atomic_read(&pipe->readers)) {
46339 send_sig(SIGPIPE, current, 0);
46340 if (!ret)
46341 ret = -EPIPE;
46342 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46343 do_wakeup = 0;
46344 }
46345
46346 - pipe->waiting_writers++;
46347 + atomic_inc(&pipe->waiting_writers);
46348 pipe_wait(pipe);
46349 - pipe->waiting_writers--;
46350 + atomic_dec(&pipe->waiting_writers);
46351 }
46352
46353 pipe_unlock(pipe);
46354 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46355 .spd_release = spd_release_page,
46356 };
46357
46358 + pax_track_stack();
46359 +
46360 index = *ppos >> PAGE_CACHE_SHIFT;
46361 loff = *ppos & ~PAGE_CACHE_MASK;
46362 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46363 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46364 old_fs = get_fs();
46365 set_fs(get_ds());
46366 /* The cast to a user pointer is valid due to the set_fs() */
46367 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46368 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46369 set_fs(old_fs);
46370
46371 return res;
46372 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46373 old_fs = get_fs();
46374 set_fs(get_ds());
46375 /* The cast to a user pointer is valid due to the set_fs() */
46376 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46377 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46378 set_fs(old_fs);
46379
46380 return res;
46381 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46382 .spd_release = spd_release_page,
46383 };
46384
46385 + pax_track_stack();
46386 +
46387 index = *ppos >> PAGE_CACHE_SHIFT;
46388 offset = *ppos & ~PAGE_CACHE_MASK;
46389 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46390 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46391 goto err;
46392
46393 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46394 - vec[i].iov_base = (void __user *) page_address(page);
46395 + vec[i].iov_base = (__force void __user *) page_address(page);
46396 vec[i].iov_len = this_len;
46397 pages[i] = page;
46398 spd.nr_pages++;
46399 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46400 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46401 {
46402 while (!pipe->nrbufs) {
46403 - if (!pipe->writers)
46404 + if (!atomic_read(&pipe->writers))
46405 return 0;
46406
46407 - if (!pipe->waiting_writers && sd->num_spliced)
46408 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46409 return 0;
46410
46411 if (sd->flags & SPLICE_F_NONBLOCK)
46412 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46413 * out of the pipe right after the splice_to_pipe(). So set
46414 * PIPE_READERS appropriately.
46415 */
46416 - pipe->readers = 1;
46417 + atomic_set(&pipe->readers, 1);
46418
46419 current->splice_pipe = pipe;
46420 }
46421 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46422 .spd_release = spd_release_page,
46423 };
46424
46425 + pax_track_stack();
46426 +
46427 pipe = pipe_info(file->f_path.dentry->d_inode);
46428 if (!pipe)
46429 return -EBADF;
46430 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46431 ret = -ERESTARTSYS;
46432 break;
46433 }
46434 - if (!pipe->writers)
46435 + if (!atomic_read(&pipe->writers))
46436 break;
46437 - if (!pipe->waiting_writers) {
46438 + if (!atomic_read(&pipe->waiting_writers)) {
46439 if (flags & SPLICE_F_NONBLOCK) {
46440 ret = -EAGAIN;
46441 break;
46442 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46443 pipe_lock(pipe);
46444
46445 while (pipe->nrbufs >= PIPE_BUFFERS) {
46446 - if (!pipe->readers) {
46447 + if (!atomic_read(&pipe->readers)) {
46448 send_sig(SIGPIPE, current, 0);
46449 ret = -EPIPE;
46450 break;
46451 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46452 ret = -ERESTARTSYS;
46453 break;
46454 }
46455 - pipe->waiting_writers++;
46456 + atomic_inc(&pipe->waiting_writers);
46457 pipe_wait(pipe);
46458 - pipe->waiting_writers--;
46459 + atomic_dec(&pipe->waiting_writers);
46460 }
46461
46462 pipe_unlock(pipe);
46463 @@ -1785,14 +1791,14 @@ retry:
46464 pipe_double_lock(ipipe, opipe);
46465
46466 do {
46467 - if (!opipe->readers) {
46468 + if (!atomic_read(&opipe->readers)) {
46469 send_sig(SIGPIPE, current, 0);
46470 if (!ret)
46471 ret = -EPIPE;
46472 break;
46473 }
46474
46475 - if (!ipipe->nrbufs && !ipipe->writers)
46476 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46477 break;
46478
46479 /*
46480 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46481 pipe_double_lock(ipipe, opipe);
46482
46483 do {
46484 - if (!opipe->readers) {
46485 + if (!atomic_read(&opipe->readers)) {
46486 send_sig(SIGPIPE, current, 0);
46487 if (!ret)
46488 ret = -EPIPE;
46489 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46490 * return EAGAIN if we have the potential of some data in the
46491 * future, otherwise just return 0
46492 */
46493 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46494 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46495 ret = -EAGAIN;
46496
46497 pipe_unlock(ipipe);
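
In the splice.c hunks the pipe's readers, writers and waiting_writers counters become atomic_t, and every bare increment, decrement or test is rewritten as atomic_inc/atomic_dec/atomic_read. The same bookkeeping pattern is sketched below in user space with C11 atomics; wait_for_room() is a made-up stand-in for pipe_wait(), and the function only mirrors the shape of the write-side loop body.

#include <stdatomic.h>

static atomic_int readers = 0;
static atomic_int waiting_writers = 0;

/* Hypothetical blocking wait; stands in for pipe_wait() in the hunk above. */
extern void wait_for_room(void);

static int write_side_once(void)
{
	if (atomic_load(&readers) == 0)
		return -1;			/* no readers left: would be SIGPIPE */

	atomic_fetch_add(&waiting_writers, 1);	/* mirrors atomic_inc(&pipe->waiting_writers) */
	wait_for_room();
	atomic_fetch_sub(&waiting_writers, 1);	/* mirrors atomic_dec(&pipe->waiting_writers) */
	return 0;
}
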
46498 diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46499 --- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46500 +++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46501 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46502
46503 struct sysfs_open_dirent {
46504 atomic_t refcnt;
46505 - atomic_t event;
46506 + atomic_unchecked_t event;
46507 wait_queue_head_t poll;
46508 struct list_head buffers; /* goes through sysfs_buffer.list */
46509 };
46510 @@ -53,7 +53,7 @@ struct sysfs_buffer {
46511 size_t count;
46512 loff_t pos;
46513 char * page;
46514 - struct sysfs_ops * ops;
46515 + const struct sysfs_ops * ops;
46516 struct mutex mutex;
46517 int needs_read_fill;
46518 int event;
46519 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46520 {
46521 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46522 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46523 - struct sysfs_ops * ops = buffer->ops;
46524 + const struct sysfs_ops * ops = buffer->ops;
46525 int ret = 0;
46526 ssize_t count;
46527
46528 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46529 if (!sysfs_get_active_two(attr_sd))
46530 return -ENODEV;
46531
46532 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46533 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46534 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46535
46536 sysfs_put_active_two(attr_sd);
46537 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46538 {
46539 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46540 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46541 - struct sysfs_ops * ops = buffer->ops;
46542 + const struct sysfs_ops * ops = buffer->ops;
46543 int rc;
46544
46545 /* need attr_sd for attr and ops, its parent for kobj */
46546 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46547 return -ENOMEM;
46548
46549 atomic_set(&new_od->refcnt, 0);
46550 - atomic_set(&new_od->event, 1);
46551 + atomic_set_unchecked(&new_od->event, 1);
46552 init_waitqueue_head(&new_od->poll);
46553 INIT_LIST_HEAD(&new_od->buffers);
46554 goto retry;
46555 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46556 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46557 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46558 struct sysfs_buffer *buffer;
46559 - struct sysfs_ops *ops;
46560 + const struct sysfs_ops *ops;
46561 int error = -EACCES;
46562 char *p;
46563
46564 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46565
46566 sysfs_put_active_two(attr_sd);
46567
46568 - if (buffer->event != atomic_read(&od->event))
46569 + if (buffer->event != atomic_read_unchecked(&od->event))
46570 goto trigger;
46571
46572 return DEFAULT_POLLMASK;
46573 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46574
46575 od = sd->s_attr.open;
46576 if (od) {
46577 - atomic_inc(&od->event);
46578 + atomic_inc_unchecked(&od->event);
46579 wake_up_interruptible(&od->poll);
46580 }
46581
46582 diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46583 --- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46584 +++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46585 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46586 .s_name = "",
46587 .s_count = ATOMIC_INIT(1),
46588 .s_flags = SYSFS_DIR,
46589 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46590 + .s_mode = S_IFDIR | S_IRWXU,
46591 +#else
46592 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46593 +#endif
46594 .s_ino = 1,
46595 };
46596
46597 diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46598 --- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46599 +++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46600 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46601
46602 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46603 {
46604 - char *page = nd_get_link(nd);
46605 + const char *page = nd_get_link(nd);
46606 if (!IS_ERR(page))
46607 free_page((unsigned long)page);
46608 }
46609 diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46610 --- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46611 +++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46612 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46613
46614 mutex_lock(&sbi->s_alloc_mutex);
46615 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46616 - if (bloc->logicalBlockNum < 0 ||
46617 - (bloc->logicalBlockNum + count) >
46618 - partmap->s_partition_len) {
46619 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46620 udf_debug("%d < %d || %d + %d > %d\n",
46621 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46622 count, partmap->s_partition_len);
46623 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46624
46625 mutex_lock(&sbi->s_alloc_mutex);
46626 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46627 - if (bloc->logicalBlockNum < 0 ||
46628 - (bloc->logicalBlockNum + count) >
46629 - partmap->s_partition_len) {
46630 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46631 udf_debug("%d < %d || %d + %d > %d\n",
46632 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46633 partmap->s_partition_len);
46634 diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46635 --- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46636 +++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46637 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46638 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46639 int lastblock = 0;
46640
46641 + pax_track_stack();
46642 +
46643 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46644 prev_epos.block = iinfo->i_location;
46645 prev_epos.bh = NULL;
46646 diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46647 --- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46648 +++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46649 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46650
46651 u8 udf_tag_checksum(const struct tag *t)
46652 {
46653 - u8 *data = (u8 *)t;
46654 + const u8 *data = (const u8 *)t;
46655 u8 checksum = 0;
46656 int i;
46657 for (i = 0; i < sizeof(struct tag); ++i)
46658 diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46659 --- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46660 +++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46661 @@ -1,6 +1,7 @@
46662 #include <linux/compiler.h>
46663 #include <linux/file.h>
46664 #include <linux/fs.h>
46665 +#include <linux/security.h>
46666 #include <linux/linkage.h>
46667 #include <linux/mount.h>
46668 #include <linux/namei.h>
46669 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46670 goto mnt_drop_write_and_out;
46671 }
46672 }
46673 +
46674 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46675 + error = -EACCES;
46676 + goto mnt_drop_write_and_out;
46677 + }
46678 +
46679 mutex_lock(&inode->i_mutex);
46680 error = notify_change(path->dentry, &newattrs);
46681 mutex_unlock(&inode->i_mutex);
46682 diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46683 --- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46684 +++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46685 @@ -17,8 +17,8 @@
46686 struct posix_acl *
46687 posix_acl_from_xattr(const void *value, size_t size)
46688 {
46689 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46690 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46691 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46692 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46693 int count;
46694 struct posix_acl *acl;
46695 struct posix_acl_entry *acl_e;
46696 diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46697 --- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46698 +++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46699 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46700 * Extended attribute SET operations
46701 */
46702 static long
46703 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46704 +setxattr(struct path *path, const char __user *name, const void __user *value,
46705 size_t size, int flags)
46706 {
46707 int error;
46708 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46709 return PTR_ERR(kvalue);
46710 }
46711
46712 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46713 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46714 + error = -EACCES;
46715 + goto out;
46716 + }
46717 +
46718 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46719 +out:
46720 kfree(kvalue);
46721 return error;
46722 }
46723 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46724 return error;
46725 error = mnt_want_write(path.mnt);
46726 if (!error) {
46727 - error = setxattr(path.dentry, name, value, size, flags);
46728 + error = setxattr(&path, name, value, size, flags);
46729 mnt_drop_write(path.mnt);
46730 }
46731 path_put(&path);
46732 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46733 return error;
46734 error = mnt_want_write(path.mnt);
46735 if (!error) {
46736 - error = setxattr(path.dentry, name, value, size, flags);
46737 + error = setxattr(&path, name, value, size, flags);
46738 mnt_drop_write(path.mnt);
46739 }
46740 path_put(&path);
46741 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46742 const void __user *,value, size_t, size, int, flags)
46743 {
46744 struct file *f;
46745 - struct dentry *dentry;
46746 int error = -EBADF;
46747
46748 f = fget(fd);
46749 if (!f)
46750 return error;
46751 - dentry = f->f_path.dentry;
46752 - audit_inode(NULL, dentry);
46753 + audit_inode(NULL, f->f_path.dentry);
46754 error = mnt_want_write_file(f);
46755 if (!error) {
46756 - error = setxattr(dentry, name, value, size, flags);
46757 + error = setxattr(&f->f_path, name, value, size, flags);
46758 mnt_drop_write(f->f_path.mnt);
46759 }
46760 fput(f);
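
The fs/xattr.c change widens the static setxattr() helper from a bare dentry to a struct path, so the added gr_acl_handle_setxattr() check can see the dentry and the vfsmount together; every caller already holds a path (or f->f_path) and simply passes its address. Below is a compilable sketch of that refactoring pattern with toy types; struct node, policy_allows() and do_setxattr() are invented names, not kernel API.

#include <errno.h>
#include <stddef.h>

struct mount;			/* opaque, stands in for struct vfsmount */
struct node;			/* opaque, stands in for struct dentry   */

/* A path bundles the object with the mount it was reached through. */
struct path {
	struct mount *mnt;
	struct node  *dentry;
};

extern int policy_allows(struct node *dentry, struct mount *mnt);
extern int do_setxattr(struct node *dentry, const char *name,
		       const void *value, size_t size);

/* Taking the whole path gives the policy hook both halves it needs. */
static int setxattr_checked(struct path *path, const char *name,
			    const void *value, size_t size)
{
	if (!policy_allows(path->dentry, path->mnt))
		return -EACCES;		/* deny before touching the attribute */
	return do_setxattr(path->dentry, name, value, size);
}
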
46761 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46762 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46763 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46764 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46765 xfs_fsop_geom_t fsgeo;
46766 int error;
46767
46768 + memset(&fsgeo, 0, sizeof(fsgeo));
46769 error = xfs_fs_geometry(mp, &fsgeo, 3);
46770 if (error)
46771 return -error;
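
The one-line xfs_compat_ioc_fsgeometry_v1 fix zeroes the on-stack xfs_fsop_geom_t before xfs_fs_geometry() fills it and it is copied back to user space; without the memset, padding bytes and any fields the fill routine skips would leak uninitialized kernel stack to the caller. The same hygiene in a user-space sketch; struct geom_v1 and its fields are made up to show where padding hides.

#include <string.h>
#include <unistd.h>

struct geom_v1 {
	unsigned int  block_size;
	unsigned char flags;
	/* the compiler usually inserts 3 padding bytes here */
	unsigned int  ag_count;
};

static int send_geom(int fd, unsigned int bsize, unsigned int agcount)
{
	struct geom_v1 g;

	memset(&g, 0, sizeof(g));	/* clear padding before the struct leaves us */
	g.block_size = bsize;
	g.flags = 1;
	g.ag_count = agcount;
	return write(fd, &g, sizeof(g)) == (ssize_t)sizeof(g) ? 0 : -1;
}
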
46772 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46773 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46774 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46775 @@ -134,7 +134,7 @@ xfs_find_handle(
46776 }
46777
46778 error = -EFAULT;
46779 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46780 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46781 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46782 goto out_put;
46783
46784 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46785 if (IS_ERR(dentry))
46786 return PTR_ERR(dentry);
46787
46788 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46789 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46790 if (!kbuf)
46791 goto out_dput;
46792
46793 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46794 xfs_mount_t *mp,
46795 void __user *arg)
46796 {
46797 - xfs_fsop_geom_t fsgeo;
46798 + xfs_fsop_geom_t fsgeo;
46799 int error;
46800
46801 error = xfs_fs_geometry(mp, &fsgeo, 3);
46802 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46803 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46804 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46805 @@ -468,7 +468,7 @@ xfs_vn_put_link(
46806 struct nameidata *nd,
46807 void *p)
46808 {
46809 - char *s = nd_get_link(nd);
46810 + const char *s = nd_get_link(nd);
46811
46812 if (!IS_ERR(s))
46813 kfree(s);
46814 diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46815 --- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46816 +++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46817 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46818 int nmap,
46819 int ret_nmap);
46820 #else
46821 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46822 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46823 #endif /* DEBUG */
46824
46825 #if defined(XFS_RW_TRACE)
46826 diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46827 --- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46828 +++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46829 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46830 }
46831
46832 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46833 - if (filldir(dirent, sfep->name, sfep->namelen,
46834 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46835 + char name[sfep->namelen];
46836 + memcpy(name, sfep->name, sfep->namelen);
46837 + if (filldir(dirent, name, sfep->namelen,
46838 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46839 + *offset = off & 0x7fffffff;
46840 + return 0;
46841 + }
46842 + } else if (filldir(dirent, sfep->name, sfep->namelen,
46843 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46844 *offset = off & 0x7fffffff;
46845 return 0;
46846 diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46847 --- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46848 +++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46849 @@ -0,0 +1,105 @@
46850 +#include <linux/kernel.h>
46851 +#include <linux/mm.h>
46852 +#include <linux/slab.h>
46853 +#include <linux/vmalloc.h>
46854 +#include <linux/gracl.h>
46855 +#include <linux/grsecurity.h>
46856 +
46857 +static unsigned long alloc_stack_next = 1;
46858 +static unsigned long alloc_stack_size = 1;
46859 +static void **alloc_stack;
46860 +
46861 +static __inline__ int
46862 +alloc_pop(void)
46863 +{
46864 + if (alloc_stack_next == 1)
46865 + return 0;
46866 +
46867 + kfree(alloc_stack[alloc_stack_next - 2]);
46868 +
46869 + alloc_stack_next--;
46870 +
46871 + return 1;
46872 +}
46873 +
46874 +static __inline__ int
46875 +alloc_push(void *buf)
46876 +{
46877 + if (alloc_stack_next >= alloc_stack_size)
46878 + return 1;
46879 +
46880 + alloc_stack[alloc_stack_next - 1] = buf;
46881 +
46882 + alloc_stack_next++;
46883 +
46884 + return 0;
46885 +}
46886 +
46887 +void *
46888 +acl_alloc(unsigned long len)
46889 +{
46890 + void *ret = NULL;
46891 +
46892 + if (!len || len > PAGE_SIZE)
46893 + goto out;
46894 +
46895 + ret = kmalloc(len, GFP_KERNEL);
46896 +
46897 + if (ret) {
46898 + if (alloc_push(ret)) {
46899 + kfree(ret);
46900 + ret = NULL;
46901 + }
46902 + }
46903 +
46904 +out:
46905 + return ret;
46906 +}
46907 +
46908 +void *
46909 +acl_alloc_num(unsigned long num, unsigned long len)
46910 +{
46911 + if (!len || (num > (PAGE_SIZE / len)))
46912 + return NULL;
46913 +
46914 + return acl_alloc(num * len);
46915 +}
46916 +
46917 +void
46918 +acl_free_all(void)
46919 +{
46920 + if (gr_acl_is_enabled() || !alloc_stack)
46921 + return;
46922 +
46923 + while (alloc_pop()) ;
46924 +
46925 + if (alloc_stack) {
46926 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46927 + kfree(alloc_stack);
46928 + else
46929 + vfree(alloc_stack);
46930 + }
46931 +
46932 + alloc_stack = NULL;
46933 + alloc_stack_size = 1;
46934 + alloc_stack_next = 1;
46935 +
46936 + return;
46937 +}
46938 +
46939 +int
46940 +acl_alloc_stack_init(unsigned long size)
46941 +{
46942 + if ((size * sizeof (void *)) <= PAGE_SIZE)
46943 + alloc_stack =
46944 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46945 + else
46946 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
46947 +
46948 + alloc_stack_size = size;
46949 +
46950 + if (!alloc_stack)
46951 + return 0;
46952 + else
46953 + return 1;
46954 +}
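gracl_alloc.c implements a one-shot allocation pool for policy loading: every acl_alloc() is capped at a page and pushed onto a preallocated pointer stack (itself kmalloc'd or vmalloc'd depending on size), so a failed or retired policy can be torn down by acl_free_all() without tracking individual owners. A rough userspace analogue of the pattern, with invented names, purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical analogue of acl_alloc()/acl_free_all(): remember every
       allocation so the whole set can be released in one pass. */
    static void **stack;
    static size_t stack_next, stack_size;

    static int pool_init(size_t n)
    {
            stack = calloc(n, sizeof(void *));
            if (!stack)
                    return 0;
            stack_size = n;
            stack_next = 0;
            return 1;
    }

    static void *pool_alloc(size_t len)
    {
            void *p;
            if (stack_next >= stack_size)
                    return NULL;            /* stack full: refuse, caller aborts the load */
            p = malloc(len);
            if (p)
                    stack[stack_next++] = p;
            return p;
    }

    static void pool_free_all(void)
    {
            while (stack_next)
                    free(stack[--stack_next]);
            free(stack);
            stack = NULL;
            stack_size = 0;
    }

    int main(void)
    {
            if (!pool_init(8))
                    return 1;
            pool_alloc(16);
            pool_alloc(32);
            pool_free_all();                /* everything released in one call */
            return 0;
    }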
46955 diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46956 --- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46957 +++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46958 @@ -0,0 +1,4082 @@
46959 +#include <linux/kernel.h>
46960 +#include <linux/module.h>
46961 +#include <linux/sched.h>
46962 +#include <linux/mm.h>
46963 +#include <linux/file.h>
46964 +#include <linux/fs.h>
46965 +#include <linux/namei.h>
46966 +#include <linux/mount.h>
46967 +#include <linux/tty.h>
46968 +#include <linux/proc_fs.h>
46969 +#include <linux/smp_lock.h>
46970 +#include <linux/slab.h>
46971 +#include <linux/vmalloc.h>
46972 +#include <linux/types.h>
46973 +#include <linux/sysctl.h>
46974 +#include <linux/netdevice.h>
46975 +#include <linux/ptrace.h>
46976 +#include <linux/gracl.h>
46977 +#include <linux/gralloc.h>
46978 +#include <linux/grsecurity.h>
46979 +#include <linux/grinternal.h>
46980 +#include <linux/pid_namespace.h>
46981 +#include <linux/fdtable.h>
46982 +#include <linux/percpu.h>
46983 +
46984 +#include <asm/uaccess.h>
46985 +#include <asm/errno.h>
46986 +#include <asm/mman.h>
46987 +
46988 +static struct acl_role_db acl_role_set;
46989 +static struct name_db name_set;
46990 +static struct inodev_db inodev_set;
46991 +
46992 +/* for keeping track of userspace pointers used for subjects, so we
46993 + can share references in the kernel as well
46994 +*/
46995 +
46996 +static struct dentry *real_root;
46997 +static struct vfsmount *real_root_mnt;
46998 +
46999 +static struct acl_subj_map_db subj_map_set;
47000 +
47001 +static struct acl_role_label *default_role;
47002 +
47003 +static struct acl_role_label *role_list;
47004 +
47005 +static u16 acl_sp_role_value;
47006 +
47007 +extern char *gr_shared_page[4];
47008 +static DEFINE_MUTEX(gr_dev_mutex);
47009 +DEFINE_RWLOCK(gr_inode_lock);
47010 +
47011 +struct gr_arg *gr_usermode;
47012 +
47013 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
47014 +
47015 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47016 +extern void gr_clear_learn_entries(void);
47017 +
47018 +#ifdef CONFIG_GRKERNSEC_RESLOG
47019 +extern void gr_log_resource(const struct task_struct *task,
47020 + const int res, const unsigned long wanted, const int gt);
47021 +#endif
47022 +
47023 +unsigned char *gr_system_salt;
47024 +unsigned char *gr_system_sum;
47025 +
47026 +static struct sprole_pw **acl_special_roles = NULL;
47027 +static __u16 num_sprole_pws = 0;
47028 +
47029 +static struct acl_role_label *kernel_role = NULL;
47030 +
47031 +static unsigned int gr_auth_attempts = 0;
47032 +static unsigned long gr_auth_expires = 0UL;
47033 +
47034 +#ifdef CONFIG_NET
47035 +extern struct vfsmount *sock_mnt;
47036 +#endif
47037 +extern struct vfsmount *pipe_mnt;
47038 +extern struct vfsmount *shm_mnt;
47039 +#ifdef CONFIG_HUGETLBFS
47040 +extern struct vfsmount *hugetlbfs_vfsmount;
47041 +#endif
47042 +
47043 +static struct acl_object_label *fakefs_obj_rw;
47044 +static struct acl_object_label *fakefs_obj_rwx;
47045 +
47046 +extern int gr_init_uidset(void);
47047 +extern void gr_free_uidset(void);
47048 +extern void gr_remove_uid(uid_t uid);
47049 +extern int gr_find_uid(uid_t uid);
47050 +
47051 +__inline__ int
47052 +gr_acl_is_enabled(void)
47053 +{
47054 + return (gr_status & GR_READY);
47055 +}
47056 +
47057 +#ifdef CONFIG_BTRFS_FS
47058 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47059 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47060 +#endif
47061 +
47062 +static inline dev_t __get_dev(const struct dentry *dentry)
47063 +{
47064 +#ifdef CONFIG_BTRFS_FS
47065 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47066 + return get_btrfs_dev_from_inode(dentry->d_inode);
47067 + else
47068 +#endif
47069 + return dentry->d_inode->i_sb->s_dev;
47070 +}
47071 +
47072 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47073 +{
47074 + return __get_dev(dentry);
47075 +}
47076 +
47077 +static char gr_task_roletype_to_char(struct task_struct *task)
47078 +{
47079 + switch (task->role->roletype &
47080 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47081 + GR_ROLE_SPECIAL)) {
47082 + case GR_ROLE_DEFAULT:
47083 + return 'D';
47084 + case GR_ROLE_USER:
47085 + return 'U';
47086 + case GR_ROLE_GROUP:
47087 + return 'G';
47088 + case GR_ROLE_SPECIAL:
47089 + return 'S';
47090 + }
47091 +
47092 + return 'X';
47093 +}
47094 +
47095 +char gr_roletype_to_char(void)
47096 +{
47097 + return gr_task_roletype_to_char(current);
47098 +}
47099 +
47100 +__inline__ int
47101 +gr_acl_tpe_check(void)
47102 +{
47103 + if (unlikely(!(gr_status & GR_READY)))
47104 + return 0;
47105 + if (current->role->roletype & GR_ROLE_TPE)
47106 + return 1;
47107 + else
47108 + return 0;
47109 +}
47110 +
47111 +int
47112 +gr_handle_rawio(const struct inode *inode)
47113 +{
47114 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47115 + if (inode && S_ISBLK(inode->i_mode) &&
47116 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47117 + !capable(CAP_SYS_RAWIO))
47118 + return 1;
47119 +#endif
47120 + return 0;
47121 +}
47122 +
47123 +static int
47124 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47125 +{
47126 + if (likely(lena != lenb))
47127 + return 0;
47128 +
47129 + return !memcmp(a, b, lena);
47130 +}
47131 +
47132 +/* this must be called with vfsmount_lock and dcache_lock held */
47133 +
47134 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47135 + struct dentry *root, struct vfsmount *rootmnt,
47136 + char *buffer, int buflen)
47137 +{
47138 + char * end = buffer+buflen;
47139 + char * retval;
47140 + int namelen;
47141 +
47142 + *--end = '\0';
47143 + buflen--;
47144 +
47145 + if (buflen < 1)
47146 + goto Elong;
47147 + /* Get '/' right */
47148 + retval = end-1;
47149 + *retval = '/';
47150 +
47151 + for (;;) {
47152 + struct dentry * parent;
47153 +
47154 + if (dentry == root && vfsmnt == rootmnt)
47155 + break;
47156 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47157 + /* Global root? */
47158 + if (vfsmnt->mnt_parent == vfsmnt)
47159 + goto global_root;
47160 + dentry = vfsmnt->mnt_mountpoint;
47161 + vfsmnt = vfsmnt->mnt_parent;
47162 + continue;
47163 + }
47164 + parent = dentry->d_parent;
47165 + prefetch(parent);
47166 + namelen = dentry->d_name.len;
47167 + buflen -= namelen + 1;
47168 + if (buflen < 0)
47169 + goto Elong;
47170 + end -= namelen;
47171 + memcpy(end, dentry->d_name.name, namelen);
47172 + *--end = '/';
47173 + retval = end;
47174 + dentry = parent;
47175 + }
47176 +
47177 +out:
47178 + return retval;
47179 +
47180 +global_root:
47181 + namelen = dentry->d_name.len;
47182 + buflen -= namelen;
47183 + if (buflen < 0)
47184 + goto Elong;
47185 + retval -= namelen-1; /* hit the slash */
47186 + memcpy(retval, dentry->d_name.name, namelen);
47187 + goto out;
47188 +Elong:
47189 + retval = ERR_PTR(-ENAMETOOLONG);
47190 + goto out;
47191 +}
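__our_d_path() builds the pathname right to left: it starts at the end of the caller's buffer, prepends one dentry name per loop iteration, hops across mount points via mnt_mountpoint/mnt_parent, and stops at the root it was given (the RBAC callers pass the root captured at policy-load time). A small userspace sketch of the prepend-into-the-buffer-tail technique, where a component array stands in for the ->d_parent chain:

    #include <stdio.h>
    #include <string.h>

    /* Prepend components (deepest first) into the tail of buf, in the same
       style as __our_d_path(). */
    static char *build_path(const char *const *comps, int n, char *buf, int buflen)
    {
            char *end = buf + buflen;
            int i;

            *--end = '\0';
            for (i = 0; i < n; i++) {
                    int len = strlen(comps[i]);
                    if (end - buf < len + 1)
                            return NULL;            /* would overflow: ENAMETOOLONG */
                    end -= len;
                    memcpy(end, comps[i], len);
                    *--end = '/';
            }
            return end;
    }

    int main(void)
    {
            const char *comps[] = { "passwd", "etc" };      /* deepest component first */
            char buf[64];
            puts(build_path(comps, 2, buf, sizeof(buf)));   /* prints /etc/passwd */
            return 0;
    }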
47192 +
47193 +static char *
47194 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47195 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47196 +{
47197 + char *retval;
47198 +
47199 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47200 + if (unlikely(IS_ERR(retval)))
47201 + retval = strcpy(buf, "<path too long>");
47202 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47203 + retval[1] = '\0';
47204 +
47205 + return retval;
47206 +}
47207 +
47208 +static char *
47209 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47210 + char *buf, int buflen)
47211 +{
47212 + char *res;
47213 +
47214 + /* we can use real_root, real_root_mnt, because this is only called
47215 + by the RBAC system */
47216 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47217 +
47218 + return res;
47219 +}
47220 +
47221 +static char *
47222 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47223 + char *buf, int buflen)
47224 +{
47225 + char *res;
47226 + struct dentry *root;
47227 + struct vfsmount *rootmnt;
47228 + struct task_struct *reaper = &init_task;
47229 +
47230 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47231 + read_lock(&reaper->fs->lock);
47232 + root = dget(reaper->fs->root.dentry);
47233 + rootmnt = mntget(reaper->fs->root.mnt);
47234 + read_unlock(&reaper->fs->lock);
47235 +
47236 + spin_lock(&dcache_lock);
47237 + spin_lock(&vfsmount_lock);
47238 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47239 + spin_unlock(&vfsmount_lock);
47240 + spin_unlock(&dcache_lock);
47241 +
47242 + dput(root);
47243 + mntput(rootmnt);
47244 + return res;
47245 +}
47246 +
47247 +static char *
47248 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47249 +{
47250 + char *ret;
47251 + spin_lock(&dcache_lock);
47252 + spin_lock(&vfsmount_lock);
47253 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47254 + PAGE_SIZE);
47255 + spin_unlock(&vfsmount_lock);
47256 + spin_unlock(&dcache_lock);
47257 + return ret;
47258 +}
47259 +
47260 +char *
47261 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47262 +{
47263 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47264 + PAGE_SIZE);
47265 +}
47266 +
47267 +char *
47268 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47269 +{
47270 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47271 + PAGE_SIZE);
47272 +}
47273 +
47274 +char *
47275 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47276 +{
47277 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47278 + PAGE_SIZE);
47279 +}
47280 +
47281 +char *
47282 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47283 +{
47284 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47285 + PAGE_SIZE);
47286 +}
47287 +
47288 +char *
47289 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47290 +{
47291 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47292 + PAGE_SIZE);
47293 +}
47294 +
47295 +__inline__ __u32
47296 +to_gr_audit(const __u32 reqmode)
47297 +{
47298 + /* masks off auditable permission flags, then shifts them to create
47299 + auditing flags, and adds the special case of append auditing if
47300 + we're requesting write */
47301 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47302 +}
47303 +
47304 +struct acl_subject_label *
47305 +lookup_subject_map(const struct acl_subject_label *userp)
47306 +{
47307 + unsigned int index = shash(userp, subj_map_set.s_size);
47308 + struct subject_map *match;
47309 +
47310 + match = subj_map_set.s_hash[index];
47311 +
47312 + while (match && match->user != userp)
47313 + match = match->next;
47314 +
47315 + if (match != NULL)
47316 + return match->kernel;
47317 + else
47318 + return NULL;
47319 +}
47320 +
47321 +static void
47322 +insert_subj_map_entry(struct subject_map *subjmap)
47323 +{
47324 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47325 + struct subject_map **curr;
47326 +
47327 + subjmap->prev = NULL;
47328 +
47329 + curr = &subj_map_set.s_hash[index];
47330 + if (*curr != NULL)
47331 + (*curr)->prev = subjmap;
47332 +
47333 + subjmap->next = *curr;
47334 + *curr = subjmap;
47335 +
47336 + return;
47337 +}
47338 +
47339 +static struct acl_role_label *
47340 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47341 + const gid_t gid)
47342 +{
47343 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47344 + struct acl_role_label *match;
47345 + struct role_allowed_ip *ipp;
47346 + unsigned int x;
47347 + u32 curr_ip = task->signal->curr_ip;
47348 +
47349 + task->signal->saved_ip = curr_ip;
47350 +
47351 + match = acl_role_set.r_hash[index];
47352 +
47353 + while (match) {
47354 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47355 + for (x = 0; x < match->domain_child_num; x++) {
47356 + if (match->domain_children[x] == uid)
47357 + goto found;
47358 + }
47359 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47360 + break;
47361 + match = match->next;
47362 + }
47363 +found:
47364 + if (match == NULL) {
47365 + try_group:
47366 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47367 + match = acl_role_set.r_hash[index];
47368 +
47369 + while (match) {
47370 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47371 + for (x = 0; x < match->domain_child_num; x++) {
47372 + if (match->domain_children[x] == gid)
47373 + goto found2;
47374 + }
47375 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47376 + break;
47377 + match = match->next;
47378 + }
47379 +found2:
47380 + if (match == NULL)
47381 + match = default_role;
47382 + if (match->allowed_ips == NULL)
47383 + return match;
47384 + else {
47385 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47386 + if (likely
47387 + ((ntohl(curr_ip) & ipp->netmask) ==
47388 + (ntohl(ipp->addr) & ipp->netmask)))
47389 + return match;
47390 + }
47391 + match = default_role;
47392 + }
47393 + } else if (match->allowed_ips == NULL) {
47394 + return match;
47395 + } else {
47396 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47397 + if (likely
47398 + ((ntohl(curr_ip) & ipp->netmask) ==
47399 + (ntohl(ipp->addr) & ipp->netmask)))
47400 + return match;
47401 + }
47402 + goto try_group;
47403 + }
47404 +
47405 + return match;
47406 +}
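lookup_acl_role_label() resolves the task's role by UID first (including domain roles, whose domain_children list is scanned), then falls back to a GID lookup, then to the default role; a role that carries an allowed_ips list only applies when the task's recorded source address falls inside one of the listed networks. That network test is a plain mask-and-compare on host-order addresses; a standalone sketch with arbitrarily chosen addresses:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Same test as the allowed_ips loop: does addr sit inside net/mask? */
    static int ip_in_net(uint32_t addr_be, uint32_t net_be, uint32_t mask_host)
    {
            return (ntohl(addr_be) & mask_host) == (ntohl(net_be) & mask_host);
    }

    int main(void)
    {
            uint32_t addr = inet_addr("192.168.1.77");
            uint32_t net  = inet_addr("192.168.1.0");
            printf("%d\n", ip_in_net(addr, net, 0xffffff00u));   /* 1: inside the /24 */
            printf("%d\n", ip_in_net(addr, net, 0xffffffffu));   /* 0: not that exact host */
            return 0;
    }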
47407 +
47408 +struct acl_subject_label *
47409 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47410 + const struct acl_role_label *role)
47411 +{
47412 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47413 + struct acl_subject_label *match;
47414 +
47415 + match = role->subj_hash[index];
47416 +
47417 + while (match && (match->inode != ino || match->device != dev ||
47418 + (match->mode & GR_DELETED))) {
47419 + match = match->next;
47420 + }
47421 +
47422 + if (match && !(match->mode & GR_DELETED))
47423 + return match;
47424 + else
47425 + return NULL;
47426 +}
47427 +
47428 +struct acl_subject_label *
47429 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47430 + const struct acl_role_label *role)
47431 +{
47432 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47433 + struct acl_subject_label *match;
47434 +
47435 + match = role->subj_hash[index];
47436 +
47437 + while (match && (match->inode != ino || match->device != dev ||
47438 + !(match->mode & GR_DELETED))) {
47439 + match = match->next;
47440 + }
47441 +
47442 + if (match && (match->mode & GR_DELETED))
47443 + return match;
47444 + else
47445 + return NULL;
47446 +}
47447 +
47448 +static struct acl_object_label *
47449 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47450 + const struct acl_subject_label *subj)
47451 +{
47452 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47453 + struct acl_object_label *match;
47454 +
47455 + match = subj->obj_hash[index];
47456 +
47457 + while (match && (match->inode != ino || match->device != dev ||
47458 + (match->mode & GR_DELETED))) {
47459 + match = match->next;
47460 + }
47461 +
47462 + if (match && !(match->mode & GR_DELETED))
47463 + return match;
47464 + else
47465 + return NULL;
47466 +}
47467 +
47468 +static struct acl_object_label *
47469 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47470 + const struct acl_subject_label *subj)
47471 +{
47472 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47473 + struct acl_object_label *match;
47474 +
47475 + match = subj->obj_hash[index];
47476 +
47477 + while (match && (match->inode != ino || match->device != dev ||
47478 + !(match->mode & GR_DELETED))) {
47479 + match = match->next;
47480 + }
47481 +
47482 + if (match && (match->mode & GR_DELETED))
47483 + return match;
47484 +
47485 + match = subj->obj_hash[index];
47486 +
47487 + while (match && (match->inode != ino || match->device != dev ||
47488 + (match->mode & GR_DELETED))) {
47489 + match = match->next;
47490 + }
47491 +
47492 + if (match && !(match->mode & GR_DELETED))
47493 + return match;
47494 + else
47495 + return NULL;
47496 +}
47497 +
47498 +static struct name_entry *
47499 +lookup_name_entry(const char *name)
47500 +{
47501 + unsigned int len = strlen(name);
47502 + unsigned int key = full_name_hash(name, len);
47503 + unsigned int index = key % name_set.n_size;
47504 + struct name_entry *match;
47505 +
47506 + match = name_set.n_hash[index];
47507 +
47508 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47509 + match = match->next;
47510 +
47511 + return match;
47512 +}
47513 +
47514 +static struct name_entry *
47515 +lookup_name_entry_create(const char *name)
47516 +{
47517 + unsigned int len = strlen(name);
47518 + unsigned int key = full_name_hash(name, len);
47519 + unsigned int index = key % name_set.n_size;
47520 + struct name_entry *match;
47521 +
47522 + match = name_set.n_hash[index];
47523 +
47524 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47525 + !match->deleted))
47526 + match = match->next;
47527 +
47528 + if (match && match->deleted)
47529 + return match;
47530 +
47531 + match = name_set.n_hash[index];
47532 +
47533 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47534 + match->deleted))
47535 + match = match->next;
47536 +
47537 + if (match && !match->deleted)
47538 + return match;
47539 + else
47540 + return NULL;
47541 +}
47542 +
47543 +static struct inodev_entry *
47544 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47545 +{
47546 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47547 + struct inodev_entry *match;
47548 +
47549 + match = inodev_set.i_hash[index];
47550 +
47551 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47552 + match = match->next;
47553 +
47554 + return match;
47555 +}
47556 +
47557 +static void
47558 +insert_inodev_entry(struct inodev_entry *entry)
47559 +{
47560 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47561 + inodev_set.i_size);
47562 + struct inodev_entry **curr;
47563 +
47564 + entry->prev = NULL;
47565 +
47566 + curr = &inodev_set.i_hash[index];
47567 + if (*curr != NULL)
47568 + (*curr)->prev = entry;
47569 +
47570 + entry->next = *curr;
47571 + *curr = entry;
47572 +
47573 + return;
47574 +}
47575 +
47576 +static void
47577 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47578 +{
47579 + unsigned int index =
47580 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47581 + struct acl_role_label **curr;
47582 + struct acl_role_label *tmp;
47583 +
47584 + curr = &acl_role_set.r_hash[index];
47585 +
47586 + /* if role was already inserted due to domains and already has
47587 + a role in the same bucket as it attached, then we need to
47588 + combine these two buckets
47589 + */
47590 + if (role->next) {
47591 + tmp = role->next;
47592 + while (tmp->next)
47593 + tmp = tmp->next;
47594 + tmp->next = *curr;
47595 + } else
47596 + role->next = *curr;
47597 + *curr = role;
47598 +
47599 + return;
47600 +}
47601 +
47602 +static void
47603 +insert_acl_role_label(struct acl_role_label *role)
47604 +{
47605 + int i;
47606 +
47607 + if (role_list == NULL) {
47608 + role_list = role;
47609 + role->prev = NULL;
47610 + } else {
47611 + role->prev = role_list;
47612 + role_list = role;
47613 + }
47614 +
47615 + /* used for hash chains */
47616 + role->next = NULL;
47617 +
47618 + if (role->roletype & GR_ROLE_DOMAIN) {
47619 + for (i = 0; i < role->domain_child_num; i++)
47620 + __insert_acl_role_label(role, role->domain_children[i]);
47621 + } else
47622 + __insert_acl_role_label(role, role->uidgid);
47623 +}
47624 +
47625 +static int
47626 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47627 +{
47628 + struct name_entry **curr, *nentry;
47629 + struct inodev_entry *ientry;
47630 + unsigned int len = strlen(name);
47631 + unsigned int key = full_name_hash(name, len);
47632 + unsigned int index = key % name_set.n_size;
47633 +
47634 + curr = &name_set.n_hash[index];
47635 +
47636 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47637 + curr = &((*curr)->next);
47638 +
47639 + if (*curr != NULL)
47640 + return 1;
47641 +
47642 + nentry = acl_alloc(sizeof (struct name_entry));
47643 + if (nentry == NULL)
47644 + return 0;
47645 + ientry = acl_alloc(sizeof (struct inodev_entry));
47646 + if (ientry == NULL)
47647 + return 0;
47648 + ientry->nentry = nentry;
47649 +
47650 + nentry->key = key;
47651 + nentry->name = name;
47652 + nentry->inode = inode;
47653 + nentry->device = device;
47654 + nentry->len = len;
47655 + nentry->deleted = deleted;
47656 +
47657 + nentry->prev = NULL;
47658 + curr = &name_set.n_hash[index];
47659 + if (*curr != NULL)
47660 + (*curr)->prev = nentry;
47661 + nentry->next = *curr;
47662 + *curr = nentry;
47663 +
47664 + /* insert us into the table searchable by inode/dev */
47665 + insert_inodev_entry(ientry);
47666 +
47667 + return 1;
47668 +}
47669 +
47670 +static void
47671 +insert_acl_obj_label(struct acl_object_label *obj,
47672 + struct acl_subject_label *subj)
47673 +{
47674 + unsigned int index =
47675 + fhash(obj->inode, obj->device, subj->obj_hash_size);
47676 + struct acl_object_label **curr;
47677 +
47678 +
47679 + obj->prev = NULL;
47680 +
47681 + curr = &subj->obj_hash[index];
47682 + if (*curr != NULL)
47683 + (*curr)->prev = obj;
47684 +
47685 + obj->next = *curr;
47686 + *curr = obj;
47687 +
47688 + return;
47689 +}
47690 +
47691 +static void
47692 +insert_acl_subj_label(struct acl_subject_label *obj,
47693 + struct acl_role_label *role)
47694 +{
47695 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47696 + struct acl_subject_label **curr;
47697 +
47698 + obj->prev = NULL;
47699 +
47700 + curr = &role->subj_hash[index];
47701 + if (*curr != NULL)
47702 + (*curr)->prev = obj;
47703 +
47704 + obj->next = *curr;
47705 + *curr = obj;
47706 +
47707 + return;
47708 +}
47709 +
47710 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47711 +
47712 +static void *
47713 +create_table(__u32 * len, int elementsize)
47714 +{
47715 + unsigned int table_sizes[] = {
47716 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47717 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47718 + 4194301, 8388593, 16777213, 33554393, 67108859
47719 + };
47720 + void *newtable = NULL;
47721 + unsigned int pwr = 0;
47722 +
47723 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47724 + table_sizes[pwr] <= *len)
47725 + pwr++;
47726 +
47727 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47728 + return newtable;
47729 +
47730 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47731 + newtable =
47732 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47733 + else
47734 + newtable = vmalloc(table_sizes[pwr] * elementsize);
47735 +
47736 + *len = table_sizes[pwr];
47737 +
47738 + return newtable;
47739 +}
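create_table() sizes every chained hash table from a fixed list of primes, choosing the first prime strictly larger than the expected element count so the load factor stays near one and chains stay short, and bailing out if even the largest prime is too small or the byte size would overflow. A compact sketch of just the size-selection step, with the prime list abbreviated:

    #include <stdio.h>

    /* Pick the first prime strictly larger than want, as create_table() does;
       returns 0 when the (abbreviated) list is exhausted. */
    static unsigned int pick_size(unsigned int want)
    {
            static const unsigned int primes[] = { 7, 13, 31, 61, 127, 251, 509, 1021 };
            unsigned int i;

            for (i = 0; i + 1 < sizeof(primes) / sizeof(primes[0]) && primes[i] <= want; i++)
                    ;
            return primes[i] <= want ? 0 : primes[i];
    }

    int main(void)
    {
            printf("%u\n", pick_size(100));    /* 127 */
            printf("%u\n", pick_size(5000));   /* 0: abbreviated list exhausted */
            return 0;
    }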
47740 +
47741 +static int
47742 +init_variables(const struct gr_arg *arg)
47743 +{
47744 + struct task_struct *reaper = &init_task;
47745 + unsigned int stacksize;
47746 +
47747 + subj_map_set.s_size = arg->role_db.num_subjects;
47748 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47749 + name_set.n_size = arg->role_db.num_objects;
47750 + inodev_set.i_size = arg->role_db.num_objects;
47751 +
47752 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
47753 + !name_set.n_size || !inodev_set.i_size)
47754 + return 1;
47755 +
47756 + if (!gr_init_uidset())
47757 + return 1;
47758 +
47759 + /* set up the stack that holds allocation info */
47760 +
47761 + stacksize = arg->role_db.num_pointers + 5;
47762 +
47763 + if (!acl_alloc_stack_init(stacksize))
47764 + return 1;
47765 +
47766 + /* grab reference for the real root dentry and vfsmount */
47767 + read_lock(&reaper->fs->lock);
47768 + real_root = dget(reaper->fs->root.dentry);
47769 + real_root_mnt = mntget(reaper->fs->root.mnt);
47770 + read_unlock(&reaper->fs->lock);
47771 +
47772 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47773 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47774 +#endif
47775 +
47776 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47777 + if (fakefs_obj_rw == NULL)
47778 + return 1;
47779 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47780 +
47781 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47782 + if (fakefs_obj_rwx == NULL)
47783 + return 1;
47784 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47785 +
47786 + subj_map_set.s_hash =
47787 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47788 + acl_role_set.r_hash =
47789 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47790 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47791 + inodev_set.i_hash =
47792 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47793 +
47794 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47795 + !name_set.n_hash || !inodev_set.i_hash)
47796 + return 1;
47797 +
47798 + memset(subj_map_set.s_hash, 0,
47799 + sizeof(struct subject_map *) * subj_map_set.s_size);
47800 + memset(acl_role_set.r_hash, 0,
47801 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
47802 + memset(name_set.n_hash, 0,
47803 + sizeof (struct name_entry *) * name_set.n_size);
47804 + memset(inodev_set.i_hash, 0,
47805 + sizeof (struct inodev_entry *) * inodev_set.i_size);
47806 +
47807 + return 0;
47808 +}
47809 +
47810 +/* free information not needed after startup
47811 + currently contains user->kernel pointer mappings for subjects
47812 +*/
47813 +
47814 +static void
47815 +free_init_variables(void)
47816 +{
47817 + __u32 i;
47818 +
47819 + if (subj_map_set.s_hash) {
47820 + for (i = 0; i < subj_map_set.s_size; i++) {
47821 + if (subj_map_set.s_hash[i]) {
47822 + kfree(subj_map_set.s_hash[i]);
47823 + subj_map_set.s_hash[i] = NULL;
47824 + }
47825 + }
47826 +
47827 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47828 + PAGE_SIZE)
47829 + kfree(subj_map_set.s_hash);
47830 + else
47831 + vfree(subj_map_set.s_hash);
47832 + }
47833 +
47834 + return;
47835 +}
47836 +
47837 +static void
47838 +free_variables(void)
47839 +{
47840 + struct acl_subject_label *s;
47841 + struct acl_role_label *r;
47842 + struct task_struct *task, *task2;
47843 + unsigned int x;
47844 +
47845 + gr_clear_learn_entries();
47846 +
47847 + read_lock(&tasklist_lock);
47848 + do_each_thread(task2, task) {
47849 + task->acl_sp_role = 0;
47850 + task->acl_role_id = 0;
47851 + task->acl = NULL;
47852 + task->role = NULL;
47853 + } while_each_thread(task2, task);
47854 + read_unlock(&tasklist_lock);
47855 +
47856 + /* release the reference to the real root dentry and vfsmount */
47857 + if (real_root)
47858 + dput(real_root);
47859 + real_root = NULL;
47860 + if (real_root_mnt)
47861 + mntput(real_root_mnt);
47862 + real_root_mnt = NULL;
47863 +
47864 + /* free all object hash tables */
47865 +
47866 + FOR_EACH_ROLE_START(r)
47867 + if (r->subj_hash == NULL)
47868 + goto next_role;
47869 + FOR_EACH_SUBJECT_START(r, s, x)
47870 + if (s->obj_hash == NULL)
47871 + break;
47872 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47873 + kfree(s->obj_hash);
47874 + else
47875 + vfree(s->obj_hash);
47876 + FOR_EACH_SUBJECT_END(s, x)
47877 + FOR_EACH_NESTED_SUBJECT_START(r, s)
47878 + if (s->obj_hash == NULL)
47879 + break;
47880 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47881 + kfree(s->obj_hash);
47882 + else
47883 + vfree(s->obj_hash);
47884 + FOR_EACH_NESTED_SUBJECT_END(s)
47885 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47886 + kfree(r->subj_hash);
47887 + else
47888 + vfree(r->subj_hash);
47889 + r->subj_hash = NULL;
47890 +next_role:
47891 + FOR_EACH_ROLE_END(r)
47892 +
47893 + acl_free_all();
47894 +
47895 + if (acl_role_set.r_hash) {
47896 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47897 + PAGE_SIZE)
47898 + kfree(acl_role_set.r_hash);
47899 + else
47900 + vfree(acl_role_set.r_hash);
47901 + }
47902 + if (name_set.n_hash) {
47903 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
47904 + PAGE_SIZE)
47905 + kfree(name_set.n_hash);
47906 + else
47907 + vfree(name_set.n_hash);
47908 + }
47909 +
47910 + if (inodev_set.i_hash) {
47911 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47912 + PAGE_SIZE)
47913 + kfree(inodev_set.i_hash);
47914 + else
47915 + vfree(inodev_set.i_hash);
47916 + }
47917 +
47918 + gr_free_uidset();
47919 +
47920 + memset(&name_set, 0, sizeof (struct name_db));
47921 + memset(&inodev_set, 0, sizeof (struct inodev_db));
47922 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47923 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47924 +
47925 + default_role = NULL;
47926 + role_list = NULL;
47927 +
47928 + return;
47929 +}
47930 +
47931 +static __u32
47932 +count_user_objs(struct acl_object_label *userp)
47933 +{
47934 + struct acl_object_label o_tmp;
47935 + __u32 num = 0;
47936 +
47937 + while (userp) {
47938 + if (copy_from_user(&o_tmp, userp,
47939 + sizeof (struct acl_object_label)))
47940 + break;
47941 +
47942 + userp = o_tmp.prev;
47943 + num++;
47944 + }
47945 +
47946 + return num;
47947 +}
47948 +
47949 +static struct acl_subject_label *
47950 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47951 +
47952 +static int
47953 +copy_user_glob(struct acl_object_label *obj)
47954 +{
47955 + struct acl_object_label *g_tmp, **guser;
47956 + unsigned int len;
47957 + char *tmp;
47958 +
47959 + if (obj->globbed == NULL)
47960 + return 0;
47961 +
47962 + guser = &obj->globbed;
47963 + while (*guser) {
47964 + g_tmp = (struct acl_object_label *)
47965 + acl_alloc(sizeof (struct acl_object_label));
47966 + if (g_tmp == NULL)
47967 + return -ENOMEM;
47968 +
47969 + if (copy_from_user(g_tmp, *guser,
47970 + sizeof (struct acl_object_label)))
47971 + return -EFAULT;
47972 +
47973 + len = strnlen_user(g_tmp->filename, PATH_MAX);
47974 +
47975 + if (!len || len >= PATH_MAX)
47976 + return -EINVAL;
47977 +
47978 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47979 + return -ENOMEM;
47980 +
47981 + if (copy_from_user(tmp, g_tmp->filename, len))
47982 + return -EFAULT;
47983 + tmp[len-1] = '\0';
47984 + g_tmp->filename = tmp;
47985 +
47986 + *guser = g_tmp;
47987 + guser = &(g_tmp->next);
47988 + }
47989 +
47990 + return 0;
47991 +}
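copy_user_glob() and the other copy_user_* helpers follow one recipe for strings referenced from userspace structures: measure with strnlen_user(), reject empty or over-limit lengths, copy into an acl_alloc() buffer, and force a terminating NUL so the rest of the RBAC code can treat the name as well formed. A condensed kernel-style sketch of that recipe, not a function from the patch itself:

    /* Sketch only: copy a user string of at most `limit` bytes (NUL included)
       into a pool allocation, mirroring the pattern used throughout gracl.c. */
    static char *copy_user_string(const char __user *ustr, unsigned int limit)
    {
            unsigned int len = strnlen_user(ustr, limit);
            char *tmp;

            if (!len || len >= limit)          /* faulted, empty, or too long */
                    return ERR_PTR(-EINVAL);
            tmp = acl_alloc(len);
            if (tmp == NULL)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(tmp, ustr, len))
                    return ERR_PTR(-EFAULT);
            tmp[len - 1] = '\0';               /* never trust the user's terminator */
            return tmp;
    }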
47992 +
47993 +static int
47994 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47995 + struct acl_role_label *role)
47996 +{
47997 + struct acl_object_label *o_tmp;
47998 + unsigned int len;
47999 + int ret;
48000 + char *tmp;
48001 +
48002 + while (userp) {
48003 + if ((o_tmp = (struct acl_object_label *)
48004 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
48005 + return -ENOMEM;
48006 +
48007 + if (copy_from_user(o_tmp, userp,
48008 + sizeof (struct acl_object_label)))
48009 + return -EFAULT;
48010 +
48011 + userp = o_tmp->prev;
48012 +
48013 + len = strnlen_user(o_tmp->filename, PATH_MAX);
48014 +
48015 + if (!len || len >= PATH_MAX)
48016 + return -EINVAL;
48017 +
48018 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48019 + return -ENOMEM;
48020 +
48021 + if (copy_from_user(tmp, o_tmp->filename, len))
48022 + return -EFAULT;
48023 + tmp[len-1] = '\0';
48024 + o_tmp->filename = tmp;
48025 +
48026 + insert_acl_obj_label(o_tmp, subj);
48027 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48028 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48029 + return -ENOMEM;
48030 +
48031 + ret = copy_user_glob(o_tmp);
48032 + if (ret)
48033 + return ret;
48034 +
48035 + if (o_tmp->nested) {
48036 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48037 + if (IS_ERR(o_tmp->nested))
48038 + return PTR_ERR(o_tmp->nested);
48039 +
48040 + /* insert into nested subject list */
48041 + o_tmp->nested->next = role->hash->first;
48042 + role->hash->first = o_tmp->nested;
48043 + }
48044 + }
48045 +
48046 + return 0;
48047 +}
48048 +
48049 +static __u32
48050 +count_user_subjs(struct acl_subject_label *userp)
48051 +{
48052 + struct acl_subject_label s_tmp;
48053 + __u32 num = 0;
48054 +
48055 + while (userp) {
48056 + if (copy_from_user(&s_tmp, userp,
48057 + sizeof (struct acl_subject_label)))
48058 + break;
48059 +
48060 + userp = s_tmp.prev;
48061 + /* do not count nested subjects against this count, since
48062 + they are not included in the hash table, but are
48063 + attached to objects. We have already counted
48064 + the subjects in userspace for the allocation
48065 + stack
48066 + */
48067 + if (!(s_tmp.mode & GR_NESTED))
48068 + num++;
48069 + }
48070 +
48071 + return num;
48072 +}
48073 +
48074 +static int
48075 +copy_user_allowedips(struct acl_role_label *rolep)
48076 +{
48077 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48078 +
48079 + ruserip = rolep->allowed_ips;
48080 +
48081 + while (ruserip) {
48082 + rlast = rtmp;
48083 +
48084 + if ((rtmp = (struct role_allowed_ip *)
48085 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48086 + return -ENOMEM;
48087 +
48088 + if (copy_from_user(rtmp, ruserip,
48089 + sizeof (struct role_allowed_ip)))
48090 + return -EFAULT;
48091 +
48092 + ruserip = rtmp->prev;
48093 +
48094 + if (!rlast) {
48095 + rtmp->prev = NULL;
48096 + rolep->allowed_ips = rtmp;
48097 + } else {
48098 + rlast->next = rtmp;
48099 + rtmp->prev = rlast;
48100 + }
48101 +
48102 + if (!ruserip)
48103 + rtmp->next = NULL;
48104 + }
48105 +
48106 + return 0;
48107 +}
48108 +
48109 +static int
48110 +copy_user_transitions(struct acl_role_label *rolep)
48111 +{
48112 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
48113 +
48114 + unsigned int len;
48115 + char *tmp;
48116 +
48117 + rusertp = rolep->transitions;
48118 +
48119 + while (rusertp) {
48120 + rlast = rtmp;
48121 +
48122 + if ((rtmp = (struct role_transition *)
48123 + acl_alloc(sizeof (struct role_transition))) == NULL)
48124 + return -ENOMEM;
48125 +
48126 + if (copy_from_user(rtmp, rusertp,
48127 + sizeof (struct role_transition)))
48128 + return -EFAULT;
48129 +
48130 + rusertp = rtmp->prev;
48131 +
48132 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48133 +
48134 + if (!len || len >= GR_SPROLE_LEN)
48135 + return -EINVAL;
48136 +
48137 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48138 + return -ENOMEM;
48139 +
48140 + if (copy_from_user(tmp, rtmp->rolename, len))
48141 + return -EFAULT;
48142 + tmp[len-1] = '\0';
48143 + rtmp->rolename = tmp;
48144 +
48145 + if (!rlast) {
48146 + rtmp->prev = NULL;
48147 + rolep->transitions = rtmp;
48148 + } else {
48149 + rlast->next = rtmp;
48150 + rtmp->prev = rlast;
48151 + }
48152 +
48153 + if (!rusertp)
48154 + rtmp->next = NULL;
48155 + }
48156 +
48157 + return 0;
48158 +}
48159 +
48160 +static struct acl_subject_label *
48161 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48162 +{
48163 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48164 + unsigned int len;
48165 + char *tmp;
48166 + __u32 num_objs;
48167 + struct acl_ip_label **i_tmp, *i_utmp2;
48168 + struct gr_hash_struct ghash;
48169 + struct subject_map *subjmap;
48170 + unsigned int i_num;
48171 + int err;
48172 +
48173 + s_tmp = lookup_subject_map(userp);
48174 +
48175 + /* we've already copied this subject into the kernel, just return
48176 + the reference to it, and don't copy it over again
48177 + */
48178 + if (s_tmp)
48179 + return(s_tmp);
48180 +
48181 + if ((s_tmp = (struct acl_subject_label *)
48182 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48183 + return ERR_PTR(-ENOMEM);
48184 +
48185 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48186 + if (subjmap == NULL)
48187 + return ERR_PTR(-ENOMEM);
48188 +
48189 + subjmap->user = userp;
48190 + subjmap->kernel = s_tmp;
48191 + insert_subj_map_entry(subjmap);
48192 +
48193 + if (copy_from_user(s_tmp, userp,
48194 + sizeof (struct acl_subject_label)))
48195 + return ERR_PTR(-EFAULT);
48196 +
48197 + len = strnlen_user(s_tmp->filename, PATH_MAX);
48198 +
48199 + if (!len || len >= PATH_MAX)
48200 + return ERR_PTR(-EINVAL);
48201 +
48202 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48203 + return ERR_PTR(-ENOMEM);
48204 +
48205 + if (copy_from_user(tmp, s_tmp->filename, len))
48206 + return ERR_PTR(-EFAULT);
48207 + tmp[len-1] = '\0';
48208 + s_tmp->filename = tmp;
48209 +
48210 + if (!strcmp(s_tmp->filename, "/"))
48211 + role->root_label = s_tmp;
48212 +
48213 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48214 + return ERR_PTR(-EFAULT);
48215 +
48216 + /* copy user and group transition tables */
48217 +
48218 + if (s_tmp->user_trans_num) {
48219 + uid_t *uidlist;
48220 +
48221 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48222 + if (uidlist == NULL)
48223 + return ERR_PTR(-ENOMEM);
48224 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48225 + return ERR_PTR(-EFAULT);
48226 +
48227 + s_tmp->user_transitions = uidlist;
48228 + }
48229 +
48230 + if (s_tmp->group_trans_num) {
48231 + gid_t *gidlist;
48232 +
48233 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48234 + if (gidlist == NULL)
48235 + return ERR_PTR(-ENOMEM);
48236 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48237 + return ERR_PTR(-EFAULT);
48238 +
48239 + s_tmp->group_transitions = gidlist;
48240 + }
48241 +
48242 + /* set up object hash table */
48243 + num_objs = count_user_objs(ghash.first);
48244 +
48245 + s_tmp->obj_hash_size = num_objs;
48246 + s_tmp->obj_hash =
48247 + (struct acl_object_label **)
48248 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48249 +
48250 + if (!s_tmp->obj_hash)
48251 + return ERR_PTR(-ENOMEM);
48252 +
48253 + memset(s_tmp->obj_hash, 0,
48254 + s_tmp->obj_hash_size *
48255 + sizeof (struct acl_object_label *));
48256 +
48257 + /* add in objects */
48258 + err = copy_user_objs(ghash.first, s_tmp, role);
48259 +
48260 + if (err)
48261 + return ERR_PTR(err);
48262 +
48263 + /* set pointer for parent subject */
48264 + if (s_tmp->parent_subject) {
48265 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48266 +
48267 + if (IS_ERR(s_tmp2))
48268 + return s_tmp2;
48269 +
48270 + s_tmp->parent_subject = s_tmp2;
48271 + }
48272 +
48273 + /* add in ip acls */
48274 +
48275 + if (!s_tmp->ip_num) {
48276 + s_tmp->ips = NULL;
48277 + goto insert;
48278 + }
48279 +
48280 + i_tmp =
48281 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48282 + sizeof (struct acl_ip_label *));
48283 +
48284 + if (!i_tmp)
48285 + return ERR_PTR(-ENOMEM);
48286 +
48287 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48288 + *(i_tmp + i_num) =
48289 + (struct acl_ip_label *)
48290 + acl_alloc(sizeof (struct acl_ip_label));
48291 + if (!*(i_tmp + i_num))
48292 + return ERR_PTR(-ENOMEM);
48293 +
48294 + if (copy_from_user
48295 + (&i_utmp2, s_tmp->ips + i_num,
48296 + sizeof (struct acl_ip_label *)))
48297 + return ERR_PTR(-EFAULT);
48298 +
48299 + if (copy_from_user
48300 + (*(i_tmp + i_num), i_utmp2,
48301 + sizeof (struct acl_ip_label)))
48302 + return ERR_PTR(-EFAULT);
48303 +
48304 + if ((*(i_tmp + i_num))->iface == NULL)
48305 + continue;
48306 +
48307 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48308 + if (!len || len >= IFNAMSIZ)
48309 + return ERR_PTR(-EINVAL);
48310 + tmp = acl_alloc(len);
48311 + if (tmp == NULL)
48312 + return ERR_PTR(-ENOMEM);
48313 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48314 + return ERR_PTR(-EFAULT);
48315 + (*(i_tmp + i_num))->iface = tmp;
48316 + }
48317 +
48318 + s_tmp->ips = i_tmp;
48319 +
48320 +insert:
48321 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48322 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48323 + return ERR_PTR(-ENOMEM);
48324 +
48325 + return s_tmp;
48326 +}
48327 +
48328 +static int
48329 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48330 +{
48331 + struct acl_subject_label s_pre;
48332 + struct acl_subject_label * ret;
48333 + int err;
48334 +
48335 + while (userp) {
48336 + if (copy_from_user(&s_pre, userp,
48337 + sizeof (struct acl_subject_label)))
48338 + return -EFAULT;
48339 +
48340 + /* do not add nested subjects here, add
48341 + while parsing objects
48342 + */
48343 +
48344 + if (s_pre.mode & GR_NESTED) {
48345 + userp = s_pre.prev;
48346 + continue;
48347 + }
48348 +
48349 + ret = do_copy_user_subj(userp, role);
48350 +
48351 + err = PTR_ERR(ret);
48352 + if (IS_ERR(ret))
48353 + return err;
48354 +
48355 + insert_acl_subj_label(ret, role);
48356 +
48357 + userp = s_pre.prev;
48358 + }
48359 +
48360 + return 0;
48361 +}
48362 +
48363 +static int
48364 +copy_user_acl(struct gr_arg *arg)
48365 +{
48366 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48367 + struct sprole_pw *sptmp;
48368 + struct gr_hash_struct *ghash;
48369 + uid_t *domainlist;
48370 + unsigned int r_num;
48371 + unsigned int len;
48372 + char *tmp;
48373 + int err = 0;
48374 + __u16 i;
48375 + __u32 num_subjs;
48376 +
48377 + /* we need a default and kernel role */
48378 + if (arg->role_db.num_roles < 2)
48379 + return -EINVAL;
48380 +
48381 + /* copy special role authentication info from userspace */
48382 +
48383 + num_sprole_pws = arg->num_sprole_pws;
48384 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48385 +
48386 + if (!acl_special_roles) {
48387 + err = -ENOMEM;
48388 + goto cleanup;
48389 + }
48390 +
48391 + for (i = 0; i < num_sprole_pws; i++) {
48392 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48393 + if (!sptmp) {
48394 + err = -ENOMEM;
48395 + goto cleanup;
48396 + }
48397 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48398 + sizeof (struct sprole_pw))) {
48399 + err = -EFAULT;
48400 + goto cleanup;
48401 + }
48402 +
48403 + len =
48404 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48405 +
48406 + if (!len || len >= GR_SPROLE_LEN) {
48407 + err = -EINVAL;
48408 + goto cleanup;
48409 + }
48410 +
48411 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48412 + err = -ENOMEM;
48413 + goto cleanup;
48414 + }
48415 +
48416 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48417 + err = -EFAULT;
48418 + goto cleanup;
48419 + }
48420 + tmp[len-1] = '\0';
48421 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48422 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48423 +#endif
48424 + sptmp->rolename = tmp;
48425 + acl_special_roles[i] = sptmp;
48426 + }
48427 +
48428 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48429 +
48430 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48431 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48432 +
48433 + if (!r_tmp) {
48434 + err = -ENOMEM;
48435 + goto cleanup;
48436 + }
48437 +
48438 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48439 + sizeof (struct acl_role_label *))) {
48440 + err = -EFAULT;
48441 + goto cleanup;
48442 + }
48443 +
48444 + if (copy_from_user(r_tmp, r_utmp2,
48445 + sizeof (struct acl_role_label))) {
48446 + err = -EFAULT;
48447 + goto cleanup;
48448 + }
48449 +
48450 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48451 +
48452 + if (!len || len >= PATH_MAX) {
48453 + err = -EINVAL;
48454 + goto cleanup;
48455 + }
48456 +
48457 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48458 + err = -ENOMEM;
48459 + goto cleanup;
48460 + }
48461 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48462 + err = -EFAULT;
48463 + goto cleanup;
48464 + }
48465 + tmp[len-1] = '\0';
48466 + r_tmp->rolename = tmp;
48467 +
48468 + if (!strcmp(r_tmp->rolename, "default")
48469 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48470 + default_role = r_tmp;
48471 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48472 + kernel_role = r_tmp;
48473 + }
48474 +
48475 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48476 + err = -ENOMEM;
48477 + goto cleanup;
48478 + }
48479 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48480 + err = -EFAULT;
48481 + goto cleanup;
48482 + }
48483 +
48484 + r_tmp->hash = ghash;
48485 +
48486 + num_subjs = count_user_subjs(r_tmp->hash->first);
48487 +
48488 + r_tmp->subj_hash_size = num_subjs;
48489 + r_tmp->subj_hash =
48490 + (struct acl_subject_label **)
48491 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48492 +
48493 + if (!r_tmp->subj_hash) {
48494 + err = -ENOMEM;
48495 + goto cleanup;
48496 + }
48497 +
48498 + err = copy_user_allowedips(r_tmp);
48499 + if (err)
48500 + goto cleanup;
48501 +
48502 + /* copy domain info */
48503 + if (r_tmp->domain_children != NULL) {
48504 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48505 + if (domainlist == NULL) {
48506 + err = -ENOMEM;
48507 + goto cleanup;
48508 + }
48509 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48510 + err = -EFAULT;
48511 + goto cleanup;
48512 + }
48513 + r_tmp->domain_children = domainlist;
48514 + }
48515 +
48516 + err = copy_user_transitions(r_tmp);
48517 + if (err)
48518 + goto cleanup;
48519 +
48520 + memset(r_tmp->subj_hash, 0,
48521 + r_tmp->subj_hash_size *
48522 + sizeof (struct acl_subject_label *));
48523 +
48524 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48525 +
48526 + if (err)
48527 + goto cleanup;
48528 +
48529 + /* set nested subject list to null */
48530 + r_tmp->hash->first = NULL;
48531 +
48532 + insert_acl_role_label(r_tmp);
48533 + }
48534 +
48535 + goto return_err;
48536 + cleanup:
48537 + free_variables();
48538 + return_err:
48539 + return err;
48540 +
48541 +}
48542 +
48543 +static int
48544 +gracl_init(struct gr_arg *args)
48545 +{
48546 + int error = 0;
48547 +
48548 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48549 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48550 +
48551 + if (init_variables(args)) {
48552 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48553 + error = -ENOMEM;
48554 + free_variables();
48555 + goto out;
48556 + }
48557 +
48558 + error = copy_user_acl(args);
48559 + free_init_variables();
48560 + if (error) {
48561 + free_variables();
48562 + goto out;
48563 + }
48564 +
48565 + if ((error = gr_set_acls(0))) {
48566 + free_variables();
48567 + goto out;
48568 + }
48569 +
48570 + pax_open_kernel();
48571 + gr_status |= GR_READY;
48572 + pax_close_kernel();
48573 +
48574 + out:
48575 + return error;
48576 +}
48577 +
48578 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
48579 +
48580 +static int
48581 +glob_match(const char *p, const char *n)
48582 +{
48583 + char c;
48584 +
48585 + while ((c = *p++) != '\0') {
48586 + switch (c) {
48587 + case '?':
48588 + if (*n == '\0')
48589 + return 1;
48590 + else if (*n == '/')
48591 + return 1;
48592 + break;
48593 + case '\\':
48594 + if (*n != c)
48595 + return 1;
48596 + break;
48597 + case '*':
48598 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
48599 + if (*n == '/')
48600 + return 1;
48601 + else if (c == '?') {
48602 + if (*n == '\0')
48603 + return 1;
48604 + else
48605 + ++n;
48606 + }
48607 + }
48608 + if (c == '\0') {
48609 + return 0;
48610 + } else {
48611 + const char *endp;
48612 +
48613 + if ((endp = strchr(n, '/')) == NULL)
48614 + endp = n + strlen(n);
48615 +
48616 + if (c == '[') {
48617 + for (--p; n < endp; ++n)
48618 + if (!glob_match(p, n))
48619 + return 0;
48620 + } else if (c == '/') {
48621 + while (*n != '\0' && *n != '/')
48622 + ++n;
48623 + if (*n == '/' && !glob_match(p, n + 1))
48624 + return 0;
48625 + } else {
48626 + for (--p; n < endp; ++n)
48627 + if (*n == c && !glob_match(p, n))
48628 + return 0;
48629 + }
48630 +
48631 + return 1;
48632 + }
48633 + case '[':
48634 + {
48635 + int not;
48636 + char cold;
48637 +
48638 + if (*n == '\0' || *n == '/')
48639 + return 1;
48640 +
48641 + not = (*p == '!' || *p == '^');
48642 + if (not)
48643 + ++p;
48644 +
48645 + c = *p++;
48646 + for (;;) {
48647 + unsigned char fn = (unsigned char)*n;
48648 +
48649 + if (c == '\0')
48650 + return 1;
48651 + else {
48652 + if (c == fn)
48653 + goto matched;
48654 + cold = c;
48655 + c = *p++;
48656 +
48657 + if (c == '-' && *p != ']') {
48658 + unsigned char cend = *p++;
48659 +
48660 + if (cend == '\0')
48661 + return 1;
48662 +
48663 + if (cold <= fn && fn <= cend)
48664 + goto matched;
48665 +
48666 + c = *p++;
48667 + }
48668 + }
48669 +
48670 + if (c == ']')
48671 + break;
48672 + }
48673 + if (!not)
48674 + return 1;
48675 + break;
48676 + matched:
48677 + while (c != ']') {
48678 + if (c == '\0')
48679 + return 1;
48680 +
48681 + c = *p++;
48682 + }
48683 + if (not)
48684 + return 1;
48685 + }
48686 + break;
48687 + default:
48688 + if (c != *n)
48689 + return 1;
48690 + }
48691 +
48692 + ++n;
48693 + }
48694 +
48695 + if (*n == '\0')
48696 + return 0;
48697 +
48698 + if (*n == '/')
48699 + return 0;
48700 +
48701 + return 1;
48702 +}
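glob_match() is a reduced fnmatch(): 0 means the pattern matches the path and 1 means it does not, '?' and bracket classes never match a '/', and ranges like [a-z] are supported. For a feel of the pattern syntax, the userspace fnmatch(3) calls below exercise the same constructs with FNM_PATHNAME; the kernel helper differs in some '/'-handling corner cases, so this is only an approximation of its behaviour:

    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
            /* 0 = match, non-zero = no match -- same convention as glob_match() */
            printf("%d\n", fnmatch("/home/?lice", "/home/alice", FNM_PATHNAME));              /* 0 */
            printf("%d\n", fnmatch("/home/????", "/home/a/cd", FNM_PATHNAME));                /* non-zero: '?' will not match '/' */
            printf("%d\n", fnmatch("/lib/lib[a-z]*.so", "/lib/libc-2.13.so", FNM_PATHNAME));  /* 0 */
            return 0;
    }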
48703 +
48704 +static struct acl_object_label *
48705 +chk_glob_label(struct acl_object_label *globbed,
48706 + struct dentry *dentry, struct vfsmount *mnt, char **path)
48707 +{
48708 + struct acl_object_label *tmp;
48709 +
48710 + if (*path == NULL)
48711 + *path = gr_to_filename_nolock(dentry, mnt);
48712 +
48713 + tmp = globbed;
48714 +
48715 + while (tmp) {
48716 + if (!glob_match(tmp->filename, *path))
48717 + return tmp;
48718 + tmp = tmp->next;
48719 + }
48720 +
48721 + return NULL;
48722 +}
48723 +
48724 +static struct acl_object_label *
48725 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48726 + const ino_t curr_ino, const dev_t curr_dev,
48727 + const struct acl_subject_label *subj, char **path, const int checkglob)
48728 +{
48729 + struct acl_subject_label *tmpsubj;
48730 + struct acl_object_label *retval;
48731 + struct acl_object_label *retval2;
48732 +
48733 + tmpsubj = (struct acl_subject_label *) subj;
48734 + read_lock(&gr_inode_lock);
48735 + do {
48736 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48737 + if (retval) {
48738 + if (checkglob && retval->globbed) {
48739 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48740 + (struct vfsmount *)orig_mnt, path);
48741 + if (retval2)
48742 + retval = retval2;
48743 + }
48744 + break;
48745 + }
48746 + } while ((tmpsubj = tmpsubj->parent_subject));
48747 + read_unlock(&gr_inode_lock);
48748 +
48749 + return retval;
48750 +}
48751 +
48752 +static __inline__ struct acl_object_label *
48753 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48754 + const struct dentry *curr_dentry,
48755 + const struct acl_subject_label *subj, char **path, const int checkglob)
48756 +{
48757 + int newglob = checkglob;
48758 +
48759 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
48760 + as we don't want a / * rule to match instead of the / object
48761 + don't do this for create lookups that call this function though, since they're looking up
48762 + on the parent and thus need globbing checks on all paths
48763 + */
48764 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48765 + newglob = GR_NO_GLOB;
48766 +
48767 + return __full_lookup(orig_dentry, orig_mnt,
48768 + curr_dentry->d_inode->i_ino,
48769 + __get_dev(curr_dentry), subj, path, newglob);
48770 +}
48771 +
48772 +static struct acl_object_label *
48773 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48774 + const struct acl_subject_label *subj, char *path, const int checkglob)
48775 +{
48776 + struct dentry *dentry = (struct dentry *) l_dentry;
48777 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48778 + struct acl_object_label *retval;
48779 +
48780 + spin_lock(&dcache_lock);
48781 + spin_lock(&vfsmount_lock);
48782 +
48783 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48784 +#ifdef CONFIG_NET
48785 + mnt == sock_mnt ||
48786 +#endif
48787 +#ifdef CONFIG_HUGETLBFS
48788 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48789 +#endif
48790 + /* ignore Eric Biederman */
48791 + IS_PRIVATE(l_dentry->d_inode))) {
48792 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48793 + goto out;
48794 + }
48795 +
48796 + for (;;) {
48797 + if (dentry == real_root && mnt == real_root_mnt)
48798 + break;
48799 +
48800 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48801 + if (mnt->mnt_parent == mnt)
48802 + break;
48803 +
48804 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48805 + if (retval != NULL)
48806 + goto out;
48807 +
48808 + dentry = mnt->mnt_mountpoint;
48809 + mnt = mnt->mnt_parent;
48810 + continue;
48811 + }
48812 +
48813 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48814 + if (retval != NULL)
48815 + goto out;
48816 +
48817 + dentry = dentry->d_parent;
48818 + }
48819 +
48820 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48821 +
48822 + if (retval == NULL)
48823 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48824 +out:
48825 + spin_unlock(&vfsmount_lock);
48826 + spin_unlock(&dcache_lock);
48827 +
48828 + BUG_ON(retval == NULL);
48829 +
48830 + return retval;
48831 +}
48832 +
48833 +static __inline__ struct acl_object_label *
48834 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48835 + const struct acl_subject_label *subj)
48836 +{
48837 + char *path = NULL;
48838 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48839 +}
48840 +
48841 +static __inline__ struct acl_object_label *
48842 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48843 + const struct acl_subject_label *subj)
48844 +{
48845 + char *path = NULL;
48846 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48847 +}
48848 +
48849 +static __inline__ struct acl_object_label *
48850 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48851 + const struct acl_subject_label *subj, char *path)
48852 +{
48853 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48854 +}
48855 +
48856 +static struct acl_subject_label *
48857 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48858 + const struct acl_role_label *role)
48859 +{
48860 + struct dentry *dentry = (struct dentry *) l_dentry;
48861 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48862 + struct acl_subject_label *retval;
48863 +
48864 + spin_lock(&dcache_lock);
48865 + spin_lock(&vfsmount_lock);
48866 +
48867 + for (;;) {
48868 + if (dentry == real_root && mnt == real_root_mnt)
48869 + break;
48870 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48871 + if (mnt->mnt_parent == mnt)
48872 + break;
48873 +
48874 + read_lock(&gr_inode_lock);
48875 + retval =
48876 + lookup_acl_subj_label(dentry->d_inode->i_ino,
48877 + __get_dev(dentry), role);
48878 + read_unlock(&gr_inode_lock);
48879 + if (retval != NULL)
48880 + goto out;
48881 +
48882 + dentry = mnt->mnt_mountpoint;
48883 + mnt = mnt->mnt_parent;
48884 + continue;
48885 + }
48886 +
48887 + read_lock(&gr_inode_lock);
48888 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48889 + __get_dev(dentry), role);
48890 + read_unlock(&gr_inode_lock);
48891 + if (retval != NULL)
48892 + goto out;
48893 +
48894 + dentry = dentry->d_parent;
48895 + }
48896 +
48897 + read_lock(&gr_inode_lock);
48898 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48899 + __get_dev(dentry), role);
48900 + read_unlock(&gr_inode_lock);
48901 +
48902 + if (unlikely(retval == NULL)) {
48903 + read_lock(&gr_inode_lock);
48904 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48905 + __get_dev(real_root), role);
48906 + read_unlock(&gr_inode_lock);
48907 + }
48908 +out:
48909 + spin_unlock(&vfsmount_lock);
48910 + spin_unlock(&dcache_lock);
48911 +
48912 + BUG_ON(retval == NULL);
48913 +
48914 + return retval;
48915 +}
48916 +
48917 +static void
48918 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48919 +{
48920 + struct task_struct *task = current;
48921 + const struct cred *cred = current_cred();
48922 +
48923 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48924 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48925 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48926 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48927 +
48928 + return;
48929 +}
48930 +
48931 +static void
48932 +gr_log_learn_sysctl(const char *path, const __u32 mode)
48933 +{
48934 + struct task_struct *task = current;
48935 + const struct cred *cred = current_cred();
48936 +
48937 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48938 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48939 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48940 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48941 +
48942 + return;
48943 +}
48944 +
48945 +static void
48946 +gr_log_learn_id_change(const char type, const unsigned int real,
48947 + const unsigned int effective, const unsigned int fs)
48948 +{
48949 + struct task_struct *task = current;
48950 + const struct cred *cred = current_cred();
48951 +
48952 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48953 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48954 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48955 + type, real, effective, fs, &task->signal->saved_ip);
48956 +
48957 + return;
48958 +}
48959 +
48960 +__u32
48961 +gr_check_link(const struct dentry * new_dentry,
48962 + const struct dentry * parent_dentry,
48963 + const struct vfsmount * parent_mnt,
48964 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48965 +{
48966 + struct acl_object_label *obj;
48967 + __u32 oldmode, newmode;
48968 + __u32 needmode;
48969 +
48970 + if (unlikely(!(gr_status & GR_READY)))
48971 + return (GR_CREATE | GR_LINK);
48972 +
48973 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48974 + oldmode = obj->mode;
48975 +
48976 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48977 + oldmode |= (GR_CREATE | GR_LINK);
48978 +
48979 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48980 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48981 + needmode |= GR_SETID | GR_AUDIT_SETID;
48982 +
48983 + newmode =
48984 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48985 + oldmode | needmode);
48986 +
48987 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48988 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48989 + GR_INHERIT | GR_AUDIT_INHERIT);
48990 +
48991 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48992 + goto bad;
48993 +
48994 + if ((oldmode & needmode) != needmode)
48995 + goto bad;
48996 +
48997 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48998 + if ((newmode & needmode) != needmode)
48999 + goto bad;
49000 +
49001 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49002 + return newmode;
49003 +bad:
49004 + needmode = oldmode;
49005 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49006 + needmode |= GR_SETID;
49007 +
49008 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49009 + gr_log_learn(old_dentry, old_mnt, needmode);
49010 + return (GR_CREATE | GR_LINK);
49011 + } else if (newmode & GR_SUPPRESS)
49012 + return GR_SUPPRESS;
49013 + else
49014 + return 0;
49015 +}
49016 +
49017 +__u32
49018 +gr_search_file(const struct dentry * dentry, const __u32 mode,
49019 + const struct vfsmount * mnt)
49020 +{
49021 + __u32 retval = mode;
49022 + struct acl_subject_label *curracl;
49023 + struct acl_object_label *currobj;
49024 +
49025 + if (unlikely(!(gr_status & GR_READY)))
49026 + return (mode & ~GR_AUDITS);
49027 +
49028 + curracl = current->acl;
49029 +
49030 + currobj = chk_obj_label(dentry, mnt, curracl);
49031 + retval = currobj->mode & mode;
49032 +
49033 + /* if we're opening a specified transfer file for writing
49034 + (e.g. /dev/initctl), then transfer our role to init
49035 + */
49036 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49037 + current->role->roletype & GR_ROLE_PERSIST)) {
49038 + struct task_struct *task = init_pid_ns.child_reaper;
49039 +
49040 + if (task->role != current->role) {
49041 + task->acl_sp_role = 0;
49042 + task->acl_role_id = current->acl_role_id;
49043 + task->role = current->role;
49044 + rcu_read_lock();
49045 + read_lock(&grsec_exec_file_lock);
49046 + gr_apply_subject_to_task(task);
49047 + read_unlock(&grsec_exec_file_lock);
49048 + rcu_read_unlock();
49049 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49050 + }
49051 + }
49052 +
49053 + if (unlikely
49054 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49055 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49056 + __u32 new_mode = mode;
49057 +
49058 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49059 +
49060 + retval = new_mode;
49061 +
49062 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49063 + new_mode |= GR_INHERIT;
49064 +
49065 + if (!(mode & GR_NOLEARN))
49066 + gr_log_learn(dentry, mnt, new_mode);
49067 + }
49068 +
49069 + return retval;
49070 +}
49071 +
49072 +__u32
49073 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49074 + const struct vfsmount * mnt, const __u32 mode)
49075 +{
49076 + struct name_entry *match;
49077 + struct acl_object_label *matchpo;
49078 + struct acl_subject_label *curracl;
49079 + char *path;
49080 + __u32 retval;
49081 +
49082 + if (unlikely(!(gr_status & GR_READY)))
49083 + return (mode & ~GR_AUDITS);
49084 +
49085 + preempt_disable();
49086 + path = gr_to_filename_rbac(new_dentry, mnt);
49087 + match = lookup_name_entry_create(path);
49088 +
49089 + if (!match)
49090 + goto check_parent;
49091 +
49092 + curracl = current->acl;
49093 +
49094 + read_lock(&gr_inode_lock);
49095 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49096 + read_unlock(&gr_inode_lock);
49097 +
49098 + if (matchpo) {
49099 + if ((matchpo->mode & mode) !=
49100 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
49101 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49102 + __u32 new_mode = mode;
49103 +
49104 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49105 +
49106 + gr_log_learn(new_dentry, mnt, new_mode);
49107 +
49108 + preempt_enable();
49109 + return new_mode;
49110 + }
49111 + preempt_enable();
49112 + return (matchpo->mode & mode);
49113 + }
49114 +
49115 + check_parent:
49116 + curracl = current->acl;
49117 +
49118 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49119 + retval = matchpo->mode & mode;
49120 +
49121 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49122 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49123 + __u32 new_mode = mode;
49124 +
49125 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49126 +
49127 + gr_log_learn(new_dentry, mnt, new_mode);
49128 + preempt_enable();
49129 + return new_mode;
49130 + }
49131 +
49132 + preempt_enable();
49133 + return retval;
49134 +}
49135 +
49136 +int
49137 +gr_check_hidden_task(const struct task_struct *task)
49138 +{
49139 + if (unlikely(!(gr_status & GR_READY)))
49140 + return 0;
49141 +
49142 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49143 + return 1;
49144 +
49145 + return 0;
49146 +}
49147 +
49148 +int
49149 +gr_check_protected_task(const struct task_struct *task)
49150 +{
49151 + if (unlikely(!(gr_status & GR_READY) || !task))
49152 + return 0;
49153 +
49154 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49155 + task->acl != current->acl)
49156 + return 1;
49157 +
49158 + return 0;
49159 +}
49160 +
49161 +int
49162 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49163 +{
49164 + struct task_struct *p;
49165 + int ret = 0;
49166 +
49167 + if (unlikely(!(gr_status & GR_READY) || !pid))
49168 + return ret;
49169 +
49170 + read_lock(&tasklist_lock);
49171 + do_each_pid_task(pid, type, p) {
49172 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49173 + p->acl != current->acl) {
49174 + ret = 1;
49175 + goto out;
49176 + }
49177 + } while_each_pid_task(pid, type, p);
49178 +out:
49179 + read_unlock(&tasklist_lock);
49180 +
49181 + return ret;
49182 +}
49183 +
49184 +void
49185 +gr_copy_label(struct task_struct *tsk)
49186 +{
49187 + tsk->signal->used_accept = 0;
49188 + tsk->acl_sp_role = 0;
49189 + tsk->acl_role_id = current->acl_role_id;
49190 + tsk->acl = current->acl;
49191 + tsk->role = current->role;
49192 + tsk->signal->curr_ip = current->signal->curr_ip;
49193 + tsk->signal->saved_ip = current->signal->saved_ip;
49194 + if (current->exec_file)
49195 + get_file(current->exec_file);
49196 + tsk->exec_file = current->exec_file;
49197 + tsk->is_writable = current->is_writable;
49198 + if (unlikely(current->signal->used_accept)) {
49199 + current->signal->curr_ip = 0;
49200 + current->signal->saved_ip = 0;
49201 + }
49202 +
49203 + return;
49204 +}
49205 +
49206 +static void
49207 +gr_set_proc_res(struct task_struct *task)
49208 +{
49209 + struct acl_subject_label *proc;
49210 + unsigned short i;
49211 +
49212 + proc = task->acl;
49213 +
49214 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49215 + return;
49216 +
49217 + for (i = 0; i < RLIM_NLIMITS; i++) {
49218 + if (!(proc->resmask & (1 << i)))
49219 + continue;
49220 +
49221 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49222 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49223 + }
49224 +
49225 + return;
49226 +}
49227 +
49228 +extern int __gr_process_user_ban(struct user_struct *user);
49229 +
49230 +int
49231 +gr_check_user_change(int real, int effective, int fs)
49232 +{
49233 + unsigned int i;
49234 + __u16 num;
49235 + uid_t *uidlist;
49236 + int curuid;
49237 + int realok = 0;
49238 + int effectiveok = 0;
49239 + int fsok = 0;
49240 +
49241 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49242 + struct user_struct *user;
49243 +
49244 + if (real == -1)
49245 + goto skipit;
49246 +
49247 + user = find_user(real);
49248 + if (user == NULL)
49249 + goto skipit;
49250 +
49251 + if (__gr_process_user_ban(user)) {
49252 + /* for find_user */
49253 + free_uid(user);
49254 + return 1;
49255 + }
49256 +
49257 + /* for find_user */
49258 + free_uid(user);
49259 +
49260 +skipit:
49261 +#endif
49262 +
49263 + if (unlikely(!(gr_status & GR_READY)))
49264 + return 0;
49265 +
49266 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49267 + gr_log_learn_id_change('u', real, effective, fs);
49268 +
49269 + num = current->acl->user_trans_num;
49270 + uidlist = current->acl->user_transitions;
49271 +
49272 + if (uidlist == NULL)
49273 + return 0;
49274 +
49275 + if (real == -1)
49276 + realok = 1;
49277 + if (effective == -1)
49278 + effectiveok = 1;
49279 + if (fs == -1)
49280 + fsok = 1;
49281 +
49282 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
49283 + for (i = 0; i < num; i++) {
49284 + curuid = (int)uidlist[i];
49285 + if (real == curuid)
49286 + realok = 1;
49287 + if (effective == curuid)
49288 + effectiveok = 1;
49289 + if (fs == curuid)
49290 + fsok = 1;
49291 + }
49292 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
49293 + for (i = 0; i < num; i++) {
49294 + curuid = (int)uidlist[i];
49295 + if (real == curuid)
49296 + break;
49297 + if (effective == curuid)
49298 + break;
49299 + if (fs == curuid)
49300 + break;
49301 + }
49302 + /* not in deny list */
49303 + if (i == num) {
49304 + realok = 1;
49305 + effectiveok = 1;
49306 + fsok = 1;
49307 + }
49308 + }
49309 +
49310 + if (realok && effectiveok && fsok)
49311 + return 0;
49312 + else {
49313 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49314 + return 1;
49315 + }
49316 +}
49317 +
49318 +int
49319 +gr_check_group_change(int real, int effective, int fs)
49320 +{
49321 + unsigned int i;
49322 + __u16 num;
49323 + gid_t *gidlist;
49324 + int curgid;
49325 + int realok = 0;
49326 + int effectiveok = 0;
49327 + int fsok = 0;
49328 +
49329 + if (unlikely(!(gr_status & GR_READY)))
49330 + return 0;
49331 +
49332 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49333 + gr_log_learn_id_change('g', real, effective, fs);
49334 +
49335 + num = current->acl->group_trans_num;
49336 + gidlist = current->acl->group_transitions;
49337 +
49338 + if (gidlist == NULL)
49339 + return 0;
49340 +
49341 + if (real == -1)
49342 + realok = 1;
49343 + if (effective == -1)
49344 + effectiveok = 1;
49345 + if (fs == -1)
49346 + fsok = 1;
49347 +
49348 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49349 + for (i = 0; i < num; i++) {
49350 + curgid = (int)gidlist[i];
49351 + if (real == curgid)
49352 + realok = 1;
49353 + if (effective == curgid)
49354 + effectiveok = 1;
49355 + if (fs == curgid)
49356 + fsok = 1;
49357 + }
49358 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49359 + for (i = 0; i < num; i++) {
49360 + curgid = (int)gidlist[i];
49361 + if (real == curgid)
49362 + break;
49363 + if (effective == curgid)
49364 + break;
49365 + if (fs == curgid)
49366 + break;
49367 + }
49368 + /* not in deny list */
49369 + if (i == num) {
49370 + realok = 1;
49371 + effectiveok = 1;
49372 + fsok = 1;
49373 + }
49374 + }
49375 +
49376 + if (realok && effectiveok && fsok)
49377 + return 0;
49378 + else {
49379 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49380 + return 1;
49381 + }
49382 +}
49383 +
49384 +void
49385 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49386 +{
49387 + struct acl_role_label *role = task->role;
49388 + struct acl_subject_label *subj = NULL;
49389 + struct acl_object_label *obj;
49390 + struct file *filp;
49391 +
49392 + if (unlikely(!(gr_status & GR_READY)))
49393 + return;
49394 +
49395 + filp = task->exec_file;
49396 +
49397 + /* kernel process, we'll give them the kernel role */
49398 + if (unlikely(!filp)) {
49399 + task->role = kernel_role;
49400 + task->acl = kernel_role->root_label;
49401 + return;
49402 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49403 + role = lookup_acl_role_label(task, uid, gid);
49404 +
49405 + /* perform subject lookup in the possibly new role;
49406 + we can use this result below in the case where role == task->role
49407 + */
49408 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49409 +
49410 + /* if we changed uid/gid but ended up in the same role
49411 + and are using inheritance, don't lose the inherited subject:
49412 + if the current subject is other than what a normal lookup
49413 + would result in, we arrived via inheritance, so don't
49414 + lose the subject
49415 + */
49416 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49417 + (subj == task->acl)))
49418 + task->acl = subj;
49419 +
49420 + task->role = role;
49421 +
49422 + task->is_writable = 0;
49423 +
49424 + /* ignore additional mmap checks for processes that are writable
49425 + by the default ACL */
49426 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49427 + if (unlikely(obj->mode & GR_WRITE))
49428 + task->is_writable = 1;
49429 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49430 + if (unlikely(obj->mode & GR_WRITE))
49431 + task->is_writable = 1;
49432 +
49433 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49434 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49435 +#endif
49436 +
49437 + gr_set_proc_res(task);
49438 +
49439 + return;
49440 +}
49441 +
49442 +int
49443 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49444 + const int unsafe_share)
49445 +{
49446 + struct task_struct *task = current;
49447 + struct acl_subject_label *newacl;
49448 + struct acl_object_label *obj;
49449 + __u32 retmode;
49450 +
49451 + if (unlikely(!(gr_status & GR_READY)))
49452 + return 0;
49453 +
49454 + newacl = chk_subj_label(dentry, mnt, task->role);
49455 +
49456 + task_lock(task);
49457 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49458 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49459 + !(task->role->roletype & GR_ROLE_GOD) &&
49460 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49461 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49462 + task_unlock(task);
49463 + if (unsafe_share)
49464 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49465 + else
49466 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49467 + return -EACCES;
49468 + }
49469 + task_unlock(task);
49470 +
49471 + obj = chk_obj_label(dentry, mnt, task->acl);
49472 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49473 +
49474 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49475 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49476 + if (obj->nested)
49477 + task->acl = obj->nested;
49478 + else
49479 + task->acl = newacl;
49480 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49481 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49482 +
49483 + task->is_writable = 0;
49484 +
49485 + /* ignore additional mmap checks for processes that are writable
49486 + by the default ACL */
49487 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49488 + if (unlikely(obj->mode & GR_WRITE))
49489 + task->is_writable = 1;
49490 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49491 + if (unlikely(obj->mode & GR_WRITE))
49492 + task->is_writable = 1;
49493 +
49494 + gr_set_proc_res(task);
49495 +
49496 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49497 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49498 +#endif
49499 + return 0;
49500 +}
49501 +
49502 +/* always called with valid inodev ptr */
49503 +static void
49504 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49505 +{
49506 + struct acl_object_label *matchpo;
49507 + struct acl_subject_label *matchps;
49508 + struct acl_subject_label *subj;
49509 + struct acl_role_label *role;
49510 + unsigned int x;
49511 +
49512 + FOR_EACH_ROLE_START(role)
49513 + FOR_EACH_SUBJECT_START(role, subj, x)
49514 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49515 + matchpo->mode |= GR_DELETED;
49516 + FOR_EACH_SUBJECT_END(subj,x)
49517 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49518 + if (subj->inode == ino && subj->device == dev)
49519 + subj->mode |= GR_DELETED;
49520 + FOR_EACH_NESTED_SUBJECT_END(subj)
49521 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49522 + matchps->mode |= GR_DELETED;
49523 + FOR_EACH_ROLE_END(role)
49524 +
49525 + inodev->nentry->deleted = 1;
49526 +
49527 + return;
49528 +}
49529 +
49530 +void
49531 +gr_handle_delete(const ino_t ino, const dev_t dev)
49532 +{
49533 + struct inodev_entry *inodev;
49534 +
49535 + if (unlikely(!(gr_status & GR_READY)))
49536 + return;
49537 +
49538 + write_lock(&gr_inode_lock);
49539 + inodev = lookup_inodev_entry(ino, dev);
49540 + if (inodev != NULL)
49541 + do_handle_delete(inodev, ino, dev);
49542 + write_unlock(&gr_inode_lock);
49543 +
49544 + return;
49545 +}
49546 +
49547 +static void
49548 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49549 + const ino_t newinode, const dev_t newdevice,
49550 + struct acl_subject_label *subj)
49551 +{
49552 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49553 + struct acl_object_label *match;
49554 +
49555 + match = subj->obj_hash[index];
49556 +
49557 + while (match && (match->inode != oldinode ||
49558 + match->device != olddevice ||
49559 + !(match->mode & GR_DELETED)))
49560 + match = match->next;
49561 +
49562 + if (match && (match->inode == oldinode)
49563 + && (match->device == olddevice)
49564 + && (match->mode & GR_DELETED)) {
49565 + if (match->prev == NULL) {
49566 + subj->obj_hash[index] = match->next;
49567 + if (match->next != NULL)
49568 + match->next->prev = NULL;
49569 + } else {
49570 + match->prev->next = match->next;
49571 + if (match->next != NULL)
49572 + match->next->prev = match->prev;
49573 + }
49574 + match->prev = NULL;
49575 + match->next = NULL;
49576 + match->inode = newinode;
49577 + match->device = newdevice;
49578 + match->mode &= ~GR_DELETED;
49579 +
49580 + insert_acl_obj_label(match, subj);
49581 + }
49582 +
49583 + return;
49584 +}
49585 +
49586 +static void
49587 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49588 + const ino_t newinode, const dev_t newdevice,
49589 + struct acl_role_label *role)
49590 +{
49591 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49592 + struct acl_subject_label *match;
49593 +
49594 + match = role->subj_hash[index];
49595 +
49596 + while (match && (match->inode != oldinode ||
49597 + match->device != olddevice ||
49598 + !(match->mode & GR_DELETED)))
49599 + match = match->next;
49600 +
49601 + if (match && (match->inode == oldinode)
49602 + && (match->device == olddevice)
49603 + && (match->mode & GR_DELETED)) {
49604 + if (match->prev == NULL) {
49605 + role->subj_hash[index] = match->next;
49606 + if (match->next != NULL)
49607 + match->next->prev = NULL;
49608 + } else {
49609 + match->prev->next = match->next;
49610 + if (match->next != NULL)
49611 + match->next->prev = match->prev;
49612 + }
49613 + match->prev = NULL;
49614 + match->next = NULL;
49615 + match->inode = newinode;
49616 + match->device = newdevice;
49617 + match->mode &= ~GR_DELETED;
49618 +
49619 + insert_acl_subj_label(match, role);
49620 + }
49621 +
49622 + return;
49623 +}
49624 +
49625 +static void
49626 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49627 + const ino_t newinode, const dev_t newdevice)
49628 +{
49629 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49630 + struct inodev_entry *match;
49631 +
49632 + match = inodev_set.i_hash[index];
49633 +
49634 + while (match && (match->nentry->inode != oldinode ||
49635 + match->nentry->device != olddevice || !match->nentry->deleted))
49636 + match = match->next;
49637 +
49638 + if (match && (match->nentry->inode == oldinode)
49639 + && (match->nentry->device == olddevice) &&
49640 + match->nentry->deleted) {
49641 + if (match->prev == NULL) {
49642 + inodev_set.i_hash[index] = match->next;
49643 + if (match->next != NULL)
49644 + match->next->prev = NULL;
49645 + } else {
49646 + match->prev->next = match->next;
49647 + if (match->next != NULL)
49648 + match->next->prev = match->prev;
49649 + }
49650 + match->prev = NULL;
49651 + match->next = NULL;
49652 + match->nentry->inode = newinode;
49653 + match->nentry->device = newdevice;
49654 + match->nentry->deleted = 0;
49655 +
49656 + insert_inodev_entry(match);
49657 + }
49658 +
49659 + return;
49660 +}
49661 +
49662 +static void
49663 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49664 + const struct vfsmount *mnt)
49665 +{
49666 + struct acl_subject_label *subj;
49667 + struct acl_role_label *role;
49668 + unsigned int x;
49669 + ino_t inode = dentry->d_inode->i_ino;
49670 + dev_t dev = __get_dev(dentry);
49671 +
49672 + FOR_EACH_ROLE_START(role)
49673 + update_acl_subj_label(matchn->inode, matchn->device,
49674 + inode, dev, role);
49675 +
49676 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49677 + if ((subj->inode == inode) && (subj->device == dev)) {
49678 + subj->inode = inode;
49679 + subj->device = dev;
49680 + }
49681 + FOR_EACH_NESTED_SUBJECT_END(subj)
49682 + FOR_EACH_SUBJECT_START(role, subj, x)
49683 + update_acl_obj_label(matchn->inode, matchn->device,
49684 + inode, dev, subj);
49685 + FOR_EACH_SUBJECT_END(subj,x)
49686 + FOR_EACH_ROLE_END(role)
49687 +
49688 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49689 +
49690 + return;
49691 +}
49692 +
49693 +void
49694 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49695 +{
49696 + struct name_entry *matchn;
49697 +
49698 + if (unlikely(!(gr_status & GR_READY)))
49699 + return;
49700 +
49701 + preempt_disable();
49702 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49703 +
49704 + if (unlikely((unsigned long)matchn)) {
49705 + write_lock(&gr_inode_lock);
49706 + do_handle_create(matchn, dentry, mnt);
49707 + write_unlock(&gr_inode_lock);
49708 + }
49709 + preempt_enable();
49710 +
49711 + return;
49712 +}
49713 +
49714 +void
49715 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49716 + struct dentry *old_dentry,
49717 + struct dentry *new_dentry,
49718 + struct vfsmount *mnt, const __u8 replace)
49719 +{
49720 + struct name_entry *matchn;
49721 + struct inodev_entry *inodev;
49722 + ino_t oldinode = old_dentry->d_inode->i_ino;
49723 + dev_t olddev = __get_dev(old_dentry);
49724 +
49725 + /* vfs_rename swaps the name and parent link for old_dentry and
49726 + new_dentry.
49727 + At this point, old_dentry has the new name, parent link, and inode
49728 + for the renamed file.
49729 + If a file is being replaced by a rename, new_dentry has the inode
49730 + and name for the replaced file.
49731 + */
49732 +
49733 + if (unlikely(!(gr_status & GR_READY)))
49734 + return;
49735 +
49736 + preempt_disable();
49737 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49738 +
49739 + /* we wouldn't have to check d_inode if it weren't for
49740 + NFS silly-renaming
49741 + */
49742 +
49743 + write_lock(&gr_inode_lock);
49744 + if (unlikely(replace && new_dentry->d_inode)) {
49745 + ino_t newinode = new_dentry->d_inode->i_ino;
49746 + dev_t newdev = __get_dev(new_dentry);
49747 + inodev = lookup_inodev_entry(newinode, newdev);
49748 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49749 + do_handle_delete(inodev, newinode, newdev);
49750 + }
49751 +
49752 + inodev = lookup_inodev_entry(oldinode, olddev);
49753 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49754 + do_handle_delete(inodev, oldinode, olddev);
49755 +
49756 + if (unlikely((unsigned long)matchn))
49757 + do_handle_create(matchn, old_dentry, mnt);
49758 +
49759 + write_unlock(&gr_inode_lock);
49760 + preempt_enable();
49761 +
49762 + return;
49763 +}
49764 +
49765 +static int
49766 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49767 + unsigned char **sum)
49768 +{
49769 + struct acl_role_label *r;
49770 + struct role_allowed_ip *ipp;
49771 + struct role_transition *trans;
49772 + unsigned int i;
49773 + int found = 0;
49774 + u32 curr_ip = current->signal->curr_ip;
49775 +
49776 + current->signal->saved_ip = curr_ip;
49777 +
49778 + /* check transition table */
49779 +
49780 + for (trans = current->role->transitions; trans; trans = trans->next) {
49781 + if (!strcmp(rolename, trans->rolename)) {
49782 + found = 1;
49783 + break;
49784 + }
49785 + }
49786 +
49787 + if (!found)
49788 + return 0;
49789 +
49790 + /* handle special roles that do not require authentication
49791 + and check ip */
49792 +
49793 + FOR_EACH_ROLE_START(r)
49794 + if (!strcmp(rolename, r->rolename) &&
49795 + (r->roletype & GR_ROLE_SPECIAL)) {
49796 + found = 0;
49797 + if (r->allowed_ips != NULL) {
49798 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49799 + if ((ntohl(curr_ip) & ipp->netmask) ==
49800 + (ntohl(ipp->addr) & ipp->netmask))
49801 + found = 1;
49802 + }
49803 + } else
49804 + found = 2;
49805 + if (!found)
49806 + return 0;
49807 +
49808 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49809 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49810 + *salt = NULL;
49811 + *sum = NULL;
49812 + return 1;
49813 + }
49814 + }
49815 + FOR_EACH_ROLE_END(r)
49816 +
49817 + for (i = 0; i < num_sprole_pws; i++) {
49818 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49819 + *salt = acl_special_roles[i]->salt;
49820 + *sum = acl_special_roles[i]->sum;
49821 + return 1;
49822 + }
49823 + }
49824 +
49825 + return 0;
49826 +}
49827 +
49828 +static void
49829 +assign_special_role(char *rolename)
49830 +{
49831 + struct acl_object_label *obj;
49832 + struct acl_role_label *r;
49833 + struct acl_role_label *assigned = NULL;
49834 + struct task_struct *tsk;
49835 + struct file *filp;
49836 +
49837 + FOR_EACH_ROLE_START(r)
49838 + if (!strcmp(rolename, r->rolename) &&
49839 + (r->roletype & GR_ROLE_SPECIAL)) {
49840 + assigned = r;
49841 + break;
49842 + }
49843 + FOR_EACH_ROLE_END(r)
49844 +
49845 + if (!assigned)
49846 + return;
49847 +
49848 + read_lock(&tasklist_lock);
49849 + read_lock(&grsec_exec_file_lock);
49850 +
49851 + tsk = current->real_parent;
49852 + if (tsk == NULL)
49853 + goto out_unlock;
49854 +
49855 + filp = tsk->exec_file;
49856 + if (filp == NULL)
49857 + goto out_unlock;
49858 +
49859 + tsk->is_writable = 0;
49860 +
49861 + tsk->acl_sp_role = 1;
49862 + tsk->acl_role_id = ++acl_sp_role_value;
49863 + tsk->role = assigned;
49864 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49865 +
49866 + /* ignore additional mmap checks for processes that are writable
49867 + by the default ACL */
49868 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49869 + if (unlikely(obj->mode & GR_WRITE))
49870 + tsk->is_writable = 1;
49871 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49872 + if (unlikely(obj->mode & GR_WRITE))
49873 + tsk->is_writable = 1;
49874 +
49875 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49876 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49877 +#endif
49878 +
49879 +out_unlock:
49880 + read_unlock(&grsec_exec_file_lock);
49881 + read_unlock(&tasklist_lock);
49882 + return;
49883 +}
49884 +
49885 +int gr_check_secure_terminal(struct task_struct *task)
49886 +{
49887 + struct task_struct *p, *p2, *p3;
49888 + struct files_struct *files;
49889 + struct fdtable *fdt;
49890 + struct file *our_file = NULL, *file;
49891 + int i;
49892 +
49893 + if (task->signal->tty == NULL)
49894 + return 1;
49895 +
49896 + files = get_files_struct(task);
49897 + if (files != NULL) {
49898 + rcu_read_lock();
49899 + fdt = files_fdtable(files);
49900 + for (i=0; i < fdt->max_fds; i++) {
49901 + file = fcheck_files(files, i);
49902 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49903 + get_file(file);
49904 + our_file = file;
49905 + }
49906 + }
49907 + rcu_read_unlock();
49908 + put_files_struct(files);
49909 + }
49910 +
49911 + if (our_file == NULL)
49912 + return 1;
49913 +
49914 + read_lock(&tasklist_lock);
49915 + do_each_thread(p2, p) {
49916 + files = get_files_struct(p);
49917 + if (files == NULL ||
49918 + (p->signal && p->signal->tty == task->signal->tty)) {
49919 + if (files != NULL)
49920 + put_files_struct(files);
49921 + continue;
49922 + }
49923 + rcu_read_lock();
49924 + fdt = files_fdtable(files);
49925 + for (i=0; i < fdt->max_fds; i++) {
49926 + file = fcheck_files(files, i);
49927 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49928 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49929 + p3 = task;
49930 + while (p3->pid > 0) {
49931 + if (p3 == p)
49932 + break;
49933 + p3 = p3->real_parent;
49934 + }
49935 + if (p3 == p)
49936 + break;
49937 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49938 + gr_handle_alertkill(p);
49939 + rcu_read_unlock();
49940 + put_files_struct(files);
49941 + read_unlock(&tasklist_lock);
49942 + fput(our_file);
49943 + return 0;
49944 + }
49945 + }
49946 + rcu_read_unlock();
49947 + put_files_struct(files);
49948 + } while_each_thread(p2, p);
49949 + read_unlock(&tasklist_lock);
49950 +
49951 + fput(our_file);
49952 + return 1;
49953 +}
49954 +
49955 +ssize_t
49956 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49957 +{
49958 + struct gr_arg_wrapper uwrap;
49959 + unsigned char *sprole_salt = NULL;
49960 + unsigned char *sprole_sum = NULL;
49961 + int error = sizeof (struct gr_arg_wrapper);
49962 + int error2 = 0;
49963 +
49964 + mutex_lock(&gr_dev_mutex);
49965 +
49966 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49967 + error = -EPERM;
49968 + goto out;
49969 + }
49970 +
49971 + if (count != sizeof (struct gr_arg_wrapper)) {
49972 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49973 + error = -EINVAL;
49974 + goto out;
49975 + }
49976 +
49977 +
49978 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49979 + gr_auth_expires = 0;
49980 + gr_auth_attempts = 0;
49981 + }
49982 +
49983 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49984 + error = -EFAULT;
49985 + goto out;
49986 + }
49987 +
49988 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49989 + error = -EINVAL;
49990 + goto out;
49991 + }
49992 +
49993 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49994 + error = -EFAULT;
49995 + goto out;
49996 + }
49997 +
49998 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49999 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50000 + time_after(gr_auth_expires, get_seconds())) {
50001 + error = -EBUSY;
50002 + goto out;
50003 + }
50004 +
50005 + /* if a non-root user is trying to do anything other than use a special role,
50006 + do not attempt authentication and do not count towards authentication
50007 + locking
50008 + */
50009 +
50010 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50011 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50012 + current_uid()) {
50013 + error = -EPERM;
50014 + goto out;
50015 + }
50016 +
50017 + /* ensure pw and special role name are null terminated */
50018 +
50019 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50020 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50021 +
50022 + /* Okay.
50023 + * We have enough of the argument structure (we have yet
50024 + * to copy_from_user the tables themselves). Copy the tables
50025 + * only if we need them, i.e. for loading operations. */
50026 +
50027 + switch (gr_usermode->mode) {
50028 + case GR_STATUS:
50029 + if (gr_status & GR_READY) {
50030 + error = 1;
50031 + if (!gr_check_secure_terminal(current))
50032 + error = 3;
50033 + } else
50034 + error = 2;
50035 + goto out;
50036 + case GR_SHUTDOWN:
50037 + if ((gr_status & GR_READY)
50038 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50039 + pax_open_kernel();
50040 + gr_status &= ~GR_READY;
50041 + pax_close_kernel();
50042 +
50043 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50044 + free_variables();
50045 + memset(gr_usermode, 0, sizeof (struct gr_arg));
50046 + memset(gr_system_salt, 0, GR_SALT_LEN);
50047 + memset(gr_system_sum, 0, GR_SHA_LEN);
50048 + } else if (gr_status & GR_READY) {
50049 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50050 + error = -EPERM;
50051 + } else {
50052 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50053 + error = -EAGAIN;
50054 + }
50055 + break;
50056 + case GR_ENABLE:
50057 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50058 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50059 + else {
50060 + if (gr_status & GR_READY)
50061 + error = -EAGAIN;
50062 + else
50063 + error = error2;
50064 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50065 + }
50066 + break;
50067 + case GR_RELOAD:
50068 + if (!(gr_status & GR_READY)) {
50069 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50070 + error = -EAGAIN;
50071 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50072 + lock_kernel();
50073 +
50074 + pax_open_kernel();
50075 + gr_status &= ~GR_READY;
50076 + pax_close_kernel();
50077 +
50078 + free_variables();
50079 + if (!(error2 = gracl_init(gr_usermode))) {
50080 + unlock_kernel();
50081 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50082 + } else {
50083 + unlock_kernel();
50084 + error = error2;
50085 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50086 + }
50087 + } else {
50088 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50089 + error = -EPERM;
50090 + }
50091 + break;
50092 + case GR_SEGVMOD:
50093 + if (unlikely(!(gr_status & GR_READY))) {
50094 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50095 + error = -EAGAIN;
50096 + break;
50097 + }
50098 +
50099 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50100 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50101 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50102 + struct acl_subject_label *segvacl;
50103 + segvacl =
50104 + lookup_acl_subj_label(gr_usermode->segv_inode,
50105 + gr_usermode->segv_device,
50106 + current->role);
50107 + if (segvacl) {
50108 + segvacl->crashes = 0;
50109 + segvacl->expires = 0;
50110 + }
50111 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50112 + gr_remove_uid(gr_usermode->segv_uid);
50113 + }
50114 + } else {
50115 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50116 + error = -EPERM;
50117 + }
50118 + break;
50119 + case GR_SPROLE:
50120 + case GR_SPROLEPAM:
50121 + if (unlikely(!(gr_status & GR_READY))) {
50122 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50123 + error = -EAGAIN;
50124 + break;
50125 + }
50126 +
50127 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50128 + current->role->expires = 0;
50129 + current->role->auth_attempts = 0;
50130 + }
50131 +
50132 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50133 + time_after(current->role->expires, get_seconds())) {
50134 + error = -EBUSY;
50135 + goto out;
50136 + }
50137 +
50138 + if (lookup_special_role_auth
50139 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50140 + && ((!sprole_salt && !sprole_sum)
50141 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50142 + char *p = "";
50143 + assign_special_role(gr_usermode->sp_role);
50144 + read_lock(&tasklist_lock);
50145 + if (current->real_parent)
50146 + p = current->real_parent->role->rolename;
50147 + read_unlock(&tasklist_lock);
50148 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50149 + p, acl_sp_role_value);
50150 + } else {
50151 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50152 + error = -EPERM;
50153 + if(!(current->role->auth_attempts++))
50154 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50155 +
50156 + goto out;
50157 + }
50158 + break;
50159 + case GR_UNSPROLE:
50160 + if (unlikely(!(gr_status & GR_READY))) {
50161 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50162 + error = -EAGAIN;
50163 + break;
50164 + }
50165 +
50166 + if (current->role->roletype & GR_ROLE_SPECIAL) {
50167 + char *p = "";
50168 + int i = 0;
50169 +
50170 + read_lock(&tasklist_lock);
50171 + if (current->real_parent) {
50172 + p = current->real_parent->role->rolename;
50173 + i = current->real_parent->acl_role_id;
50174 + }
50175 + read_unlock(&tasklist_lock);
50176 +
50177 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50178 + gr_set_acls(1);
50179 + } else {
50180 + error = -EPERM;
50181 + goto out;
50182 + }
50183 + break;
50184 + default:
50185 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50186 + error = -EINVAL;
50187 + break;
50188 + }
50189 +
50190 + if (error != -EPERM)
50191 + goto out;
50192 +
50193 + if(!(gr_auth_attempts++))
50194 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50195 +
50196 + out:
50197 + mutex_unlock(&gr_dev_mutex);
50198 + return error;
50199 +}
50200 +
50201 +/* must be called with
50202 + rcu_read_lock();
50203 + read_lock(&tasklist_lock);
50204 + read_lock(&grsec_exec_file_lock);
50205 +*/
50206 +int gr_apply_subject_to_task(struct task_struct *task)
50207 +{
50208 + struct acl_object_label *obj;
50209 + char *tmpname;
50210 + struct acl_subject_label *tmpsubj;
50211 + struct file *filp;
50212 + struct name_entry *nmatch;
50213 +
50214 + filp = task->exec_file;
50215 + if (filp == NULL)
50216 + return 0;
50217 +
50218 + /* the following is to apply the correct subject
50219 + on binaries running when the RBAC system
50220 + is enabled, when the binaries have been
50221 + replaced or deleted since their execution
50222 + -----
50223 + when the RBAC system starts, the inode/dev
50224 + from exec_file will be one the RBAC system
50225 + is unaware of. It only knows the inode/dev
50226 + of the present file on disk, or the absence
50227 + of it.
50228 + */
50229 + preempt_disable();
50230 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50231 +
50232 + nmatch = lookup_name_entry(tmpname);
50233 + preempt_enable();
50234 + tmpsubj = NULL;
50235 + if (nmatch) {
50236 + if (nmatch->deleted)
50237 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50238 + else
50239 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50240 + if (tmpsubj != NULL)
50241 + task->acl = tmpsubj;
50242 + }
50243 + if (tmpsubj == NULL)
50244 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50245 + task->role);
50246 + if (task->acl) {
50247 + task->is_writable = 0;
50248 + /* ignore additional mmap checks for processes that are writable
50249 + by the default ACL */
50250 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50251 + if (unlikely(obj->mode & GR_WRITE))
50252 + task->is_writable = 1;
50253 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50254 + if (unlikely(obj->mode & GR_WRITE))
50255 + task->is_writable = 1;
50256 +
50257 + gr_set_proc_res(task);
50258 +
50259 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50260 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50261 +#endif
50262 + } else {
50263 + return 1;
50264 + }
50265 +
50266 + return 0;
50267 +}
50268 +
50269 +int
50270 +gr_set_acls(const int type)
50271 +{
50272 + struct task_struct *task, *task2;
50273 + struct acl_role_label *role = current->role;
50274 + __u16 acl_role_id = current->acl_role_id;
50275 + const struct cred *cred;
50276 + int ret;
50277 +
50278 + rcu_read_lock();
50279 + read_lock(&tasklist_lock);
50280 + read_lock(&grsec_exec_file_lock);
50281 + do_each_thread(task2, task) {
50282 + /* check to see if we're called from the exit handler,
50283 + if so, only replace ACLs that have inherited the admin
50284 + ACL */
50285 +
50286 + if (type && (task->role != role ||
50287 + task->acl_role_id != acl_role_id))
50288 + continue;
50289 +
50290 + task->acl_role_id = 0;
50291 + task->acl_sp_role = 0;
50292 +
50293 + if (task->exec_file) {
50294 + cred = __task_cred(task);
50295 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50296 +
50297 + ret = gr_apply_subject_to_task(task);
50298 + if (ret) {
50299 + read_unlock(&grsec_exec_file_lock);
50300 + read_unlock(&tasklist_lock);
50301 + rcu_read_unlock();
50302 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50303 + return ret;
50304 + }
50305 + } else {
50306 + // it's a kernel process
50307 + task->role = kernel_role;
50308 + task->acl = kernel_role->root_label;
50309 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50310 + task->acl->mode &= ~GR_PROCFIND;
50311 +#endif
50312 + }
50313 + } while_each_thread(task2, task);
50314 + read_unlock(&grsec_exec_file_lock);
50315 + read_unlock(&tasklist_lock);
50316 + rcu_read_unlock();
50317 +
50318 + return 0;
50319 +}
50320 +
50321 +void
50322 +gr_learn_resource(const struct task_struct *task,
50323 + const int res, const unsigned long wanted, const int gt)
50324 +{
50325 + struct acl_subject_label *acl;
50326 + const struct cred *cred;
50327 +
50328 + if (unlikely((gr_status & GR_READY) &&
50329 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50330 + goto skip_reslog;
50331 +
50332 +#ifdef CONFIG_GRKERNSEC_RESLOG
50333 + gr_log_resource(task, res, wanted, gt);
50334 +#endif
50335 + skip_reslog:
50336 +
50337 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50338 + return;
50339 +
50340 + acl = task->acl;
50341 +
50342 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50343 + !(acl->resmask & (1 << (unsigned short) res))))
50344 + return;
50345 +
50346 + if (wanted >= acl->res[res].rlim_cur) {
50347 + unsigned long res_add;
50348 +
50349 + res_add = wanted;
50350 + switch (res) {
50351 + case RLIMIT_CPU:
50352 + res_add += GR_RLIM_CPU_BUMP;
50353 + break;
50354 + case RLIMIT_FSIZE:
50355 + res_add += GR_RLIM_FSIZE_BUMP;
50356 + break;
50357 + case RLIMIT_DATA:
50358 + res_add += GR_RLIM_DATA_BUMP;
50359 + break;
50360 + case RLIMIT_STACK:
50361 + res_add += GR_RLIM_STACK_BUMP;
50362 + break;
50363 + case RLIMIT_CORE:
50364 + res_add += GR_RLIM_CORE_BUMP;
50365 + break;
50366 + case RLIMIT_RSS:
50367 + res_add += GR_RLIM_RSS_BUMP;
50368 + break;
50369 + case RLIMIT_NPROC:
50370 + res_add += GR_RLIM_NPROC_BUMP;
50371 + break;
50372 + case RLIMIT_NOFILE:
50373 + res_add += GR_RLIM_NOFILE_BUMP;
50374 + break;
50375 + case RLIMIT_MEMLOCK:
50376 + res_add += GR_RLIM_MEMLOCK_BUMP;
50377 + break;
50378 + case RLIMIT_AS:
50379 + res_add += GR_RLIM_AS_BUMP;
50380 + break;
50381 + case RLIMIT_LOCKS:
50382 + res_add += GR_RLIM_LOCKS_BUMP;
50383 + break;
50384 + case RLIMIT_SIGPENDING:
50385 + res_add += GR_RLIM_SIGPENDING_BUMP;
50386 + break;
50387 + case RLIMIT_MSGQUEUE:
50388 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50389 + break;
50390 + case RLIMIT_NICE:
50391 + res_add += GR_RLIM_NICE_BUMP;
50392 + break;
50393 + case RLIMIT_RTPRIO:
50394 + res_add += GR_RLIM_RTPRIO_BUMP;
50395 + break;
50396 + case RLIMIT_RTTIME:
50397 + res_add += GR_RLIM_RTTIME_BUMP;
50398 + break;
50399 + }
50400 +
50401 + acl->res[res].rlim_cur = res_add;
50402 +
50403 + if (wanted > acl->res[res].rlim_max)
50404 + acl->res[res].rlim_max = res_add;
50405 +
50406 + /* only log the subject filename, since resource logging is supported for
50407 + single-subject learning only */
50408 + rcu_read_lock();
50409 + cred = __task_cred(task);
50410 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50411 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50412 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50413 + "", (unsigned long) res, &task->signal->saved_ip);
50414 + rcu_read_unlock();
50415 + }
50416 +
50417 + return;
50418 +}
50419 +
50420 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50421 +void
50422 +pax_set_initial_flags(struct linux_binprm *bprm)
50423 +{
50424 + struct task_struct *task = current;
50425 + struct acl_subject_label *proc;
50426 + unsigned long flags;
50427 +
50428 + if (unlikely(!(gr_status & GR_READY)))
50429 + return;
50430 +
50431 + flags = pax_get_flags(task);
50432 +
50433 + proc = task->acl;
50434 +
50435 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50436 + flags &= ~MF_PAX_PAGEEXEC;
50437 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50438 + flags &= ~MF_PAX_SEGMEXEC;
50439 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50440 + flags &= ~MF_PAX_RANDMMAP;
50441 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50442 + flags &= ~MF_PAX_EMUTRAMP;
50443 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50444 + flags &= ~MF_PAX_MPROTECT;
50445 +
50446 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50447 + flags |= MF_PAX_PAGEEXEC;
50448 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50449 + flags |= MF_PAX_SEGMEXEC;
50450 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50451 + flags |= MF_PAX_RANDMMAP;
50452 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50453 + flags |= MF_PAX_EMUTRAMP;
50454 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50455 + flags |= MF_PAX_MPROTECT;
50456 +
50457 + pax_set_flags(task, flags);
50458 +
50459 + return;
50460 +}
50461 +#endif
50462 +
50463 +#ifdef CONFIG_SYSCTL
50464 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50465 + system to save 35kb of memory */
50466 +
50467 +/* we modify the passed in filename, but adjust it back before returning */
50468 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50469 +{
50470 + struct name_entry *nmatch;
50471 + char *p, *lastp = NULL;
50472 + struct acl_object_label *obj = NULL, *tmp;
50473 + struct acl_subject_label *tmpsubj;
50474 + char c = '\0';
50475 +
50476 + read_lock(&gr_inode_lock);
50477 +
50478 + p = name + len - 1;
50479 + do {
50480 + nmatch = lookup_name_entry(name);
50481 + if (lastp != NULL)
50482 + *lastp = c;
50483 +
50484 + if (nmatch == NULL)
50485 + goto next_component;
50486 + tmpsubj = current->acl;
50487 + do {
50488 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50489 + if (obj != NULL) {
50490 + tmp = obj->globbed;
50491 + while (tmp) {
50492 + if (!glob_match(tmp->filename, name)) {
50493 + obj = tmp;
50494 + goto found_obj;
50495 + }
50496 + tmp = tmp->next;
50497 + }
50498 + goto found_obj;
50499 + }
50500 + } while ((tmpsubj = tmpsubj->parent_subject));
50501 +next_component:
50502 + /* end case */
50503 + if (p == name)
50504 + break;
50505 +
50506 + while (*p != '/')
50507 + p--;
50508 + if (p == name)
50509 + lastp = p + 1;
50510 + else {
50511 + lastp = p;
50512 + p--;
50513 + }
50514 + c = *lastp;
50515 + *lastp = '\0';
50516 + } while (1);
50517 +found_obj:
50518 + read_unlock(&gr_inode_lock);
50519 + /* obj returned will always be non-null */
50520 + return obj;
50521 +}
50522 +
50523 +/* returns 0 when allowing, non-zero on error
50524 + op of 0 is used for readdir, so we don't log the names of hidden files
50525 +*/
50526 +__u32
50527 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50528 +{
50529 + ctl_table *tmp;
50530 + const char *proc_sys = "/proc/sys";
50531 + char *path;
50532 + struct acl_object_label *obj;
50533 + unsigned short len = 0, pos = 0, depth = 0, i;
50534 + __u32 err = 0;
50535 + __u32 mode = 0;
50536 +
50537 + if (unlikely(!(gr_status & GR_READY)))
50538 + return 0;
50539 +
50540 + /* for now, ignore operations on non-sysctl entries if it's not a
50541 + readdir */
50542 + if (table->child != NULL && op != 0)
50543 + return 0;
50544 +
50545 + mode |= GR_FIND;
50546 + /* it's only a read if it's an entry, read on dirs is for readdir */
50547 + if (op & MAY_READ)
50548 + mode |= GR_READ;
50549 + if (op & MAY_WRITE)
50550 + mode |= GR_WRITE;
50551 +
50552 + preempt_disable();
50553 +
50554 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50555 +
50556 + /* it's only a read/write if it's an actual entry, not a dir
50557 + (which are opened for readdir)
50558 + */
50559 +
50560 + /* convert the requested sysctl entry into a pathname */
50561 +
50562 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50563 + len += strlen(tmp->procname);
50564 + len++;
50565 + depth++;
50566 + }
50567 +
50568 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50569 + /* deny */
50570 + goto out;
50571 + }
50572 +
50573 + memset(path, 0, PAGE_SIZE);
50574 +
50575 + memcpy(path, proc_sys, strlen(proc_sys));
50576 +
50577 + pos += strlen(proc_sys);
50578 +
50579 + for (; depth > 0; depth--) {
50580 + path[pos] = '/';
50581 + pos++;
50582 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50583 + if (depth == i) {
50584 + memcpy(path + pos, tmp->procname,
50585 + strlen(tmp->procname));
50586 + pos += strlen(tmp->procname);
50587 + }
50588 + i++;
50589 + }
50590 + }
50591 +
50592 + obj = gr_lookup_by_name(path, pos);
50593 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50594 +
50595 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50596 + ((err & mode) != mode))) {
50597 + __u32 new_mode = mode;
50598 +
50599 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50600 +
50601 + err = 0;
50602 + gr_log_learn_sysctl(path, new_mode);
50603 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50604 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50605 + err = -ENOENT;
50606 + } else if (!(err & GR_FIND)) {
50607 + err = -ENOENT;
50608 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50609 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50610 + path, (mode & GR_READ) ? " reading" : "",
50611 + (mode & GR_WRITE) ? " writing" : "");
50612 + err = -EACCES;
50613 + } else if ((err & mode) != mode) {
50614 + err = -EACCES;
50615 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50616 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50617 + path, (mode & GR_READ) ? " reading" : "",
50618 + (mode & GR_WRITE) ? " writing" : "");
50619 + err = 0;
50620 + } else
50621 + err = 0;
50622 +
50623 + out:
50624 + preempt_enable();
50625 +
50626 + return err;
50627 +}
50628 +#endif
50629 +
50630 +int
50631 +gr_handle_proc_ptrace(struct task_struct *task)
50632 +{
50633 + struct file *filp;
50634 + struct task_struct *tmp = task;
50635 + struct task_struct *curtemp = current;
50636 + __u32 retmode;
50637 +
50638 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50639 + if (unlikely(!(gr_status & GR_READY)))
50640 + return 0;
50641 +#endif
50642 +
50643 + read_lock(&tasklist_lock);
50644 + read_lock(&grsec_exec_file_lock);
50645 + filp = task->exec_file;
50646 +
50647 + while (tmp->pid > 0) {
50648 + if (tmp == curtemp)
50649 + break;
50650 + tmp = tmp->real_parent;
50651 + }
50652 +
50653 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50654 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50655 + read_unlock(&grsec_exec_file_lock);
50656 + read_unlock(&tasklist_lock);
50657 + return 1;
50658 + }
50659 +
50660 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50661 + if (!(gr_status & GR_READY)) {
50662 + read_unlock(&grsec_exec_file_lock);
50663 + read_unlock(&tasklist_lock);
50664 + return 0;
50665 + }
50666 +#endif
50667 +
50668 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50669 + read_unlock(&grsec_exec_file_lock);
50670 + read_unlock(&tasklist_lock);
50671 +
50672 + if (retmode & GR_NOPTRACE)
50673 + return 1;
50674 +
50675 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50676 + && (current->acl != task->acl || (current->acl != current->role->root_label
50677 + && current->pid != task->pid)))
50678 + return 1;
50679 +
50680 + return 0;
50681 +}
50682 +
50683 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50684 +{
50685 + if (unlikely(!(gr_status & GR_READY)))
50686 + return;
50687 +
50688 + if (!(current->role->roletype & GR_ROLE_GOD))
50689 + return;
50690 +
50691 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50692 + p->role->rolename, gr_task_roletype_to_char(p),
50693 + p->acl->filename);
50694 +}
50695 +
50696 +int
50697 +gr_handle_ptrace(struct task_struct *task, const long request)
50698 +{
50699 + struct task_struct *tmp = task;
50700 + struct task_struct *curtemp = current;
50701 + __u32 retmode;
50702 +
50703 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50704 + if (unlikely(!(gr_status & GR_READY)))
50705 + return 0;
50706 +#endif
50707 +
50708 + read_lock(&tasklist_lock);
50709 + while (tmp->pid > 0) {
50710 + if (tmp == curtemp)
50711 + break;
50712 + tmp = tmp->real_parent;
50713 + }
50714 +
50715 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50716 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50717 + read_unlock(&tasklist_lock);
50718 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50719 + return 1;
50720 + }
50721 + read_unlock(&tasklist_lock);
50722 +
50723 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50724 + if (!(gr_status & GR_READY))
50725 + return 0;
50726 +#endif
50727 +
50728 + read_lock(&grsec_exec_file_lock);
50729 + if (unlikely(!task->exec_file)) {
50730 + read_unlock(&grsec_exec_file_lock);
50731 + return 0;
50732 + }
50733 +
50734 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50735 + read_unlock(&grsec_exec_file_lock);
50736 +
50737 + if (retmode & GR_NOPTRACE) {
50738 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50739 + return 1;
50740 + }
50741 +
50742 + if (retmode & GR_PTRACERD) {
50743 + switch (request) {
50744 + case PTRACE_POKETEXT:
50745 + case PTRACE_POKEDATA:
50746 + case PTRACE_POKEUSR:
50747 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50748 + case PTRACE_SETREGS:
50749 + case PTRACE_SETFPREGS:
50750 +#endif
50751 +#ifdef CONFIG_X86
50752 + case PTRACE_SETFPXREGS:
50753 +#endif
50754 +#ifdef CONFIG_ALTIVEC
50755 + case PTRACE_SETVRREGS:
50756 +#endif
50757 + return 1;
50758 + default:
50759 + return 0;
50760 + }
50761 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
50762 + !(current->role->roletype & GR_ROLE_GOD) &&
50763 + (current->acl != task->acl)) {
50764 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50765 + return 1;
50766 + }
50767 +
50768 + return 0;
50769 +}
50770 +
50771 +static int is_writable_mmap(const struct file *filp)
50772 +{
50773 + struct task_struct *task = current;
50774 + struct acl_object_label *obj, *obj2;
50775 +
50776 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50777 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50778 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50779 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50780 + task->role->root_label);
50781 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50782 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50783 + return 1;
50784 + }
50785 + }
50786 + return 0;
50787 +}
50788 +
50789 +int
50790 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50791 +{
50792 + __u32 mode;
50793 +
50794 + if (unlikely(!file || !(prot & PROT_EXEC)))
50795 + return 1;
50796 +
50797 + if (is_writable_mmap(file))
50798 + return 0;
50799 +
50800 + mode =
50801 + gr_search_file(file->f_path.dentry,
50802 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50803 + file->f_path.mnt);
50804 +
50805 + if (!gr_tpe_allow(file))
50806 + return 0;
50807 +
50808 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50809 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50810 + return 0;
50811 + } else if (unlikely(!(mode & GR_EXEC))) {
50812 + return 0;
50813 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50814 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50815 + return 1;
50816 + }
50817 +
50818 + return 1;
50819 +}
50820 +
50821 +int
50822 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50823 +{
50824 + __u32 mode;
50825 +
50826 + if (unlikely(!file || !(prot & PROT_EXEC)))
50827 + return 1;
50828 +
50829 + if (is_writable_mmap(file))
50830 + return 0;
50831 +
50832 + mode =
50833 + gr_search_file(file->f_path.dentry,
50834 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50835 + file->f_path.mnt);
50836 +
50837 + if (!gr_tpe_allow(file))
50838 + return 0;
50839 +
50840 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50841 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50842 + return 0;
50843 + } else if (unlikely(!(mode & GR_EXEC))) {
50844 + return 0;
50845 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50846 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50847 + return 1;
50848 + }
50849 +
50850 + return 1;
50851 +}
50852 +
50853 +void
50854 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50855 +{
50856 + unsigned long runtime;
50857 + unsigned long cputime;
50858 + unsigned int wday, cday;
50859 + __u8 whr, chr;
50860 + __u8 wmin, cmin;
50861 + __u8 wsec, csec;
50862 + struct timespec timeval;
50863 +
50864 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50865 + !(task->acl->mode & GR_PROCACCT)))
50866 + return;
50867 +
50868 + do_posix_clock_monotonic_gettime(&timeval);
50869 + runtime = timeval.tv_sec - task->start_time.tv_sec;
50870 + wday = runtime / (3600 * 24);
50871 + runtime -= wday * (3600 * 24);
50872 + whr = runtime / 3600;
50873 + runtime -= whr * 3600;
50874 + wmin = runtime / 60;
50875 + runtime -= wmin * 60;
50876 + wsec = runtime;
50877 +
50878 + cputime = (task->utime + task->stime) / HZ;
50879 + cday = cputime / (3600 * 24);
50880 + cputime -= cday * (3600 * 24);
50881 + chr = cputime / 3600;
50882 + cputime -= chr * 3600;
50883 + cmin = cputime / 60;
50884 + cputime -= cmin * 60;
50885 + csec = cputime;
50886 +
50887 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50888 +
50889 + return;
50890 +}
50891 +
50892 +void gr_set_kernel_label(struct task_struct *task)
50893 +{
50894 + if (gr_status & GR_READY) {
50895 + task->role = kernel_role;
50896 + task->acl = kernel_role->root_label;
50897 + }
50898 + return;
50899 +}
50900 +
50901 +#ifdef CONFIG_TASKSTATS
50902 +int gr_is_taskstats_denied(int pid)
50903 +{
50904 + struct task_struct *task;
50905 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50906 + const struct cred *cred;
50907 +#endif
50908 + int ret = 0;
50909 +
50910 + /* restrict taskstats viewing to un-chrooted root users
50911 + who have the 'view' subject flag if the RBAC system is enabled
50912 + */
50913 +
50914 + rcu_read_lock();
50915 + read_lock(&tasklist_lock);
50916 + task = find_task_by_vpid(pid);
50917 + if (task) {
50918 +#ifdef CONFIG_GRKERNSEC_CHROOT
50919 + if (proc_is_chrooted(task))
50920 + ret = -EACCES;
50921 +#endif
50922 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50923 + cred = __task_cred(task);
50924 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50925 + if (cred->uid != 0)
50926 + ret = -EACCES;
50927 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50928 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50929 + ret = -EACCES;
50930 +#endif
50931 +#endif
50932 + if (gr_status & GR_READY) {
50933 + if (!(task->acl->mode & GR_VIEW))
50934 + ret = -EACCES;
50935 + }
50936 + } else
50937 + ret = -ENOENT;
50938 +
50939 + read_unlock(&tasklist_lock);
50940 + rcu_read_unlock();
50941 +
50942 + return ret;
50943 +}
50944 +#endif
50945 +
50946 +/* AUXV entries are filled via a descendant of search_binary_handler
50947 + after we've already applied the subject for the target
50948 +*/
50949 +int gr_acl_enable_at_secure(void)
50950 +{
50951 + if (unlikely(!(gr_status & GR_READY)))
50952 + return 0;
50953 +
50954 + if (current->acl->mode & GR_ATSECURE)
50955 + return 1;
50956 +
50957 + return 0;
50958 +}
50959 +
50960 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50961 +{
50962 + struct task_struct *task = current;
50963 + struct dentry *dentry = file->f_path.dentry;
50964 + struct vfsmount *mnt = file->f_path.mnt;
50965 + struct acl_object_label *obj, *tmp;
50966 + struct acl_subject_label *subj;
50967 + unsigned int bufsize;
50968 + int is_not_root;
50969 + char *path;
50970 + dev_t dev = __get_dev(dentry);
50971 +
50972 + if (unlikely(!(gr_status & GR_READY)))
50973 + return 1;
50974 +
50975 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50976 + return 1;
50977 +
50978 + /* ignore Eric Biederman */
50979 + if (IS_PRIVATE(dentry->d_inode))
50980 + return 1;
50981 +
50982 + subj = task->acl;
50983 + do {
50984 + obj = lookup_acl_obj_label(ino, dev, subj);
50985 + if (obj != NULL)
50986 + return (obj->mode & GR_FIND) ? 1 : 0;
50987 + } while ((subj = subj->parent_subject));
50988 +
50989 + /* this is purely an optimization since we're looking for an object
50990 + for the directory we're doing a readdir on
50991 + if it's possible for any globbed object to match the entry we're
50992 + filling into the directory, then the object we find here will be
50993 + an anchor point with attached globbed objects
50994 + */
50995 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50996 + if (obj->globbed == NULL)
50997 + return (obj->mode & GR_FIND) ? 1 : 0;
50998 +
50999 + is_not_root = ((obj->filename[0] == '/') &&
51000 + (obj->filename[1] == '\0')) ? 0 : 1;
51001 + bufsize = PAGE_SIZE - namelen - is_not_root;
51002 +
51003 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
51004 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51005 + return 1;
51006 +
51007 + preempt_disable();
51008 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51009 + bufsize);
51010 +
51011 + bufsize = strlen(path);
51012 +
51013 + /* if base is "/", don't append an additional slash */
51014 + if (is_not_root)
51015 + *(path + bufsize) = '/';
51016 + memcpy(path + bufsize + is_not_root, name, namelen);
51017 + *(path + bufsize + namelen + is_not_root) = '\0';
51018 +
51019 + tmp = obj->globbed;
51020 + while (tmp) {
51021 + if (!glob_match(tmp->filename, path)) {
51022 + preempt_enable();
51023 + return (tmp->mode & GR_FIND) ? 1 : 0;
51024 + }
51025 + tmp = tmp->next;
51026 + }
51027 + preempt_enable();
51028 + return (obj->mode & GR_FIND) ? 1 : 0;
51029 +}
51030 +
51031 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51032 +EXPORT_SYMBOL(gr_acl_is_enabled);
51033 +#endif
51034 +EXPORT_SYMBOL(gr_learn_resource);
51035 +EXPORT_SYMBOL(gr_set_kernel_label);
51036 +#ifdef CONFIG_SECURITY
51037 +EXPORT_SYMBOL(gr_check_user_change);
51038 +EXPORT_SYMBOL(gr_check_group_change);
51039 +#endif
51040 +
51041 diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
51042 --- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51043 +++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51044 @@ -0,0 +1,138 @@
51045 +#include <linux/kernel.h>
51046 +#include <linux/module.h>
51047 +#include <linux/sched.h>
51048 +#include <linux/gracl.h>
51049 +#include <linux/grsecurity.h>
51050 +#include <linux/grinternal.h>
51051 +
51052 +static const char *captab_log[] = {
51053 + "CAP_CHOWN",
51054 + "CAP_DAC_OVERRIDE",
51055 + "CAP_DAC_READ_SEARCH",
51056 + "CAP_FOWNER",
51057 + "CAP_FSETID",
51058 + "CAP_KILL",
51059 + "CAP_SETGID",
51060 + "CAP_SETUID",
51061 + "CAP_SETPCAP",
51062 + "CAP_LINUX_IMMUTABLE",
51063 + "CAP_NET_BIND_SERVICE",
51064 + "CAP_NET_BROADCAST",
51065 + "CAP_NET_ADMIN",
51066 + "CAP_NET_RAW",
51067 + "CAP_IPC_LOCK",
51068 + "CAP_IPC_OWNER",
51069 + "CAP_SYS_MODULE",
51070 + "CAP_SYS_RAWIO",
51071 + "CAP_SYS_CHROOT",
51072 + "CAP_SYS_PTRACE",
51073 + "CAP_SYS_PACCT",
51074 + "CAP_SYS_ADMIN",
51075 + "CAP_SYS_BOOT",
51076 + "CAP_SYS_NICE",
51077 + "CAP_SYS_RESOURCE",
51078 + "CAP_SYS_TIME",
51079 + "CAP_SYS_TTY_CONFIG",
51080 + "CAP_MKNOD",
51081 + "CAP_LEASE",
51082 + "CAP_AUDIT_WRITE",
51083 + "CAP_AUDIT_CONTROL",
51084 + "CAP_SETFCAP",
51085 + "CAP_MAC_OVERRIDE",
51086 + "CAP_MAC_ADMIN"
51087 +};
51088 +
51089 +EXPORT_SYMBOL(gr_is_capable);
51090 +EXPORT_SYMBOL(gr_is_capable_nolog);
51091 +
51092 +int
51093 +gr_is_capable(const int cap)
51094 +{
51095 + struct task_struct *task = current;
51096 + const struct cred *cred = current_cred();
51097 + struct acl_subject_label *curracl;
51098 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51099 + kernel_cap_t cap_audit = __cap_empty_set;
51100 +
51101 + if (!gr_acl_is_enabled())
51102 + return 1;
51103 +
51104 + curracl = task->acl;
51105 +
51106 + cap_drop = curracl->cap_lower;
51107 + cap_mask = curracl->cap_mask;
51108 + cap_audit = curracl->cap_invert_audit;
51109 +
51110 + while ((curracl = curracl->parent_subject)) {
51111 + /* if the cap isn't specified in the current computed mask but is specified in the
51112 + current level subject, and is lowered in the current level subject, then add
51113 + it to the set of dropped capabilities
51114 + otherwise, add the current level subject's mask to the current computed mask
51115 + */
51116 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51117 + cap_raise(cap_mask, cap);
51118 + if (cap_raised(curracl->cap_lower, cap))
51119 + cap_raise(cap_drop, cap);
51120 + if (cap_raised(curracl->cap_invert_audit, cap))
51121 + cap_raise(cap_audit, cap);
51122 + }
51123 + }
51124 +
51125 + if (!cap_raised(cap_drop, cap)) {
51126 + if (cap_raised(cap_audit, cap))
51127 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51128 + return 1;
51129 + }
51130 +
51131 + curracl = task->acl;
51132 +
51133 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51134 + && cap_raised(cred->cap_effective, cap)) {
51135 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51136 + task->role->roletype, cred->uid,
51137 + cred->gid, task->exec_file ?
51138 + gr_to_filename(task->exec_file->f_path.dentry,
51139 + task->exec_file->f_path.mnt) : curracl->filename,
51140 + curracl->filename, 0UL,
51141 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51142 + return 1;
51143 + }
51144 +
51145 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51146 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51147 + return 0;
51148 +}
51149 +
51150 +int
51151 +gr_is_capable_nolog(const int cap)
51152 +{
51153 + struct acl_subject_label *curracl;
51154 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51155 +
51156 + if (!gr_acl_is_enabled())
51157 + return 1;
51158 +
51159 + curracl = current->acl;
51160 +
51161 + cap_drop = curracl->cap_lower;
51162 + cap_mask = curracl->cap_mask;
51163 +
51164 + while ((curracl = curracl->parent_subject)) {
51165 + /* if the cap isn't specified in the current computed mask but is specified in the
51166 + current level subject, and is lowered in the current level subject, then add
51167 + it to the set of dropped capabilities
51168 + otherwise, add the current level subject's mask to the current computed mask
51169 + */
51170 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51171 + cap_raise(cap_mask, cap);
51172 + if (cap_raised(curracl->cap_lower, cap))
51173 + cap_raise(cap_drop, cap);
51174 + }
51175 + }
51176 +
51177 + if (!cap_raised(cap_drop, cap))
51178 + return 1;
51179 +
51180 + return 0;
51181 +}
51182 +
51183 diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
51184 --- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51185 +++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51186 @@ -0,0 +1,431 @@
51187 +#include <linux/kernel.h>
51188 +#include <linux/sched.h>
51189 +#include <linux/types.h>
51190 +#include <linux/fs.h>
51191 +#include <linux/file.h>
51192 +#include <linux/stat.h>
51193 +#include <linux/grsecurity.h>
51194 +#include <linux/grinternal.h>
51195 +#include <linux/gracl.h>
51196 +
51197 +__u32
51198 +gr_acl_handle_hidden_file(const struct dentry * dentry,
51199 + const struct vfsmount * mnt)
51200 +{
51201 + __u32 mode;
51202 +
51203 + if (unlikely(!dentry->d_inode))
51204 + return GR_FIND;
51205 +
51206 + mode =
51207 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51208 +
51209 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51210 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51211 + return mode;
51212 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51213 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51214 + return 0;
51215 + } else if (unlikely(!(mode & GR_FIND)))
51216 + return 0;
51217 +
51218 + return GR_FIND;
51219 +}
51220 +
51221 +__u32
51222 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51223 + const int fmode)
51224 +{
51225 + __u32 reqmode = GR_FIND;
51226 + __u32 mode;
51227 +
51228 + if (unlikely(!dentry->d_inode))
51229 + return reqmode;
51230 +
51231 + if (unlikely(fmode & O_APPEND))
51232 + reqmode |= GR_APPEND;
51233 + else if (unlikely(fmode & FMODE_WRITE))
51234 + reqmode |= GR_WRITE;
51235 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51236 + reqmode |= GR_READ;
51237 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51238 + reqmode &= ~GR_READ;
51239 + mode =
51240 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51241 + mnt);
51242 +
51243 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51244 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51245 + reqmode & GR_READ ? " reading" : "",
51246 + reqmode & GR_WRITE ? " writing" : reqmode &
51247 + GR_APPEND ? " appending" : "");
51248 + return reqmode;
51249 + } else
51250 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51251 + {
51252 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51253 + reqmode & GR_READ ? " reading" : "",
51254 + reqmode & GR_WRITE ? " writing" : reqmode &
51255 + GR_APPEND ? " appending" : "");
51256 + return 0;
51257 + } else if (unlikely((mode & reqmode) != reqmode))
51258 + return 0;
51259 +
51260 + return reqmode;
51261 +}
51262 +
51263 +__u32
51264 +gr_acl_handle_creat(const struct dentry * dentry,
51265 + const struct dentry * p_dentry,
51266 + const struct vfsmount * p_mnt, const int fmode,
51267 + const int imode)
51268 +{
51269 + __u32 reqmode = GR_WRITE | GR_CREATE;
51270 + __u32 mode;
51271 +
51272 + if (unlikely(fmode & O_APPEND))
51273 + reqmode |= GR_APPEND;
51274 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51275 + reqmode |= GR_READ;
51276 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51277 + reqmode |= GR_SETID;
51278 +
51279 + mode =
51280 + gr_check_create(dentry, p_dentry, p_mnt,
51281 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51282 +
51283 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51284 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51285 + reqmode & GR_READ ? " reading" : "",
51286 + reqmode & GR_WRITE ? " writing" : reqmode &
51287 + GR_APPEND ? " appending" : "");
51288 + return reqmode;
51289 + } else
51290 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51291 + {
51292 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51293 + reqmode & GR_READ ? " reading" : "",
51294 + reqmode & GR_WRITE ? " writing" : reqmode &
51295 + GR_APPEND ? " appending" : "");
51296 + return 0;
51297 + } else if (unlikely((mode & reqmode) != reqmode))
51298 + return 0;
51299 +
51300 + return reqmode;
51301 +}
51302 +
51303 +__u32
51304 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51305 + const int fmode)
51306 +{
51307 + __u32 mode, reqmode = GR_FIND;
51308 +
51309 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51310 + reqmode |= GR_EXEC;
51311 + if (fmode & S_IWOTH)
51312 + reqmode |= GR_WRITE;
51313 + if (fmode & S_IROTH)
51314 + reqmode |= GR_READ;
51315 +
51316 + mode =
51317 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51318 + mnt);
51319 +
51320 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51321 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51322 + reqmode & GR_READ ? " reading" : "",
51323 + reqmode & GR_WRITE ? " writing" : "",
51324 + reqmode & GR_EXEC ? " executing" : "");
51325 + return reqmode;
51326 + } else
51327 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51328 + {
51329 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51330 + reqmode & GR_READ ? " reading" : "",
51331 + reqmode & GR_WRITE ? " writing" : "",
51332 + reqmode & GR_EXEC ? " executing" : "");
51333 + return 0;
51334 + } else if (unlikely((mode & reqmode) != reqmode))
51335 + return 0;
51336 +
51337 + return reqmode;
51338 +}
51339 +
51340 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51341 +{
51342 + __u32 mode;
51343 +
51344 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51345 +
51346 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51347 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51348 + return mode;
51349 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51350 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51351 + return 0;
51352 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51353 + return 0;
51354 +
51355 + return (reqmode);
51356 +}
51357 +
51358 +__u32
51359 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51360 +{
51361 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51362 +}
51363 +
51364 +__u32
51365 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51366 +{
51367 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51368 +}
51369 +
51370 +__u32
51371 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51372 +{
51373 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51374 +}
51375 +
51376 +__u32
51377 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51378 +{
51379 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51380 +}
51381 +
51382 +__u32
51383 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51384 + mode_t mode)
51385 +{
51386 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51387 + return 1;
51388 +
51389 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51390 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51391 + GR_FCHMOD_ACL_MSG);
51392 + } else {
51393 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51394 + }
51395 +}
51396 +
51397 +__u32
51398 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51399 + mode_t mode)
51400 +{
51401 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51402 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51403 + GR_CHMOD_ACL_MSG);
51404 + } else {
51405 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51406 + }
51407 +}
51408 +
51409 +__u32
51410 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51411 +{
51412 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51413 +}
51414 +
51415 +__u32
51416 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51417 +{
51418 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51419 +}
51420 +
51421 +__u32
51422 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51423 +{
51424 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51425 +}
51426 +
51427 +__u32
51428 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51429 +{
51430 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51431 + GR_UNIXCONNECT_ACL_MSG);
51432 +}
51433 +
51434 +/* hardlinks require at minimum create permission,
51435 + any additional privilege required is based on the
51436 + privilege of the file being linked to
51437 +*/
51438 +__u32
51439 +gr_acl_handle_link(const struct dentry * new_dentry,
51440 + const struct dentry * parent_dentry,
51441 + const struct vfsmount * parent_mnt,
51442 + const struct dentry * old_dentry,
51443 + const struct vfsmount * old_mnt, const char *to)
51444 +{
51445 + __u32 mode;
51446 + __u32 needmode = GR_CREATE | GR_LINK;
51447 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51448 +
51449 + mode =
51450 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51451 + old_mnt);
51452 +
51453 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51454 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51455 + return mode;
51456 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51457 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51458 + return 0;
51459 + } else if (unlikely((mode & needmode) != needmode))
51460 + return 0;
51461 +
51462 + return 1;
51463 +}
51464 +
51465 +__u32
51466 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51467 + const struct dentry * parent_dentry,
51468 + const struct vfsmount * parent_mnt, const char *from)
51469 +{
51470 + __u32 needmode = GR_WRITE | GR_CREATE;
51471 + __u32 mode;
51472 +
51473 + mode =
51474 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51475 + GR_CREATE | GR_AUDIT_CREATE |
51476 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51477 +
51478 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51479 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51480 + return mode;
51481 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51482 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51483 + return 0;
51484 + } else if (unlikely((mode & needmode) != needmode))
51485 + return 0;
51486 +
51487 + return (GR_WRITE | GR_CREATE);
51488 +}
51489 +
51490 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51491 +{
51492 + __u32 mode;
51493 +
51494 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51495 +
51496 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51497 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51498 + return mode;
51499 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51500 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51501 + return 0;
51502 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51503 + return 0;
51504 +
51505 + return (reqmode);
51506 +}
51507 +
51508 +__u32
51509 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51510 + const struct dentry * parent_dentry,
51511 + const struct vfsmount * parent_mnt,
51512 + const int mode)
51513 +{
51514 + __u32 reqmode = GR_WRITE | GR_CREATE;
51515 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51516 + reqmode |= GR_SETID;
51517 +
51518 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51519 + reqmode, GR_MKNOD_ACL_MSG);
51520 +}
51521 +
51522 +__u32
51523 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51524 + const struct dentry *parent_dentry,
51525 + const struct vfsmount *parent_mnt)
51526 +{
51527 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51528 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51529 +}
51530 +
51531 +#define RENAME_CHECK_SUCCESS(old, new) \
51532 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51533 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51534 +
51535 +int
51536 +gr_acl_handle_rename(struct dentry *new_dentry,
51537 + struct dentry *parent_dentry,
51538 + const struct vfsmount *parent_mnt,
51539 + struct dentry *old_dentry,
51540 + struct inode *old_parent_inode,
51541 + struct vfsmount *old_mnt, const char *newname)
51542 +{
51543 + __u32 comp1, comp2;
51544 + int error = 0;
51545 +
51546 + if (unlikely(!gr_acl_is_enabled()))
51547 + return 0;
51548 +
51549 + if (!new_dentry->d_inode) {
51550 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51551 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51552 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51553 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51554 + GR_DELETE | GR_AUDIT_DELETE |
51555 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51556 + GR_SUPPRESS, old_mnt);
51557 + } else {
51558 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51559 + GR_CREATE | GR_DELETE |
51560 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51561 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51562 + GR_SUPPRESS, parent_mnt);
51563 + comp2 =
51564 + gr_search_file(old_dentry,
51565 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51566 + GR_DELETE | GR_AUDIT_DELETE |
51567 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51568 + }
51569 +
51570 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51571 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51572 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51573 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51574 + && !(comp2 & GR_SUPPRESS)) {
51575 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51576 + error = -EACCES;
51577 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51578 + error = -EACCES;
51579 +
51580 + return error;
51581 +}
51582 +
51583 +void
51584 +gr_acl_handle_exit(void)
51585 +{
51586 + u16 id;
51587 + char *rolename;
51588 + struct file *exec_file;
51589 +
51590 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51591 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51592 + id = current->acl_role_id;
51593 + rolename = current->role->rolename;
51594 + gr_set_acls(1);
51595 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51596 + }
51597 +
51598 + write_lock(&grsec_exec_file_lock);
51599 + exec_file = current->exec_file;
51600 + current->exec_file = NULL;
51601 + write_unlock(&grsec_exec_file_lock);
51602 +
51603 + if (exec_file)
51604 + fput(exec_file);
51605 +}
51606 +
51607 +int
51608 +gr_acl_handle_procpidmem(const struct task_struct *task)
51609 +{
51610 + if (unlikely(!gr_acl_is_enabled()))
51611 + return 0;
51612 +
51613 + if (task != current && task->acl->mode & GR_PROTPROCFD)
51614 + return -EACCES;
51615 +
51616 + return 0;
51617 +}
51618 diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51619 --- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51620 +++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51621 @@ -0,0 +1,382 @@
51622 +#include <linux/kernel.h>
51623 +#include <asm/uaccess.h>
51624 +#include <asm/errno.h>
51625 +#include <net/sock.h>
51626 +#include <linux/file.h>
51627 +#include <linux/fs.h>
51628 +#include <linux/net.h>
51629 +#include <linux/in.h>
51630 +#include <linux/skbuff.h>
51631 +#include <linux/ip.h>
51632 +#include <linux/udp.h>
51633 +#include <linux/smp_lock.h>
51634 +#include <linux/types.h>
51635 +#include <linux/sched.h>
51636 +#include <linux/netdevice.h>
51637 +#include <linux/inetdevice.h>
51638 +#include <linux/gracl.h>
51639 +#include <linux/grsecurity.h>
51640 +#include <linux/grinternal.h>
51641 +
51642 +#define GR_BIND 0x01
51643 +#define GR_CONNECT 0x02
51644 +#define GR_INVERT 0x04
51645 +#define GR_BINDOVERRIDE 0x08
51646 +#define GR_CONNECTOVERRIDE 0x10
51647 +#define GR_SOCK_FAMILY 0x20
51648 +
51649 +static const char * gr_protocols[IPPROTO_MAX] = {
51650 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51651 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51652 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51653 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51654 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51655 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51656 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51657 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51658 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51659 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51660 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51661 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51662 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51663 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51664 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51665 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51666 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51667 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51668 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51669 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51670 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51671 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51672 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51673 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51674 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51675 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51676 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51677 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51678 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51679 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51680 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51681 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51682 + };
51683 +
51684 +static const char * gr_socktypes[SOCK_MAX] = {
51685 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51686 + "unknown:7", "unknown:8", "unknown:9", "packet"
51687 + };
51688 +
51689 +static const char * gr_sockfamilies[AF_MAX+1] = {
51690 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51691 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51692 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51693 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51694 + };
51695 +
51696 +const char *
51697 +gr_proto_to_name(unsigned char proto)
51698 +{
51699 + return gr_protocols[proto];
51700 +}
51701 +
51702 +const char *
51703 +gr_socktype_to_name(unsigned char type)
51704 +{
51705 + return gr_socktypes[type];
51706 +}
51707 +
51708 +const char *
51709 +gr_sockfamily_to_name(unsigned char family)
51710 +{
51711 + return gr_sockfamilies[family];
51712 +}
51713 +
51714 +int
51715 +gr_search_socket(const int domain, const int type, const int protocol)
51716 +{
51717 + struct acl_subject_label *curr;
51718 + const struct cred *cred = current_cred();
51719 +
51720 + if (unlikely(!gr_acl_is_enabled()))
51721 + goto exit;
51722 +
51723 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
51724 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51725 + goto exit; // let the kernel handle it
51726 +
51727 + curr = current->acl;
51728 +
51729 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51730 + /* the family is allowed, if this is PF_INET allow it only if
51731 + the extra sock type/protocol checks pass */
51732 + if (domain == PF_INET)
51733 + goto inet_check;
51734 + goto exit;
51735 + } else {
51736 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51737 + __u32 fakeip = 0;
51738 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51739 + current->role->roletype, cred->uid,
51740 + cred->gid, current->exec_file ?
51741 + gr_to_filename(current->exec_file->f_path.dentry,
51742 + current->exec_file->f_path.mnt) :
51743 + curr->filename, curr->filename,
51744 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51745 + &current->signal->saved_ip);
51746 + goto exit;
51747 + }
51748 + goto exit_fail;
51749 + }
51750 +
51751 +inet_check:
51752 + /* the rest of this checking is for IPv4 only */
51753 + if (!curr->ips)
51754 + goto exit;
51755 +
51756 + if ((curr->ip_type & (1 << type)) &&
51757 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51758 + goto exit;
51759 +
51760 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51761 +		/* we don't place acls on raw sockets, and sometimes
51762 + dgram/ip sockets are opened for ioctl and not
51763 + bind/connect, so we'll fake a bind learn log */
51764 + if (type == SOCK_RAW || type == SOCK_PACKET) {
51765 + __u32 fakeip = 0;
51766 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51767 + current->role->roletype, cred->uid,
51768 + cred->gid, current->exec_file ?
51769 + gr_to_filename(current->exec_file->f_path.dentry,
51770 + current->exec_file->f_path.mnt) :
51771 + curr->filename, curr->filename,
51772 + &fakeip, 0, type,
51773 + protocol, GR_CONNECT, &current->signal->saved_ip);
51774 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51775 + __u32 fakeip = 0;
51776 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51777 + current->role->roletype, cred->uid,
51778 + cred->gid, current->exec_file ?
51779 + gr_to_filename(current->exec_file->f_path.dentry,
51780 + current->exec_file->f_path.mnt) :
51781 + curr->filename, curr->filename,
51782 + &fakeip, 0, type,
51783 + protocol, GR_BIND, &current->signal->saved_ip);
51784 + }
51785 + /* we'll log when they use connect or bind */
51786 + goto exit;
51787 + }
51788 +
51789 +exit_fail:
51790 + if (domain == PF_INET)
51791 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51792 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
51793 + else
51794 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51795 + gr_socktype_to_name(type), protocol);
51796 +
51797 + return 0;
51798 +exit:
51799 + return 1;
51800 +}
51801 +
51802 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51803 +{
51804 + if ((ip->mode & mode) &&
51805 + (ip_port >= ip->low) &&
51806 + (ip_port <= ip->high) &&
51807 + ((ntohl(ip_addr) & our_netmask) ==
51808 + (ntohl(our_addr) & our_netmask))
51809 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51810 + && (ip->type & (1 << type))) {
51811 + if (ip->mode & GR_INVERT)
51812 + return 2; // specifically denied
51813 + else
51814 + return 1; // allowed
51815 + }
51816 +
51817 + return 0; // not specifically allowed, may continue parsing
51818 +}
51819 +
51820 +static int
51821 +gr_search_connectbind(const int full_mode, struct sock *sk,
51822 + struct sockaddr_in *addr, const int type)
51823 +{
51824 + char iface[IFNAMSIZ] = {0};
51825 + struct acl_subject_label *curr;
51826 + struct acl_ip_label *ip;
51827 + struct inet_sock *isk;
51828 + struct net_device *dev;
51829 + struct in_device *idev;
51830 + unsigned long i;
51831 + int ret;
51832 + int mode = full_mode & (GR_BIND | GR_CONNECT);
51833 + __u32 ip_addr = 0;
51834 + __u32 our_addr;
51835 + __u32 our_netmask;
51836 + char *p;
51837 + __u16 ip_port = 0;
51838 + const struct cred *cred = current_cred();
51839 +
51840 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51841 + return 0;
51842 +
51843 + curr = current->acl;
51844 + isk = inet_sk(sk);
51845 +
51846 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51847 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51848 + addr->sin_addr.s_addr = curr->inaddr_any_override;
51849 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51850 + struct sockaddr_in saddr;
51851 + int err;
51852 +
51853 + saddr.sin_family = AF_INET;
51854 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
51855 + saddr.sin_port = isk->sport;
51856 +
51857 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51858 + if (err)
51859 + return err;
51860 +
51861 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51862 + if (err)
51863 + return err;
51864 + }
51865 +
51866 + if (!curr->ips)
51867 + return 0;
51868 +
51869 + ip_addr = addr->sin_addr.s_addr;
51870 + ip_port = ntohs(addr->sin_port);
51871 +
51872 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51873 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51874 + current->role->roletype, cred->uid,
51875 + cred->gid, current->exec_file ?
51876 + gr_to_filename(current->exec_file->f_path.dentry,
51877 + current->exec_file->f_path.mnt) :
51878 + curr->filename, curr->filename,
51879 + &ip_addr, ip_port, type,
51880 + sk->sk_protocol, mode, &current->signal->saved_ip);
51881 + return 0;
51882 + }
51883 +
51884 + for (i = 0; i < curr->ip_num; i++) {
51885 + ip = *(curr->ips + i);
51886 + if (ip->iface != NULL) {
51887 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
51888 + p = strchr(iface, ':');
51889 + if (p != NULL)
51890 + *p = '\0';
51891 + dev = dev_get_by_name(sock_net(sk), iface);
51892 + if (dev == NULL)
51893 + continue;
51894 + idev = in_dev_get(dev);
51895 + if (idev == NULL) {
51896 + dev_put(dev);
51897 + continue;
51898 + }
51899 + rcu_read_lock();
51900 + for_ifa(idev) {
51901 + if (!strcmp(ip->iface, ifa->ifa_label)) {
51902 + our_addr = ifa->ifa_address;
51903 + our_netmask = 0xffffffff;
51904 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51905 + if (ret == 1) {
51906 + rcu_read_unlock();
51907 + in_dev_put(idev);
51908 + dev_put(dev);
51909 + return 0;
51910 + } else if (ret == 2) {
51911 + rcu_read_unlock();
51912 + in_dev_put(idev);
51913 + dev_put(dev);
51914 + goto denied;
51915 + }
51916 + }
51917 + } endfor_ifa(idev);
51918 + rcu_read_unlock();
51919 + in_dev_put(idev);
51920 + dev_put(dev);
51921 + } else {
51922 + our_addr = ip->addr;
51923 + our_netmask = ip->netmask;
51924 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51925 + if (ret == 1)
51926 + return 0;
51927 + else if (ret == 2)
51928 + goto denied;
51929 + }
51930 + }
51931 +
51932 +denied:
51933 + if (mode == GR_BIND)
51934 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51935 + else if (mode == GR_CONNECT)
51936 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51937 +
51938 + return -EACCES;
51939 +}
51940 +
51941 +int
51942 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51943 +{
51944 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51945 +}
51946 +
51947 +int
51948 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51949 +{
51950 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51951 +}
51952 +
51953 +int gr_search_listen(struct socket *sock)
51954 +{
51955 + struct sock *sk = sock->sk;
51956 + struct sockaddr_in addr;
51957 +
51958 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51959 + addr.sin_port = inet_sk(sk)->sport;
51960 +
51961 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51962 +}
51963 +
51964 +int gr_search_accept(struct socket *sock)
51965 +{
51966 + struct sock *sk = sock->sk;
51967 + struct sockaddr_in addr;
51968 +
51969 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51970 + addr.sin_port = inet_sk(sk)->sport;
51971 +
51972 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51973 +}
51974 +
51975 +int
51976 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51977 +{
51978 + if (addr)
51979 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51980 + else {
51981 + struct sockaddr_in sin;
51982 + const struct inet_sock *inet = inet_sk(sk);
51983 +
51984 + sin.sin_addr.s_addr = inet->daddr;
51985 + sin.sin_port = inet->dport;
51986 +
51987 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51988 + }
51989 +}
51990 +
51991 +int
51992 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51993 +{
51994 + struct sockaddr_in sin;
51995 +
51996 + if (unlikely(skb->len < sizeof (struct udphdr)))
51997 + return 0; // skip this packet
51998 +
51999 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52000 + sin.sin_port = udp_hdr(skb)->source;
52001 +
52002 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52003 +}
52004 diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
52005 --- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52006 +++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
52007 @@ -0,0 +1,208 @@
52008 +#include <linux/kernel.h>
52009 +#include <linux/mm.h>
52010 +#include <linux/sched.h>
52011 +#include <linux/poll.h>
52012 +#include <linux/smp_lock.h>
52013 +#include <linux/string.h>
52014 +#include <linux/file.h>
52015 +#include <linux/types.h>
52016 +#include <linux/vmalloc.h>
52017 +#include <linux/grinternal.h>
52018 +
52019 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52020 + size_t count, loff_t *ppos);
52021 +extern int gr_acl_is_enabled(void);
52022 +
52023 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52024 +static int gr_learn_attached;
52025 +
52026 +/* use a 512k buffer */
52027 +#define LEARN_BUFFER_SIZE (512 * 1024)
52028 +
52029 +static DEFINE_SPINLOCK(gr_learn_lock);
52030 +static DEFINE_MUTEX(gr_learn_user_mutex);
52031 +
52032 +/* we need to maintain two buffers, so that the kernel context of grlearn
52033 + uses a semaphore around the userspace copying, and the other kernel contexts
52034 + use a spinlock when copying into the buffer, since they cannot sleep
52035 +*/
52036 +static char *learn_buffer;
52037 +static char *learn_buffer_user;
52038 +static int learn_buffer_len;
52039 +static int learn_buffer_user_len;
52040 +
52041 +static ssize_t
52042 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52043 +{
52044 + DECLARE_WAITQUEUE(wait, current);
52045 + ssize_t retval = 0;
52046 +
52047 + add_wait_queue(&learn_wait, &wait);
52048 + set_current_state(TASK_INTERRUPTIBLE);
52049 + do {
52050 + mutex_lock(&gr_learn_user_mutex);
52051 + spin_lock(&gr_learn_lock);
52052 + if (learn_buffer_len)
52053 + break;
52054 + spin_unlock(&gr_learn_lock);
52055 + mutex_unlock(&gr_learn_user_mutex);
52056 + if (file->f_flags & O_NONBLOCK) {
52057 + retval = -EAGAIN;
52058 + goto out;
52059 + }
52060 + if (signal_pending(current)) {
52061 + retval = -ERESTARTSYS;
52062 + goto out;
52063 + }
52064 +
52065 + schedule();
52066 + } while (1);
52067 +
52068 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52069 + learn_buffer_user_len = learn_buffer_len;
52070 + retval = learn_buffer_len;
52071 + learn_buffer_len = 0;
52072 +
52073 + spin_unlock(&gr_learn_lock);
52074 +
52075 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52076 + retval = -EFAULT;
52077 +
52078 + mutex_unlock(&gr_learn_user_mutex);
52079 +out:
52080 + set_current_state(TASK_RUNNING);
52081 + remove_wait_queue(&learn_wait, &wait);
52082 + return retval;
52083 +}
52084 +
52085 +static unsigned int
52086 +poll_learn(struct file * file, poll_table * wait)
52087 +{
52088 + poll_wait(file, &learn_wait, wait);
52089 +
52090 + if (learn_buffer_len)
52091 + return (POLLIN | POLLRDNORM);
52092 +
52093 + return 0;
52094 +}
52095 +
52096 +void
52097 +gr_clear_learn_entries(void)
52098 +{
52099 + char *tmp;
52100 +
52101 + mutex_lock(&gr_learn_user_mutex);
52102 + spin_lock(&gr_learn_lock);
52103 + tmp = learn_buffer;
52104 + learn_buffer = NULL;
52105 + spin_unlock(&gr_learn_lock);
52106 + if (tmp)
52107 + vfree(tmp);
52108 + if (learn_buffer_user != NULL) {
52109 + vfree(learn_buffer_user);
52110 + learn_buffer_user = NULL;
52111 + }
52112 + learn_buffer_len = 0;
52113 + mutex_unlock(&gr_learn_user_mutex);
52114 +
52115 + return;
52116 +}
52117 +
52118 +void
52119 +gr_add_learn_entry(const char *fmt, ...)
52120 +{
52121 + va_list args;
52122 + unsigned int len;
52123 +
52124 + if (!gr_learn_attached)
52125 + return;
52126 +
52127 + spin_lock(&gr_learn_lock);
52128 +
52129 + /* leave a gap at the end so we know when it's "full" but don't have to
52130 + compute the exact length of the string we're trying to append
52131 + */
52132 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52133 + spin_unlock(&gr_learn_lock);
52134 + wake_up_interruptible(&learn_wait);
52135 + return;
52136 + }
52137 + if (learn_buffer == NULL) {
52138 + spin_unlock(&gr_learn_lock);
52139 + return;
52140 + }
52141 +
52142 + va_start(args, fmt);
52143 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52144 + va_end(args);
52145 +
52146 + learn_buffer_len += len + 1;
52147 +
52148 + spin_unlock(&gr_learn_lock);
52149 + wake_up_interruptible(&learn_wait);
52150 +
52151 + return;
52152 +}
52153 +
52154 +static int
52155 +open_learn(struct inode *inode, struct file *file)
52156 +{
52157 + if (file->f_mode & FMODE_READ && gr_learn_attached)
52158 + return -EBUSY;
52159 + if (file->f_mode & FMODE_READ) {
52160 + int retval = 0;
52161 + mutex_lock(&gr_learn_user_mutex);
52162 + if (learn_buffer == NULL)
52163 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52164 + if (learn_buffer_user == NULL)
52165 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52166 + if (learn_buffer == NULL) {
52167 + retval = -ENOMEM;
52168 + goto out_error;
52169 + }
52170 + if (learn_buffer_user == NULL) {
52171 + retval = -ENOMEM;
52172 + goto out_error;
52173 + }
52174 + learn_buffer_len = 0;
52175 + learn_buffer_user_len = 0;
52176 + gr_learn_attached = 1;
52177 +out_error:
52178 + mutex_unlock(&gr_learn_user_mutex);
52179 + return retval;
52180 + }
52181 + return 0;
52182 +}
52183 +
52184 +static int
52185 +close_learn(struct inode *inode, struct file *file)
52186 +{
52187 + if (file->f_mode & FMODE_READ) {
52188 + char *tmp = NULL;
52189 + mutex_lock(&gr_learn_user_mutex);
52190 + spin_lock(&gr_learn_lock);
52191 + tmp = learn_buffer;
52192 + learn_buffer = NULL;
52193 + spin_unlock(&gr_learn_lock);
52194 + if (tmp)
52195 + vfree(tmp);
52196 + if (learn_buffer_user != NULL) {
52197 + vfree(learn_buffer_user);
52198 + learn_buffer_user = NULL;
52199 + }
52200 + learn_buffer_len = 0;
52201 + learn_buffer_user_len = 0;
52202 + gr_learn_attached = 0;
52203 + mutex_unlock(&gr_learn_user_mutex);
52204 + }
52205 +
52206 + return 0;
52207 +}
52208 +
52209 +const struct file_operations grsec_fops = {
52210 + .read = read_learn,
52211 + .write = write_grsec_handler,
52212 + .open = open_learn,
52213 + .release = close_learn,
52214 + .poll = poll_learn,
52215 +};
52216 diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
52217 --- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52218 +++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52219 @@ -0,0 +1,67 @@
52220 +#include <linux/kernel.h>
52221 +#include <linux/sched.h>
52222 +#include <linux/gracl.h>
52223 +#include <linux/grinternal.h>
52224 +
52225 +static const char *restab_log[] = {
52226 + [RLIMIT_CPU] = "RLIMIT_CPU",
52227 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52228 + [RLIMIT_DATA] = "RLIMIT_DATA",
52229 + [RLIMIT_STACK] = "RLIMIT_STACK",
52230 + [RLIMIT_CORE] = "RLIMIT_CORE",
52231 + [RLIMIT_RSS] = "RLIMIT_RSS",
52232 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
52233 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52234 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52235 + [RLIMIT_AS] = "RLIMIT_AS",
52236 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52237 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52238 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52239 + [RLIMIT_NICE] = "RLIMIT_NICE",
52240 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52241 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52242 + [GR_CRASH_RES] = "RLIMIT_CRASH"
52243 +};
52244 +
52245 +void
52246 +gr_log_resource(const struct task_struct *task,
52247 + const int res, const unsigned long wanted, const int gt)
52248 +{
52249 + const struct cred *cred;
52250 + unsigned long rlim;
52251 +
52252 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
52253 + return;
52254 +
52255 + // not yet supported resource
52256 + if (unlikely(!restab_log[res]))
52257 + return;
52258 +
52259 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52260 + rlim = task->signal->rlim[res].rlim_max;
52261 + else
52262 + rlim = task->signal->rlim[res].rlim_cur;
52263 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52264 + return;
52265 +
52266 + rcu_read_lock();
52267 + cred = __task_cred(task);
52268 +
52269 + if (res == RLIMIT_NPROC &&
52270 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52271 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52272 + goto out_rcu_unlock;
52273 + else if (res == RLIMIT_MEMLOCK &&
52274 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52275 + goto out_rcu_unlock;
52276 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52277 + goto out_rcu_unlock;
52278 + rcu_read_unlock();
52279 +
52280 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52281 +
52282 + return;
52283 +out_rcu_unlock:
52284 + rcu_read_unlock();
52285 + return;
52286 +}
52287 diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
52288 --- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52289 +++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52290 @@ -0,0 +1,284 @@
52291 +#include <linux/kernel.h>
52292 +#include <linux/mm.h>
52293 +#include <asm/uaccess.h>
52294 +#include <asm/errno.h>
52295 +#include <asm/mman.h>
52296 +#include <net/sock.h>
52297 +#include <linux/file.h>
52298 +#include <linux/fs.h>
52299 +#include <linux/net.h>
52300 +#include <linux/in.h>
52301 +#include <linux/smp_lock.h>
52302 +#include <linux/slab.h>
52303 +#include <linux/types.h>
52304 +#include <linux/sched.h>
52305 +#include <linux/timer.h>
52306 +#include <linux/gracl.h>
52307 +#include <linux/grsecurity.h>
52308 +#include <linux/grinternal.h>
52309 +
52310 +static struct crash_uid *uid_set;
52311 +static unsigned short uid_used;
52312 +static DEFINE_SPINLOCK(gr_uid_lock);
52313 +extern rwlock_t gr_inode_lock;
52314 +extern struct acl_subject_label *
52315 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52316 + struct acl_role_label *role);
52317 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52318 +
52319 +int
52320 +gr_init_uidset(void)
52321 +{
52322 + uid_set =
52323 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52324 + uid_used = 0;
52325 +
52326 + return uid_set ? 1 : 0;
52327 +}
52328 +
52329 +void
52330 +gr_free_uidset(void)
52331 +{
52332 + if (uid_set)
52333 + kfree(uid_set);
52334 +
52335 + return;
52336 +}
52337 +
52338 +int
52339 +gr_find_uid(const uid_t uid)
52340 +{
52341 + struct crash_uid *tmp = uid_set;
52342 + uid_t buid;
52343 + int low = 0, high = uid_used - 1, mid;
52344 +
52345 + while (high >= low) {
52346 + mid = (low + high) >> 1;
52347 + buid = tmp[mid].uid;
52348 + if (buid == uid)
52349 + return mid;
52350 + if (buid > uid)
52351 + high = mid - 1;
52352 + if (buid < uid)
52353 + low = mid + 1;
52354 + }
52355 +
52356 + return -1;
52357 +}
52358 +
52359 +static __inline__ void
52360 +gr_insertsort(void)
52361 +{
52362 + unsigned short i, j;
52363 + struct crash_uid index;
52364 +
52365 + for (i = 1; i < uid_used; i++) {
52366 + index = uid_set[i];
52367 + j = i;
52368 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52369 + uid_set[j] = uid_set[j - 1];
52370 + j--;
52371 + }
52372 + uid_set[j] = index;
52373 + }
52374 +
52375 + return;
52376 +}
52377 +
52378 +static __inline__ void
52379 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52380 +{
52381 + int loc;
52382 +
52383 + if (uid_used == GR_UIDTABLE_MAX)
52384 + return;
52385 +
52386 + loc = gr_find_uid(uid);
52387 +
52388 + if (loc >= 0) {
52389 + uid_set[loc].expires = expires;
52390 + return;
52391 + }
52392 +
52393 + uid_set[uid_used].uid = uid;
52394 + uid_set[uid_used].expires = expires;
52395 + uid_used++;
52396 +
52397 + gr_insertsort();
52398 +
52399 + return;
52400 +}
52401 +
52402 +void
52403 +gr_remove_uid(const unsigned short loc)
52404 +{
52405 + unsigned short i;
52406 +
52407 + for (i = loc + 1; i < uid_used; i++)
52408 + uid_set[i - 1] = uid_set[i];
52409 +
52410 + uid_used--;
52411 +
52412 + return;
52413 +}
52414 +
52415 +int
52416 +gr_check_crash_uid(const uid_t uid)
52417 +{
52418 + int loc;
52419 + int ret = 0;
52420 +
52421 + if (unlikely(!gr_acl_is_enabled()))
52422 + return 0;
52423 +
52424 + spin_lock(&gr_uid_lock);
52425 + loc = gr_find_uid(uid);
52426 +
52427 + if (loc < 0)
52428 + goto out_unlock;
52429 +
52430 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52431 + gr_remove_uid(loc);
52432 + else
52433 + ret = 1;
52434 +
52435 +out_unlock:
52436 + spin_unlock(&gr_uid_lock);
52437 + return ret;
52438 +}
52439 +
52440 +static __inline__ int
52441 +proc_is_setxid(const struct cred *cred)
52442 +{
52443 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52444 + cred->uid != cred->fsuid)
52445 + return 1;
52446 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52447 + cred->gid != cred->fsgid)
52448 + return 1;
52449 +
52450 + return 0;
52451 +}
52452 +
52453 +void
52454 +gr_handle_crash(struct task_struct *task, const int sig)
52455 +{
52456 + struct acl_subject_label *curr;
52457 + struct acl_subject_label *curr2;
52458 + struct task_struct *tsk, *tsk2;
52459 + const struct cred *cred;
52460 + const struct cred *cred2;
52461 +
52462 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52463 + return;
52464 +
52465 + if (unlikely(!gr_acl_is_enabled()))
52466 + return;
52467 +
52468 + curr = task->acl;
52469 +
52470 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52471 + return;
52472 +
52473 + if (time_before_eq(curr->expires, get_seconds())) {
52474 + curr->expires = 0;
52475 + curr->crashes = 0;
52476 + }
52477 +
52478 + curr->crashes++;
52479 +
52480 + if (!curr->expires)
52481 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52482 +
52483 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52484 + time_after(curr->expires, get_seconds())) {
52485 + rcu_read_lock();
52486 + cred = __task_cred(task);
52487 + if (cred->uid && proc_is_setxid(cred)) {
52488 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52489 + spin_lock(&gr_uid_lock);
52490 + gr_insert_uid(cred->uid, curr->expires);
52491 + spin_unlock(&gr_uid_lock);
52492 + curr->expires = 0;
52493 + curr->crashes = 0;
52494 + read_lock(&tasklist_lock);
52495 + do_each_thread(tsk2, tsk) {
52496 + cred2 = __task_cred(tsk);
52497 + if (tsk != task && cred2->uid == cred->uid)
52498 + gr_fake_force_sig(SIGKILL, tsk);
52499 + } while_each_thread(tsk2, tsk);
52500 + read_unlock(&tasklist_lock);
52501 + } else {
52502 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52503 + read_lock(&tasklist_lock);
52504 + do_each_thread(tsk2, tsk) {
52505 + if (likely(tsk != task)) {
52506 + curr2 = tsk->acl;
52507 +
52508 + if (curr2->device == curr->device &&
52509 + curr2->inode == curr->inode)
52510 + gr_fake_force_sig(SIGKILL, tsk);
52511 + }
52512 + } while_each_thread(tsk2, tsk);
52513 + read_unlock(&tasklist_lock);
52514 + }
52515 + rcu_read_unlock();
52516 + }
52517 +
52518 + return;
52519 +}
52520 +
52521 +int
52522 +gr_check_crash_exec(const struct file *filp)
52523 +{
52524 + struct acl_subject_label *curr;
52525 +
52526 + if (unlikely(!gr_acl_is_enabled()))
52527 + return 0;
52528 +
52529 + read_lock(&gr_inode_lock);
52530 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52531 + filp->f_path.dentry->d_inode->i_sb->s_dev,
52532 + current->role);
52533 + read_unlock(&gr_inode_lock);
52534 +
52535 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52536 + (!curr->crashes && !curr->expires))
52537 + return 0;
52538 +
52539 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52540 + time_after(curr->expires, get_seconds()))
52541 + return 1;
52542 + else if (time_before_eq(curr->expires, get_seconds())) {
52543 + curr->crashes = 0;
52544 + curr->expires = 0;
52545 + }
52546 +
52547 + return 0;
52548 +}
52549 +
52550 +void
52551 +gr_handle_alertkill(struct task_struct *task)
52552 +{
52553 + struct acl_subject_label *curracl;
52554 + __u32 curr_ip;
52555 + struct task_struct *p, *p2;
52556 +
52557 + if (unlikely(!gr_acl_is_enabled()))
52558 + return;
52559 +
52560 + curracl = task->acl;
52561 + curr_ip = task->signal->curr_ip;
52562 +
52563 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52564 + read_lock(&tasklist_lock);
52565 + do_each_thread(p2, p) {
52566 + if (p->signal->curr_ip == curr_ip)
52567 + gr_fake_force_sig(SIGKILL, p);
52568 + } while_each_thread(p2, p);
52569 + read_unlock(&tasklist_lock);
52570 + } else if (curracl->mode & GR_KILLPROC)
52571 + gr_fake_force_sig(SIGKILL, task);
52572 +
52573 + return;
52574 +}
52575 diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52576 --- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52577 +++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52578 @@ -0,0 +1,40 @@
52579 +#include <linux/kernel.h>
52580 +#include <linux/mm.h>
52581 +#include <linux/sched.h>
52582 +#include <linux/file.h>
52583 +#include <linux/ipc.h>
52584 +#include <linux/gracl.h>
52585 +#include <linux/grsecurity.h>
52586 +#include <linux/grinternal.h>
52587 +
52588 +int
52589 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52590 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52591 +{
52592 + struct task_struct *task;
52593 +
52594 + if (!gr_acl_is_enabled())
52595 + return 1;
52596 +
52597 + rcu_read_lock();
52598 + read_lock(&tasklist_lock);
52599 +
52600 + task = find_task_by_vpid(shm_cprid);
52601 +
52602 + if (unlikely(!task))
52603 + task = find_task_by_vpid(shm_lapid);
52604 +
52605 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52606 + (task->pid == shm_lapid)) &&
52607 + (task->acl->mode & GR_PROTSHM) &&
52608 + (task->acl != current->acl))) {
52609 + read_unlock(&tasklist_lock);
52610 + rcu_read_unlock();
52611 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52612 + return 0;
52613 + }
52614 + read_unlock(&tasklist_lock);
52615 + rcu_read_unlock();
52616 +
52617 + return 1;
52618 +}
52619 diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52620 --- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52621 +++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52622 @@ -0,0 +1,19 @@
52623 +#include <linux/kernel.h>
52624 +#include <linux/sched.h>
52625 +#include <linux/fs.h>
52626 +#include <linux/file.h>
52627 +#include <linux/grsecurity.h>
52628 +#include <linux/grinternal.h>
52629 +
52630 +void
52631 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52632 +{
52633 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52634 + if ((grsec_enable_chdir && grsec_enable_group &&
52635 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52636 + !grsec_enable_group)) {
52637 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52638 + }
52639 +#endif
52640 + return;
52641 +}
52642 diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52643 --- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52644 +++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52645 @@ -0,0 +1,384 @@
52646 +#include <linux/kernel.h>
52647 +#include <linux/module.h>
52648 +#include <linux/sched.h>
52649 +#include <linux/file.h>
52650 +#include <linux/fs.h>
52651 +#include <linux/mount.h>
52652 +#include <linux/types.h>
52653 +#include <linux/pid_namespace.h>
52654 +#include <linux/grsecurity.h>
52655 +#include <linux/grinternal.h>
52656 +
52657 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52658 +{
52659 +#ifdef CONFIG_GRKERNSEC
52660 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52661 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52662 + task->gr_is_chrooted = 1;
52663 + else
52664 + task->gr_is_chrooted = 0;
52665 +
52666 + task->gr_chroot_dentry = path->dentry;
52667 +#endif
52668 + return;
52669 +}
52670 +
52671 +void gr_clear_chroot_entries(struct task_struct *task)
52672 +{
52673 +#ifdef CONFIG_GRKERNSEC
52674 + task->gr_is_chrooted = 0;
52675 + task->gr_chroot_dentry = NULL;
52676 +#endif
52677 + return;
52678 +}
52679 +
52680 +int
52681 +gr_handle_chroot_unix(const pid_t pid)
52682 +{
52683 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52684 + struct task_struct *p;
52685 +
52686 + if (unlikely(!grsec_enable_chroot_unix))
52687 + return 1;
52688 +
52689 + if (likely(!proc_is_chrooted(current)))
52690 + return 1;
52691 +
52692 + rcu_read_lock();
52693 + read_lock(&tasklist_lock);
52694 +
52695 + p = find_task_by_vpid_unrestricted(pid);
52696 + if (unlikely(p && !have_same_root(current, p))) {
52697 + read_unlock(&tasklist_lock);
52698 + rcu_read_unlock();
52699 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52700 + return 0;
52701 + }
52702 + read_unlock(&tasklist_lock);
52703 + rcu_read_unlock();
52704 +#endif
52705 + return 1;
52706 +}
52707 +
52708 +int
52709 +gr_handle_chroot_nice(void)
52710 +{
52711 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52712 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52713 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52714 + return -EPERM;
52715 + }
52716 +#endif
52717 + return 0;
52718 +}
52719 +
52720 +int
52721 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52722 +{
52723 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52724 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52725 + && proc_is_chrooted(current)) {
52726 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52727 + return -EACCES;
52728 + }
52729 +#endif
52730 + return 0;
52731 +}
52732 +
52733 +int
52734 +gr_handle_chroot_rawio(const struct inode *inode)
52735 +{
52736 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52737 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52738 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52739 + return 1;
52740 +#endif
52741 + return 0;
52742 +}
52743 +
52744 +int
52745 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52746 +{
52747 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52748 + struct task_struct *p;
52749 + int ret = 0;
52750 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52751 + return ret;
52752 +
52753 + read_lock(&tasklist_lock);
52754 + do_each_pid_task(pid, type, p) {
52755 + if (!have_same_root(current, p)) {
52756 + ret = 1;
52757 + goto out;
52758 + }
52759 + } while_each_pid_task(pid, type, p);
52760 +out:
52761 + read_unlock(&tasklist_lock);
52762 + return ret;
52763 +#endif
52764 + return 0;
52765 +}
52766 +
52767 +int
52768 +gr_pid_is_chrooted(struct task_struct *p)
52769 +{
52770 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52771 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52772 + return 0;
52773 +
52774 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52775 + !have_same_root(current, p)) {
52776 + return 1;
52777 + }
52778 +#endif
52779 + return 0;
52780 +}
52781 +
52782 +EXPORT_SYMBOL(gr_pid_is_chrooted);
52783 +
52784 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52785 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52786 +{
52787 + struct dentry *dentry = (struct dentry *)u_dentry;
52788 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52789 + struct dentry *realroot;
52790 + struct vfsmount *realrootmnt;
52791 + struct dentry *currentroot;
52792 + struct vfsmount *currentmnt;
52793 + struct task_struct *reaper = &init_task;
52794 + int ret = 1;
52795 +
52796 + read_lock(&reaper->fs->lock);
52797 + realrootmnt = mntget(reaper->fs->root.mnt);
52798 + realroot = dget(reaper->fs->root.dentry);
52799 + read_unlock(&reaper->fs->lock);
52800 +
52801 + read_lock(&current->fs->lock);
52802 + currentmnt = mntget(current->fs->root.mnt);
52803 + currentroot = dget(current->fs->root.dentry);
52804 + read_unlock(&current->fs->lock);
52805 +
52806 + spin_lock(&dcache_lock);
52807 + for (;;) {
52808 + if (unlikely((dentry == realroot && mnt == realrootmnt)
52809 + || (dentry == currentroot && mnt == currentmnt)))
52810 + break;
52811 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52812 + if (mnt->mnt_parent == mnt)
52813 + break;
52814 + dentry = mnt->mnt_mountpoint;
52815 + mnt = mnt->mnt_parent;
52816 + continue;
52817 + }
52818 + dentry = dentry->d_parent;
52819 + }
52820 + spin_unlock(&dcache_lock);
52821 +
52822 + dput(currentroot);
52823 + mntput(currentmnt);
52824 +
52825 + /* access is outside of chroot */
52826 + if (dentry == realroot && mnt == realrootmnt)
52827 + ret = 0;
52828 +
52829 + dput(realroot);
52830 + mntput(realrootmnt);
52831 + return ret;
52832 +}
52833 +#endif
52834 +
52835 +int
52836 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52837 +{
52838 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52839 + if (!grsec_enable_chroot_fchdir)
52840 + return 1;
52841 +
52842 + if (!proc_is_chrooted(current))
52843 + return 1;
52844 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52845 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52846 + return 0;
52847 + }
52848 +#endif
52849 + return 1;
52850 +}
52851 +
52852 +int
52853 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52854 + const time_t shm_createtime)
52855 +{
52856 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52857 + struct task_struct *p;
52858 + time_t starttime;
52859 +
52860 + if (unlikely(!grsec_enable_chroot_shmat))
52861 + return 1;
52862 +
52863 + if (likely(!proc_is_chrooted(current)))
52864 + return 1;
52865 +
52866 + rcu_read_lock();
52867 + read_lock(&tasklist_lock);
52868 +
52869 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52870 + starttime = p->start_time.tv_sec;
52871 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52872 + if (have_same_root(current, p)) {
52873 + goto allow;
52874 + } else {
52875 + read_unlock(&tasklist_lock);
52876 + rcu_read_unlock();
52877 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52878 + return 0;
52879 + }
52880 + }
52881 + /* creator exited, pid reuse, fall through to next check */
52882 + }
52883 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52884 + if (unlikely(!have_same_root(current, p))) {
52885 + read_unlock(&tasklist_lock);
52886 + rcu_read_unlock();
52887 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52888 + return 0;
52889 + }
52890 + }
52891 +
52892 +allow:
52893 + read_unlock(&tasklist_lock);
52894 + rcu_read_unlock();
52895 +#endif
52896 + return 1;
52897 +}
52898 +
52899 +void
52900 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52901 +{
52902 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52903 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52904 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52905 +#endif
52906 + return;
52907 +}
52908 +
52909 +int
52910 +gr_handle_chroot_mknod(const struct dentry *dentry,
52911 + const struct vfsmount *mnt, const int mode)
52912 +{
52913 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52914 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52915 + proc_is_chrooted(current)) {
52916 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52917 + return -EPERM;
52918 + }
52919 +#endif
52920 + return 0;
52921 +}
52922 +
52923 +int
52924 +gr_handle_chroot_mount(const struct dentry *dentry,
52925 + const struct vfsmount *mnt, const char *dev_name)
52926 +{
52927 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52928 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52929 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52930 + return -EPERM;
52931 + }
52932 +#endif
52933 + return 0;
52934 +}
52935 +
52936 +int
52937 +gr_handle_chroot_pivot(void)
52938 +{
52939 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52940 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52941 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52942 + return -EPERM;
52943 + }
52944 +#endif
52945 + return 0;
52946 +}
52947 +
52948 +int
52949 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52950 +{
52951 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52952 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52953 + !gr_is_outside_chroot(dentry, mnt)) {
52954 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52955 + return -EPERM;
52956 + }
52957 +#endif
52958 + return 0;
52959 +}
52960 +
52961 +int
52962 +gr_handle_chroot_caps(struct path *path)
52963 +{
52964 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52965 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52966 + (init_task.fs->root.dentry != path->dentry) &&
52967 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52968 +
52969 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52970 + const struct cred *old = current_cred();
52971 + struct cred *new = prepare_creds();
52972 + if (new == NULL)
52973 + return 1;
52974 +
52975 + new->cap_permitted = cap_drop(old->cap_permitted,
52976 + chroot_caps);
52977 + new->cap_inheritable = cap_drop(old->cap_inheritable,
52978 + chroot_caps);
52979 + new->cap_effective = cap_drop(old->cap_effective,
52980 + chroot_caps);
52981 +
52982 + commit_creds(new);
52983 +
52984 + return 0;
52985 + }
52986 +#endif
52987 + return 0;
52988 +}
52989 +
52990 +int
52991 +gr_handle_chroot_sysctl(const int op)
52992 +{
52993 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52994 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52995 + && (op & MAY_WRITE))
52996 + return -EACCES;
52997 +#endif
52998 + return 0;
52999 +}
53000 +
53001 +void
53002 +gr_handle_chroot_chdir(struct path *path)
53003 +{
53004 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53005 + if (grsec_enable_chroot_chdir)
53006 + set_fs_pwd(current->fs, path);
53007 +#endif
53008 + return;
53009 +}
53010 +
53011 +int
53012 +gr_handle_chroot_chmod(const struct dentry *dentry,
53013 + const struct vfsmount *mnt, const int mode)
53014 +{
53015 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53016 + /* allow chmod +s on directories, but not on files */
53017 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53018 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53019 + proc_is_chrooted(current)) {
53020 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53021 + return -EPERM;
53022 + }
53023 +#endif
53024 + return 0;
53025 +}
53026 +
53027 +#ifdef CONFIG_SECURITY
53028 +EXPORT_SYMBOL(gr_handle_chroot_caps);
53029 +#endif
53030 diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
53031 --- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53032 +++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
53033 @@ -0,0 +1,447 @@
53034 +#include <linux/kernel.h>
53035 +#include <linux/module.h>
53036 +#include <linux/sched.h>
53037 +#include <linux/file.h>
53038 +#include <linux/fs.h>
53039 +#include <linux/kdev_t.h>
53040 +#include <linux/net.h>
53041 +#include <linux/in.h>
53042 +#include <linux/ip.h>
53043 +#include <linux/skbuff.h>
53044 +#include <linux/sysctl.h>
53045 +
53046 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53047 +void
53048 +pax_set_initial_flags(struct linux_binprm *bprm)
53049 +{
53050 + return;
53051 +}
53052 +#endif
53053 +
53054 +#ifdef CONFIG_SYSCTL
53055 +__u32
53056 +gr_handle_sysctl(const struct ctl_table * table, const int op)
53057 +{
53058 + return 0;
53059 +}
53060 +#endif
53061 +
53062 +#ifdef CONFIG_TASKSTATS
53063 +int gr_is_taskstats_denied(int pid)
53064 +{
53065 + return 0;
53066 +}
53067 +#endif
53068 +
53069 +int
53070 +gr_acl_is_enabled(void)
53071 +{
53072 + return 0;
53073 +}
53074 +
53075 +int
53076 +gr_handle_rawio(const struct inode *inode)
53077 +{
53078 + return 0;
53079 +}
53080 +
53081 +void
53082 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53083 +{
53084 + return;
53085 +}
53086 +
53087 +int
53088 +gr_handle_ptrace(struct task_struct *task, const long request)
53089 +{
53090 + return 0;
53091 +}
53092 +
53093 +int
53094 +gr_handle_proc_ptrace(struct task_struct *task)
53095 +{
53096 + return 0;
53097 +}
53098 +
53099 +void
53100 +gr_learn_resource(const struct task_struct *task,
53101 + const int res, const unsigned long wanted, const int gt)
53102 +{
53103 + return;
53104 +}
53105 +
53106 +int
53107 +gr_set_acls(const int type)
53108 +{
53109 + return 0;
53110 +}
53111 +
53112 +int
53113 +gr_check_hidden_task(const struct task_struct *tsk)
53114 +{
53115 + return 0;
53116 +}
53117 +
53118 +int
53119 +gr_check_protected_task(const struct task_struct *task)
53120 +{
53121 + return 0;
53122 +}
53123 +
53124 +int
53125 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53126 +{
53127 + return 0;
53128 +}
53129 +
53130 +void
53131 +gr_copy_label(struct task_struct *tsk)
53132 +{
53133 + return;
53134 +}
53135 +
53136 +void
53137 +gr_set_pax_flags(struct task_struct *task)
53138 +{
53139 + return;
53140 +}
53141 +
53142 +int
53143 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53144 + const int unsafe_share)
53145 +{
53146 + return 0;
53147 +}
53148 +
53149 +void
53150 +gr_handle_delete(const ino_t ino, const dev_t dev)
53151 +{
53152 + return;
53153 +}
53154 +
53155 +void
53156 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53157 +{
53158 + return;
53159 +}
53160 +
53161 +void
53162 +gr_handle_crash(struct task_struct *task, const int sig)
53163 +{
53164 + return;
53165 +}
53166 +
53167 +int
53168 +gr_check_crash_exec(const struct file *filp)
53169 +{
53170 + return 0;
53171 +}
53172 +
53173 +int
53174 +gr_check_crash_uid(const uid_t uid)
53175 +{
53176 + return 0;
53177 +}
53178 +
53179 +void
53180 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53181 + struct dentry *old_dentry,
53182 + struct dentry *new_dentry,
53183 + struct vfsmount *mnt, const __u8 replace)
53184 +{
53185 + return;
53186 +}
53187 +
53188 +int
53189 +gr_search_socket(const int family, const int type, const int protocol)
53190 +{
53191 + return 1;
53192 +}
53193 +
53194 +int
53195 +gr_search_connectbind(const int mode, const struct socket *sock,
53196 + const struct sockaddr_in *addr)
53197 +{
53198 + return 0;
53199 +}
53200 +
53201 +int
53202 +gr_is_capable(const int cap)
53203 +{
53204 + return 1;
53205 +}
53206 +
53207 +int
53208 +gr_is_capable_nolog(const int cap)
53209 +{
53210 + return 1;
53211 +}
53212 +
53213 +void
53214 +gr_handle_alertkill(struct task_struct *task)
53215 +{
53216 + return;
53217 +}
53218 +
53219 +__u32
53220 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53221 +{
53222 + return 1;
53223 +}
53224 +
53225 +__u32
53226 +gr_acl_handle_hidden_file(const struct dentry * dentry,
53227 + const struct vfsmount * mnt)
53228 +{
53229 + return 1;
53230 +}
53231 +
53232 +__u32
53233 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53234 + const int fmode)
53235 +{
53236 + return 1;
53237 +}
53238 +
53239 +__u32
53240 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53241 +{
53242 + return 1;
53243 +}
53244 +
53245 +__u32
53246 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53247 +{
53248 + return 1;
53249 +}
53250 +
53251 +int
53252 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53253 + unsigned int *vm_flags)
53254 +{
53255 + return 1;
53256 +}
53257 +
53258 +__u32
53259 +gr_acl_handle_truncate(const struct dentry * dentry,
53260 + const struct vfsmount * mnt)
53261 +{
53262 + return 1;
53263 +}
53264 +
53265 +__u32
53266 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53267 +{
53268 + return 1;
53269 +}
53270 +
53271 +__u32
53272 +gr_acl_handle_access(const struct dentry * dentry,
53273 + const struct vfsmount * mnt, const int fmode)
53274 +{
53275 + return 1;
53276 +}
53277 +
53278 +__u32
53279 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53280 + mode_t mode)
53281 +{
53282 + return 1;
53283 +}
53284 +
53285 +__u32
53286 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53287 + mode_t mode)
53288 +{
53289 + return 1;
53290 +}
53291 +
53292 +__u32
53293 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53294 +{
53295 + return 1;
53296 +}
53297 +
53298 +__u32
53299 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53300 +{
53301 + return 1;
53302 +}
53303 +
53304 +void
53305 +grsecurity_init(void)
53306 +{
53307 + return;
53308 +}
53309 +
53310 +__u32
53311 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53312 + const struct dentry * parent_dentry,
53313 + const struct vfsmount * parent_mnt,
53314 + const int mode)
53315 +{
53316 + return 1;
53317 +}
53318 +
53319 +__u32
53320 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53321 + const struct dentry * parent_dentry,
53322 + const struct vfsmount * parent_mnt)
53323 +{
53324 + return 1;
53325 +}
53326 +
53327 +__u32
53328 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53329 + const struct dentry * parent_dentry,
53330 + const struct vfsmount * parent_mnt, const char *from)
53331 +{
53332 + return 1;
53333 +}
53334 +
53335 +__u32
53336 +gr_acl_handle_link(const struct dentry * new_dentry,
53337 + const struct dentry * parent_dentry,
53338 + const struct vfsmount * parent_mnt,
53339 + const struct dentry * old_dentry,
53340 + const struct vfsmount * old_mnt, const char *to)
53341 +{
53342 + return 1;
53343 +}
53344 +
53345 +int
53346 +gr_acl_handle_rename(const struct dentry *new_dentry,
53347 + const struct dentry *parent_dentry,
53348 + const struct vfsmount *parent_mnt,
53349 + const struct dentry *old_dentry,
53350 + const struct inode *old_parent_inode,
53351 + const struct vfsmount *old_mnt, const char *newname)
53352 +{
53353 + return 0;
53354 +}
53355 +
53356 +int
53357 +gr_acl_handle_filldir(const struct file *file, const char *name,
53358 + const int namelen, const ino_t ino)
53359 +{
53360 + return 1;
53361 +}
53362 +
53363 +int
53364 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53365 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53366 +{
53367 + return 1;
53368 +}
53369 +
53370 +int
53371 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53372 +{
53373 + return 0;
53374 +}
53375 +
53376 +int
53377 +gr_search_accept(const struct socket *sock)
53378 +{
53379 + return 0;
53380 +}
53381 +
53382 +int
53383 +gr_search_listen(const struct socket *sock)
53384 +{
53385 + return 0;
53386 +}
53387 +
53388 +int
53389 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53390 +{
53391 + return 0;
53392 +}
53393 +
53394 +__u32
53395 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53396 +{
53397 + return 1;
53398 +}
53399 +
53400 +__u32
53401 +gr_acl_handle_creat(const struct dentry * dentry,
53402 + const struct dentry * p_dentry,
53403 + const struct vfsmount * p_mnt, const int fmode,
53404 + const int imode)
53405 +{
53406 + return 1;
53407 +}
53408 +
53409 +void
53410 +gr_acl_handle_exit(void)
53411 +{
53412 + return;
53413 +}
53414 +
53415 +int
53416 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53417 +{
53418 + return 1;
53419 +}
53420 +
53421 +void
53422 +gr_set_role_label(const uid_t uid, const gid_t gid)
53423 +{
53424 + return;
53425 +}
53426 +
53427 +int
53428 +gr_acl_handle_procpidmem(const struct task_struct *task)
53429 +{
53430 + return 0;
53431 +}
53432 +
53433 +int
53434 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53435 +{
53436 + return 0;
53437 +}
53438 +
53439 +int
53440 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53441 +{
53442 + return 0;
53443 +}
53444 +
53445 +void
53446 +gr_set_kernel_label(struct task_struct *task)
53447 +{
53448 + return;
53449 +}
53450 +
53451 +int
53452 +gr_check_user_change(int real, int effective, int fs)
53453 +{
53454 + return 0;
53455 +}
53456 +
53457 +int
53458 +gr_check_group_change(int real, int effective, int fs)
53459 +{
53460 + return 0;
53461 +}
53462 +
53463 +int gr_acl_enable_at_secure(void)
53464 +{
53465 + return 0;
53466 +}
53467 +
53468 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53469 +{
53470 + return dentry->d_inode->i_sb->s_dev;
53471 +}
53472 +
53473 +EXPORT_SYMBOL(gr_is_capable);
53474 +EXPORT_SYMBOL(gr_is_capable_nolog);
53475 +EXPORT_SYMBOL(gr_learn_resource);
53476 +EXPORT_SYMBOL(gr_set_kernel_label);
53477 +#ifdef CONFIG_SECURITY
53478 +EXPORT_SYMBOL(gr_check_user_change);
53479 +EXPORT_SYMBOL(gr_check_group_change);
53480 +#endif
53481 diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53482 --- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53483 +++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53484 @@ -0,0 +1,132 @@
53485 +#include <linux/kernel.h>
53486 +#include <linux/sched.h>
53487 +#include <linux/file.h>
53488 +#include <linux/binfmts.h>
53489 +#include <linux/smp_lock.h>
53490 +#include <linux/fs.h>
53491 +#include <linux/types.h>
53492 +#include <linux/grdefs.h>
53493 +#include <linux/grinternal.h>
53494 +#include <linux/capability.h>
53495 +#include <linux/compat.h>
53496 +
53497 +#include <asm/uaccess.h>
53498 +
53499 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53500 +static char gr_exec_arg_buf[132];
53501 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53502 +#endif
53503 +
53504 +void
53505 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53506 +{
53507 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53508 + char *grarg = gr_exec_arg_buf;
53509 + unsigned int i, x, execlen = 0;
53510 + char c;
53511 +
53512 + if (!((grsec_enable_execlog && grsec_enable_group &&
53513 + in_group_p(grsec_audit_gid))
53514 + || (grsec_enable_execlog && !grsec_enable_group)))
53515 + return;
53516 +
53517 + mutex_lock(&gr_exec_arg_mutex);
53518 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53519 +
53520 + if (unlikely(argv == NULL))
53521 + goto log;
53522 +
53523 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53524 + const char __user *p;
53525 + unsigned int len;
53526 +
53527 + if (copy_from_user(&p, argv + i, sizeof(p)))
53528 + goto log;
53529 + if (!p)
53530 + goto log;
53531 + len = strnlen_user(p, 128 - execlen);
53532 + if (len > 128 - execlen)
53533 + len = 128 - execlen;
53534 + else if (len > 0)
53535 + len--;
53536 + if (copy_from_user(grarg + execlen, p, len))
53537 + goto log;
53538 +
53539 + /* rewrite unprintable characters */
53540 + for (x = 0; x < len; x++) {
53541 + c = *(grarg + execlen + x);
53542 + if (c < 32 || c > 126)
53543 + *(grarg + execlen + x) = ' ';
53544 + }
53545 +
53546 + execlen += len;
53547 + *(grarg + execlen) = ' ';
53548 + *(grarg + execlen + 1) = '\0';
53549 + execlen++;
53550 + }
53551 +
53552 + log:
53553 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53554 + bprm->file->f_path.mnt, grarg);
53555 + mutex_unlock(&gr_exec_arg_mutex);
53556 +#endif
53557 + return;
53558 +}
53559 +
53560 +#ifdef CONFIG_COMPAT
53561 +void
53562 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53563 +{
53564 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53565 + char *grarg = gr_exec_arg_buf;
53566 + unsigned int i, x, execlen = 0;
53567 + char c;
53568 +
53569 + if (!((grsec_enable_execlog && grsec_enable_group &&
53570 + in_group_p(grsec_audit_gid))
53571 + || (grsec_enable_execlog && !grsec_enable_group)))
53572 + return;
53573 +
53574 + mutex_lock(&gr_exec_arg_mutex);
53575 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53576 +
53577 + if (unlikely(argv == NULL))
53578 + goto log;
53579 +
53580 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53581 + compat_uptr_t p;
53582 + unsigned int len;
53583 +
53584 + if (get_user(p, argv + i))
53585 + goto log;
53586 + len = strnlen_user(compat_ptr(p), 128 - execlen);
53587 + if (len > 128 - execlen)
53588 + len = 128 - execlen;
53589 + else if (len > 0)
53590 + len--;
53591 + else
53592 + goto log;
53593 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53594 + goto log;
53595 +
53596 + /* rewrite unprintable characters */
53597 + for (x = 0; x < len; x++) {
53598 + c = *(grarg + execlen + x);
53599 + if (c < 32 || c > 126)
53600 + *(grarg + execlen + x) = ' ';
53601 + }
53602 +
53603 + execlen += len;
53604 + *(grarg + execlen) = ' ';
53605 + *(grarg + execlen + 1) = '\0';
53606 + execlen++;
53607 + }
53608 +
53609 + log:
53610 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53611 + bprm->file->f_path.mnt, grarg);
53612 + mutex_unlock(&gr_exec_arg_mutex);
53613 +#endif
53614 + return;
53615 +}
53616 +#endif
53617 diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53618 --- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53619 +++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53620 @@ -0,0 +1,24 @@
53621 +#include <linux/kernel.h>
53622 +#include <linux/sched.h>
53623 +#include <linux/fs.h>
53624 +#include <linux/file.h>
53625 +#include <linux/grinternal.h>
53626 +
53627 +int
53628 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53629 + const struct dentry *dir, const int flag, const int acc_mode)
53630 +{
53631 +#ifdef CONFIG_GRKERNSEC_FIFO
53632 + const struct cred *cred = current_cred();
53633 +
53634 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53635 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53636 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53637 + (cred->fsuid != dentry->d_inode->i_uid)) {
53638 + if (!inode_permission(dentry->d_inode, acc_mode))
53639 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53640 + return -EACCES;
53641 + }
53642 +#endif
53643 + return 0;
53644 +}
53645 diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53646 --- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53647 +++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53648 @@ -0,0 +1,23 @@
53649 +#include <linux/kernel.h>
53650 +#include <linux/sched.h>
53651 +#include <linux/grsecurity.h>
53652 +#include <linux/grinternal.h>
53653 +#include <linux/errno.h>
53654 +
53655 +void
53656 +gr_log_forkfail(const int retval)
53657 +{
53658 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53659 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53660 + switch (retval) {
53661 + case -EAGAIN:
53662 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53663 + break;
53664 + case -ENOMEM:
53665 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53666 + break;
53667 + }
53668 + }
53669 +#endif
53670 + return;
53671 +}
53672 diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53673 --- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53674 +++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53675 @@ -0,0 +1,270 @@
53676 +#include <linux/kernel.h>
53677 +#include <linux/sched.h>
53678 +#include <linux/mm.h>
53679 +#include <linux/smp_lock.h>
53680 +#include <linux/gracl.h>
53681 +#include <linux/slab.h>
53682 +#include <linux/vmalloc.h>
53683 +#include <linux/percpu.h>
53684 +#include <linux/module.h>
53685 +
53686 +int grsec_enable_brute;
53687 +int grsec_enable_link;
53688 +int grsec_enable_dmesg;
53689 +int grsec_enable_harden_ptrace;
53690 +int grsec_enable_fifo;
53691 +int grsec_enable_execlog;
53692 +int grsec_enable_signal;
53693 +int grsec_enable_forkfail;
53694 +int grsec_enable_audit_ptrace;
53695 +int grsec_enable_time;
53696 +int grsec_enable_audit_textrel;
53697 +int grsec_enable_group;
53698 +int grsec_audit_gid;
53699 +int grsec_enable_chdir;
53700 +int grsec_enable_mount;
53701 +int grsec_enable_rofs;
53702 +int grsec_enable_chroot_findtask;
53703 +int grsec_enable_chroot_mount;
53704 +int grsec_enable_chroot_shmat;
53705 +int grsec_enable_chroot_fchdir;
53706 +int grsec_enable_chroot_double;
53707 +int grsec_enable_chroot_pivot;
53708 +int grsec_enable_chroot_chdir;
53709 +int grsec_enable_chroot_chmod;
53710 +int grsec_enable_chroot_mknod;
53711 +int grsec_enable_chroot_nice;
53712 +int grsec_enable_chroot_execlog;
53713 +int grsec_enable_chroot_caps;
53714 +int grsec_enable_chroot_sysctl;
53715 +int grsec_enable_chroot_unix;
53716 +int grsec_enable_tpe;
53717 +int grsec_tpe_gid;
53718 +int grsec_enable_blackhole;
53719 +#ifdef CONFIG_IPV6_MODULE
53720 +EXPORT_SYMBOL(grsec_enable_blackhole);
53721 +#endif
53722 +int grsec_lastack_retries;
53723 +int grsec_enable_tpe_all;
53724 +int grsec_enable_tpe_invert;
53725 +int grsec_enable_socket_all;
53726 +int grsec_socket_all_gid;
53727 +int grsec_enable_socket_client;
53728 +int grsec_socket_client_gid;
53729 +int grsec_enable_socket_server;
53730 +int grsec_socket_server_gid;
53731 +int grsec_resource_logging;
53732 +int grsec_disable_privio;
53733 +int grsec_enable_log_rwxmaps;
53734 +int grsec_lock;
53735 +
53736 +DEFINE_SPINLOCK(grsec_alert_lock);
53737 +unsigned long grsec_alert_wtime = 0;
53738 +unsigned long grsec_alert_fyet = 0;
53739 +
53740 +DEFINE_SPINLOCK(grsec_audit_lock);
53741 +
53742 +DEFINE_RWLOCK(grsec_exec_file_lock);
53743 +
53744 +char *gr_shared_page[4];
53745 +
53746 +char *gr_alert_log_fmt;
53747 +char *gr_audit_log_fmt;
53748 +char *gr_alert_log_buf;
53749 +char *gr_audit_log_buf;
53750 +
53751 +extern struct gr_arg *gr_usermode;
53752 +extern unsigned char *gr_system_salt;
53753 +extern unsigned char *gr_system_sum;
53754 +
53755 +void __init
53756 +grsecurity_init(void)
53757 +{
53758 + int j;
53759 + /* create the per-cpu shared pages */
53760 +
53761 +#ifdef CONFIG_X86
53762 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53763 +#endif
53764 +
53765 + for (j = 0; j < 4; j++) {
53766 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53767 + if (gr_shared_page[j] == NULL) {
53768 + panic("Unable to allocate grsecurity shared page");
53769 + return;
53770 + }
53771 + }
53772 +
53773 + /* allocate log buffers */
53774 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53775 + if (!gr_alert_log_fmt) {
53776 + panic("Unable to allocate grsecurity alert log format buffer");
53777 + return;
53778 + }
53779 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53780 + if (!gr_audit_log_fmt) {
53781 + panic("Unable to allocate grsecurity audit log format buffer");
53782 + return;
53783 + }
53784 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53785 + if (!gr_alert_log_buf) {
53786 + panic("Unable to allocate grsecurity alert log buffer");
53787 + return;
53788 + }
53789 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53790 + if (!gr_audit_log_buf) {
53791 + panic("Unable to allocate grsecurity audit log buffer");
53792 + return;
53793 + }
53794 +
53795 + /* allocate memory for authentication structure */
53796 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53797 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53798 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53799 +
53800 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53801 + panic("Unable to allocate grsecurity authentication structure");
53802 + return;
53803 + }
53804 +
53805 +
53806 +#ifdef CONFIG_GRKERNSEC_IO
53807 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53808 + grsec_disable_privio = 1;
53809 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53810 + grsec_disable_privio = 1;
53811 +#else
53812 + grsec_disable_privio = 0;
53813 +#endif
53814 +#endif
53815 +
53816 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53817 + /* for backward compatibility, tpe_invert always defaults to on if
53818 + enabled in the kernel
53819 + */
53820 + grsec_enable_tpe_invert = 1;
53821 +#endif
53822 +
53823 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53824 +#ifndef CONFIG_GRKERNSEC_SYSCTL
53825 + grsec_lock = 1;
53826 +#endif
53827 +
53828 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53829 + grsec_enable_audit_textrel = 1;
53830 +#endif
53831 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53832 + grsec_enable_log_rwxmaps = 1;
53833 +#endif
53834 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53835 + grsec_enable_group = 1;
53836 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53837 +#endif
53838 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53839 + grsec_enable_chdir = 1;
53840 +#endif
53841 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53842 + grsec_enable_harden_ptrace = 1;
53843 +#endif
53844 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53845 + grsec_enable_mount = 1;
53846 +#endif
53847 +#ifdef CONFIG_GRKERNSEC_LINK
53848 + grsec_enable_link = 1;
53849 +#endif
53850 +#ifdef CONFIG_GRKERNSEC_BRUTE
53851 + grsec_enable_brute = 1;
53852 +#endif
53853 +#ifdef CONFIG_GRKERNSEC_DMESG
53854 + grsec_enable_dmesg = 1;
53855 +#endif
53856 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53857 + grsec_enable_blackhole = 1;
53858 + grsec_lastack_retries = 4;
53859 +#endif
53860 +#ifdef CONFIG_GRKERNSEC_FIFO
53861 + grsec_enable_fifo = 1;
53862 +#endif
53863 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53864 + grsec_enable_execlog = 1;
53865 +#endif
53866 +#ifdef CONFIG_GRKERNSEC_SIGNAL
53867 + grsec_enable_signal = 1;
53868 +#endif
53869 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53870 + grsec_enable_forkfail = 1;
53871 +#endif
53872 +#ifdef CONFIG_GRKERNSEC_TIME
53873 + grsec_enable_time = 1;
53874 +#endif
53875 +#ifdef CONFIG_GRKERNSEC_RESLOG
53876 + grsec_resource_logging = 1;
53877 +#endif
53878 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53879 + grsec_enable_chroot_findtask = 1;
53880 +#endif
53881 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53882 + grsec_enable_chroot_unix = 1;
53883 +#endif
53884 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53885 + grsec_enable_chroot_mount = 1;
53886 +#endif
53887 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53888 + grsec_enable_chroot_fchdir = 1;
53889 +#endif
53890 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53891 + grsec_enable_chroot_shmat = 1;
53892 +#endif
53893 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53894 + grsec_enable_audit_ptrace = 1;
53895 +#endif
53896 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53897 + grsec_enable_chroot_double = 1;
53898 +#endif
53899 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53900 + grsec_enable_chroot_pivot = 1;
53901 +#endif
53902 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53903 + grsec_enable_chroot_chdir = 1;
53904 +#endif
53905 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53906 + grsec_enable_chroot_chmod = 1;
53907 +#endif
53908 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53909 + grsec_enable_chroot_mknod = 1;
53910 +#endif
53911 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53912 + grsec_enable_chroot_nice = 1;
53913 +#endif
53914 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53915 + grsec_enable_chroot_execlog = 1;
53916 +#endif
53917 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53918 + grsec_enable_chroot_caps = 1;
53919 +#endif
53920 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53921 + grsec_enable_chroot_sysctl = 1;
53922 +#endif
53923 +#ifdef CONFIG_GRKERNSEC_TPE
53924 + grsec_enable_tpe = 1;
53925 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53926 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53927 + grsec_enable_tpe_all = 1;
53928 +#endif
53929 +#endif
53930 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53931 + grsec_enable_socket_all = 1;
53932 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53933 +#endif
53934 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53935 + grsec_enable_socket_client = 1;
53936 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53937 +#endif
53938 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53939 + grsec_enable_socket_server = 1;
53940 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53941 +#endif
53942 +#endif
53943 +
53944 + return;
53945 +}
53946 diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53947 --- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53948 +++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53949 @@ -0,0 +1,43 @@
53950 +#include <linux/kernel.h>
53951 +#include <linux/sched.h>
53952 +#include <linux/fs.h>
53953 +#include <linux/file.h>
53954 +#include <linux/grinternal.h>
53955 +
53956 +int
53957 +gr_handle_follow_link(const struct inode *parent,
53958 + const struct inode *inode,
53959 + const struct dentry *dentry, const struct vfsmount *mnt)
53960 +{
53961 +#ifdef CONFIG_GRKERNSEC_LINK
53962 + const struct cred *cred = current_cred();
53963 +
53964 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53965 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53966 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53967 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53968 + return -EACCES;
53969 + }
53970 +#endif
53971 + return 0;
53972 +}
53973 +
53974 +int
53975 +gr_handle_hardlink(const struct dentry *dentry,
53976 + const struct vfsmount *mnt,
53977 + struct inode *inode, const int mode, const char *to)
53978 +{
53979 +#ifdef CONFIG_GRKERNSEC_LINK
53980 + const struct cred *cred = current_cred();
53981 +
53982 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53983 + (!S_ISREG(mode) || (mode & S_ISUID) ||
53984 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53985 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53986 + !capable(CAP_FOWNER) && cred->uid) {
53987 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53988 + return -EPERM;
53989 + }
53990 +#endif
53991 + return 0;
53992 +}
53993 diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
53994 --- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53995 +++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53996 @@ -0,0 +1,310 @@
53997 +#include <linux/kernel.h>
53998 +#include <linux/sched.h>
53999 +#include <linux/file.h>
54000 +#include <linux/tty.h>
54001 +#include <linux/fs.h>
54002 +#include <linux/grinternal.h>
54003 +
54004 +#ifdef CONFIG_TREE_PREEMPT_RCU
54005 +#define DISABLE_PREEMPT() preempt_disable()
54006 +#define ENABLE_PREEMPT() preempt_enable()
54007 +#else
54008 +#define DISABLE_PREEMPT()
54009 +#define ENABLE_PREEMPT()
54010 +#endif
54011 +
54012 +#define BEGIN_LOCKS(x) \
54013 + DISABLE_PREEMPT(); \
54014 + rcu_read_lock(); \
54015 + read_lock(&tasklist_lock); \
54016 + read_lock(&grsec_exec_file_lock); \
54017 + if (x != GR_DO_AUDIT) \
54018 + spin_lock(&grsec_alert_lock); \
54019 + else \
54020 + spin_lock(&grsec_audit_lock)
54021 +
54022 +#define END_LOCKS(x) \
54023 + if (x != GR_DO_AUDIT) \
54024 + spin_unlock(&grsec_alert_lock); \
54025 + else \
54026 + spin_unlock(&grsec_audit_lock); \
54027 + read_unlock(&grsec_exec_file_lock); \
54028 + read_unlock(&tasklist_lock); \
54029 + rcu_read_unlock(); \
54030 + ENABLE_PREEMPT(); \
54031 + if (x == GR_DONT_AUDIT) \
54032 + gr_handle_alertkill(current)
54033 +
54034 +enum {
54035 + FLOODING,
54036 + NO_FLOODING
54037 +};
54038 +
54039 +extern char *gr_alert_log_fmt;
54040 +extern char *gr_audit_log_fmt;
54041 +extern char *gr_alert_log_buf;
54042 +extern char *gr_audit_log_buf;
54043 +
54044 +static int gr_log_start(int audit)
54045 +{
54046 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54047 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54048 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54049 +
54050 + if (audit == GR_DO_AUDIT)
54051 + goto set_fmt;
54052 +
54053 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54054 + grsec_alert_wtime = jiffies;
54055 + grsec_alert_fyet = 0;
54056 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54057 + grsec_alert_fyet++;
54058 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54059 + grsec_alert_wtime = jiffies;
54060 + grsec_alert_fyet++;
54061 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54062 + return FLOODING;
54063 + } else return FLOODING;
54064 +
54065 +set_fmt:
54066 + memset(buf, 0, PAGE_SIZE);
54067 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
54068 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54069 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54070 + } else if (current->signal->curr_ip) {
54071 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54072 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54073 + } else if (gr_acl_is_enabled()) {
54074 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54075 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54076 + } else {
54077 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
54078 + strcpy(buf, fmt);
54079 + }
54080 +
54081 + return NO_FLOODING;
54082 +}
54083 +
54084 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54085 + __attribute__ ((format (printf, 2, 0)));
54086 +
54087 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54088 +{
54089 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54090 + unsigned int len = strlen(buf);
54091 +
54092 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54093 +
54094 + return;
54095 +}
54096 +
54097 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54098 + __attribute__ ((format (printf, 2, 3)));
54099 +
54100 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54101 +{
54102 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54103 + unsigned int len = strlen(buf);
54104 + va_list ap;
54105 +
54106 + va_start(ap, msg);
54107 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54108 + va_end(ap);
54109 +
54110 + return;
54111 +}
54112 +
54113 +static void gr_log_end(int audit)
54114 +{
54115 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54116 + unsigned int len = strlen(buf);
54117 +
54118 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54119 + printk("%s\n", buf);
54120 +
54121 + return;
54122 +}
54123 +
54124 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54125 +{
54126 + int logtype;
54127 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54128 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54129 + void *voidptr = NULL;
54130 + int num1 = 0, num2 = 0;
54131 + unsigned long ulong1 = 0, ulong2 = 0;
54132 + struct dentry *dentry = NULL;
54133 + struct vfsmount *mnt = NULL;
54134 + struct file *file = NULL;
54135 + struct task_struct *task = NULL;
54136 + const struct cred *cred, *pcred;
54137 + va_list ap;
54138 +
54139 + BEGIN_LOCKS(audit);
54140 + logtype = gr_log_start(audit);
54141 + if (logtype == FLOODING) {
54142 + END_LOCKS(audit);
54143 + return;
54144 + }
54145 + va_start(ap, argtypes);
54146 + switch (argtypes) {
54147 + case GR_TTYSNIFF:
54148 + task = va_arg(ap, struct task_struct *);
54149 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54150 + break;
54151 + case GR_SYSCTL_HIDDEN:
54152 + str1 = va_arg(ap, char *);
54153 + gr_log_middle_varargs(audit, msg, result, str1);
54154 + break;
54155 + case GR_RBAC:
54156 + dentry = va_arg(ap, struct dentry *);
54157 + mnt = va_arg(ap, struct vfsmount *);
54158 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54159 + break;
54160 + case GR_RBAC_STR:
54161 + dentry = va_arg(ap, struct dentry *);
54162 + mnt = va_arg(ap, struct vfsmount *);
54163 + str1 = va_arg(ap, char *);
54164 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54165 + break;
54166 + case GR_STR_RBAC:
54167 + str1 = va_arg(ap, char *);
54168 + dentry = va_arg(ap, struct dentry *);
54169 + mnt = va_arg(ap, struct vfsmount *);
54170 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54171 + break;
54172 + case GR_RBAC_MODE2:
54173 + dentry = va_arg(ap, struct dentry *);
54174 + mnt = va_arg(ap, struct vfsmount *);
54175 + str1 = va_arg(ap, char *);
54176 + str2 = va_arg(ap, char *);
54177 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54178 + break;
54179 + case GR_RBAC_MODE3:
54180 + dentry = va_arg(ap, struct dentry *);
54181 + mnt = va_arg(ap, struct vfsmount *);
54182 + str1 = va_arg(ap, char *);
54183 + str2 = va_arg(ap, char *);
54184 + str3 = va_arg(ap, char *);
54185 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54186 + break;
54187 + case GR_FILENAME:
54188 + dentry = va_arg(ap, struct dentry *);
54189 + mnt = va_arg(ap, struct vfsmount *);
54190 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54191 + break;
54192 + case GR_STR_FILENAME:
54193 + str1 = va_arg(ap, char *);
54194 + dentry = va_arg(ap, struct dentry *);
54195 + mnt = va_arg(ap, struct vfsmount *);
54196 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54197 + break;
54198 + case GR_FILENAME_STR:
54199 + dentry = va_arg(ap, struct dentry *);
54200 + mnt = va_arg(ap, struct vfsmount *);
54201 + str1 = va_arg(ap, char *);
54202 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54203 + break;
54204 + case GR_FILENAME_TWO_INT:
54205 + dentry = va_arg(ap, struct dentry *);
54206 + mnt = va_arg(ap, struct vfsmount *);
54207 + num1 = va_arg(ap, int);
54208 + num2 = va_arg(ap, int);
54209 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54210 + break;
54211 + case GR_FILENAME_TWO_INT_STR:
54212 + dentry = va_arg(ap, struct dentry *);
54213 + mnt = va_arg(ap, struct vfsmount *);
54214 + num1 = va_arg(ap, int);
54215 + num2 = va_arg(ap, int);
54216 + str1 = va_arg(ap, char *);
54217 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54218 + break;
54219 + case GR_TEXTREL:
54220 + file = va_arg(ap, struct file *);
54221 + ulong1 = va_arg(ap, unsigned long);
54222 + ulong2 = va_arg(ap, unsigned long);
54223 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54224 + break;
54225 + case GR_PTRACE:
54226 + task = va_arg(ap, struct task_struct *);
54227 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54228 + break;
54229 + case GR_RESOURCE:
54230 + task = va_arg(ap, struct task_struct *);
54231 + cred = __task_cred(task);
54232 + pcred = __task_cred(task->real_parent);
54233 + ulong1 = va_arg(ap, unsigned long);
54234 + str1 = va_arg(ap, char *);
54235 + ulong2 = va_arg(ap, unsigned long);
54236 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54237 + break;
54238 + case GR_CAP:
54239 + task = va_arg(ap, struct task_struct *);
54240 + cred = __task_cred(task);
54241 + pcred = __task_cred(task->real_parent);
54242 + str1 = va_arg(ap, char *);
54243 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54244 + break;
54245 + case GR_SIG:
54246 + str1 = va_arg(ap, char *);
54247 + voidptr = va_arg(ap, void *);
54248 + gr_log_middle_varargs(audit, msg, str1, voidptr);
54249 + break;
54250 + case GR_SIG2:
54251 + task = va_arg(ap, struct task_struct *);
54252 + cred = __task_cred(task);
54253 + pcred = __task_cred(task->real_parent);
54254 + num1 = va_arg(ap, int);
54255 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54256 + break;
54257 + case GR_CRASH1:
54258 + task = va_arg(ap, struct task_struct *);
54259 + cred = __task_cred(task);
54260 + pcred = __task_cred(task->real_parent);
54261 + ulong1 = va_arg(ap, unsigned long);
54262 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54263 + break;
54264 + case GR_CRASH2:
54265 + task = va_arg(ap, struct task_struct *);
54266 + cred = __task_cred(task);
54267 + pcred = __task_cred(task->real_parent);
54268 + ulong1 = va_arg(ap, unsigned long);
54269 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54270 + break;
54271 + case GR_RWXMAP:
54272 + file = va_arg(ap, struct file *);
54273 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54274 + break;
54275 + case GR_PSACCT:
54276 + {
54277 + unsigned int wday, cday;
54278 + __u8 whr, chr;
54279 + __u8 wmin, cmin;
54280 + __u8 wsec, csec;
54281 + char cur_tty[64] = { 0 };
54282 + char parent_tty[64] = { 0 };
54283 +
54284 + task = va_arg(ap, struct task_struct *);
54285 + wday = va_arg(ap, unsigned int);
54286 + cday = va_arg(ap, unsigned int);
54287 + whr = va_arg(ap, int);
54288 + chr = va_arg(ap, int);
54289 + wmin = va_arg(ap, int);
54290 + cmin = va_arg(ap, int);
54291 + wsec = va_arg(ap, int);
54292 + csec = va_arg(ap, int);
54293 + ulong1 = va_arg(ap, unsigned long);
54294 + cred = __task_cred(task);
54295 + pcred = __task_cred(task->real_parent);
54296 +
54297 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54298 + }
54299 + break;
54300 + default:
54301 + gr_log_middle(audit, msg, ap);
54302 + }
54303 + va_end(ap);
54304 + gr_log_end(audit);
54305 + END_LOCKS(audit);
54306 +}
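
For reference, gr_log_start() above rate-limits alert logging with a simple windowed burst counter: up to CONFIG_GRKERNSEC_FLOODBURST messages are accepted per CONFIG_GRKERNSEC_FLOODTIME-second window, suppression is announced once, and further alerts are dropped until the window expires. The following is only a minimal userspace sketch of that windowing scheme; the names, the fixed 10/6 limits and the use of time() instead of jiffies are placeholders, not part of the patch.

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* stand-in for CONFIG_GRKERNSEC_FLOODTIME (seconds per window) */
#define FLOODBURST 6    /* stand-in for CONFIG_GRKERNSEC_FLOODBURST (messages per window) */

static time_t alert_wtime;      /* start of the current window */
static unsigned int alert_fyet; /* messages emitted in the current window */

/* Return 1 if the message may be logged, 0 if it must be dropped. */
static int flood_check(void)
{
	time_t now = time(NULL);

	if (!alert_wtime || now - alert_wtime > FLOODTIME) {
		/* window expired: start a fresh one and allow the message */
		alert_wtime = now;
		alert_fyet = 0;
		return 1;
	}
	if (alert_fyet < FLOODBURST) {
		alert_fyet++;
		return 1;
	}
	if (alert_fyet == FLOODBURST) {
		/* announce the suppression exactly once per window */
		alert_wtime = now;
		alert_fyet++;
		printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		if (flood_check())
			printf("alert %d logged\n", i);
	return 0;
}
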
54307 diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54308 --- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54309 +++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54310 @@ -0,0 +1,33 @@
54311 +#include <linux/kernel.h>
54312 +#include <linux/sched.h>
54313 +#include <linux/mm.h>
54314 +#include <linux/mman.h>
54315 +#include <linux/grinternal.h>
54316 +
54317 +void
54318 +gr_handle_ioperm(void)
54319 +{
54320 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54321 + return;
54322 +}
54323 +
54324 +void
54325 +gr_handle_iopl(void)
54326 +{
54327 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54328 + return;
54329 +}
54330 +
54331 +void
54332 +gr_handle_mem_readwrite(u64 from, u64 to)
54333 +{
54334 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54335 + return;
54336 +}
54337 +
54338 +void
54339 +gr_handle_vm86(void)
54340 +{
54341 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54342 + return;
54343 +}
54344 diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54345 --- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54346 +++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54347 @@ -0,0 +1,62 @@
54348 +#include <linux/kernel.h>
54349 +#include <linux/sched.h>
54350 +#include <linux/mount.h>
54351 +#include <linux/grsecurity.h>
54352 +#include <linux/grinternal.h>
54353 +
54354 +void
54355 +gr_log_remount(const char *devname, const int retval)
54356 +{
54357 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54358 + if (grsec_enable_mount && (retval >= 0))
54359 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54360 +#endif
54361 + return;
54362 +}
54363 +
54364 +void
54365 +gr_log_unmount(const char *devname, const int retval)
54366 +{
54367 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54368 + if (grsec_enable_mount && (retval >= 0))
54369 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54370 +#endif
54371 + return;
54372 +}
54373 +
54374 +void
54375 +gr_log_mount(const char *from, const char *to, const int retval)
54376 +{
54377 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54378 + if (grsec_enable_mount && (retval >= 0))
54379 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54380 +#endif
54381 + return;
54382 +}
54383 +
54384 +int
54385 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54386 +{
54387 +#ifdef CONFIG_GRKERNSEC_ROFS
54388 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54389 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54390 + return -EPERM;
54391 + } else
54392 + return 0;
54393 +#endif
54394 + return 0;
54395 +}
54396 +
54397 +int
54398 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54399 +{
54400 +#ifdef CONFIG_GRKERNSEC_ROFS
54401 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54402 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54403 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54404 + return -EPERM;
54405 + } else
54406 + return 0;
54407 +#endif
54408 + return 0;
54409 +}
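
gr_handle_rofs_blockwrite() above refuses write-mode opens of block devices while the ROFS protection is enabled, so a read-only mount cannot be bypassed by writing to the underlying device node. A rough userspace analogue of that predicate is sketched below; the helper name and the /dev/sda example path are illustrative only.

#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>

/* Return -1 (deny) when a write-mode open targets a block device,
 * mirroring the MAY_WRITE + S_ISBLK test in gr_handle_rofs_blockwrite(). */
static int rofs_blockwrite_check(const char *path, int open_flags)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return 0; /* let the real open report the error */

	if ((open_flags & (O_WRONLY | O_RDWR)) && S_ISBLK(st.st_mode)) {
		fprintf(stderr, "denied write open of block device %s\n", path);
		return -1;
	}
	return 0;
}

int main(void)
{
	return rofs_blockwrite_check("/dev/sda", O_RDWR) ? 1 : 0;
}
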
54410 diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54411 --- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54412 +++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54413 @@ -0,0 +1,36 @@
54414 +#include <linux/kernel.h>
54415 +#include <linux/sched.h>
54416 +#include <linux/mm.h>
54417 +#include <linux/file.h>
54418 +#include <linux/grinternal.h>
54419 +#include <linux/grsecurity.h>
54420 +
54421 +void
54422 +gr_log_textrel(struct vm_area_struct * vma)
54423 +{
54424 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54425 + if (grsec_enable_audit_textrel)
54426 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54427 +#endif
54428 + return;
54429 +}
54430 +
54431 +void
54432 +gr_log_rwxmmap(struct file *file)
54433 +{
54434 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54435 + if (grsec_enable_log_rwxmaps)
54436 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54437 +#endif
54438 + return;
54439 +}
54440 +
54441 +void
54442 +gr_log_rwxmprotect(struct file *file)
54443 +{
54444 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54445 + if (grsec_enable_log_rwxmaps)
54446 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54447 +#endif
54448 + return;
54449 +}
54450 diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54451 --- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54452 +++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54453 @@ -0,0 +1,14 @@
54454 +#include <linux/kernel.h>
54455 +#include <linux/sched.h>
54456 +#include <linux/grinternal.h>
54457 +#include <linux/grsecurity.h>
54458 +
54459 +void
54460 +gr_audit_ptrace(struct task_struct *task)
54461 +{
54462 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54463 + if (grsec_enable_audit_ptrace)
54464 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54465 +#endif
54466 + return;
54467 +}
54468 diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54469 --- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54470 +++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54471 @@ -0,0 +1,205 @@
54472 +#include <linux/kernel.h>
54473 +#include <linux/sched.h>
54474 +#include <linux/delay.h>
54475 +#include <linux/grsecurity.h>
54476 +#include <linux/grinternal.h>
54477 +#include <linux/hardirq.h>
54478 +
54479 +char *signames[] = {
54480 + [SIGSEGV] = "Segmentation fault",
54481 + [SIGILL] = "Illegal instruction",
54482 + [SIGABRT] = "Abort",
54483 + [SIGBUS] = "Invalid alignment/Bus error"
54484 +};
54485 +
54486 +void
54487 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54488 +{
54489 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54490 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54491 + (sig == SIGABRT) || (sig == SIGBUS))) {
54492 + if (t->pid == current->pid) {
54493 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54494 + } else {
54495 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54496 + }
54497 + }
54498 +#endif
54499 + return;
54500 +}
54501 +
54502 +int
54503 +gr_handle_signal(const struct task_struct *p, const int sig)
54504 +{
54505 +#ifdef CONFIG_GRKERNSEC
54506 + if (current->pid > 1 && gr_check_protected_task(p)) {
54507 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54508 + return -EPERM;
54509 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54510 + return -EPERM;
54511 + }
54512 +#endif
54513 + return 0;
54514 +}
54515 +
54516 +#ifdef CONFIG_GRKERNSEC
54517 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54518 +
54519 +int gr_fake_force_sig(int sig, struct task_struct *t)
54520 +{
54521 + unsigned long int flags;
54522 + int ret, blocked, ignored;
54523 + struct k_sigaction *action;
54524 +
54525 + spin_lock_irqsave(&t->sighand->siglock, flags);
54526 + action = &t->sighand->action[sig-1];
54527 + ignored = action->sa.sa_handler == SIG_IGN;
54528 + blocked = sigismember(&t->blocked, sig);
54529 + if (blocked || ignored) {
54530 + action->sa.sa_handler = SIG_DFL;
54531 + if (blocked) {
54532 + sigdelset(&t->blocked, sig);
54533 + recalc_sigpending_and_wake(t);
54534 + }
54535 + }
54536 + if (action->sa.sa_handler == SIG_DFL)
54537 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54538 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54539 +
54540 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54541 +
54542 + return ret;
54543 +}
54544 +#endif
54545 +
54546 +#ifdef CONFIG_GRKERNSEC_BRUTE
54547 +#define GR_USER_BAN_TIME (15 * 60)
54548 +
54549 +static int __get_dumpable(unsigned long mm_flags)
54550 +{
54551 + int ret;
54552 +
54553 + ret = mm_flags & MMF_DUMPABLE_MASK;
54554 + return (ret >= 2) ? 2 : ret;
54555 +}
54556 +#endif
54557 +
54558 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54559 +{
54560 +#ifdef CONFIG_GRKERNSEC_BRUTE
54561 + uid_t uid = 0;
54562 +
54563 + if (!grsec_enable_brute)
54564 + return;
54565 +
54566 + rcu_read_lock();
54567 + read_lock(&tasklist_lock);
54568 + read_lock(&grsec_exec_file_lock);
54569 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54570 + p->real_parent->brute = 1;
54571 + else {
54572 + const struct cred *cred = __task_cred(p), *cred2;
54573 + struct task_struct *tsk, *tsk2;
54574 +
54575 + if (!__get_dumpable(mm_flags) && cred->uid) {
54576 + struct user_struct *user;
54577 +
54578 + uid = cred->uid;
54579 +
54580 + /* this is put upon execution past expiration */
54581 + user = find_user(uid);
54582 + if (user == NULL)
54583 + goto unlock;
54584 + user->banned = 1;
54585 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54586 + if (user->ban_expires == ~0UL)
54587 + user->ban_expires--;
54588 +
54589 + do_each_thread(tsk2, tsk) {
54590 + cred2 = __task_cred(tsk);
54591 + if (tsk != p && cred2->uid == uid)
54592 + gr_fake_force_sig(SIGKILL, tsk);
54593 + } while_each_thread(tsk2, tsk);
54594 + }
54595 + }
54596 +unlock:
54597 + read_unlock(&grsec_exec_file_lock);
54598 + read_unlock(&tasklist_lock);
54599 + rcu_read_unlock();
54600 +
54601 + if (uid)
54602 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54603 +#endif
54604 + return;
54605 +}
54606 +
54607 +void gr_handle_brute_check(void)
54608 +{
54609 +#ifdef CONFIG_GRKERNSEC_BRUTE
54610 + if (current->brute)
54611 + msleep(30 * 1000);
54612 +#endif
54613 + return;
54614 +}
54615 +
54616 +void gr_handle_kernel_exploit(void)
54617 +{
54618 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54619 + const struct cred *cred;
54620 + struct task_struct *tsk, *tsk2;
54621 + struct user_struct *user;
54622 + uid_t uid;
54623 +
54624 + if (in_irq() || in_serving_softirq() || in_nmi())
54625 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54626 +
54627 + uid = current_uid();
54628 +
54629 + if (uid == 0)
54630 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
54631 + else {
54632 + /* kill all the processes of this user, hold a reference
54633 + to their creds struct, and prevent them from creating
54634 + another process until system reset
54635 + */
54636 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54637 + /* we intentionally leak this ref */
54638 + user = get_uid(current->cred->user);
54639 + if (user) {
54640 + user->banned = 1;
54641 + user->ban_expires = ~0UL;
54642 + }
54643 +
54644 + read_lock(&tasklist_lock);
54645 + do_each_thread(tsk2, tsk) {
54646 + cred = __task_cred(tsk);
54647 + if (cred->uid == uid)
54648 + gr_fake_force_sig(SIGKILL, tsk);
54649 + } while_each_thread(tsk2, tsk);
54650 + read_unlock(&tasklist_lock);
54651 + }
54652 +#endif
54653 +}
54654 +
54655 +int __gr_process_user_ban(struct user_struct *user)
54656 +{
54657 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54658 + if (unlikely(user->banned)) {
54659 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54660 + user->banned = 0;
54661 + user->ban_expires = 0;
54662 + free_uid(user);
54663 + } else
54664 + return -EPERM;
54665 + }
54666 +#endif
54667 + return 0;
54668 +}
54669 +
54670 +int gr_process_user_ban(void)
54671 +{
54672 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54673 + return __gr_process_user_ban(current->cred->user);
54674 +#endif
54675 + return 0;
54676 +}
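
The ban bookkeeping above works off user_struct: the brute-force handler sets ban_expires to now + GR_USER_BAN_TIME (15 minutes), the kernel-exploit lockout sets it to ~0UL to mean "until reboot", and __gr_process_user_ban() lifts a timed ban once get_seconds() reaches the deadline. A small standalone sketch of that expiry logic follows; the struct, the permanent-ban marker and the use of time() are stand-ins for the in-kernel user_struct fields and get_seconds().

#include <stdio.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)      /* matches GR_USER_BAN_TIME above */
#define BAN_PERMANENT ((time_t)-1)   /* stand-in for the ~0UL "until reboot" marker */

struct banned_user {
	int    banned;
	time_t ban_expires;
};

/* Return -1 while the ban is active, 0 once it has expired (or never existed). */
static int process_user_ban(struct banned_user *u)
{
	if (u->banned) {
		if (u->ban_expires != BAN_PERMANENT && time(NULL) >= u->ban_expires) {
			u->banned = 0;
			u->ban_expires = 0;
		} else {
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct banned_user u;

	u.banned = 1;
	u.ban_expires = time(NULL) + USER_BAN_TIME;
	printf("exec allowed now?   %s\n", process_user_ban(&u) ? "no" : "yes");

	u.ban_expires = time(NULL) - 1;   /* pretend the 15 minutes have passed */
	printf("exec allowed later? %s\n", process_user_ban(&u) ? "no" : "yes");
	return 0;
}
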
54677 diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54678 --- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54679 +++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54680 @@ -0,0 +1,275 @@
54681 +#include <linux/kernel.h>
54682 +#include <linux/module.h>
54683 +#include <linux/sched.h>
54684 +#include <linux/file.h>
54685 +#include <linux/net.h>
54686 +#include <linux/in.h>
54687 +#include <linux/ip.h>
54688 +#include <net/sock.h>
54689 +#include <net/inet_sock.h>
54690 +#include <linux/grsecurity.h>
54691 +#include <linux/grinternal.h>
54692 +#include <linux/gracl.h>
54693 +
54694 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54695 +EXPORT_SYMBOL(gr_cap_rtnetlink);
54696 +
54697 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54698 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54699 +
54700 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
54701 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
54702 +
54703 +#ifdef CONFIG_UNIX_MODULE
54704 +EXPORT_SYMBOL(gr_acl_handle_unix);
54705 +EXPORT_SYMBOL(gr_acl_handle_mknod);
54706 +EXPORT_SYMBOL(gr_handle_chroot_unix);
54707 +EXPORT_SYMBOL(gr_handle_create);
54708 +#endif
54709 +
54710 +#ifdef CONFIG_GRKERNSEC
54711 +#define gr_conn_table_size 32749
54712 +struct conn_table_entry {
54713 + struct conn_table_entry *next;
54714 + struct signal_struct *sig;
54715 +};
54716 +
54717 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54718 +DEFINE_SPINLOCK(gr_conn_table_lock);
54719 +
54720 +extern const char * gr_socktype_to_name(unsigned char type);
54721 +extern const char * gr_proto_to_name(unsigned char proto);
54722 +extern const char * gr_sockfamily_to_name(unsigned char family);
54723 +
54724 +static __inline__ int
54725 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54726 +{
54727 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54728 +}
54729 +
54730 +static __inline__ int
54731 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54732 + __u16 sport, __u16 dport)
54733 +{
54734 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54735 + sig->gr_sport == sport && sig->gr_dport == dport))
54736 + return 1;
54737 + else
54738 + return 0;
54739 +}
54740 +
54741 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54742 +{
54743 + struct conn_table_entry **match;
54744 + unsigned int index;
54745 +
54746 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54747 + sig->gr_sport, sig->gr_dport,
54748 + gr_conn_table_size);
54749 +
54750 + newent->sig = sig;
54751 +
54752 + match = &gr_conn_table[index];
54753 + newent->next = *match;
54754 + *match = newent;
54755 +
54756 + return;
54757 +}
54758 +
54759 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54760 +{
54761 + struct conn_table_entry *match, *last = NULL;
54762 + unsigned int index;
54763 +
54764 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54765 + sig->gr_sport, sig->gr_dport,
54766 + gr_conn_table_size);
54767 +
54768 + match = gr_conn_table[index];
54769 + while (match && !conn_match(match->sig,
54770 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54771 + sig->gr_dport)) {
54772 + last = match;
54773 + match = match->next;
54774 + }
54775 +
54776 + if (match) {
54777 + if (last)
54778 + last->next = match->next;
54779 + else
54780 + gr_conn_table[index] = NULL;
54781 + kfree(match);
54782 + }
54783 +
54784 + return;
54785 +}
54786 +
54787 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54788 + __u16 sport, __u16 dport)
54789 +{
54790 + struct conn_table_entry *match;
54791 + unsigned int index;
54792 +
54793 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54794 +
54795 + match = gr_conn_table[index];
54796 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54797 + match = match->next;
54798 +
54799 + if (match)
54800 + return match->sig;
54801 + else
54802 + return NULL;
54803 +}
54804 +
54805 +#endif
54806 +
54807 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54808 +{
54809 +#ifdef CONFIG_GRKERNSEC
54810 + struct signal_struct *sig = task->signal;
54811 + struct conn_table_entry *newent;
54812 +
54813 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54814 + if (newent == NULL)
54815 + return;
54816 + /* no bh lock needed since we are called with bh disabled */
54817 + spin_lock(&gr_conn_table_lock);
54818 + gr_del_task_from_ip_table_nolock(sig);
54819 + sig->gr_saddr = inet->rcv_saddr;
54820 + sig->gr_daddr = inet->daddr;
54821 + sig->gr_sport = inet->sport;
54822 + sig->gr_dport = inet->dport;
54823 + gr_add_to_task_ip_table_nolock(sig, newent);
54824 + spin_unlock(&gr_conn_table_lock);
54825 +#endif
54826 + return;
54827 +}
54828 +
54829 +void gr_del_task_from_ip_table(struct task_struct *task)
54830 +{
54831 +#ifdef CONFIG_GRKERNSEC
54832 + spin_lock_bh(&gr_conn_table_lock);
54833 + gr_del_task_from_ip_table_nolock(task->signal);
54834 + spin_unlock_bh(&gr_conn_table_lock);
54835 +#endif
54836 + return;
54837 +}
54838 +
54839 +void
54840 +gr_attach_curr_ip(const struct sock *sk)
54841 +{
54842 +#ifdef CONFIG_GRKERNSEC
54843 + struct signal_struct *p, *set;
54844 + const struct inet_sock *inet = inet_sk(sk);
54845 +
54846 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54847 + return;
54848 +
54849 + set = current->signal;
54850 +
54851 + spin_lock_bh(&gr_conn_table_lock);
54852 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54853 + inet->dport, inet->sport);
54854 + if (unlikely(p != NULL)) {
54855 + set->curr_ip = p->curr_ip;
54856 + set->used_accept = 1;
54857 + gr_del_task_from_ip_table_nolock(p);
54858 + spin_unlock_bh(&gr_conn_table_lock);
54859 + return;
54860 + }
54861 + spin_unlock_bh(&gr_conn_table_lock);
54862 +
54863 + set->curr_ip = inet->daddr;
54864 + set->used_accept = 1;
54865 +#endif
54866 + return;
54867 +}
54868 +
54869 +int
54870 +gr_handle_sock_all(const int family, const int type, const int protocol)
54871 +{
54872 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54873 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54874 + (family != AF_UNIX)) {
54875 + if (family == AF_INET)
54876 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54877 + else
54878 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54879 + return -EACCES;
54880 + }
54881 +#endif
54882 + return 0;
54883 +}
54884 +
54885 +int
54886 +gr_handle_sock_server(const struct sockaddr *sck)
54887 +{
54888 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54889 + if (grsec_enable_socket_server &&
54890 + in_group_p(grsec_socket_server_gid) &&
54891 + sck && (sck->sa_family != AF_UNIX) &&
54892 + (sck->sa_family != AF_LOCAL)) {
54893 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54894 + return -EACCES;
54895 + }
54896 +#endif
54897 + return 0;
54898 +}
54899 +
54900 +int
54901 +gr_handle_sock_server_other(const struct sock *sck)
54902 +{
54903 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54904 + if (grsec_enable_socket_server &&
54905 + in_group_p(grsec_socket_server_gid) &&
54906 + sck && (sck->sk_family != AF_UNIX) &&
54907 + (sck->sk_family != AF_LOCAL)) {
54908 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54909 + return -EACCES;
54910 + }
54911 +#endif
54912 + return 0;
54913 +}
54914 +
54915 +int
54916 +gr_handle_sock_client(const struct sockaddr *sck)
54917 +{
54918 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54919 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54920 + sck && (sck->sa_family != AF_UNIX) &&
54921 + (sck->sa_family != AF_LOCAL)) {
54922 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54923 + return -EACCES;
54924 + }
54925 +#endif
54926 + return 0;
54927 +}
54928 +
54929 +kernel_cap_t
54930 +gr_cap_rtnetlink(struct sock *sock)
54931 +{
54932 +#ifdef CONFIG_GRKERNSEC
54933 + if (!gr_acl_is_enabled())
54934 + return current_cap();
54935 + else if (sock->sk_protocol == NETLINK_ISCSI &&
54936 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54937 + gr_is_capable(CAP_SYS_ADMIN))
54938 + return current_cap();
54939 + else if (sock->sk_protocol == NETLINK_AUDIT &&
54940 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54941 + gr_is_capable(CAP_AUDIT_WRITE) &&
54942 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54943 + gr_is_capable(CAP_AUDIT_CONTROL))
54944 + return current_cap();
54945 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54946 + ((sock->sk_protocol == NETLINK_ROUTE) ?
54947 + gr_is_capable_nolog(CAP_NET_ADMIN) :
54948 + gr_is_capable(CAP_NET_ADMIN)))
54949 + return current_cap();
54950 + else
54951 + return __cap_empty_set;
54952 +#else
54953 + return current_cap();
54954 +#endif
54955 +}
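
The connection table above lets grsecurity attribute log entries to the peer of an accepted TCP connection: the connecting task stores its 4-tuple keyed by signal_struct via conn_hash(), and the accepting side looks the tuple up with the endpoints reversed to inherit curr_ip. The sketch below reimplements just the hash and the chained lookup in userspace; the entry layout, the owner_pid field and the test addresses are illustrative, only the bucket count and the mixing formula follow grsec_sock.c.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749   /* same prime bucket count as gr_conn_table_size */

struct conn_entry {
	struct conn_entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	int owner_pid;          /* stand-in for the signal_struct pointer */
};

static struct conn_entry *table[TABLE_SIZE];

/* Same mixing as conn_hash() in grsec_sock.c. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(uint32_t saddr, uint32_t daddr,
		     uint16_t sport, uint16_t dport, int pid)
{
	unsigned int i = conn_hash(saddr, daddr, sport, dport);
	struct conn_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->saddr = saddr; e->daddr = daddr;
	e->sport = sport; e->dport = dport;
	e->owner_pid = pid;
	e->next = table[i];      /* push onto the head of the bucket's chain */
	table[i] = e;
}

static int conn_lookup(uint32_t saddr, uint32_t daddr,
		       uint16_t sport, uint16_t dport)
{
	struct conn_entry *e = table[conn_hash(saddr, daddr, sport, dport)];

	for (; e; e = e->next)
		if (e->saddr == saddr && e->daddr == daddr &&
		    e->sport == sport && e->dport == dport)
			return e->owner_pid;
	return -1;
}

int main(void)
{
	conn_add(0x0a000001, 0x0a000002, 12345, 80, 4242);
	printf("owner pid: %d\n", conn_lookup(0x0a000001, 0x0a000002, 12345, 80));
	return 0;
}
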
54956 diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54957 --- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54958 +++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54959 @@ -0,0 +1,479 @@
54960 +#include <linux/kernel.h>
54961 +#include <linux/sched.h>
54962 +#include <linux/sysctl.h>
54963 +#include <linux/grsecurity.h>
54964 +#include <linux/grinternal.h>
54965 +
54966 +int
54967 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54968 +{
54969 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54970 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54971 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54972 + return -EACCES;
54973 + }
54974 +#endif
54975 + return 0;
54976 +}
54977 +
54978 +#ifdef CONFIG_GRKERNSEC_ROFS
54979 +static int __maybe_unused one = 1;
54980 +#endif
54981 +
54982 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54983 +ctl_table grsecurity_table[] = {
54984 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54985 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54986 +#ifdef CONFIG_GRKERNSEC_IO
54987 + {
54988 + .ctl_name = CTL_UNNUMBERED,
54989 + .procname = "disable_priv_io",
54990 + .data = &grsec_disable_privio,
54991 + .maxlen = sizeof(int),
54992 + .mode = 0600,
54993 + .proc_handler = &proc_dointvec,
54994 + },
54995 +#endif
54996 +#endif
54997 +#ifdef CONFIG_GRKERNSEC_LINK
54998 + {
54999 + .ctl_name = CTL_UNNUMBERED,
55000 + .procname = "linking_restrictions",
55001 + .data = &grsec_enable_link,
55002 + .maxlen = sizeof(int),
55003 + .mode = 0600,
55004 + .proc_handler = &proc_dointvec,
55005 + },
55006 +#endif
55007 +#ifdef CONFIG_GRKERNSEC_BRUTE
55008 + {
55009 + .ctl_name = CTL_UNNUMBERED,
55010 + .procname = "deter_bruteforce",
55011 + .data = &grsec_enable_brute,
55012 + .maxlen = sizeof(int),
55013 + .mode = 0600,
55014 + .proc_handler = &proc_dointvec,
55015 + },
55016 +#endif
55017 +#ifdef CONFIG_GRKERNSEC_FIFO
55018 + {
55019 + .ctl_name = CTL_UNNUMBERED,
55020 + .procname = "fifo_restrictions",
55021 + .data = &grsec_enable_fifo,
55022 + .maxlen = sizeof(int),
55023 + .mode = 0600,
55024 + .proc_handler = &proc_dointvec,
55025 + },
55026 +#endif
55027 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55028 + {
55029 + .ctl_name = CTL_UNNUMBERED,
55030 + .procname = "ip_blackhole",
55031 + .data = &grsec_enable_blackhole,
55032 + .maxlen = sizeof(int),
55033 + .mode = 0600,
55034 + .proc_handler = &proc_dointvec,
55035 + },
55036 + {
55037 + .ctl_name = CTL_UNNUMBERED,
55038 + .procname = "lastack_retries",
55039 + .data = &grsec_lastack_retries,
55040 + .maxlen = sizeof(int),
55041 + .mode = 0600,
55042 + .proc_handler = &proc_dointvec,
55043 + },
55044 +#endif
55045 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55046 + {
55047 + .ctl_name = CTL_UNNUMBERED,
55048 + .procname = "exec_logging",
55049 + .data = &grsec_enable_execlog,
55050 + .maxlen = sizeof(int),
55051 + .mode = 0600,
55052 + .proc_handler = &proc_dointvec,
55053 + },
55054 +#endif
55055 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55056 + {
55057 + .ctl_name = CTL_UNNUMBERED,
55058 + .procname = "rwxmap_logging",
55059 + .data = &grsec_enable_log_rwxmaps,
55060 + .maxlen = sizeof(int),
55061 + .mode = 0600,
55062 + .proc_handler = &proc_dointvec,
55063 + },
55064 +#endif
55065 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55066 + {
55067 + .ctl_name = CTL_UNNUMBERED,
55068 + .procname = "signal_logging",
55069 + .data = &grsec_enable_signal,
55070 + .maxlen = sizeof(int),
55071 + .mode = 0600,
55072 + .proc_handler = &proc_dointvec,
55073 + },
55074 +#endif
55075 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55076 + {
55077 + .ctl_name = CTL_UNNUMBERED,
55078 + .procname = "forkfail_logging",
55079 + .data = &grsec_enable_forkfail,
55080 + .maxlen = sizeof(int),
55081 + .mode = 0600,
55082 + .proc_handler = &proc_dointvec,
55083 + },
55084 +#endif
55085 +#ifdef CONFIG_GRKERNSEC_TIME
55086 + {
55087 + .ctl_name = CTL_UNNUMBERED,
55088 + .procname = "timechange_logging",
55089 + .data = &grsec_enable_time,
55090 + .maxlen = sizeof(int),
55091 + .mode = 0600,
55092 + .proc_handler = &proc_dointvec,
55093 + },
55094 +#endif
55095 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55096 + {
55097 + .ctl_name = CTL_UNNUMBERED,
55098 + .procname = "chroot_deny_shmat",
55099 + .data = &grsec_enable_chroot_shmat,
55100 + .maxlen = sizeof(int),
55101 + .mode = 0600,
55102 + .proc_handler = &proc_dointvec,
55103 + },
55104 +#endif
55105 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55106 + {
55107 + .ctl_name = CTL_UNNUMBERED,
55108 + .procname = "chroot_deny_unix",
55109 + .data = &grsec_enable_chroot_unix,
55110 + .maxlen = sizeof(int),
55111 + .mode = 0600,
55112 + .proc_handler = &proc_dointvec,
55113 + },
55114 +#endif
55115 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55116 + {
55117 + .ctl_name = CTL_UNNUMBERED,
55118 + .procname = "chroot_deny_mount",
55119 + .data = &grsec_enable_chroot_mount,
55120 + .maxlen = sizeof(int),
55121 + .mode = 0600,
55122 + .proc_handler = &proc_dointvec,
55123 + },
55124 +#endif
55125 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55126 + {
55127 + .ctl_name = CTL_UNNUMBERED,
55128 + .procname = "chroot_deny_fchdir",
55129 + .data = &grsec_enable_chroot_fchdir,
55130 + .maxlen = sizeof(int),
55131 + .mode = 0600,
55132 + .proc_handler = &proc_dointvec,
55133 + },
55134 +#endif
55135 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55136 + {
55137 + .ctl_name = CTL_UNNUMBERED,
55138 + .procname = "chroot_deny_chroot",
55139 + .data = &grsec_enable_chroot_double,
55140 + .maxlen = sizeof(int),
55141 + .mode = 0600,
55142 + .proc_handler = &proc_dointvec,
55143 + },
55144 +#endif
55145 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55146 + {
55147 + .ctl_name = CTL_UNNUMBERED,
55148 + .procname = "chroot_deny_pivot",
55149 + .data = &grsec_enable_chroot_pivot,
55150 + .maxlen = sizeof(int),
55151 + .mode = 0600,
55152 + .proc_handler = &proc_dointvec,
55153 + },
55154 +#endif
55155 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55156 + {
55157 + .ctl_name = CTL_UNNUMBERED,
55158 + .procname = "chroot_enforce_chdir",
55159 + .data = &grsec_enable_chroot_chdir,
55160 + .maxlen = sizeof(int),
55161 + .mode = 0600,
55162 + .proc_handler = &proc_dointvec,
55163 + },
55164 +#endif
55165 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55166 + {
55167 + .ctl_name = CTL_UNNUMBERED,
55168 + .procname = "chroot_deny_chmod",
55169 + .data = &grsec_enable_chroot_chmod,
55170 + .maxlen = sizeof(int),
55171 + .mode = 0600,
55172 + .proc_handler = &proc_dointvec,
55173 + },
55174 +#endif
55175 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55176 + {
55177 + .ctl_name = CTL_UNNUMBERED,
55178 + .procname = "chroot_deny_mknod",
55179 + .data = &grsec_enable_chroot_mknod,
55180 + .maxlen = sizeof(int),
55181 + .mode = 0600,
55182 + .proc_handler = &proc_dointvec,
55183 + },
55184 +#endif
55185 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55186 + {
55187 + .ctl_name = CTL_UNNUMBERED,
55188 + .procname = "chroot_restrict_nice",
55189 + .data = &grsec_enable_chroot_nice,
55190 + .maxlen = sizeof(int),
55191 + .mode = 0600,
55192 + .proc_handler = &proc_dointvec,
55193 + },
55194 +#endif
55195 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55196 + {
55197 + .ctl_name = CTL_UNNUMBERED,
55198 + .procname = "chroot_execlog",
55199 + .data = &grsec_enable_chroot_execlog,
55200 + .maxlen = sizeof(int),
55201 + .mode = 0600,
55202 + .proc_handler = &proc_dointvec,
55203 + },
55204 +#endif
55205 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55206 + {
55207 + .ctl_name = CTL_UNNUMBERED,
55208 + .procname = "chroot_caps",
55209 + .data = &grsec_enable_chroot_caps,
55210 + .maxlen = sizeof(int),
55211 + .mode = 0600,
55212 + .proc_handler = &proc_dointvec,
55213 + },
55214 +#endif
55215 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55216 + {
55217 + .ctl_name = CTL_UNNUMBERED,
55218 + .procname = "chroot_deny_sysctl",
55219 + .data = &grsec_enable_chroot_sysctl,
55220 + .maxlen = sizeof(int),
55221 + .mode = 0600,
55222 + .proc_handler = &proc_dointvec,
55223 + },
55224 +#endif
55225 +#ifdef CONFIG_GRKERNSEC_TPE
55226 + {
55227 + .ctl_name = CTL_UNNUMBERED,
55228 + .procname = "tpe",
55229 + .data = &grsec_enable_tpe,
55230 + .maxlen = sizeof(int),
55231 + .mode = 0600,
55232 + .proc_handler = &proc_dointvec,
55233 + },
55234 + {
55235 + .ctl_name = CTL_UNNUMBERED,
55236 + .procname = "tpe_gid",
55237 + .data = &grsec_tpe_gid,
55238 + .maxlen = sizeof(int),
55239 + .mode = 0600,
55240 + .proc_handler = &proc_dointvec,
55241 + },
55242 +#endif
55243 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55244 + {
55245 + .ctl_name = CTL_UNNUMBERED,
55246 + .procname = "tpe_invert",
55247 + .data = &grsec_enable_tpe_invert,
55248 + .maxlen = sizeof(int),
55249 + .mode = 0600,
55250 + .proc_handler = &proc_dointvec,
55251 + },
55252 +#endif
55253 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55254 + {
55255 + .ctl_name = CTL_UNNUMBERED,
55256 + .procname = "tpe_restrict_all",
55257 + .data = &grsec_enable_tpe_all,
55258 + .maxlen = sizeof(int),
55259 + .mode = 0600,
55260 + .proc_handler = &proc_dointvec,
55261 + },
55262 +#endif
55263 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55264 + {
55265 + .ctl_name = CTL_UNNUMBERED,
55266 + .procname = "socket_all",
55267 + .data = &grsec_enable_socket_all,
55268 + .maxlen = sizeof(int),
55269 + .mode = 0600,
55270 + .proc_handler = &proc_dointvec,
55271 + },
55272 + {
55273 + .ctl_name = CTL_UNNUMBERED,
55274 + .procname = "socket_all_gid",
55275 + .data = &grsec_socket_all_gid,
55276 + .maxlen = sizeof(int),
55277 + .mode = 0600,
55278 + .proc_handler = &proc_dointvec,
55279 + },
55280 +#endif
55281 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55282 + {
55283 + .ctl_name = CTL_UNNUMBERED,
55284 + .procname = "socket_client",
55285 + .data = &grsec_enable_socket_client,
55286 + .maxlen = sizeof(int),
55287 + .mode = 0600,
55288 + .proc_handler = &proc_dointvec,
55289 + },
55290 + {
55291 + .ctl_name = CTL_UNNUMBERED,
55292 + .procname = "socket_client_gid",
55293 + .data = &grsec_socket_client_gid,
55294 + .maxlen = sizeof(int),
55295 + .mode = 0600,
55296 + .proc_handler = &proc_dointvec,
55297 + },
55298 +#endif
55299 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55300 + {
55301 + .ctl_name = CTL_UNNUMBERED,
55302 + .procname = "socket_server",
55303 + .data = &grsec_enable_socket_server,
55304 + .maxlen = sizeof(int),
55305 + .mode = 0600,
55306 + .proc_handler = &proc_dointvec,
55307 + },
55308 + {
55309 + .ctl_name = CTL_UNNUMBERED,
55310 + .procname = "socket_server_gid",
55311 + .data = &grsec_socket_server_gid,
55312 + .maxlen = sizeof(int),
55313 + .mode = 0600,
55314 + .proc_handler = &proc_dointvec,
55315 + },
55316 +#endif
55317 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55318 + {
55319 + .ctl_name = CTL_UNNUMBERED,
55320 + .procname = "audit_group",
55321 + .data = &grsec_enable_group,
55322 + .maxlen = sizeof(int),
55323 + .mode = 0600,
55324 + .proc_handler = &proc_dointvec,
55325 + },
55326 + {
55327 + .ctl_name = CTL_UNNUMBERED,
55328 + .procname = "audit_gid",
55329 + .data = &grsec_audit_gid,
55330 + .maxlen = sizeof(int),
55331 + .mode = 0600,
55332 + .proc_handler = &proc_dointvec,
55333 + },
55334 +#endif
55335 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55336 + {
55337 + .ctl_name = CTL_UNNUMBERED,
55338 + .procname = "audit_chdir",
55339 + .data = &grsec_enable_chdir,
55340 + .maxlen = sizeof(int),
55341 + .mode = 0600,
55342 + .proc_handler = &proc_dointvec,
55343 + },
55344 +#endif
55345 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55346 + {
55347 + .ctl_name = CTL_UNNUMBERED,
55348 + .procname = "audit_mount",
55349 + .data = &grsec_enable_mount,
55350 + .maxlen = sizeof(int),
55351 + .mode = 0600,
55352 + .proc_handler = &proc_dointvec,
55353 + },
55354 +#endif
55355 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55356 + {
55357 + .ctl_name = CTL_UNNUMBERED,
55358 + .procname = "audit_textrel",
55359 + .data = &grsec_enable_audit_textrel,
55360 + .maxlen = sizeof(int),
55361 + .mode = 0600,
55362 + .proc_handler = &proc_dointvec,
55363 + },
55364 +#endif
55365 +#ifdef CONFIG_GRKERNSEC_DMESG
55366 + {
55367 + .ctl_name = CTL_UNNUMBERED,
55368 + .procname = "dmesg",
55369 + .data = &grsec_enable_dmesg,
55370 + .maxlen = sizeof(int),
55371 + .mode = 0600,
55372 + .proc_handler = &proc_dointvec,
55373 + },
55374 +#endif
55375 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55376 + {
55377 + .ctl_name = CTL_UNNUMBERED,
55378 + .procname = "chroot_findtask",
55379 + .data = &grsec_enable_chroot_findtask,
55380 + .maxlen = sizeof(int),
55381 + .mode = 0600,
55382 + .proc_handler = &proc_dointvec,
55383 + },
55384 +#endif
55385 +#ifdef CONFIG_GRKERNSEC_RESLOG
55386 + {
55387 + .ctl_name = CTL_UNNUMBERED,
55388 + .procname = "resource_logging",
55389 + .data = &grsec_resource_logging,
55390 + .maxlen = sizeof(int),
55391 + .mode = 0600,
55392 + .proc_handler = &proc_dointvec,
55393 + },
55394 +#endif
55395 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55396 + {
55397 + .ctl_name = CTL_UNNUMBERED,
55398 + .procname = "audit_ptrace",
55399 + .data = &grsec_enable_audit_ptrace,
55400 + .maxlen = sizeof(int),
55401 + .mode = 0600,
55402 + .proc_handler = &proc_dointvec,
55403 + },
55404 +#endif
55405 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55406 + {
55407 + .ctl_name = CTL_UNNUMBERED,
55408 + .procname = "harden_ptrace",
55409 + .data = &grsec_enable_harden_ptrace,
55410 + .maxlen = sizeof(int),
55411 + .mode = 0600,
55412 + .proc_handler = &proc_dointvec,
55413 + },
55414 +#endif
55415 + {
55416 + .ctl_name = CTL_UNNUMBERED,
55417 + .procname = "grsec_lock",
55418 + .data = &grsec_lock,
55419 + .maxlen = sizeof(int),
55420 + .mode = 0600,
55421 + .proc_handler = &proc_dointvec,
55422 + },
55423 +#endif
55424 +#ifdef CONFIG_GRKERNSEC_ROFS
55425 + {
55426 + .ctl_name = CTL_UNNUMBERED,
55427 + .procname = "romount_protect",
55428 + .data = &grsec_enable_rofs,
55429 + .maxlen = sizeof(int),
55430 + .mode = 0600,
55431 + .proc_handler = &proc_dointvec_minmax,
55432 + .extra1 = &one,
55433 + .extra2 = &one,
55434 + },
55435 +#endif
55436 + { .ctl_name = 0 }
55437 +};
55438 +#endif
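
Each entry in grsecurity_table above exposes one toggle as a root-only (mode 0600) integer sysctl, and gr_handle_sysctl_mod() rejects further writes to the directory once grsec_lock is set. The sketch below simply writes two of those files from userspace; it assumes the table is registered under kernel/grsecurity as usual for grsecurity (the registration itself is not part of this hunk), and the audit_mount/grsec_lock procnames are taken from the table above.

#include <stdio.h>

/* Write an integer value to a sysctl file such as
 * /proc/sys/kernel/grsecurity/audit_mount. Returns 0 on success. */
static int sysctl_write(const char *path, int value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", value);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* Enable mount auditing, then lock the grsecurity sysctl tree;
	 * once grsec_lock is 1, gr_handle_sysctl_mod() above refuses
	 * further writes to this directory until reboot. */
	sysctl_write("/proc/sys/kernel/grsecurity/audit_mount", 1);
	sysctl_write("/proc/sys/kernel/grsecurity/grsec_lock", 1);
	return 0;
}
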
55439 diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55440 --- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55441 +++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55442 @@ -0,0 +1,16 @@
55443 +#include <linux/kernel.h>
55444 +#include <linux/sched.h>
55445 +#include <linux/grinternal.h>
55446 +#include <linux/module.h>
55447 +
55448 +void
55449 +gr_log_timechange(void)
55450 +{
55451 +#ifdef CONFIG_GRKERNSEC_TIME
55452 + if (grsec_enable_time)
55453 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55454 +#endif
55455 + return;
55456 +}
55457 +
55458 +EXPORT_SYMBOL(gr_log_timechange);
55459 diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55460 --- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55461 +++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55462 @@ -0,0 +1,39 @@
55463 +#include <linux/kernel.h>
55464 +#include <linux/sched.h>
55465 +#include <linux/file.h>
55466 +#include <linux/fs.h>
55467 +#include <linux/grinternal.h>
55468 +
55469 +extern int gr_acl_tpe_check(void);
55470 +
55471 +int
55472 +gr_tpe_allow(const struct file *file)
55473 +{
55474 +#ifdef CONFIG_GRKERNSEC
55475 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55476 + const struct cred *cred = current_cred();
55477 +
55478 + if (cred->uid && ((grsec_enable_tpe &&
55479 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55480 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55481 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55482 +#else
55483 + in_group_p(grsec_tpe_gid)
55484 +#endif
55485 + ) || gr_acl_tpe_check()) &&
55486 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55487 + (inode->i_mode & S_IWOTH))))) {
55488 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55489 + return 0;
55490 + }
55491 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55492 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55493 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55494 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55495 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55496 + return 0;
55497 + }
55498 +#endif
55499 +#endif
55500 + return 1;
55501 +}
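
gr_tpe_allow() above denies execution to non-root users subject to TPE when the directory containing the binary is not owned by root or is group/world-writable, with tpe_invert flipping the group test and tpe_restrict_all extending the check to every non-root user. The userspace restatement below keeps only that core directory predicate; the helper name and paths are illustrative, and the RBAC, group and invert branches are deliberately omitted.

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Return 1 if binaries under 'dir_path' may be executed under a
 * simplified TPE policy: root is always allowed; everyone else may
 * only execute from directories that are owned by root and neither
 * group- nor world-writable. (The in-kernel check additionally
 * consults grsec_tpe_gid, tpe_invert and tpe_restrict_all.) */
static int tpe_allow(const char *dir_path)
{
	struct stat dir;

	if (geteuid() == 0)
		return 1;
	if (stat(dir_path, &dir) != 0)
		return 0;
	if (dir.st_uid != 0 || (dir.st_mode & (S_IWGRP | S_IWOTH)))
		return 0;      /* untrusted directory: refuse to exec from it */
	return 1;
}

int main(void)
{
	printf("/usr/bin trusted: %d\n", tpe_allow("/usr/bin"));
	printf("/tmp trusted:     %d\n", tpe_allow("/tmp"));
	return 0;
}
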
55502 diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55503 --- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55504 +++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55505 @@ -0,0 +1,61 @@
55506 +#include <linux/err.h>
55507 +#include <linux/kernel.h>
55508 +#include <linux/sched.h>
55509 +#include <linux/mm.h>
55510 +#include <linux/scatterlist.h>
55511 +#include <linux/crypto.h>
55512 +#include <linux/gracl.h>
55513 +
55514 +
55515 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55516 +#error "crypto and sha256 must be built into the kernel"
55517 +#endif
55518 +
55519 +int
55520 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55521 +{
55522 + char *p;
55523 + struct crypto_hash *tfm;
55524 + struct hash_desc desc;
55525 + struct scatterlist sg;
55526 + unsigned char temp_sum[GR_SHA_LEN];
55527 + volatile int retval = 0;
55528 + volatile int dummy = 0;
55529 + unsigned int i;
55530 +
55531 + sg_init_table(&sg, 1);
55532 +
55533 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55534 + if (IS_ERR(tfm)) {
55535 + /* should never happen, since sha256 should be built in */
55536 + return 1;
55537 + }
55538 +
55539 + desc.tfm = tfm;
55540 + desc.flags = 0;
55541 +
55542 + crypto_hash_init(&desc);
55543 +
55544 + p = salt;
55545 + sg_set_buf(&sg, p, GR_SALT_LEN);
55546 + crypto_hash_update(&desc, &sg, sg.length);
55547 +
55548 + p = entry->pw;
55549 + sg_set_buf(&sg, p, strlen(p));
55550 +
55551 + crypto_hash_update(&desc, &sg, sg.length);
55552 +
55553 + crypto_hash_final(&desc, temp_sum);
55554 +
55555 + memset(entry->pw, 0, GR_PW_LEN);
55556 +
55557 + for (i = 0; i < GR_SHA_LEN; i++)
55558 + if (sum[i] != temp_sum[i])
55559 + retval = 1;
55560 + else
55561 + dummy = 1; // waste a cycle
55562 +
55563 + crypto_free_hash(tfm);
55564 +
55565 + return retval;
55566 +}
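
chkpw() above hashes the salt followed by the supplied password with SHA-256 and then compares all GR_SHA_LEN bytes against the stored sum without bailing out early (the volatile retval/dummy pair keeps the loop running to the end), so the comparison takes roughly the same time whether the mismatch is in the first byte or the last. The sketch below isolates that idea using an accumulating-XOR comparison rather than the flag/dummy form used in the patch; it is a standalone illustration, not the patch's code.

#include <stdio.h>

#define SHA_LEN 32   /* matches GR_SHA_LEN for SHA-256 */

/* Compare two digests without an early exit, so timing does not leak
 * how many leading bytes matched. Returns 0 on match, 1 on mismatch. */
static int digest_compare(const unsigned char *a, const unsigned char *b)
{
	unsigned char diff = 0;
	unsigned int i;

	for (i = 0; i < SHA_LEN; i++)
		diff |= a[i] ^ b[i];   /* accumulate differences instead of returning early */

	return diff != 0;
}

int main(void)
{
	unsigned char x[SHA_LEN] = { 0 }, y[SHA_LEN] = { 0 };

	printf("equal:   %d\n", digest_compare(x, y));
	y[SHA_LEN - 1] = 1;
	printf("unequal: %d\n", digest_compare(x, y));
	return 0;
}
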
55567 diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55568 --- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55569 +++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55570 @@ -0,0 +1,1037 @@
55571 +#
55572 +# grsecurity configuration
55573 +#
55574 +
55575 +menu "Grsecurity"
55576 +
55577 +config GRKERNSEC
55578 + bool "Grsecurity"
55579 + select CRYPTO
55580 + select CRYPTO_SHA256
55581 + help
55582 + If you say Y here, you will be able to configure many features
55583 + that will enhance the security of your system. It is highly
55584 + recommended that you say Y here and read through the help
55585 + for each option so that you fully understand the features and
55586 + can evaluate their usefulness for your machine.
55587 +
55588 +choice
55589 + prompt "Security Level"
55590 + depends on GRKERNSEC
55591 + default GRKERNSEC_CUSTOM
55592 +
55593 +config GRKERNSEC_LOW
55594 + bool "Low"
55595 + select GRKERNSEC_LINK
55596 + select GRKERNSEC_FIFO
55597 + select GRKERNSEC_RANDNET
55598 + select GRKERNSEC_DMESG
55599 + select GRKERNSEC_CHROOT
55600 + select GRKERNSEC_CHROOT_CHDIR
55601 +
55602 + help
55603 + If you choose this option, several of the grsecurity options will
55604 + be enabled that will give you greater protection against a number
55605 + of attacks, while assuring that none of your software will have any
55606 + conflicts with the additional security measures. If you run a lot
55607 + of unusual software, or you are having problems with the higher
55608 + security levels, you should say Y here. With this option, the
55609 + following features are enabled:
55610 +
55611 + - Linking restrictions
55612 + - FIFO restrictions
55613 + - Restricted dmesg
55614 + - Enforced chdir("/") on chroot
55615 + - Runtime module disabling
55616 +
55617 +config GRKERNSEC_MEDIUM
55618 + bool "Medium"
55619 + select PAX
55620 + select PAX_EI_PAX
55621 + select PAX_PT_PAX_FLAGS
55622 + select PAX_HAVE_ACL_FLAGS
55623 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55624 + select GRKERNSEC_CHROOT
55625 + select GRKERNSEC_CHROOT_SYSCTL
55626 + select GRKERNSEC_LINK
55627 + select GRKERNSEC_FIFO
55628 + select GRKERNSEC_DMESG
55629 + select GRKERNSEC_RANDNET
55630 + select GRKERNSEC_FORKFAIL
55631 + select GRKERNSEC_TIME
55632 + select GRKERNSEC_SIGNAL
55633 + select GRKERNSEC_CHROOT
55634 + select GRKERNSEC_CHROOT_UNIX
55635 + select GRKERNSEC_CHROOT_MOUNT
55636 + select GRKERNSEC_CHROOT_PIVOT
55637 + select GRKERNSEC_CHROOT_DOUBLE
55638 + select GRKERNSEC_CHROOT_CHDIR
55639 + select GRKERNSEC_CHROOT_MKNOD
55640 + select GRKERNSEC_PROC
55641 + select GRKERNSEC_PROC_USERGROUP
55642 + select PAX_RANDUSTACK
55643 + select PAX_ASLR
55644 + select PAX_RANDMMAP
55645 + select PAX_REFCOUNT if (X86 || SPARC64)
55646 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55647 +
55648 + help
55649 + If you say Y here, several features in addition to those included
55650 + in the low additional security level will be enabled. These
55651 + features provide even more security to your system, though in rare
55652 + cases they may be incompatible with very old or poorly written
55653 + software. If you enable this option, make sure that your auth
55654 + service (identd) is running as gid 1001. With this option,
55655 + the following features (in addition to those provided in the
55656 + low additional security level) will be enabled:
55657 +
55658 + - Failed fork logging
55659 + - Time change logging
55660 + - Signal logging
55661 + - Deny mounts in chroot
55662 + - Deny double chrooting
55663 + - Deny sysctl writes in chroot
55664 + - Deny mknod in chroot
55665 + - Deny access to abstract AF_UNIX sockets out of chroot
55666 + - Deny pivot_root in chroot
55667 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55668 + - /proc restrictions with special GID set to 10 (usually wheel)
55669 + - Address Space Layout Randomization (ASLR)
55670 + - Prevent exploitation of most refcount overflows
55671 + - Bounds checking of copying between the kernel and userland
55672 +
55673 +config GRKERNSEC_HIGH
55674 + bool "High"
55675 + select GRKERNSEC_LINK
55676 + select GRKERNSEC_FIFO
55677 + select GRKERNSEC_DMESG
55678 + select GRKERNSEC_FORKFAIL
55679 + select GRKERNSEC_TIME
55680 + select GRKERNSEC_SIGNAL
55681 + select GRKERNSEC_CHROOT
55682 + select GRKERNSEC_CHROOT_SHMAT
55683 + select GRKERNSEC_CHROOT_UNIX
55684 + select GRKERNSEC_CHROOT_MOUNT
55685 + select GRKERNSEC_CHROOT_FCHDIR
55686 + select GRKERNSEC_CHROOT_PIVOT
55687 + select GRKERNSEC_CHROOT_DOUBLE
55688 + select GRKERNSEC_CHROOT_CHDIR
55689 + select GRKERNSEC_CHROOT_MKNOD
55690 + select GRKERNSEC_CHROOT_CAPS
55691 + select GRKERNSEC_CHROOT_SYSCTL
55692 + select GRKERNSEC_CHROOT_FINDTASK
55693 + select GRKERNSEC_SYSFS_RESTRICT
55694 + select GRKERNSEC_PROC
55695 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55696 + select GRKERNSEC_HIDESYM
55697 + select GRKERNSEC_BRUTE
55698 + select GRKERNSEC_PROC_USERGROUP
55699 + select GRKERNSEC_KMEM
55700 + select GRKERNSEC_RESLOG
55701 + select GRKERNSEC_RANDNET
55702 + select GRKERNSEC_PROC_ADD
55703 + select GRKERNSEC_CHROOT_CHMOD
55704 + select GRKERNSEC_CHROOT_NICE
55705 + select GRKERNSEC_AUDIT_MOUNT
55706 + select GRKERNSEC_MODHARDEN if (MODULES)
55707 + select GRKERNSEC_HARDEN_PTRACE
55708 + select GRKERNSEC_VM86 if (X86_32)
55709 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55710 + select PAX
55711 + select PAX_RANDUSTACK
55712 + select PAX_ASLR
55713 + select PAX_RANDMMAP
55714 + select PAX_NOEXEC
55715 + select PAX_MPROTECT
55716 + select PAX_EI_PAX
55717 + select PAX_PT_PAX_FLAGS
55718 + select PAX_HAVE_ACL_FLAGS
55719 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55720 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55721 + select PAX_RANDKSTACK if (X86_TSC && X86)
55722 + select PAX_SEGMEXEC if (X86_32)
55723 + select PAX_PAGEEXEC
55724 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55725 + select PAX_EMUTRAMP if (PARISC)
55726 + select PAX_EMUSIGRT if (PARISC)
55727 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55728 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55729 + select PAX_REFCOUNT if (X86 || SPARC64)
55730 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55731 + help
55732 + If you say Y here, many of the features of grsecurity will be
55733 + enabled, which will protect you against many kinds of attacks
55734 + against your system. The heightened security comes at a cost
55735 + of an increased chance of incompatibilities with rare software
55736 + on your machine. Since this security level enables PaX, you should
55737 + view <http://pax.grsecurity.net> and read about the PaX
55738 + project. While you are there, download chpax and run it on
55739 + binaries that cause problems with PaX. Also remember that
55740 + since the /proc restrictions are enabled, you must run your
55741 + identd as gid 1001. This security level enables the following
55742 + features in addition to those listed in the low and medium
55743 + security levels:
55744 +
55745 + - Additional /proc restrictions
55746 + - Chmod restrictions in chroot
55747 + - No signals, ptrace, or viewing of processes outside of chroot
55748 + - Capability restrictions in chroot
55749 + - Deny fchdir out of chroot
55750 + - Priority restrictions in chroot
55751 + - Segmentation-based implementation of PaX
55752 + - Mprotect restrictions
55753 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55754 + - Kernel stack randomization
55755 + - Mount/unmount/remount logging
55756 + - Kernel symbol hiding
55757 + - Prevention of memory exhaustion-based exploits
55758 + - Hardening of module auto-loading
55759 + - Ptrace restrictions
55760 + - Restricted vm86 mode
55761 + - Restricted sysfs/debugfs
55762 + - Active kernel exploit response
55763 +
55764 +config GRKERNSEC_CUSTOM
55765 + bool "Custom"
55766 + help
55767 + If you say Y here, you will be able to configure every grsecurity
55768 + option, which allows you to enable many more features that aren't
55769 + covered in the basic security levels. These additional features
55770 + include TPE, socket restrictions, and the sysctl system for
55771 + grsecurity. It is advised that you read through the help for
55772 + each option to determine its usefulness in your situation.
55773 +
55774 +endchoice
55775 +
55776 +menu "Address Space Protection"
55777 +depends on GRKERNSEC
55778 +
55779 +config GRKERNSEC_KMEM
55780 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55781 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55782 + help
55783 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55784 + be written to via mmap or otherwise to modify the running kernel.
55785 + /dev/port will also not be allowed to be opened. If you have module
55786 + support disabled, enabling this will close up four ways that are
55787 + currently used to insert malicious code into the running kernel.
55788 + Even with all these features enabled, we still highly recommend that
55789 + you use the RBAC system, as it is still possible for an attacker to
55790 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55791 + If you are not using XFree86, you may be able to stop this additional
55792 + case by enabling the 'Disable privileged I/O' option. Though nothing
55793 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55794 + but only to video memory, which is the only writing we allow in this
55795 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
55796 + mapping cannot later be mprotect'ed to gain PROT_WRITE.
55797 + It is highly recommended that you say Y here if you meet all the
55798 + conditions above.
55799 +
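As an illustration of the restriction described above (not part of the patch), the sketch below tries to open /dev/port and to obtain a writable mapping of /dev/mem. Run it as root on a kernel built with this option so an ordinary permission denial is not mistaken for the grsecurity one; the exact errno is not specified by the help text, so any failure is treated as the restriction being active.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *map;
	int fd;

	/* The help text says /dev/port may not even be opened. */
	fd = open("/dev/port", O_RDWR);
	if (fd < 0)
		printf("open of /dev/port denied: %s\n", strerror(errno));
	else
		close(fd);

	/* For /dev/mem, writable access is what the option forbids.
	 * Nothing is written here; the mapping attempt alone is enough. */
	fd = open("/dev/mem", O_RDWR);
	if (fd < 0) {
		printf("writable open of /dev/mem denied: %s\n", strerror(errno));
		return 0;
	}
	map = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		printf("writable mmap of /dev/mem denied: %s\n", strerror(errno));
	else
		munmap(map, page);
	close(fd);
	return 0;
}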
55800 +config GRKERNSEC_VM86
55801 + bool "Restrict VM86 mode"
55802 + depends on X86_32
55803 +
55804 + help
55805 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55806 + make use of a special execution mode on 32bit x86 processors called
55807 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55808 + video cards and will still work with this option enabled. The purpose
55809 + of the option is to prevent exploitation of emulation errors in
55810 + virtualization of vm86 mode like the one discovered in VMware in 2009.
55811 + Nearly all users should be able to enable this option.
55812 +
55813 +config GRKERNSEC_IO
55814 + bool "Disable privileged I/O"
55815 + depends on X86
55816 + select RTC_CLASS
55817 + select RTC_INTF_DEV
55818 + select RTC_DRV_CMOS
55819 +
55820 + help
55821 + If you say Y here, all ioperm and iopl calls will return an error.
55822 + Ioperm and iopl can be used to modify the running kernel.
55823 + Unfortunately, some programs need this access to operate properly,
55824 + the most notable of which are XFree86 and hwclock. The hwclock case
55825 + can be remedied by building RTC support into the kernel, so real-time
55826 + clock support is enabled if this option is enabled, to ensure
55827 + that hwclock operates correctly. XFree86 still will not
55828 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55829 + IF YOU USE XFree86. If you use XFree86 and you still want to
55830 + protect your kernel against modification, use the RBAC system.
55831 +
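A minimal way to observe the behavior described above, offered as an illustration rather than as part of the patch: on an x86 machine, iopl(3) normally succeeds for root, while a kernel built with this option is expected to reject the call. Build and run as root.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/io.h>	/* iopl(), x86 only */

int main(void)
{
	if (iopl(3) < 0) {
		/* Expected outcome with GRKERNSEC_IO: the call fails even for root. */
		printf("iopl(3) rejected: %s\n", strerror(errno));
		return 0;
	}
	printf("iopl(3) succeeded; privileged I/O is available\n");
	iopl(0);	/* drop the raised I/O privilege level again */
	return 0;
}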
55832 +config GRKERNSEC_PROC_MEMMAP
55833 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55834 + default y if (PAX_NOEXEC || PAX_ASLR)
55835 + depends on PAX_NOEXEC || PAX_ASLR
55836 + help
55837 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55838 + give no information about the addresses of a task's mappings if
55839 + PaX features that rely on random addresses are enabled for the task.
55840 + If you use PaX, it is strongly recommended that you say Y here, as it
55841 + closes up a hole that makes the full ASLR useless for suid
55842 + binaries.
55843 +
55844 +config GRKERNSEC_BRUTE
55845 + bool "Deter exploit bruteforcing"
55846 + help
55847 + If you say Y here, attempts to bruteforce exploits against forking
55848 + daemons such as apache or sshd, as well as against suid/sgid binaries
55849 + will be deterred. When a child of a forking daemon is killed by PaX
55850 + or crashes due to an illegal instruction or other suspicious signal,
55851 + the parent process will be delayed 30 seconds upon every subsequent
55852 + fork until the administrator is able to assess the situation and
55853 + restart the daemon.
55854 + In the suid/sgid case, the attempt is logged, the user has all their
55855 + processes terminated, and they are prevented from executing any further
55856 + processes for 15 minutes.
55857 + It is recommended that you also enable signal logging in the auditing
55858 + section so that logs are generated when a process triggers a suspicious
55859 + signal.
55860 + If the sysctl option is enabled, a sysctl option with name
55861 + "deter_bruteforce" is created.
55862 +
55863 +config GRKERNSEC_MODHARDEN
55864 + bool "Harden module auto-loading"
55865 + depends on MODULES
55866 + help
55867 + If you say Y here, module auto-loading in response to use of some
55868 + feature implemented by an unloaded module will be restricted to
55869 + root users. Enabling this option helps defend against attacks
55870 + by unprivileged users who abuse the auto-loading behavior to
55871 + cause a vulnerable module to load that is then exploited.
55872 +
55873 + If this option prevents a legitimate use of auto-loading for a
55874 + non-root user, the administrator can execute modprobe manually
55875 + with the exact name of the module mentioned in the alert log.
55876 + Alternatively, the administrator can add the module to the list
55877 + of modules loaded at boot by modifying init scripts.
55878 +
55879 + Modification of init scripts will most likely be needed on
55880 + Ubuntu servers with encrypted home directory support enabled,
55881 + as the first non-root user logging in will cause the ecb(aes),
55882 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55883 +
55884 +config GRKERNSEC_HIDESYM
55885 + bool "Hide kernel symbols"
55886 + help
55887 + If you say Y here, getting information on loaded modules, and
55888 + displaying all kernel symbols through a syscall will be restricted
55889 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55890 + /proc/kallsyms will be restricted to the root user. The RBAC
55891 + system can hide that entry even from root.
55892 +
55893 + This option also prevents leaking of kernel addresses through
55894 + several /proc entries.
55895 +
55896 + Note that this option is only effective provided the following
55897 + conditions are met:
55898 + 1) The kernel using grsecurity is not precompiled by some distribution
55899 + 2) You have also enabled GRKERNSEC_DMESG
55900 + 3) You are using the RBAC system and hiding other files such as your
55901 + kernel image and System.map. Alternatively, enabling this option
55902 + causes the permissions on /boot, /lib/modules, and the kernel
55903 + source directory to change at compile time to prevent
55904 + reading by non-root users.
55905 + If the above conditions are met, this option will aid in providing a
55906 + useful protection against local kernel exploitation of overflows
55907 + and arbitrary read/write vulnerabilities.
55908 +
55909 +config GRKERNSEC_KERN_LOCKOUT
55910 + bool "Active kernel exploit response"
55911 + depends on X86 || ARM || PPC || SPARC
55912 + help
55913 + If you say Y here, when a PaX alert is triggered due to suspicious
55914 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55915 + or an OOPS occurs due to bad memory accesses, instead of just
55916 + terminating the offending process (and potentially allowing
55917 + a subsequent exploit from the same user), we will take one of two
55918 + actions:
55919 + If the user was root, we will panic the system
55920 + If the user was non-root, we will log the attempt, terminate
55921 + all processes owned by the user, then prevent them from creating
55922 + any new processes until the system is restarted
55923 + This deters repeated kernel exploitation/bruteforcing attempts
55924 + and is useful for later forensics.
55925 +
55926 +endmenu
55927 +menu "Role Based Access Control Options"
55928 +depends on GRKERNSEC
55929 +
55930 +config GRKERNSEC_RBAC_DEBUG
55931 + bool
55932 +
55933 +config GRKERNSEC_NO_RBAC
55934 + bool "Disable RBAC system"
55935 + help
55936 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55937 + preventing the RBAC system from being enabled. You should only say Y
55938 + here if you have no intention of using the RBAC system, so as to prevent
55939 + an attacker with root access from misusing the RBAC system to hide files
55940 + and processes when loadable module support and /dev/[k]mem have been
55941 + locked down.
55942 +
55943 +config GRKERNSEC_ACL_HIDEKERN
55944 + bool "Hide kernel processes"
55945 + help
55946 + If you say Y here, all kernel threads will be hidden to all
55947 + processes but those whose subject has the "view hidden processes"
55948 + flag.
55949 +
55950 +config GRKERNSEC_ACL_MAXTRIES
55951 + int "Maximum tries before password lockout"
55952 + default 3
55953 + help
55954 + This option enforces the maximum number of times a user can attempt
55955 + to authorize themselves with the grsecurity RBAC system before being
55956 + denied the ability to attempt authorization again for a specified time.
55957 + The lower the number, the harder it will be to brute-force a password.
55958 +
55959 +config GRKERNSEC_ACL_TIMEOUT
55960 + int "Time to wait after max password tries, in seconds"
55961 + default 30
55962 + help
55963 + This option specifies the time the user must wait after attempting to
55964 + authorize to the RBAC system with the maximum number of invalid
55965 + passwords. The higher the number, the harder it will be to brute-force
55966 + a password.
55967 +
55968 +endmenu
55969 +menu "Filesystem Protections"
55970 +depends on GRKERNSEC
55971 +
55972 +config GRKERNSEC_PROC
55973 + bool "Proc restrictions"
55974 + help
55975 + If you say Y here, the permissions of the /proc filesystem
55976 + will be altered to enhance system security and privacy. You MUST
55977 + choose either a user only restriction or a user and group restriction.
55978 + Depending upon the option you choose, you can either restrict users to
55979 + see only the processes they themselves run, or choose a group whose
55980 + members can view all processes and files normally restricted to root
55981 + (the "allow special group" option). NOTE: If you're running identd as
55982 + a non-root user, you will have to run it as the group you specify here.
55983 +
55984 +config GRKERNSEC_PROC_USER
55985 + bool "Restrict /proc to user only"
55986 + depends on GRKERNSEC_PROC
55987 + help
55988 + If you say Y here, non-root users will only be able to view their own
55989 + processes, and will be restricted from viewing network-related
55990 + information as well as kernel symbol and module information.
55991 +
55992 +config GRKERNSEC_PROC_USERGROUP
55993 + bool "Allow special group"
55994 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55995 + help
55996 + If you say Y here, you will be able to select a group that will be
55997 + able to view all processes and network-related information. If you've
55998 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55999 + remain hidden. This option is useful if you want to run identd as
56000 + a non-root user.
56001 +
56002 +config GRKERNSEC_PROC_GID
56003 + int "GID for special group"
56004 + depends on GRKERNSEC_PROC_USERGROUP
56005 + default 1001
56006 +
56007 +config GRKERNSEC_PROC_ADD
56008 + bool "Additional restrictions"
56009 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56010 + help
56011 + If you say Y here, additional restrictions will be placed on
56012 + /proc that keep normal users from viewing device information and
56013 + slabinfo information that could be useful for exploits.
56014 +
56015 +config GRKERNSEC_LINK
56016 + bool "Linking restrictions"
56017 + help
56018 + If you say Y here, /tmp race exploits will be prevented, since users
56019 + will no longer be able to follow symlinks owned by other users in
56020 + world-writable +t directories (e.g. /tmp), unless the owner of the
56021 + symlink is the owner of the directory. Users will also not be
56022 + able to hardlink to files they do not own. If the sysctl option is
56023 + enabled, a sysctl option with name "linking_restrictions" is created.
56024 +
56025 +config GRKERNSEC_FIFO
56026 + bool "FIFO restrictions"
56027 + help
56028 + If you say Y here, users will not be able to write to FIFOs they don't
56029 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56030 + the FIFO is also the owner of the directory it's held in. If the sysctl
56031 + option is enabled, a sysctl option with name "fifo_restrictions" is
56032 + created.
56033 +
56034 +config GRKERNSEC_SYSFS_RESTRICT
56035 + bool "Sysfs/debugfs restriction"
56036 + depends on SYSFS
56037 + help
56038 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56039 + any filesystem normally mounted under it (e.g. debugfs) will only
56040 + be accessible by root. These filesystems generally provide access
56041 + to hardware and debug information that isn't appropriate for unprivileged
56042 + users of the system. Sysfs and debugfs have also become a large source
56043 + of new vulnerabilities, ranging from infoleaks to local compromise.
56044 + There has been very little oversight with an eye toward security involved
56045 + in adding new exporters of information to these filesystems, so their
56046 + use is discouraged.
56047 + This option is equivalent to a chmod 0700 of the mount paths.
56048 +
56049 +config GRKERNSEC_ROFS
56050 + bool "Runtime read-only mount protection"
56051 + help
56052 + If you say Y here, a sysctl option with name "romount_protect" will
56053 + be created. By setting this option to 1 at runtime, filesystems
56054 + will be protected in the following ways:
56055 + * No new writable mounts will be allowed
56056 + * Existing read-only mounts won't be able to be remounted read/write
56057 + * Write operations will be denied on all block devices
56058 + This option acts independently of grsec_lock: once it is set to 1,
56059 + it cannot be turned off. Therefore, please be mindful of the resulting
56060 + behavior if this option is enabled in an init script on a read-only
56061 + filesystem. This feature is mainly intended for secure embedded systems.
56062 +
56063 +config GRKERNSEC_CHROOT
56064 + bool "Chroot jail restrictions"
56065 + help
56066 + If you say Y here, you will be able to choose several options that will
56067 + make breaking out of a chrooted jail much more difficult. If you
56068 + encounter no software incompatibilities with the following options, it
56069 + is recommended that you enable each one.
56070 +
56071 +config GRKERNSEC_CHROOT_MOUNT
56072 + bool "Deny mounts"
56073 + depends on GRKERNSEC_CHROOT
56074 + help
56075 + If you say Y here, processes inside a chroot will not be able to
56076 + mount or remount filesystems. If the sysctl option is enabled, a
56077 + sysctl option with name "chroot_deny_mount" is created.
56078 +
56079 +config GRKERNSEC_CHROOT_DOUBLE
56080 + bool "Deny double-chroots"
56081 + depends on GRKERNSEC_CHROOT
56082 + help
56083 + If you say Y here, processes inside a chroot will not be able to chroot
56084 + again outside the chroot. This is a widely used method of breaking
56085 + out of a chroot jail and should not be allowed. If the sysctl
56086 + option is enabled, a sysctl option with name
56087 + "chroot_deny_chroot" is created.
56088 +
56089 +config GRKERNSEC_CHROOT_PIVOT
56090 + bool "Deny pivot_root in chroot"
56091 + depends on GRKERNSEC_CHROOT
56092 + help
56093 + If you say Y here, processes inside a chroot will not be able to use
56094 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56095 + works similarly to chroot in that it changes the root filesystem. This
56096 + function could be misused in a chrooted process to attempt to break out
56097 + of the chroot, and therefore should not be allowed. If the sysctl
56098 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56099 + created.
56100 +
56101 +config GRKERNSEC_CHROOT_CHDIR
56102 + bool "Enforce chdir(\"/\") on all chroots"
56103 + depends on GRKERNSEC_CHROOT
56104 + help
56105 + If you say Y here, the current working directory of all newly-chrooted
56106 + applications will be set to the root directory of the chroot.
56107 + The man page on chroot(2) states:
56108 + Note that this call does not change the current working
56109 + directory, so that `.' can be outside the tree rooted at
56110 + `/'. In particular, the super-user can escape from a
56111 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56112 +
56113 + It is recommended that you say Y here, since it's not known to break
56114 + any software. If the sysctl option is enabled, a sysctl option with
56115 + name "chroot_enforce_chdir" is created.
56116 +
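The effect can be observed with a small program like the sketch below (illustrative only, not from the patch): it chroots into a directory given on the command line and prints the working directory afterwards without issuing a chdir() of its own. With this option enabled the kernel is expected to report "/"; on a stock kernel the working directory is left pointing outside the jail, so getcwd() reports something other than "/" (or fails). Run as root against a scratch directory.

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];

	if (argc < 2) {
		fprintf(stderr, "usage: %s <jail directory>\n", argv[0]);
		return 1;
	}
	if (chroot(argv[1]) < 0) {
		perror("chroot");
		return 1;
	}
	/* Note: no chdir() is issued here on purpose. */
	if (getcwd(buf, sizeof(buf)))
		printf("cwd after chroot: %s\n", buf);
	else
		perror("getcwd");
	return 0;
}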
56117 +config GRKERNSEC_CHROOT_CHMOD
56118 + bool "Deny (f)chmod +s"
56119 + depends on GRKERNSEC_CHROOT
56120 + help
56121 + If you say Y here, processes inside a chroot will not be able to chmod
56122 + or fchmod files to make them have suid or sgid bits. This protects
56123 + against another published method of breaking a chroot. If the sysctl
56124 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56125 + created.
56126 +
56127 +config GRKERNSEC_CHROOT_FCHDIR
56128 + bool "Deny fchdir out of chroot"
56129 + depends on GRKERNSEC_CHROOT
56130 + help
56131 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56132 + to a file descriptor of the chrooting process that points to a directory
56133 + outside the filesystem will be stopped. If the sysctl option
56134 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56135 +
56136 +config GRKERNSEC_CHROOT_MKNOD
56137 + bool "Deny mknod"
56138 + depends on GRKERNSEC_CHROOT
56139 + help
56140 + If you say Y here, processes inside a chroot will not be allowed to
56141 + mknod. The problem with using mknod inside a chroot is that it
56142 + would allow an attacker to create a device entry that is the same
56143 + as one on the physical root of your system, which could be anything
56144 + from the console device to a device for your hard drive (which
56145 + they could then use to wipe the drive or steal data). It is recommended
56146 + that you say Y here, unless you run into software incompatibilities.
56147 + If the sysctl option is enabled, a sysctl option with name
56148 + "chroot_deny_mknod" is created.
56149 +
56150 +config GRKERNSEC_CHROOT_SHMAT
56151 + bool "Deny shmat() out of chroot"
56152 + depends on GRKERNSEC_CHROOT
56153 + help
56154 + If you say Y here, processes inside a chroot will not be able to attach
56155 + to shared memory segments that were created outside of the chroot jail.
56156 + It is recommended that you say Y here. If the sysctl option is enabled,
56157 + a sysctl option with name "chroot_deny_shmat" is created.
56158 +
56159 +config GRKERNSEC_CHROOT_UNIX
56160 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56161 + depends on GRKERNSEC_CHROOT
56162 + help
56163 + If you say Y here, processes inside a chroot will not be able to
56164 + connect to abstract (meaning not belonging to a filesystem) Unix
56165 + domain sockets that were bound outside of a chroot. It is recommended
56166 + that you say Y here. If the sysctl option is enabled, a sysctl option
56167 + with name "chroot_deny_unix" is created.
56168 +
56169 +config GRKERNSEC_CHROOT_FINDTASK
56170 + bool "Protect outside processes"
56171 + depends on GRKERNSEC_CHROOT
56172 + help
56173 + If you say Y here, processes inside a chroot will not be able to
56174 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56175 + getsid, or view any process outside of the chroot. If the sysctl
56176 + option is enabled, a sysctl option with name "chroot_findtask" is
56177 + created.
56178 +
56179 +config GRKERNSEC_CHROOT_NICE
56180 + bool "Restrict priority changes"
56181 + depends on GRKERNSEC_CHROOT
56182 + help
56183 + If you say Y here, processes inside a chroot will not be able to raise
56184 + the priority of processes in the chroot, or alter the priority of
56185 + processes outside the chroot. This provides more security than simply
56186 + removing CAP_SYS_NICE from the process' capability set. If the
56187 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56188 + is created.
56189 +
56190 +config GRKERNSEC_CHROOT_SYSCTL
56191 + bool "Deny sysctl writes"
56192 + depends on GRKERNSEC_CHROOT
56193 + help
56194 + If you say Y here, an attacker in a chroot will not be able to
56195 + write to sysctl entries, either by sysctl(2) or through a /proc
56196 + interface. It is strongly recommended that you say Y here. If the
56197 + sysctl option is enabled, a sysctl option with name
56198 + "chroot_deny_sysctl" is created.
56199 +
56200 +config GRKERNSEC_CHROOT_CAPS
56201 + bool "Capability restrictions"
56202 + depends on GRKERNSEC_CHROOT
56203 + help
56204 + If you say Y here, the capabilities on all root processes within a
56205 + chroot jail will be lowered to stop module insertion, raw I/O,
56206 + system and net admin tasks, rebooting the system, modifying immutable
56207 + files, modifying IPC owned by another, and changing the system time.
56208 + This is left as an option because it can break some apps. Disable this
56209 + if your chrooted apps are having problems performing those kinds of
56210 + tasks. If the sysctl option is enabled, a sysctl option with
56211 + name "chroot_caps" is created.
56212 +
56213 +endmenu
56214 +menu "Kernel Auditing"
56215 +depends on GRKERNSEC
56216 +
56217 +config GRKERNSEC_AUDIT_GROUP
56218 + bool "Single group for auditing"
56219 + help
56220 + If you say Y here, the exec, chdir, and (un)mount logging features
56221 + will only operate on a group you specify. This option is recommended
56222 + if you only want to watch certain users instead of having a large
56223 + amount of logs from the entire system. If the sysctl option is enabled,
56224 + a sysctl option with name "audit_group" is created.
56225 +
56226 +config GRKERNSEC_AUDIT_GID
56227 + int "GID for auditing"
56228 + depends on GRKERNSEC_AUDIT_GROUP
56229 + default 1007
56230 +
56231 +config GRKERNSEC_EXECLOG
56232 + bool "Exec logging"
56233 + help
56234 + If you say Y here, all execve() calls will be logged (since the
56235 + other exec*() calls are frontends to execve(), all execution
56236 + will be logged). Useful for shell-servers that like to keep track
56237 + of their users. If the sysctl option is enabled, a sysctl option with
56238 + name "exec_logging" is created.
56239 + WARNING: This option when enabled will produce a LOT of logs, especially
56240 + on an active system.
56241 +
56242 +config GRKERNSEC_RESLOG
56243 + bool "Resource logging"
56244 + help
56245 + If you say Y here, all attempts to overstep resource limits will
56246 + be logged with the resource name, the requested size, and the current
56247 + limit. It is highly recommended that you say Y here. If the sysctl
56248 + option is enabled, a sysctl option with name "resource_logging" is
56249 + created. If the RBAC system is enabled, the sysctl value is ignored.
56250 +
56251 +config GRKERNSEC_CHROOT_EXECLOG
56252 + bool "Log execs within chroot"
56253 + help
56254 + If you say Y here, all executions inside a chroot jail will be logged
56255 + to syslog. This can cause a large amount of logs if certain
56256 + applications (eg. djb's daemontools) are installed on the system, and
56257 + is therefore left as an option. If the sysctl option is enabled, a
56258 + sysctl option with name "chroot_execlog" is created.
56259 +
56260 +config GRKERNSEC_AUDIT_PTRACE
56261 + bool "Ptrace logging"
56262 + help
56263 + If you say Y here, all attempts to attach to a process via ptrace
56264 + will be logged. If the sysctl option is enabled, a sysctl option
56265 + with name "audit_ptrace" is created.
56266 +
56267 +config GRKERNSEC_AUDIT_CHDIR
56268 + bool "Chdir logging"
56269 + help
56270 + If you say Y here, all chdir() calls will be logged. If the sysctl
56271 + option is enabled, a sysctl option with name "audit_chdir" is created.
56272 +
56273 +config GRKERNSEC_AUDIT_MOUNT
56274 + bool "(Un)Mount logging"
56275 + help
56276 + If you say Y here, all mounts and unmounts will be logged. If the
56277 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56278 + created.
56279 +
56280 +config GRKERNSEC_SIGNAL
56281 + bool "Signal logging"
56282 + help
56283 + If you say Y here, certain important signals will be logged, such as
56284 + SIGSEGV, which will as a result inform you when an error in a program
56285 + occurred, which in some cases could mean a possible exploit attempt.
56286 + If the sysctl option is enabled, a sysctl option with name
56287 + "signal_logging" is created.
56288 +
56289 +config GRKERNSEC_FORKFAIL
56290 + bool "Fork failure logging"
56291 + help
56292 + If you say Y here, all failed fork() attempts will be logged.
56293 + This could suggest a fork bomb, or someone attempting to overstep
56294 + their process limit. If the sysctl option is enabled, a sysctl option
56295 + with name "forkfail_logging" is created.
56296 +
56297 +config GRKERNSEC_TIME
56298 + bool "Time change logging"
56299 + help
56300 + If you say Y here, any changes of the system clock will be logged.
56301 + If the sysctl option is enabled, a sysctl option with name
56302 + "timechange_logging" is created.
56303 +
56304 +config GRKERNSEC_PROC_IPADDR
56305 + bool "/proc/<pid>/ipaddr support"
56306 + help
56307 + If you say Y here, a new entry will be added to each /proc/<pid>
56308 + directory that contains the IP address of the person using the task.
56309 + The IP is carried across local TCP and AF_UNIX stream sockets.
56310 + This information can be useful for IDS/IPSes to perform remote response
56311 + to a local attack. The entry is readable by only the owner of the
56312 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56313 + the RBAC system), and thus does not create privacy concerns.
56314 +
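For illustration (not part of the patch), the entry can be consumed by the owner of a task with something as simple as the sketch below; the exact formatting of the file's contents is not specified by the help text, so it is printed verbatim.

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/ipaddr", "r");	/* exists only with this option */

	if (!f) {
		perror("fopen /proc/self/ipaddr");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("origin IP recorded for this task: %s", line);
	fclose(f);
	return 0;
}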
56315 +config GRKERNSEC_RWXMAP_LOG
56316 + bool 'Denied RWX mmap/mprotect logging'
56317 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56318 + help
56319 + If you say Y here, calls to mmap() and mprotect() with explicit
56320 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56321 + denied by the PAX_MPROTECT feature. If the sysctl option is
56322 + enabled, a sysctl option with name "rwxmap_logging" is created.
56323 +
56324 +config GRKERNSEC_AUDIT_TEXTREL
56325 + bool 'ELF text relocations logging (READ HELP)'
56326 + depends on PAX_MPROTECT
56327 + help
56328 + If you say Y here, text relocations will be logged with the filename
56329 + of the offending library or binary. The purpose of the feature is
56330 + to help Linux distribution developers get rid of libraries and
56331 + binaries that need text relocations which hinder the future progress
56332 + of PaX. Only Linux distribution developers should say Y here, and
56333 + never on a production machine, as this option creates an information
56334 + leak that could aid an attacker in defeating the randomization of
56335 + a single memory region. If the sysctl option is enabled, a sysctl
56336 + option with name "audit_textrel" is created.
56337 +
56338 +endmenu
56339 +
56340 +menu "Executable Protections"
56341 +depends on GRKERNSEC
56342 +
56343 +config GRKERNSEC_DMESG
56344 + bool "Dmesg(8) restriction"
56345 + help
56346 + If you say Y here, non-root users will not be able to use dmesg(8)
56347 + to view up to the last 4kb of messages in the kernel's log buffer.
56348 + The kernel's log buffer often contains kernel addresses and other
56349 + identifying information useful to an attacker in fingerprinting a
56350 + system for a targeted exploit.
56351 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56352 + created.
56353 +
56354 +config GRKERNSEC_HARDEN_PTRACE
56355 + bool "Deter ptrace-based process snooping"
56356 + help
56357 + If you say Y here, TTY sniffers and other malicious monitoring
56358 + programs implemented through ptrace will be defeated. If you
56359 + have been using the RBAC system, this option has already been
56360 + enabled for several years for all users, with the ability to make
56361 + fine-grained exceptions.
56362 +
56363 + This option only affects the ability of non-root users to ptrace
56364 + processes that are not a descendant of the ptracing process.
56365 + This means that strace ./binary and gdb ./binary will still work,
56366 + but attaching to arbitrary processes will not. If the sysctl
56367 + option is enabled, a sysctl option with name "harden_ptrace" is
56368 + created.
56369 +
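To see the restriction from userspace, a sketch along these lines can be used (illustrative only): as an unprivileged user, point it at the PID of another process you own that is not a descendant of it, for example a shell in a different terminal. On a stock kernel of this era the attach succeeds; with harden_ptrace it is expected to be denied.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	pid_t target;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid of a non-descendant process you own>\n", argv[0]);
		return 1;
	}
	target = (pid_t)atoi(argv[1]);

	if (ptrace(PTRACE_ATTACH, target, NULL, NULL) < 0) {
		/* Expected result under harden_ptrace. */
		printf("attach to %d denied: %s\n", (int)target, strerror(errno));
		return 0;
	}
	printf("attach to %d succeeded; detaching\n", (int)target);
	waitpid(target, NULL, 0);	/* wait for the attach-stop */
	ptrace(PTRACE_DETACH, target, NULL, NULL);
	return 0;
}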
56370 +config GRKERNSEC_TPE
56371 + bool "Trusted Path Execution (TPE)"
56372 + help
56373 + If you say Y here, you will be able to choose a GID to add to the
56374 + supplementary groups of users you want to mark as "untrusted."
56375 + These users will not be able to execute any files that are not in
56376 + root-owned directories writable only by root. If the sysctl option
56377 + is enabled, a sysctl option with name "tpe" is created.
56378 +
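As an illustration of what the restriction means in practice (not part of the patch): a user in the untrusted group copies a binary into a directory that is not root-owned and writable only by root, such as /tmp or a home directory, and tries to execute it. A sketch of such a probe, with the path taken from the command line:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char *const args[] = { argv[1], NULL };

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path to a copied binary>\n", argv[0]);
		return 1;
	}
	execv(argv[1], args);
	/* execv() only returns on failure; under TPE the exec of a binary in
	 * an untrusted directory is expected to be refused for this user. */
	printf("exec of %s refused: %s\n", argv[1], strerror(errno));
	return 1;
}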
56379 +config GRKERNSEC_TPE_ALL
56380 + bool "Partially restrict all non-root users"
56381 + depends on GRKERNSEC_TPE
56382 + help
56383 + If you say Y here, all non-root users will be covered under
56384 + a weaker TPE restriction. This is separate from, and in addition to,
56385 + the main TPE options that you have selected elsewhere. Thus, if a
56386 + "trusted" GID is chosen, this restriction applies to even that GID.
56387 + Under this restriction, all non-root users will only be allowed to
56388 + execute files in directories they own that are not group or
56389 + world-writable, or in directories owned by root and writable only by
56390 + root. If the sysctl option is enabled, a sysctl option with name
56391 + "tpe_restrict_all" is created.
56392 +
56393 +config GRKERNSEC_TPE_INVERT
56394 + bool "Invert GID option"
56395 + depends on GRKERNSEC_TPE
56396 + help
56397 + If you say Y here, the group you specify in the TPE configuration will
56398 + decide what group TPE restrictions will be *disabled* for. This
56399 + option is useful if you want TPE restrictions to be applied to most
56400 + users on the system. If the sysctl option is enabled, a sysctl option
56401 + with name "tpe_invert" is created. Unlike other sysctl options, this
56402 + entry will default to on for backward-compatibility.
56403 +
56404 +config GRKERNSEC_TPE_GID
56405 + int "GID for untrusted users"
56406 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56407 + default 1005
56408 + help
56409 + Setting this GID determines what group TPE restrictions will be
56410 + *enabled* for. If the sysctl option is enabled, a sysctl option
56411 + with name "tpe_gid" is created.
56412 +
56413 +config GRKERNSEC_TPE_GID
56414 + int "GID for trusted users"
56415 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56416 + default 1005
56417 + help
56418 + Setting this GID determines what group TPE restrictions will be
56419 + *disabled* for. If the sysctl option is enabled, a sysctl option
56420 + with name "tpe_gid" is created.
56421 +
56422 +endmenu
56423 +menu "Network Protections"
56424 +depends on GRKERNSEC
56425 +
56426 +config GRKERNSEC_RANDNET
56427 + bool "Larger entropy pools"
56428 + help
56429 + If you say Y here, the entropy pools used for many features of Linux
56430 + and grsecurity will be doubled in size. Since several grsecurity
56431 + features use additional randomness, it is recommended that you say Y
56432 + here. Saying Y here has a similar effect as modifying
56433 + /proc/sys/kernel/random/poolsize.
56434 +
56435 +config GRKERNSEC_BLACKHOLE
56436 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56437 + depends on NET
56438 + help
56439 + If you say Y here, neither TCP resets nor ICMP
56440 + destination-unreachable packets will be sent in response to packets
56441 + sent to ports for which no associated listening process exists.
56442 + This feature supports both IPv4 and IPv6 and exempts the
56443 + loopback interface from blackholing. Enabling this feature
56444 + makes a host more resilient to DoS attacks and reduces network
56445 + visibility against scanners.
56446 +
56447 + The blackhole feature as-implemented is equivalent to the FreeBSD
56448 + blackhole feature, as it prevents RST responses to all packets, not
56449 + just SYNs. Under most application behavior this causes no
56450 + problems, but applications (like haproxy) may not close certain
56451 + connections in a way that cleanly terminates them on the remote
56452 + end, leaving the remote host in LAST_ACK state. Because of this
56453 + side-effect and to prevent intentional LAST_ACK DoSes, this
56454 + feature also adds automatic mitigation against such attacks.
56455 + The mitigation drastically reduces the amount of time a socket
56456 + can spend in LAST_ACK state. If you're using haproxy and not
56457 + all servers it connects to have this option enabled, consider
56458 + disabling this feature on the haproxy host.
56459 +
56460 + If the sysctl option is enabled, two sysctl options with names
56461 + "ip_blackhole" and "lastack_retries" will be created.
56462 + While "ip_blackhole" takes the standard zero/non-zero on/off
56463 + toggle, "lastack_retries" uses the same kinds of values as
56464 + "tcp_retries1" and "tcp_retries2". The default value of 4
56465 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56466 + state.
56467 +
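A rough way to observe the blackhole behavior, given purely as an illustration (the address and port below are placeholders): connect to a port with no listener on the protected host's non-loopback address, since loopback is exempted. Without blackholing the connect fails almost immediately with "Connection refused" because a RST came back; with it, no reply arrives and the attempt simply times out.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(int argc, char **argv)
{
	const char *host = argc > 1 ? argv[1] : "192.0.2.1";	/* placeholder; use the grsec host's LAN address */
	struct sockaddr_in sa;
	struct pollfd pfd;
	socklen_t len = sizeof(int);
	int err = 0;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(45678);	/* arbitrary port with no listener */
	inet_pton(AF_INET, host, &sa.sin_addr);

	fcntl(fd, F_SETFL, O_NONBLOCK);
	connect(fd, (struct sockaddr *)&sa, sizeof(sa));	/* returns EINPROGRESS */

	pfd.fd = fd;
	pfd.events = POLLOUT;
	if (poll(&pfd, 1, 3000) == 0) {
		printf("no reply within 3s - consistent with blackholing\n");
	} else {
		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
		printf("connect finished: %s\n", err ? strerror(err) : "connected");
	}
	close(fd);
	return 0;
}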
56468 +config GRKERNSEC_SOCKET
56469 + bool "Socket restrictions"
56470 + depends on NET
56471 + help
56472 + If you say Y here, you will be able to choose from several options.
56473 + If you assign a GID on your system and add it to the supplementary
56474 + groups of users you want to restrict socket access to, this patch
56475 + will perform up to three things, based on the option(s) you choose.
56476 +
56477 +config GRKERNSEC_SOCKET_ALL
56478 + bool "Deny any sockets to group"
56479 + depends on GRKERNSEC_SOCKET
56480 + help
56481 + If you say Y here, you will be able to choose a GID whose users will
56482 + be unable to connect to other hosts from your machine or run server
56483 + applications from your machine. If the sysctl option is enabled, a
56484 + sysctl option with name "socket_all" is created.
56485 +
56486 +config GRKERNSEC_SOCKET_ALL_GID
56487 + int "GID to deny all sockets for"
56488 + depends on GRKERNSEC_SOCKET_ALL
56489 + default 1004
56490 + help
56491 + Here you can choose the GID to disable socket access for. Remember to
56492 + add the users you want socket access disabled for to the GID
56493 + specified here. If the sysctl option is enabled, a sysctl option
56494 + with name "socket_all_gid" is created.
56495 +
56496 +config GRKERNSEC_SOCKET_CLIENT
56497 + bool "Deny client sockets to group"
56498 + depends on GRKERNSEC_SOCKET
56499 + help
56500 + If you say Y here, you will be able to choose a GID whose users will
56501 + be unable to connect to other hosts from your machine, but will be
56502 + able to run servers. If this option is enabled, all users in the group
56503 + you specify will have to use passive mode when initiating ftp transfers
56504 + from the shell on your machine. If the sysctl option is enabled, a
56505 + sysctl option with name "socket_client" is created.
56506 +
56507 +config GRKERNSEC_SOCKET_CLIENT_GID
56508 + int "GID to deny client sockets for"
56509 + depends on GRKERNSEC_SOCKET_CLIENT
56510 + default 1003
56511 + help
56512 + Here you can choose the GID to disable client socket access for.
56513 + Remember to add the users you want client socket access disabled for to
56514 + the GID specified here. If the sysctl option is enabled, a sysctl
56515 + option with name "socket_client_gid" is created.
56516 +
56517 +config GRKERNSEC_SOCKET_SERVER
56518 + bool "Deny server sockets to group"
56519 + depends on GRKERNSEC_SOCKET
56520 + help
56521 + If you say Y here, you will be able to choose a GID whose users will
56522 + be unable to run server applications from your machine. If the sysctl
56523 + option is enabled, a sysctl option with name "socket_server" is created.
56524 +
56525 +config GRKERNSEC_SOCKET_SERVER_GID
56526 + int "GID to deny server sockets for"
56527 + depends on GRKERNSEC_SOCKET_SERVER
56528 + default 1002
56529 + help
56530 + Here you can choose the GID to disable server socket access for.
56531 + Remember to add the users you want server socket access disabled for to
56532 + the GID specified here. If the sysctl option is enabled, a sysctl
56533 + option with name "socket_server_gid" is created.
56534 +
56535 +endmenu
56536 +menu "Sysctl support"
56537 +depends on GRKERNSEC && SYSCTL
56538 +
56539 +config GRKERNSEC_SYSCTL
56540 + bool "Sysctl support"
56541 + help
56542 + If you say Y here, you will be able to change the options that
56543 + grsecurity runs with at bootup, without having to recompile your
56544 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56545 + to enable (1) or disable (0) various features. All the sysctl entries
56546 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56547 + All features enabled in the kernel configuration are disabled at boot
56548 + if you do not say Y to the "Turn on features by default" option.
56549 + All options should be set at startup, and the grsec_lock entry should
56550 + be set to a non-zero value after all the options are set.
56551 + *THIS IS EXTREMELY IMPORTANT*
56552 +
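A minimal sketch of the boot-time sequence this help text describes, assuming the entries live under /proc/sys/kernel/grsecurity as stated; the individual entry name used below ("chroot_deny_chroot") is just one example taken from the chroot options earlier in this file. The important part is that grsec_lock is written last.

#include <stdio.h>

static int grsec_set(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	grsec_set("chroot_deny_chroot", "1");	/* enable one individual feature */
	/* ... toggle any other entries here ... */
	grsec_set("grsec_lock", "1");		/* lock everything down; must come last */
	return 0;
}

The same can of course be done with echo from an init script; the point is only the ordering, with the lock set once all other entries have their final values.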
56553 +config GRKERNSEC_SYSCTL_DISTRO
56554 + bool "Extra sysctl support for distro makers (READ HELP)"
56555 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56556 + help
56557 + If you say Y here, additional sysctl options will be created
56558 + for features that affect processes running as root. Therefore,
56559 + it is critical when using this option that the grsec_lock entry be
56560 + enabled after boot. Only distros that ship prebuilt kernel packages
56561 + with this option enabled and that can ensure grsec_lock is set
56562 + after boot should use this option.
56563 + *Failure to set grsec_lock after boot makes all grsec features
56564 + this option covers useless*
56565 +
56566 + Currently this option creates the following sysctl entries:
56567 + "Disable Privileged I/O": "disable_priv_io"
56568 +
56569 +config GRKERNSEC_SYSCTL_ON
56570 + bool "Turn on features by default"
56571 + depends on GRKERNSEC_SYSCTL
56572 + help
56573 + If you say Y here, the features enabled in the kernel configuration
56574 + will also be enabled at boot time, rather than starting out disabled
56575 + until toggled via sysctl. It is recommended you say Y here unless
56576 + there is some reason you would want all sysctl-tunable features to
56577 + be disabled by default. As mentioned elsewhere, it is important
56578 + to enable the grsec_lock entry once you have finished modifying
56579 + the sysctl entries.
56580 +
56581 +endmenu
56582 +menu "Logging Options"
56583 +depends on GRKERNSEC
56584 +
56585 +config GRKERNSEC_FLOODTIME
56586 + int "Seconds in between log messages (minimum)"
56587 + default 10
56588 + help
56589 + This option allows you to enforce the number of seconds between
56590 + grsecurity log messages. The default should be suitable for most
56591 + people, however, if you choose to change it, choose a value small enough
56592 + to allow informative logs to be produced, but large enough to
56593 + prevent flooding.
56594 +
56595 +config GRKERNSEC_FLOODBURST
56596 + int "Number of messages in a burst (maximum)"
56597 + default 4
56598 + help
56599 + This option allows you to choose the maximum number of messages allowed
56600 + within the flood time interval you chose in a separate option. The
56601 + default should be suitable for most people, however if you find that
56602 + many of your logs are being interpreted as flooding, you may want to
56603 + raise this value.
56604 +
56605 +endmenu
56606 +
56607 +endmenu
56608 diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56609 --- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56610 +++ linux-2.6.32.45/grsecurity/Makefile 2011-08-17 19:02:41.000000000 -0400
56611 @@ -0,0 +1,33 @@
56612 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56613 +# during 2001-2009 it was completely redesigned by Brad Spengler
56614 +# into an RBAC system
56615 +#
56616 +# All code in this directory and various hooks inserted throughout the kernel
56617 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56618 +# under the GPL v2 or higher
56619 +
56620 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56621 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56622 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56623 +
56624 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56625 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56626 + gracl_learn.o grsec_log.o
56627 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56628 +
56629 +ifdef CONFIG_NET
56630 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o grsec_sock.o
56631 +endif
56632 +
56633 +ifndef CONFIG_GRKERNSEC
56634 +obj-y += grsec_disabled.o
56635 +endif
56636 +
56637 +ifdef CONFIG_GRKERNSEC_HIDESYM
56638 +extra-y := grsec_hidesym.o
56639 +$(obj)/grsec_hidesym.o:
56640 + @-chmod -f 500 /boot
56641 + @-chmod -f 500 /lib/modules
56642 + @-chmod -f 700 .
56643 + @echo ' grsec: protected kernel image paths'
56644 +endif
56645 diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56646 --- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56647 +++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56648 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56649 acpi_op_bind bind;
56650 acpi_op_unbind unbind;
56651 acpi_op_notify notify;
56652 -};
56653 +} __no_const;
56654
56655 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56656
56657 diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56658 --- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56659 +++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56660 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56661 Dock Station
56662 -------------------------------------------------------------------------- */
56663 struct acpi_dock_ops {
56664 - acpi_notify_handler handler;
56665 - acpi_notify_handler uevent;
56666 + const acpi_notify_handler handler;
56667 + const acpi_notify_handler uevent;
56668 };
56669
56670 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56671 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56672 extern int register_dock_notifier(struct notifier_block *nb);
56673 extern void unregister_dock_notifier(struct notifier_block *nb);
56674 extern int register_hotplug_dock_device(acpi_handle handle,
56675 - struct acpi_dock_ops *ops,
56676 + const struct acpi_dock_ops *ops,
56677 void *context);
56678 extern void unregister_hotplug_dock_device(acpi_handle handle);
56679 #else
56680 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56681 {
56682 }
56683 static inline int register_hotplug_dock_device(acpi_handle handle,
56684 - struct acpi_dock_ops *ops,
56685 + const struct acpi_dock_ops *ops,
56686 void *context)
56687 {
56688 return -ENODEV;
56689 diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56690 --- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56691 +++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56692 @@ -22,6 +22,12 @@
56693
56694 typedef atomic64_t atomic_long_t;
56695
56696 +#ifdef CONFIG_PAX_REFCOUNT
56697 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56698 +#else
56699 +typedef atomic64_t atomic_long_unchecked_t;
56700 +#endif
56701 +
56702 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56703
56704 static inline long atomic_long_read(atomic_long_t *l)
56705 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56706 return (long)atomic64_read(v);
56707 }
56708
56709 +#ifdef CONFIG_PAX_REFCOUNT
56710 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56711 +{
56712 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56713 +
56714 + return (long)atomic64_read_unchecked(v);
56715 +}
56716 +#endif
56717 +
56718 static inline void atomic_long_set(atomic_long_t *l, long i)
56719 {
56720 atomic64_t *v = (atomic64_t *)l;
56721 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56722 atomic64_set(v, i);
56723 }
56724
56725 +#ifdef CONFIG_PAX_REFCOUNT
56726 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56727 +{
56728 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56729 +
56730 + atomic64_set_unchecked(v, i);
56731 +}
56732 +#endif
56733 +
56734 static inline void atomic_long_inc(atomic_long_t *l)
56735 {
56736 atomic64_t *v = (atomic64_t *)l;
56737 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56738 atomic64_inc(v);
56739 }
56740
56741 +#ifdef CONFIG_PAX_REFCOUNT
56742 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56743 +{
56744 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56745 +
56746 + atomic64_inc_unchecked(v);
56747 +}
56748 +#endif
56749 +
56750 static inline void atomic_long_dec(atomic_long_t *l)
56751 {
56752 atomic64_t *v = (atomic64_t *)l;
56753 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56754 atomic64_dec(v);
56755 }
56756
56757 +#ifdef CONFIG_PAX_REFCOUNT
56758 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56759 +{
56760 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56761 +
56762 + atomic64_dec_unchecked(v);
56763 +}
56764 +#endif
56765 +
56766 static inline void atomic_long_add(long i, atomic_long_t *l)
56767 {
56768 atomic64_t *v = (atomic64_t *)l;
56769 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56770 atomic64_add(i, v);
56771 }
56772
56773 +#ifdef CONFIG_PAX_REFCOUNT
56774 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56775 +{
56776 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56777 +
56778 + atomic64_add_unchecked(i, v);
56779 +}
56780 +#endif
56781 +
56782 static inline void atomic_long_sub(long i, atomic_long_t *l)
56783 {
56784 atomic64_t *v = (atomic64_t *)l;
56785 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56786 return (long)atomic64_inc_return(v);
56787 }
56788
56789 +#ifdef CONFIG_PAX_REFCOUNT
56790 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56791 +{
56792 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56793 +
56794 + return (long)atomic64_inc_return_unchecked(v);
56795 +}
56796 +#endif
56797 +
56798 static inline long atomic_long_dec_return(atomic_long_t *l)
56799 {
56800 atomic64_t *v = (atomic64_t *)l;
56801 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56802
56803 typedef atomic_t atomic_long_t;
56804
56805 +#ifdef CONFIG_PAX_REFCOUNT
56806 +typedef atomic_unchecked_t atomic_long_unchecked_t;
56807 +#else
56808 +typedef atomic_t atomic_long_unchecked_t;
56809 +#endif
56810 +
56811 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56812 static inline long atomic_long_read(atomic_long_t *l)
56813 {
56814 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56815 return (long)atomic_read(v);
56816 }
56817
56818 +#ifdef CONFIG_PAX_REFCOUNT
56819 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56820 +{
56821 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56822 +
56823 + return (long)atomic_read_unchecked(v);
56824 +}
56825 +#endif
56826 +
56827 static inline void atomic_long_set(atomic_long_t *l, long i)
56828 {
56829 atomic_t *v = (atomic_t *)l;
56830 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56831 atomic_set(v, i);
56832 }
56833
56834 +#ifdef CONFIG_PAX_REFCOUNT
56835 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56836 +{
56837 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56838 +
56839 + atomic_set_unchecked(v, i);
56840 +}
56841 +#endif
56842 +
56843 static inline void atomic_long_inc(atomic_long_t *l)
56844 {
56845 atomic_t *v = (atomic_t *)l;
56846 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56847 atomic_inc(v);
56848 }
56849
56850 +#ifdef CONFIG_PAX_REFCOUNT
56851 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56852 +{
56853 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56854 +
56855 + atomic_inc_unchecked(v);
56856 +}
56857 +#endif
56858 +
56859 static inline void atomic_long_dec(atomic_long_t *l)
56860 {
56861 atomic_t *v = (atomic_t *)l;
56862 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56863 atomic_dec(v);
56864 }
56865
56866 +#ifdef CONFIG_PAX_REFCOUNT
56867 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56868 +{
56869 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56870 +
56871 + atomic_dec_unchecked(v);
56872 +}
56873 +#endif
56874 +
56875 static inline void atomic_long_add(long i, atomic_long_t *l)
56876 {
56877 atomic_t *v = (atomic_t *)l;
56878 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56879 atomic_add(i, v);
56880 }
56881
56882 +#ifdef CONFIG_PAX_REFCOUNT
56883 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56884 +{
56885 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56886 +
56887 + atomic_add_unchecked(i, v);
56888 +}
56889 +#endif
56890 +
56891 static inline void atomic_long_sub(long i, atomic_long_t *l)
56892 {
56893 atomic_t *v = (atomic_t *)l;
56894 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56895 return (long)atomic_inc_return(v);
56896 }
56897
56898 +#ifdef CONFIG_PAX_REFCOUNT
56899 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56900 +{
56901 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56902 +
56903 + return (long)atomic_inc_return_unchecked(v);
56904 +}
56905 +#endif
56906 +
56907 static inline long atomic_long_dec_return(atomic_long_t *l)
56908 {
56909 atomic_t *v = (atomic_t *)l;
56910 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56911
56912 #endif /* BITS_PER_LONG == 64 */
56913
56914 +#ifdef CONFIG_PAX_REFCOUNT
56915 +static inline void pax_refcount_needs_these_functions(void)
56916 +{
56917 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
56918 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56919 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56920 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56921 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56922 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56923 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56924 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56925 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56926 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56927 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56928 +
56929 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56930 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56931 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56932 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56933 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56934 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56935 +}
56936 +#else
56937 +#define atomic_read_unchecked(v) atomic_read(v)
56938 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56939 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56940 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56941 +#define atomic_inc_unchecked(v) atomic_inc(v)
56942 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56943 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56944 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56945 +#define atomic_dec_unchecked(v) atomic_dec(v)
56946 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56947 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56948 +
56949 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
56950 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56951 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56952 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56953 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56954 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56955 +#endif
56956 +
56957 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
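The *_unchecked typedefs and helpers added above exist so that counters whose wrap-around is harmless (statistics, identifiers) can opt out of PAX_REFCOUNT's overflow detection while real reference counts keep the checked operations. A minimal, kernel-module-style sketch of that intended split is shown below; it is illustrative only and not part of the patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/atomic.h>

/* Wrapping of this statistics counter is harmless, so it deliberately uses
 * the unchecked type and operations and is exempt from PAX_REFCOUNT's
 * overflow detection. */
static atomic_long_unchecked_t example_events;

/* A real reference count stays with the checked atomic_long_t API so that
 * an overflow would be detected and reported. */
static atomic_long_t example_refs;

static int __init atomic_example_init(void)
{
	atomic_long_set_unchecked(&example_events, 0);
	atomic_long_set(&example_refs, 1);

	atomic_long_inc_unchecked(&example_events);
	printk(KERN_INFO "events=%ld refs=%ld\n",
	       atomic_long_read_unchecked(&example_events),
	       atomic_long_read(&example_refs));
	return 0;
}

static void __exit atomic_example_exit(void)
{
	atomic_long_dec(&example_refs);
}

module_init(atomic_example_init);
module_exit(atomic_example_exit);
MODULE_LICENSE("GPL");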
56958 diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56959 --- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56960 +++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56961 @@ -6,7 +6,7 @@
56962 * cache lines need to provide their own cache.h.
56963 */
56964
56965 -#define L1_CACHE_SHIFT 5
56966 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56967 +#define L1_CACHE_SHIFT 5UL
56968 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56969
56970 #endif /* __ASM_GENERIC_CACHE_H */
56971 diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
56972 --- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56973 +++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56974 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56975 enum dma_data_direction dir,
56976 struct dma_attrs *attrs)
56977 {
56978 - struct dma_map_ops *ops = get_dma_ops(dev);
56979 + const struct dma_map_ops *ops = get_dma_ops(dev);
56980 dma_addr_t addr;
56981
56982 kmemcheck_mark_initialized(ptr, size);
56983 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56984 enum dma_data_direction dir,
56985 struct dma_attrs *attrs)
56986 {
56987 - struct dma_map_ops *ops = get_dma_ops(dev);
56988 + const struct dma_map_ops *ops = get_dma_ops(dev);
56989
56990 BUG_ON(!valid_dma_direction(dir));
56991 if (ops->unmap_page)
56992 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56993 int nents, enum dma_data_direction dir,
56994 struct dma_attrs *attrs)
56995 {
56996 - struct dma_map_ops *ops = get_dma_ops(dev);
56997 + const struct dma_map_ops *ops = get_dma_ops(dev);
56998 int i, ents;
56999 struct scatterlist *s;
57000
57001 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
57002 int nents, enum dma_data_direction dir,
57003 struct dma_attrs *attrs)
57004 {
57005 - struct dma_map_ops *ops = get_dma_ops(dev);
57006 + const struct dma_map_ops *ops = get_dma_ops(dev);
57007
57008 BUG_ON(!valid_dma_direction(dir));
57009 debug_dma_unmap_sg(dev, sg, nents, dir);
57010 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
57011 size_t offset, size_t size,
57012 enum dma_data_direction dir)
57013 {
57014 - struct dma_map_ops *ops = get_dma_ops(dev);
57015 + const struct dma_map_ops *ops = get_dma_ops(dev);
57016 dma_addr_t addr;
57017
57018 kmemcheck_mark_initialized(page_address(page) + offset, size);
57019 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
57020 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
57021 size_t size, enum dma_data_direction dir)
57022 {
57023 - struct dma_map_ops *ops = get_dma_ops(dev);
57024 + const struct dma_map_ops *ops = get_dma_ops(dev);
57025
57026 BUG_ON(!valid_dma_direction(dir));
57027 if (ops->unmap_page)
57028 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
57029 size_t size,
57030 enum dma_data_direction dir)
57031 {
57032 - struct dma_map_ops *ops = get_dma_ops(dev);
57033 + const struct dma_map_ops *ops = get_dma_ops(dev);
57034
57035 BUG_ON(!valid_dma_direction(dir));
57036 if (ops->sync_single_for_cpu)
57037 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57038 dma_addr_t addr, size_t size,
57039 enum dma_data_direction dir)
57040 {
57041 - struct dma_map_ops *ops = get_dma_ops(dev);
57042 + const struct dma_map_ops *ops = get_dma_ops(dev);
57043
57044 BUG_ON(!valid_dma_direction(dir));
57045 if (ops->sync_single_for_device)
57046 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57047 size_t size,
57048 enum dma_data_direction dir)
57049 {
57050 - struct dma_map_ops *ops = get_dma_ops(dev);
57051 + const struct dma_map_ops *ops = get_dma_ops(dev);
57052
57053 BUG_ON(!valid_dma_direction(dir));
57054 if (ops->sync_single_range_for_cpu) {
57055 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57056 size_t size,
57057 enum dma_data_direction dir)
57058 {
57059 - struct dma_map_ops *ops = get_dma_ops(dev);
57060 + const struct dma_map_ops *ops = get_dma_ops(dev);
57061
57062 BUG_ON(!valid_dma_direction(dir));
57063 if (ops->sync_single_range_for_device) {
57064 @@ -155,7 +155,7 @@ static inline void
57065 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57066 int nelems, enum dma_data_direction dir)
57067 {
57068 - struct dma_map_ops *ops = get_dma_ops(dev);
57069 + const struct dma_map_ops *ops = get_dma_ops(dev);
57070
57071 BUG_ON(!valid_dma_direction(dir));
57072 if (ops->sync_sg_for_cpu)
57073 @@ -167,7 +167,7 @@ static inline void
57074 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57075 int nelems, enum dma_data_direction dir)
57076 {
57077 - struct dma_map_ops *ops = get_dma_ops(dev);
57078 + const struct dma_map_ops *ops = get_dma_ops(dev);
57079
57080 BUG_ON(!valid_dma_direction(dir));
57081 if (ops->sync_sg_for_device)
57082 diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
57083 --- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57084 +++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57085 @@ -6,7 +6,7 @@
57086 #include <asm/errno.h>
57087
57088 static inline int
57089 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57090 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57091 {
57092 int op = (encoded_op >> 28) & 7;
57093 int cmp = (encoded_op >> 24) & 15;
57094 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57095 }
57096
57097 static inline int
57098 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57099 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57100 {
57101 return -ENOSYS;
57102 }
57103 diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
57104 --- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57105 +++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57106 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57107 typedef signed long s64;
57108 typedef unsigned long u64;
57109
57110 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57111 +
57112 #define S8_C(x) x
57113 #define U8_C(x) x ## U
57114 #define S16_C(x) x
57115 diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
57116 --- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57117 +++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57118 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57119 typedef signed long long s64;
57120 typedef unsigned long long u64;
57121
57122 +typedef unsigned long long intoverflow_t;
57123 +
57124 #define S8_C(x) x
57125 #define U8_C(x) x ## U
57126 #define S16_C(x) x
57127 diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
57128 --- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57129 +++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57130 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57131 KMAP_D(16) KM_IRQ_PTE,
57132 KMAP_D(17) KM_NMI,
57133 KMAP_D(18) KM_NMI_PTE,
57134 -KMAP_D(19) KM_TYPE_NR
57135 +KMAP_D(19) KM_CLEARPAGE,
57136 +KMAP_D(20) KM_TYPE_NR
57137 };
57138
57139 #undef KMAP_D
57140 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
57141 --- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57142 +++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57143 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57144 unsigned long size);
57145 #endif
57146
57147 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57148 +static inline unsigned long pax_open_kernel(void) { return 0; }
57149 +#endif
57150 +
57151 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57152 +static inline unsigned long pax_close_kernel(void) { return 0; }
57153 +#endif
57154 +
57155 #endif /* !__ASSEMBLY__ */
57156
57157 #endif /* _ASM_GENERIC_PGTABLE_H */
57158 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
57159 --- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57160 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57161 @@ -1,14 +1,19 @@
57162 #ifndef _PGTABLE_NOPMD_H
57163 #define _PGTABLE_NOPMD_H
57164
57165 -#ifndef __ASSEMBLY__
57166 -
57167 #include <asm-generic/pgtable-nopud.h>
57168
57169 -struct mm_struct;
57170 -
57171 #define __PAGETABLE_PMD_FOLDED
57172
57173 +#define PMD_SHIFT PUD_SHIFT
57174 +#define PTRS_PER_PMD 1
57175 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57176 +#define PMD_MASK (~(PMD_SIZE-1))
57177 +
57178 +#ifndef __ASSEMBLY__
57179 +
57180 +struct mm_struct;
57181 +
57182 /*
57183 * Having the pmd type consist of a pud gets the size right, and allows
57184 * us to conceptually access the pud entry that this pmd is folded into
57185 @@ -16,11 +21,6 @@ struct mm_struct;
57186 */
57187 typedef struct { pud_t pud; } pmd_t;
57188
57189 -#define PMD_SHIFT PUD_SHIFT
57190 -#define PTRS_PER_PMD 1
57191 -#define PMD_SIZE (1UL << PMD_SHIFT)
57192 -#define PMD_MASK (~(PMD_SIZE-1))
57193 -
57194 /*
57195 * The "pud_xxx()" functions here are trivial for a folded two-level
57196 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57197 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
57198 --- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57199 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57200 @@ -1,10 +1,15 @@
57201 #ifndef _PGTABLE_NOPUD_H
57202 #define _PGTABLE_NOPUD_H
57203
57204 -#ifndef __ASSEMBLY__
57205 -
57206 #define __PAGETABLE_PUD_FOLDED
57207
57208 +#define PUD_SHIFT PGDIR_SHIFT
57209 +#define PTRS_PER_PUD 1
57210 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57211 +#define PUD_MASK (~(PUD_SIZE-1))
57212 +
57213 +#ifndef __ASSEMBLY__
57214 +
57215 /*
57216 * Having the pud type consist of a pgd gets the size right, and allows
57217 * us to conceptually access the pgd entry that this pud is folded into
57218 @@ -12,11 +17,6 @@
57219 */
57220 typedef struct { pgd_t pgd; } pud_t;
57221
57222 -#define PUD_SHIFT PGDIR_SHIFT
57223 -#define PTRS_PER_PUD 1
57224 -#define PUD_SIZE (1UL << PUD_SHIFT)
57225 -#define PUD_MASK (~(PUD_SIZE-1))
57226 -
57227 /*
57228 * The "pgd_xxx()" functions here are trivial for a folded two-level
57229 * setup: the pud is never bad, and a pud always exists (as it's folded
57230 diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
57231 --- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57232 +++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57233 @@ -199,6 +199,7 @@
57234 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57235 VMLINUX_SYMBOL(__start_rodata) = .; \
57236 *(.rodata) *(.rodata.*) \
57237 + *(.data.read_only) \
57238 *(__vermagic) /* Kernel version magic */ \
57239 *(__markers_strings) /* Markers: strings */ \
57240 *(__tracepoints_strings)/* Tracepoints: strings */ \
57241 @@ -656,22 +657,24 @@
57242 * section in the linker script will go there too. @phdr should have
57243 * a leading colon.
57244 *
57245 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57246 + * Note that this macros defines per_cpu_load as an absolute symbol.
57247 * If there is no need to put the percpu section at a predetermined
57248 * address, use PERCPU().
57249 */
57250 #define PERCPU_VADDR(vaddr, phdr) \
57251 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57252 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57253 + per_cpu_load = .; \
57254 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57255 - LOAD_OFFSET) { \
57256 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57257 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57258 *(.data.percpu.first) \
57259 - *(.data.percpu.page_aligned) \
57260 *(.data.percpu) \
57261 + . = ALIGN(PAGE_SIZE); \
57262 + *(.data.percpu.page_aligned) \
57263 *(.data.percpu.shared_aligned) \
57264 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57265 } phdr \
57266 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57267 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57268
57269 /**
57270 * PERCPU - define output section for percpu area, simple version
57271 diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57272 --- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57273 +++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57274 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57275
57276 /* reload the current crtc LUT */
57277 void (*load_lut)(struct drm_crtc *crtc);
57278 -};
57279 +} __no_const;
57280
57281 struct drm_encoder_helper_funcs {
57282 void (*dpms)(struct drm_encoder *encoder, int mode);
57283 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57284 struct drm_connector *connector);
57285 /* disable encoder when not in use - more explicit than dpms off */
57286 void (*disable)(struct drm_encoder *encoder);
57287 -};
57288 +} __no_const;
57289
57290 struct drm_connector_helper_funcs {
57291 int (*get_modes)(struct drm_connector *connector);
57292 diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57293 --- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57294 +++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57295 @@ -71,6 +71,7 @@
57296 #include <linux/workqueue.h>
57297 #include <linux/poll.h>
57298 #include <asm/pgalloc.h>
57299 +#include <asm/local.h>
57300 #include "drm.h"
57301
57302 #include <linux/idr.h>
57303 @@ -814,7 +815,7 @@ struct drm_driver {
57304 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57305
57306 /* Driver private ops for this object */
57307 - struct vm_operations_struct *gem_vm_ops;
57308 + const struct vm_operations_struct *gem_vm_ops;
57309
57310 int major;
57311 int minor;
57312 @@ -917,7 +918,7 @@ struct drm_device {
57313
57314 /** \name Usage Counters */
57315 /*@{ */
57316 - int open_count; /**< Outstanding files open */
57317 + local_t open_count; /**< Outstanding files open */
57318 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57319 atomic_t vma_count; /**< Outstanding vma areas open */
57320 int buf_use; /**< Buffers in use -- cannot alloc */
57321 @@ -928,7 +929,7 @@ struct drm_device {
57322 /*@{ */
57323 unsigned long counters;
57324 enum drm_stat_type types[15];
57325 - atomic_t counts[15];
57326 + atomic_unchecked_t counts[15];
57327 /*@} */
57328
57329 struct list_head filelist;
57330 @@ -1016,7 +1017,7 @@ struct drm_device {
57331 struct pci_controller *hose;
57332 #endif
57333 struct drm_sg_mem *sg; /**< Scatter gather memory */
57334 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
57335 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
57336 void *dev_private; /**< device private data */
57337 void *mm_private;
57338 struct address_space *dev_mapping;
57339 @@ -1042,11 +1043,11 @@ struct drm_device {
57340 spinlock_t object_name_lock;
57341 struct idr object_name_idr;
57342 atomic_t object_count;
57343 - atomic_t object_memory;
57344 + atomic_unchecked_t object_memory;
57345 atomic_t pin_count;
57346 - atomic_t pin_memory;
57347 + atomic_unchecked_t pin_memory;
57348 atomic_t gtt_count;
57349 - atomic_t gtt_memory;
57350 + atomic_unchecked_t gtt_memory;
57351 uint32_t gtt_total;
57352 uint32_t invalidate_domains; /* domains pending invalidation */
57353 uint32_t flush_domains; /* domains pending flush */
57354 diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57355 --- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57356 +++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57357 @@ -47,7 +47,7 @@
57358
57359 struct ttm_mem_shrink {
57360 int (*do_shrink) (struct ttm_mem_shrink *);
57361 -};
57362 +} __no_const;
57363
57364 /**
57365 * struct ttm_mem_global - Global memory accounting structure.
57366 diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57367 --- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57368 +++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57369 @@ -39,6 +39,14 @@ enum machine_type {
57370 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57371 };
57372
57373 +/* Constants for the N_FLAGS field */
57374 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57375 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57376 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57377 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57378 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57379 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57380 +
57381 #if !defined (N_MAGIC)
57382 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57383 #endif
57384 diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57385 --- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57386 +++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57387 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57388 #endif
57389
57390 struct k_atm_aal_stats {
57391 -#define __HANDLE_ITEM(i) atomic_t i
57392 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57393 __AAL_STAT_ITEMS
57394 #undef __HANDLE_ITEM
57395 };
57396 diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57397 --- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57398 +++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57399 @@ -36,18 +36,18 @@ struct backlight_device;
57400 struct fb_info;
57401
57402 struct backlight_ops {
57403 - unsigned int options;
57404 + const unsigned int options;
57405
57406 #define BL_CORE_SUSPENDRESUME (1 << 0)
57407
57408 /* Notify the backlight driver some property has changed */
57409 - int (*update_status)(struct backlight_device *);
57410 + int (* const update_status)(struct backlight_device *);
57411 /* Return the current backlight brightness (accounting for power,
57412 fb_blank etc.) */
57413 - int (*get_brightness)(struct backlight_device *);
57414 + int (* const get_brightness)(struct backlight_device *);
57415 /* Check if given framebuffer device is the one bound to this backlight;
57416 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57417 - int (*check_fb)(struct fb_info *);
57418 + int (* const check_fb)(struct fb_info *);
57419 };
57420
57421 /* This structure defines all the properties of a backlight */
57422 @@ -86,7 +86,7 @@ struct backlight_device {
57423 registered this device has been unloaded, and if class_get_devdata()
57424 points to something in the body of that driver, it is also invalid. */
57425 struct mutex ops_lock;
57426 - struct backlight_ops *ops;
57427 + const struct backlight_ops *ops;
57428
57429 /* The framebuffer notifier block */
57430 struct notifier_block fb_notif;
57431 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
57432 }
57433
57434 extern struct backlight_device *backlight_device_register(const char *name,
57435 - struct device *dev, void *devdata, struct backlight_ops *ops);
57436 + struct device *dev, void *devdata, const struct backlight_ops *ops);
57437 extern void backlight_device_unregister(struct backlight_device *bd);
57438 extern void backlight_force_update(struct backlight_device *bd,
57439 enum backlight_update_reason reason);
57440 diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57441 --- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57442 +++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57443 @@ -83,6 +83,7 @@ struct linux_binfmt {
57444 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57445 int (*load_shlib)(struct file *);
57446 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57447 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57448 unsigned long min_coredump; /* minimal dump size */
57449 int hasvdso;
57450 };
57451 diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57452 --- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57453 +++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57454 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57455 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57456
57457 struct block_device_operations {
57458 - int (*open) (struct block_device *, fmode_t);
57459 - int (*release) (struct gendisk *, fmode_t);
57460 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57461 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57462 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57463 - int (*direct_access) (struct block_device *, sector_t,
57464 + int (* const open) (struct block_device *, fmode_t);
57465 + int (* const release) (struct gendisk *, fmode_t);
57466 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57467 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57468 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57469 + int (* const direct_access) (struct block_device *, sector_t,
57470 void **, unsigned long *);
57471 - int (*media_changed) (struct gendisk *);
57472 - unsigned long long (*set_capacity) (struct gendisk *,
57473 + int (* const media_changed) (struct gendisk *);
57474 + unsigned long long (* const set_capacity) (struct gendisk *,
57475 unsigned long long);
57476 - int (*revalidate_disk) (struct gendisk *);
57477 - int (*getgeo)(struct block_device *, struct hd_geometry *);
57478 - struct module *owner;
57479 + int (* const revalidate_disk) (struct gendisk *);
57480 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
57481 + struct module * const owner;
57482 };
57483
57484 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57485 diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57486 --- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57487 +++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57488 @@ -160,7 +160,7 @@ struct blk_trace {
57489 struct dentry *dir;
57490 struct dentry *dropped_file;
57491 struct dentry *msg_file;
57492 - atomic_t dropped;
57493 + atomic_unchecked_t dropped;
57494 };
57495
57496 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57497 diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57498 --- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57499 +++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57500 @@ -42,51 +42,51 @@
57501
57502 static inline __le64 __cpu_to_le64p(const __u64 *p)
57503 {
57504 - return (__force __le64)*p;
57505 + return (__force const __le64)*p;
57506 }
57507 static inline __u64 __le64_to_cpup(const __le64 *p)
57508 {
57509 - return (__force __u64)*p;
57510 + return (__force const __u64)*p;
57511 }
57512 static inline __le32 __cpu_to_le32p(const __u32 *p)
57513 {
57514 - return (__force __le32)*p;
57515 + return (__force const __le32)*p;
57516 }
57517 static inline __u32 __le32_to_cpup(const __le32 *p)
57518 {
57519 - return (__force __u32)*p;
57520 + return (__force const __u32)*p;
57521 }
57522 static inline __le16 __cpu_to_le16p(const __u16 *p)
57523 {
57524 - return (__force __le16)*p;
57525 + return (__force const __le16)*p;
57526 }
57527 static inline __u16 __le16_to_cpup(const __le16 *p)
57528 {
57529 - return (__force __u16)*p;
57530 + return (__force const __u16)*p;
57531 }
57532 static inline __be64 __cpu_to_be64p(const __u64 *p)
57533 {
57534 - return (__force __be64)__swab64p(p);
57535 + return (__force const __be64)__swab64p(p);
57536 }
57537 static inline __u64 __be64_to_cpup(const __be64 *p)
57538 {
57539 - return __swab64p((__u64 *)p);
57540 + return __swab64p((const __u64 *)p);
57541 }
57542 static inline __be32 __cpu_to_be32p(const __u32 *p)
57543 {
57544 - return (__force __be32)__swab32p(p);
57545 + return (__force const __be32)__swab32p(p);
57546 }
57547 static inline __u32 __be32_to_cpup(const __be32 *p)
57548 {
57549 - return __swab32p((__u32 *)p);
57550 + return __swab32p((const __u32 *)p);
57551 }
57552 static inline __be16 __cpu_to_be16p(const __u16 *p)
57553 {
57554 - return (__force __be16)__swab16p(p);
57555 + return (__force const __be16)__swab16p(p);
57556 }
57557 static inline __u16 __be16_to_cpup(const __be16 *p)
57558 {
57559 - return __swab16p((__u16 *)p);
57560 + return __swab16p((const __u16 *)p);
57561 }
57562 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57563 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57564 diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57565 --- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57566 +++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57567 @@ -16,6 +16,10 @@
57568 #define __read_mostly
57569 #endif
57570
57571 +#ifndef __read_only
57572 +#define __read_only __read_mostly
57573 +#endif
57574 +
57575 #ifndef ____cacheline_aligned
57576 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57577 #endif
57578 diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57579 --- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57580 +++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57581 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57582 (security_real_capable_noaudit((t), (cap)) == 0)
57583
57584 extern int capable(int cap);
57585 +int capable_nolog(int cap);
57586
57587 /* audit system wants to get cap info from files as well */
57588 struct dentry;
57589 diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57590 --- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57591 +++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57592 @@ -36,4 +36,13 @@
57593 the kernel context */
57594 #define __cold __attribute__((__cold__))
57595
57596 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57597 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57598 +#define __bos0(ptr) __bos((ptr), 0)
57599 +#define __bos1(ptr) __bos((ptr), 1)
57600 +
57601 +#if __GNUC_MINOR__ >= 5
57602 +#define __no_const __attribute__((no_const))
57603 +#endif
57604 +
57605 #endif
57606 diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57607 --- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57608 +++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57609 @@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57610 # define __attribute_const__ /* unimplemented */
57611 #endif
57612
57613 +#ifndef __no_const
57614 +# define __no_const
57615 +#endif
57616 +
57617 /*
57618 * Tell gcc if a function is cold. The compiler will assume any path
57619 * directly leading to the call is unlikely.
57620 @@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57621 #define __cold
57622 #endif
57623
57624 +#ifndef __alloc_size
57625 +#define __alloc_size(...)
57626 +#endif
57627 +
57628 +#ifndef __bos
57629 +#define __bos(ptr, arg)
57630 +#endif
57631 +
57632 +#ifndef __bos0
57633 +#define __bos0(ptr)
57634 +#endif
57635 +
57636 +#ifndef __bos1
57637 +#define __bos1(ptr)
57638 +#endif
57639 +
57640 /* Simple shorthand for a section definition */
57641 #ifndef __section
57642 # define __section(S) __attribute__ ((__section__(#S)))
57643 @@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57644 * use is to mediate communication between process-level code and irq/NMI
57645 * handlers, all running on the same CPU.
57646 */
57647 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57648 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57649 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57650
57651 #endif /* __LINUX_COMPILER_H */
57652 diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57653 --- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57654 +++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57655 @@ -394,7 +394,7 @@ struct cipher_tfm {
57656 const u8 *key, unsigned int keylen);
57657 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57658 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57659 -};
57660 +} __no_const;
57661
57662 struct hash_tfm {
57663 int (*init)(struct hash_desc *desc);
57664 @@ -415,13 +415,13 @@ struct compress_tfm {
57665 int (*cot_decompress)(struct crypto_tfm *tfm,
57666 const u8 *src, unsigned int slen,
57667 u8 *dst, unsigned int *dlen);
57668 -};
57669 +} __no_const;
57670
57671 struct rng_tfm {
57672 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57673 unsigned int dlen);
57674 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57675 -};
57676 +} __no_const;
57677
57678 #define crt_ablkcipher crt_u.ablkcipher
57679 #define crt_aead crt_u.aead
57680 diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57681 --- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57682 +++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57683 @@ -119,6 +119,8 @@ struct dentry {
57684 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57685 };
57686
57687 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57688 +
57689 /*
57690 * dentry->d_lock spinlock nesting subclasses:
57691 *
57692 diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57693 --- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57694 +++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57695 @@ -78,7 +78,7 @@ static void free(void *where)
57696 * warnings when not needed (indeed large_malloc / large_free are not
57697 * needed by inflate */
57698
57699 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57700 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57701 #define free(a) kfree(a)
57702
57703 #define large_malloc(a) vmalloc(a)
57704 diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57705 --- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57706 +++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57707 @@ -16,50 +16,50 @@ enum dma_data_direction {
57708 };
57709
57710 struct dma_map_ops {
57711 - void* (*alloc_coherent)(struct device *dev, size_t size,
57712 + void* (* const alloc_coherent)(struct device *dev, size_t size,
57713 dma_addr_t *dma_handle, gfp_t gfp);
57714 - void (*free_coherent)(struct device *dev, size_t size,
57715 + void (* const free_coherent)(struct device *dev, size_t size,
57716 void *vaddr, dma_addr_t dma_handle);
57717 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
57718 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57719 unsigned long offset, size_t size,
57720 enum dma_data_direction dir,
57721 struct dma_attrs *attrs);
57722 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57723 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57724 size_t size, enum dma_data_direction dir,
57725 struct dma_attrs *attrs);
57726 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
57727 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57728 int nents, enum dma_data_direction dir,
57729 struct dma_attrs *attrs);
57730 - void (*unmap_sg)(struct device *dev,
57731 + void (* const unmap_sg)(struct device *dev,
57732 struct scatterlist *sg, int nents,
57733 enum dma_data_direction dir,
57734 struct dma_attrs *attrs);
57735 - void (*sync_single_for_cpu)(struct device *dev,
57736 + void (* const sync_single_for_cpu)(struct device *dev,
57737 dma_addr_t dma_handle, size_t size,
57738 enum dma_data_direction dir);
57739 - void (*sync_single_for_device)(struct device *dev,
57740 + void (* const sync_single_for_device)(struct device *dev,
57741 dma_addr_t dma_handle, size_t size,
57742 enum dma_data_direction dir);
57743 - void (*sync_single_range_for_cpu)(struct device *dev,
57744 + void (* const sync_single_range_for_cpu)(struct device *dev,
57745 dma_addr_t dma_handle,
57746 unsigned long offset,
57747 size_t size,
57748 enum dma_data_direction dir);
57749 - void (*sync_single_range_for_device)(struct device *dev,
57750 + void (* const sync_single_range_for_device)(struct device *dev,
57751 dma_addr_t dma_handle,
57752 unsigned long offset,
57753 size_t size,
57754 enum dma_data_direction dir);
57755 - void (*sync_sg_for_cpu)(struct device *dev,
57756 + void (* const sync_sg_for_cpu)(struct device *dev,
57757 struct scatterlist *sg, int nents,
57758 enum dma_data_direction dir);
57759 - void (*sync_sg_for_device)(struct device *dev,
57760 + void (* const sync_sg_for_device)(struct device *dev,
57761 struct scatterlist *sg, int nents,
57762 enum dma_data_direction dir);
57763 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57764 - int (*dma_supported)(struct device *dev, u64 mask);
57765 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57766 + int (* const dma_supported)(struct device *dev, u64 mask);
57767 int (*set_dma_mask)(struct device *dev, u64 mask);
57768 - int is_phys;
57769 + const int is_phys;
57770 };
57771
57772 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57773 diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57774 --- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57775 +++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57776 @@ -380,7 +380,7 @@ struct dst_node
57777 struct thread_pool *pool;
57778
57779 /* Transaction IDs live here */
57780 - atomic_long_t gen;
57781 + atomic_long_unchecked_t gen;
57782
57783 /*
57784 * How frequently and how many times transaction
57785 diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57786 --- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57787 +++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57788 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57789 #define PT_GNU_EH_FRAME 0x6474e550
57790
57791 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57792 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57793 +
57794 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57795 +
57796 +/* Constants for the e_flags field */
57797 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57798 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57799 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57800 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57801 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57802 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57803
57804 /* These constants define the different elf file types */
57805 #define ET_NONE 0
57806 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57807 #define DT_DEBUG 21
57808 #define DT_TEXTREL 22
57809 #define DT_JMPREL 23
57810 +#define DT_FLAGS 30
57811 + #define DF_TEXTREL 0x00000004
57812 #define DT_ENCODING 32
57813 #define OLD_DT_LOOS 0x60000000
57814 #define DT_LOOS 0x6000000d
57815 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57816 #define PF_W 0x2
57817 #define PF_X 0x1
57818
57819 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57820 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57821 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57822 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57823 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57824 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57825 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57826 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57827 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57828 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57829 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57830 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57831 +
57832 typedef struct elf32_phdr{
57833 Elf32_Word p_type;
57834 Elf32_Off p_offset;
57835 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57836 #define EI_OSABI 7
57837 #define EI_PAD 8
57838
57839 +#define EI_PAX 14
57840 +
57841 #define ELFMAG0 0x7f /* EI_MAG */
57842 #define ELFMAG1 'E'
57843 #define ELFMAG2 'L'
57844 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57845 #define elf_phdr elf32_phdr
57846 #define elf_note elf32_note
57847 #define elf_addr_t Elf32_Off
57848 +#define elf_dyn Elf32_Dyn
57849
57850 #else
57851
57852 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57853 #define elf_phdr elf64_phdr
57854 #define elf_note elf64_note
57855 #define elf_addr_t Elf64_Off
57856 +#define elf_dyn Elf64_Dyn
57857
57858 #endif
57859
57860 diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57861 --- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57862 +++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57863 @@ -116,7 +116,7 @@ struct fscache_operation {
57864 #endif
57865 };
57866
57867 -extern atomic_t fscache_op_debug_id;
57868 +extern atomic_unchecked_t fscache_op_debug_id;
57869 extern const struct slow_work_ops fscache_op_slow_work_ops;
57870
57871 extern void fscache_enqueue_operation(struct fscache_operation *);
57872 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57873 fscache_operation_release_t release)
57874 {
57875 atomic_set(&op->usage, 1);
57876 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57877 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57878 op->release = release;
57879 INIT_LIST_HEAD(&op->pend_link);
57880 fscache_set_op_state(op, "Init");
57881 diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57882 --- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57883 +++ linux-2.6.32.45/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
57884 @@ -90,6 +90,11 @@ struct inodes_stat_t {
57885 /* Expect random access pattern */
57886 #define FMODE_RANDOM ((__force fmode_t)4096)
57887
57888 +/* Hack for grsec so as not to require read permission simply to execute
57889 + * a binary
57890 + */
57891 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57892 +
57893 /*
57894 * The below are the various read and write types that we support. Some of
57895 * them include behavioral modifiers that send information down to the
57896 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57897 unsigned long, unsigned long);
57898
57899 struct address_space_operations {
57900 - int (*writepage)(struct page *page, struct writeback_control *wbc);
57901 - int (*readpage)(struct file *, struct page *);
57902 - void (*sync_page)(struct page *);
57903 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
57904 + int (* const readpage)(struct file *, struct page *);
57905 + void (* const sync_page)(struct page *);
57906
57907 /* Write back some dirty pages from this mapping. */
57908 - int (*writepages)(struct address_space *, struct writeback_control *);
57909 + int (* const writepages)(struct address_space *, struct writeback_control *);
57910
57911 /* Set a page dirty. Return true if this dirtied it */
57912 - int (*set_page_dirty)(struct page *page);
57913 + int (* const set_page_dirty)(struct page *page);
57914
57915 - int (*readpages)(struct file *filp, struct address_space *mapping,
57916 + int (* const readpages)(struct file *filp, struct address_space *mapping,
57917 struct list_head *pages, unsigned nr_pages);
57918
57919 - int (*write_begin)(struct file *, struct address_space *mapping,
57920 + int (* const write_begin)(struct file *, struct address_space *mapping,
57921 loff_t pos, unsigned len, unsigned flags,
57922 struct page **pagep, void **fsdata);
57923 - int (*write_end)(struct file *, struct address_space *mapping,
57924 + int (* const write_end)(struct file *, struct address_space *mapping,
57925 loff_t pos, unsigned len, unsigned copied,
57926 struct page *page, void *fsdata);
57927
57928 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57929 - sector_t (*bmap)(struct address_space *, sector_t);
57930 - void (*invalidatepage) (struct page *, unsigned long);
57931 - int (*releasepage) (struct page *, gfp_t);
57932 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57933 + sector_t (* const bmap)(struct address_space *, sector_t);
57934 + void (* const invalidatepage) (struct page *, unsigned long);
57935 + int (* const releasepage) (struct page *, gfp_t);
57936 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57937 loff_t offset, unsigned long nr_segs);
57938 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57939 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57940 void **, unsigned long *);
57941 /* migrate the contents of a page to the specified target */
57942 - int (*migratepage) (struct address_space *,
57943 + int (* const migratepage) (struct address_space *,
57944 struct page *, struct page *);
57945 - int (*launder_page) (struct page *);
57946 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57947 + int (* const launder_page) (struct page *);
57948 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57949 unsigned long);
57950 - int (*error_remove_page)(struct address_space *, struct page *);
57951 + int (* const error_remove_page)(struct address_space *, struct page *);
57952 };
57953
57954 /*
57955 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57956 typedef struct files_struct *fl_owner_t;
57957
57958 struct file_lock_operations {
57959 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57960 - void (*fl_release_private)(struct file_lock *);
57961 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57962 + void (* const fl_release_private)(struct file_lock *);
57963 };
57964
57965 struct lock_manager_operations {
57966 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57967 - void (*fl_notify)(struct file_lock *); /* unblock callback */
57968 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57969 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57970 - void (*fl_release_private)(struct file_lock *);
57971 - void (*fl_break)(struct file_lock *);
57972 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
57973 - int (*fl_change)(struct file_lock **, int);
57974 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57975 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
57976 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57977 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57978 + void (* const fl_release_private)(struct file_lock *);
57979 + void (* const fl_break)(struct file_lock *);
57980 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57981 + int (* const fl_change)(struct file_lock **, int);
57982 };
57983
57984 struct lock_manager {
57985 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57986 unsigned int fi_flags; /* Flags as passed from user */
57987 unsigned int fi_extents_mapped; /* Number of mapped extents */
57988 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57989 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57990 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57991 * array */
57992 };
57993 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57994 @@ -1486,7 +1491,7 @@ struct block_device_operations;
57995 * can be called without the big kernel lock held in all filesystems.
57996 */
57997 struct file_operations {
57998 - struct module *owner;
57999 + struct module * const owner;
58000 loff_t (*llseek) (struct file *, loff_t, int);
58001 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58002 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
58003 @@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
58004 unsigned long, loff_t *);
58005
58006 struct super_operations {
58007 - struct inode *(*alloc_inode)(struct super_block *sb);
58008 - void (*destroy_inode)(struct inode *);
58009 + struct inode *(* const alloc_inode)(struct super_block *sb);
58010 + void (* const destroy_inode)(struct inode *);
58011
58012 - void (*dirty_inode) (struct inode *);
58013 - int (*write_inode) (struct inode *, int);
58014 - void (*drop_inode) (struct inode *);
58015 - void (*delete_inode) (struct inode *);
58016 - void (*put_super) (struct super_block *);
58017 - void (*write_super) (struct super_block *);
58018 - int (*sync_fs)(struct super_block *sb, int wait);
58019 - int (*freeze_fs) (struct super_block *);
58020 - int (*unfreeze_fs) (struct super_block *);
58021 - int (*statfs) (struct dentry *, struct kstatfs *);
58022 - int (*remount_fs) (struct super_block *, int *, char *);
58023 - void (*clear_inode) (struct inode *);
58024 - void (*umount_begin) (struct super_block *);
58025 + void (* const dirty_inode) (struct inode *);
58026 + int (* const write_inode) (struct inode *, int);
58027 + void (* const drop_inode) (struct inode *);
58028 + void (* const delete_inode) (struct inode *);
58029 + void (* const put_super) (struct super_block *);
58030 + void (* const write_super) (struct super_block *);
58031 + int (* const sync_fs)(struct super_block *sb, int wait);
58032 + int (* const freeze_fs) (struct super_block *);
58033 + int (* const unfreeze_fs) (struct super_block *);
58034 + int (* const statfs) (struct dentry *, struct kstatfs *);
58035 + int (* const remount_fs) (struct super_block *, int *, char *);
58036 + void (* const clear_inode) (struct inode *);
58037 + void (* const umount_begin) (struct super_block *);
58038
58039 - int (*show_options)(struct seq_file *, struct vfsmount *);
58040 - int (*show_stats)(struct seq_file *, struct vfsmount *);
58041 + int (* const show_options)(struct seq_file *, struct vfsmount *);
58042 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
58043 #ifdef CONFIG_QUOTA
58044 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58045 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58046 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58047 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58048 #endif
58049 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58050 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58051 };
58052
58053 /*
58054 diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
58055 --- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58056 +++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58057 @@ -4,7 +4,7 @@
58058 #include <linux/path.h>
58059
58060 struct fs_struct {
58061 - int users;
58062 + atomic_t users;
58063 rwlock_t lock;
58064 int umask;
58065 int in_exec;
58066 diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
58067 --- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58068 +++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58069 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58070 int filter_type);
58071 extern int trace_define_common_fields(struct ftrace_event_call *call);
58072
58073 -#define is_signed_type(type) (((type)(-1)) < 0)
58074 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58075
58076 int trace_set_clr_event(const char *system, const char *event, int set);
58077
58078 diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
58079 --- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58080 +++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58081 @@ -161,7 +161,7 @@ struct gendisk {
58082
58083 struct timer_rand_state *random;
58084
58085 - atomic_t sync_io; /* RAID */
58086 + atomic_unchecked_t sync_io; /* RAID */
58087 struct work_struct async_notify;
58088 #ifdef CONFIG_BLK_DEV_INTEGRITY
58089 struct blk_integrity *integrity;
58090 diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
58091 --- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58092 +++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58093 @@ -0,0 +1,317 @@
58094 +#ifndef GR_ACL_H
58095 +#define GR_ACL_H
58096 +
58097 +#include <linux/grdefs.h>
58098 +#include <linux/resource.h>
58099 +#include <linux/capability.h>
58100 +#include <linux/dcache.h>
58101 +#include <asm/resource.h>
58102 +
58103 +/* Major status information */
58104 +
58105 +#define GR_VERSION "grsecurity 2.2.2"
58106 +#define GRSECURITY_VERSION 0x2202
58107 +
58108 +enum {
58109 + GR_SHUTDOWN = 0,
58110 + GR_ENABLE = 1,
58111 + GR_SPROLE = 2,
58112 + GR_RELOAD = 3,
58113 + GR_SEGVMOD = 4,
58114 + GR_STATUS = 5,
58115 + GR_UNSPROLE = 6,
58116 + GR_PASSSET = 7,
58117 + GR_SPROLEPAM = 8,
58118 +};
58119 +
58120 +/* Password setup definitions
58121 + * kernel/grhash.c */
58122 +enum {
58123 + GR_PW_LEN = 128,
58124 + GR_SALT_LEN = 16,
58125 + GR_SHA_LEN = 32,
58126 +};
58127 +
58128 +enum {
58129 + GR_SPROLE_LEN = 64,
58130 +};
58131 +
58132 +enum {
58133 + GR_NO_GLOB = 0,
58134 + GR_REG_GLOB,
58135 + GR_CREATE_GLOB
58136 +};
58137 +
58138 +#define GR_NLIMITS 32
58139 +
58140 +/* Begin Data Structures */
58141 +
58142 +struct sprole_pw {
58143 + unsigned char *rolename;
58144 + unsigned char salt[GR_SALT_LEN];
58145 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58146 +};
58147 +
58148 +struct name_entry {
58149 + __u32 key;
58150 + ino_t inode;
58151 + dev_t device;
58152 + char *name;
58153 + __u16 len;
58154 + __u8 deleted;
58155 + struct name_entry *prev;
58156 + struct name_entry *next;
58157 +};
58158 +
58159 +struct inodev_entry {
58160 + struct name_entry *nentry;
58161 + struct inodev_entry *prev;
58162 + struct inodev_entry *next;
58163 +};
58164 +
58165 +struct acl_role_db {
58166 + struct acl_role_label **r_hash;
58167 + __u32 r_size;
58168 +};
58169 +
58170 +struct inodev_db {
58171 + struct inodev_entry **i_hash;
58172 + __u32 i_size;
58173 +};
58174 +
58175 +struct name_db {
58176 + struct name_entry **n_hash;
58177 + __u32 n_size;
58178 +};
58179 +
58180 +struct crash_uid {
58181 + uid_t uid;
58182 + unsigned long expires;
58183 +};
58184 +
58185 +struct gr_hash_struct {
58186 + void **table;
58187 + void **nametable;
58188 + void *first;
58189 + __u32 table_size;
58190 + __u32 used_size;
58191 + int type;
58192 +};
58193 +
58194 +/* Userspace Grsecurity ACL data structures */
58195 +
58196 +struct acl_subject_label {
58197 + char *filename;
58198 + ino_t inode;
58199 + dev_t device;
58200 + __u32 mode;
58201 + kernel_cap_t cap_mask;
58202 + kernel_cap_t cap_lower;
58203 + kernel_cap_t cap_invert_audit;
58204 +
58205 + struct rlimit res[GR_NLIMITS];
58206 + __u32 resmask;
58207 +
58208 + __u8 user_trans_type;
58209 + __u8 group_trans_type;
58210 + uid_t *user_transitions;
58211 + gid_t *group_transitions;
58212 + __u16 user_trans_num;
58213 + __u16 group_trans_num;
58214 +
58215 + __u32 sock_families[2];
58216 + __u32 ip_proto[8];
58217 + __u32 ip_type;
58218 + struct acl_ip_label **ips;
58219 + __u32 ip_num;
58220 + __u32 inaddr_any_override;
58221 +
58222 + __u32 crashes;
58223 + unsigned long expires;
58224 +
58225 + struct acl_subject_label *parent_subject;
58226 + struct gr_hash_struct *hash;
58227 + struct acl_subject_label *prev;
58228 + struct acl_subject_label *next;
58229 +
58230 + struct acl_object_label **obj_hash;
58231 + __u32 obj_hash_size;
58232 + __u16 pax_flags;
58233 +};
58234 +
58235 +struct role_allowed_ip {
58236 + __u32 addr;
58237 + __u32 netmask;
58238 +
58239 + struct role_allowed_ip *prev;
58240 + struct role_allowed_ip *next;
58241 +};
58242 +
58243 +struct role_transition {
58244 + char *rolename;
58245 +
58246 + struct role_transition *prev;
58247 + struct role_transition *next;
58248 +};
58249 +
58250 +struct acl_role_label {
58251 + char *rolename;
58252 + uid_t uidgid;
58253 + __u16 roletype;
58254 +
58255 + __u16 auth_attempts;
58256 + unsigned long expires;
58257 +
58258 + struct acl_subject_label *root_label;
58259 + struct gr_hash_struct *hash;
58260 +
58261 + struct acl_role_label *prev;
58262 + struct acl_role_label *next;
58263 +
58264 + struct role_transition *transitions;
58265 + struct role_allowed_ip *allowed_ips;
58266 + uid_t *domain_children;
58267 + __u16 domain_child_num;
58268 +
58269 + struct acl_subject_label **subj_hash;
58270 + __u32 subj_hash_size;
58271 +};
58272 +
58273 +struct user_acl_role_db {
58274 + struct acl_role_label **r_table;
58275 + __u32 num_pointers; /* Number of allocations to track */
58276 + __u32 num_roles; /* Number of roles */
58277 + __u32 num_domain_children; /* Number of domain children */
58278 + __u32 num_subjects; /* Number of subjects */
58279 + __u32 num_objects; /* Number of objects */
58280 +};
58281 +
58282 +struct acl_object_label {
58283 + char *filename;
58284 + ino_t inode;
58285 + dev_t device;
58286 + __u32 mode;
58287 +
58288 + struct acl_subject_label *nested;
58289 + struct acl_object_label *globbed;
58290 +
58291 + /* next two structures not used */
58292 +
58293 + struct acl_object_label *prev;
58294 + struct acl_object_label *next;
58295 +};
58296 +
58297 +struct acl_ip_label {
58298 + char *iface;
58299 + __u32 addr;
58300 + __u32 netmask;
58301 + __u16 low, high;
58302 + __u8 mode;
58303 + __u32 type;
58304 + __u32 proto[8];
58305 +
58306 + /* next two structures not used */
58307 +
58308 + struct acl_ip_label *prev;
58309 + struct acl_ip_label *next;
58310 +};
58311 +
58312 +struct gr_arg {
58313 + struct user_acl_role_db role_db;
58314 + unsigned char pw[GR_PW_LEN];
58315 + unsigned char salt[GR_SALT_LEN];
58316 + unsigned char sum[GR_SHA_LEN];
58317 + unsigned char sp_role[GR_SPROLE_LEN];
58318 + struct sprole_pw *sprole_pws;
58319 + dev_t segv_device;
58320 + ino_t segv_inode;
58321 + uid_t segv_uid;
58322 + __u16 num_sprole_pws;
58323 + __u16 mode;
58324 +};
58325 +
58326 +struct gr_arg_wrapper {
58327 + struct gr_arg *arg;
58328 + __u32 version;
58329 + __u32 size;
58330 +};
58331 +
58332 +struct subject_map {
58333 + struct acl_subject_label *user;
58334 + struct acl_subject_label *kernel;
58335 + struct subject_map *prev;
58336 + struct subject_map *next;
58337 +};
58338 +
58339 +struct acl_subj_map_db {
58340 + struct subject_map **s_hash;
58341 + __u32 s_size;
58342 +};
58343 +
58344 +/* End Data Structures Section */
58345 +
58346 +/* Hash functions generated by empirical testing by Brad Spengler
58347 + Makes good use of the low bits of the inode. Generally 0-1 times
58348 + in loop for successful match. 0-3 for unsuccessful match.
58349 + Shift/add algorithm with modulus of table size and an XOR*/
58350 +
58351 +static __inline__ unsigned int
58352 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58353 +{
58354 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58355 +}
58356 +
58357 + static __inline__ unsigned int
58358 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58359 +{
58360 + return ((const unsigned long)userp % sz);
58361 +}
58362 +
58363 +static __inline__ unsigned int
58364 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58365 +{
58366 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58367 +}
58368 +
58369 +static __inline__ unsigned int
58370 +nhash(const char *name, const __u16 len, const unsigned int sz)
58371 +{
58372 + return full_name_hash((const unsigned char *)name, len) % sz;
58373 +}
58374 +
58375 +#define FOR_EACH_ROLE_START(role) \
58376 + role = role_list; \
58377 + while (role) {
58378 +
58379 +#define FOR_EACH_ROLE_END(role) \
58380 + role = role->prev; \
58381 + }
58382 +
58383 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58384 + subj = NULL; \
58385 + iter = 0; \
58386 + while (iter < role->subj_hash_size) { \
58387 + if (subj == NULL) \
58388 + subj = role->subj_hash[iter]; \
58389 + if (subj == NULL) { \
58390 + iter++; \
58391 + continue; \
58392 + }
58393 +
58394 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58395 + subj = subj->next; \
58396 + if (subj == NULL) \
58397 + iter++; \
58398 + }
58399 +
58400 +
58401 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58402 + subj = role->hash->first; \
58403 + while (subj != NULL) {
58404 +
58405 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58406 + subj = subj->next; \
58407 + }
58408 +
58409 +#endif
58410 +
58411 diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58412 --- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58413 +++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58414 @@ -0,0 +1,9 @@
58415 +#ifndef __GRALLOC_H
58416 +#define __GRALLOC_H
58417 +
58418 +void acl_free_all(void);
58419 +int acl_alloc_stack_init(unsigned long size);
58420 +void *acl_alloc(unsigned long len);
58421 +void *acl_alloc_num(unsigned long num, unsigned long len);
58422 +
58423 +#endif
58424 diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58425 --- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58426 +++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58427 @@ -0,0 +1,140 @@
58428 +#ifndef GRDEFS_H
58429 +#define GRDEFS_H
58430 +
58431 +/* Begin grsecurity status declarations */
58432 +
58433 +enum {
58434 + GR_READY = 0x01,
58435 + GR_STATUS_INIT = 0x00 // disabled state
58436 +};
58437 +
58438 +/* Begin ACL declarations */
58439 +
58440 +/* Role flags */
58441 +
58442 +enum {
58443 + GR_ROLE_USER = 0x0001,
58444 + GR_ROLE_GROUP = 0x0002,
58445 + GR_ROLE_DEFAULT = 0x0004,
58446 + GR_ROLE_SPECIAL = 0x0008,
58447 + GR_ROLE_AUTH = 0x0010,
58448 + GR_ROLE_NOPW = 0x0020,
58449 + GR_ROLE_GOD = 0x0040,
58450 + GR_ROLE_LEARN = 0x0080,
58451 + GR_ROLE_TPE = 0x0100,
58452 + GR_ROLE_DOMAIN = 0x0200,
58453 + GR_ROLE_PAM = 0x0400,
58454 + GR_ROLE_PERSIST = 0x800
58455 +};
58456 +
58457 +/* ACL Subject and Object mode flags */
58458 +enum {
58459 + GR_DELETED = 0x80000000
58460 +};
58461 +
58462 +/* ACL Object-only mode flags */
58463 +enum {
58464 + GR_READ = 0x00000001,
58465 + GR_APPEND = 0x00000002,
58466 + GR_WRITE = 0x00000004,
58467 + GR_EXEC = 0x00000008,
58468 + GR_FIND = 0x00000010,
58469 + GR_INHERIT = 0x00000020,
58470 + GR_SETID = 0x00000040,
58471 + GR_CREATE = 0x00000080,
58472 + GR_DELETE = 0x00000100,
58473 + GR_LINK = 0x00000200,
58474 + GR_AUDIT_READ = 0x00000400,
58475 + GR_AUDIT_APPEND = 0x00000800,
58476 + GR_AUDIT_WRITE = 0x00001000,
58477 + GR_AUDIT_EXEC = 0x00002000,
58478 + GR_AUDIT_FIND = 0x00004000,
58479 + GR_AUDIT_INHERIT= 0x00008000,
58480 + GR_AUDIT_SETID = 0x00010000,
58481 + GR_AUDIT_CREATE = 0x00020000,
58482 + GR_AUDIT_DELETE = 0x00040000,
58483 + GR_AUDIT_LINK = 0x00080000,
58484 + GR_PTRACERD = 0x00100000,
58485 + GR_NOPTRACE = 0x00200000,
58486 + GR_SUPPRESS = 0x00400000,
58487 + GR_NOLEARN = 0x00800000,
58488 + GR_INIT_TRANSFER= 0x01000000
58489 +};
58490 +
58491 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58492 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58493 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58494 +
58495 +/* ACL subject-only mode flags */
58496 +enum {
58497 + GR_KILL = 0x00000001,
58498 + GR_VIEW = 0x00000002,
58499 + GR_PROTECTED = 0x00000004,
58500 + GR_LEARN = 0x00000008,
58501 + GR_OVERRIDE = 0x00000010,
58502 + /* just a placeholder, this mode is only used in userspace */
58503 + GR_DUMMY = 0x00000020,
58504 + GR_PROTSHM = 0x00000040,
58505 + GR_KILLPROC = 0x00000080,
58506 + GR_KILLIPPROC = 0x00000100,
58507 + /* just a placeholder, this mode is only used in userspace */
58508 + GR_NOTROJAN = 0x00000200,
58509 + GR_PROTPROCFD = 0x00000400,
58510 + GR_PROCACCT = 0x00000800,
58511 + GR_RELAXPTRACE = 0x00001000,
58512 + GR_NESTED = 0x00002000,
58513 + GR_INHERITLEARN = 0x00004000,
58514 + GR_PROCFIND = 0x00008000,
58515 + GR_POVERRIDE = 0x00010000,
58516 + GR_KERNELAUTH = 0x00020000,
58517 + GR_ATSECURE = 0x00040000,
58518 + GR_SHMEXEC = 0x00080000
58519 +};
58520 +
58521 +enum {
58522 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58523 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58524 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58525 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58526 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58527 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58528 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58529 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58530 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58531 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58532 +};
58533 +
58534 +enum {
58535 + GR_ID_USER = 0x01,
58536 + GR_ID_GROUP = 0x02,
58537 +};
58538 +
58539 +enum {
58540 + GR_ID_ALLOW = 0x01,
58541 + GR_ID_DENY = 0x02,
58542 +};
58543 +
58544 +#define GR_CRASH_RES 31
58545 +#define GR_UIDTABLE_MAX 500
58546 +
58547 +/* begin resource learning section */
58548 +enum {
58549 + GR_RLIM_CPU_BUMP = 60,
58550 + GR_RLIM_FSIZE_BUMP = 50000,
58551 + GR_RLIM_DATA_BUMP = 10000,
58552 + GR_RLIM_STACK_BUMP = 1000,
58553 + GR_RLIM_CORE_BUMP = 10000,
58554 + GR_RLIM_RSS_BUMP = 500000,
58555 + GR_RLIM_NPROC_BUMP = 1,
58556 + GR_RLIM_NOFILE_BUMP = 5,
58557 + GR_RLIM_MEMLOCK_BUMP = 50000,
58558 + GR_RLIM_AS_BUMP = 500000,
58559 + GR_RLIM_LOCKS_BUMP = 2,
58560 + GR_RLIM_SIGPENDING_BUMP = 5,
58561 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58562 + GR_RLIM_NICE_BUMP = 1,
58563 + GR_RLIM_RTPRIO_BUMP = 1,
58564 + GR_RLIM_RTTIME_BUMP = 1000000
58565 +};
58566 +
58567 +#endif
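The object-mode values are laid out so that each GR_AUDIT_* bit is exactly the matching access bit shifted left by ten (GR_READ 0x1 against GR_AUDIT_READ 0x400, up through GR_LINK 0x200 against GR_AUDIT_LINK 0x80000), which is what lets a requested mode be turned into its audit mask with a single shift. A stand-alone check of that property follows; the GR_AUDIT_SHIFT name and the reqmode_to_audit() helper are made up for the example, and the in-kernel to_gr_audit() may be written differently.

/* Stand-alone check that the audit bits are the access bits shifted left
 * by ten.  GR_AUDIT_SHIFT and reqmode_to_audit() are illustrative names. */
#include <stdio.h>

enum {
        GR_READ       = 0x00000001,
        GR_LINK       = 0x00000200,
        GR_AUDIT_READ = 0x00000400,
        GR_AUDIT_LINK = 0x00080000
};

#define GR_AUDIT_SHIFT 10

static unsigned int reqmode_to_audit(unsigned int reqmode)
{
        return reqmode << GR_AUDIT_SHIFT;
}

int main(void)
{
        printf("%d %d\n",
               reqmode_to_audit(GR_READ) == GR_AUDIT_READ,   /* prints 1 */
               reqmode_to_audit(GR_LINK) == GR_AUDIT_LINK);  /* prints 1 */
        return 0;
}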
58568 diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58569 --- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58570 +++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58571 @@ -0,0 +1,217 @@
58572 +#ifndef __GRINTERNAL_H
58573 +#define __GRINTERNAL_H
58574 +
58575 +#ifdef CONFIG_GRKERNSEC
58576 +
58577 +#include <linux/fs.h>
58578 +#include <linux/mnt_namespace.h>
58579 +#include <linux/nsproxy.h>
58580 +#include <linux/gracl.h>
58581 +#include <linux/grdefs.h>
58582 +#include <linux/grmsg.h>
58583 +
58584 +void gr_add_learn_entry(const char *fmt, ...)
58585 + __attribute__ ((format (printf, 1, 2)));
58586 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58587 + const struct vfsmount *mnt);
58588 +__u32 gr_check_create(const struct dentry *new_dentry,
58589 + const struct dentry *parent,
58590 + const struct vfsmount *mnt, const __u32 mode);
58591 +int gr_check_protected_task(const struct task_struct *task);
58592 +__u32 to_gr_audit(const __u32 reqmode);
58593 +int gr_set_acls(const int type);
58594 +int gr_apply_subject_to_task(struct task_struct *task);
58595 +int gr_acl_is_enabled(void);
58596 +char gr_roletype_to_char(void);
58597 +
58598 +void gr_handle_alertkill(struct task_struct *task);
58599 +char *gr_to_filename(const struct dentry *dentry,
58600 + const struct vfsmount *mnt);
58601 +char *gr_to_filename1(const struct dentry *dentry,
58602 + const struct vfsmount *mnt);
58603 +char *gr_to_filename2(const struct dentry *dentry,
58604 + const struct vfsmount *mnt);
58605 +char *gr_to_filename3(const struct dentry *dentry,
58606 + const struct vfsmount *mnt);
58607 +
58608 +extern int grsec_enable_harden_ptrace;
58609 +extern int grsec_enable_link;
58610 +extern int grsec_enable_fifo;
58611 +extern int grsec_enable_shm;
58612 +extern int grsec_enable_execlog;
58613 +extern int grsec_enable_signal;
58614 +extern int grsec_enable_audit_ptrace;
58615 +extern int grsec_enable_forkfail;
58616 +extern int grsec_enable_time;
58617 +extern int grsec_enable_rofs;
58618 +extern int grsec_enable_chroot_shmat;
58619 +extern int grsec_enable_chroot_mount;
58620 +extern int grsec_enable_chroot_double;
58621 +extern int grsec_enable_chroot_pivot;
58622 +extern int grsec_enable_chroot_chdir;
58623 +extern int grsec_enable_chroot_chmod;
58624 +extern int grsec_enable_chroot_mknod;
58625 +extern int grsec_enable_chroot_fchdir;
58626 +extern int grsec_enable_chroot_nice;
58627 +extern int grsec_enable_chroot_execlog;
58628 +extern int grsec_enable_chroot_caps;
58629 +extern int grsec_enable_chroot_sysctl;
58630 +extern int grsec_enable_chroot_unix;
58631 +extern int grsec_enable_tpe;
58632 +extern int grsec_tpe_gid;
58633 +extern int grsec_enable_tpe_all;
58634 +extern int grsec_enable_tpe_invert;
58635 +extern int grsec_enable_socket_all;
58636 +extern int grsec_socket_all_gid;
58637 +extern int grsec_enable_socket_client;
58638 +extern int grsec_socket_client_gid;
58639 +extern int grsec_enable_socket_server;
58640 +extern int grsec_socket_server_gid;
58641 +extern int grsec_audit_gid;
58642 +extern int grsec_enable_group;
58643 +extern int grsec_enable_audit_textrel;
58644 +extern int grsec_enable_log_rwxmaps;
58645 +extern int grsec_enable_mount;
58646 +extern int grsec_enable_chdir;
58647 +extern int grsec_resource_logging;
58648 +extern int grsec_enable_blackhole;
58649 +extern int grsec_lastack_retries;
58650 +extern int grsec_enable_brute;
58651 +extern int grsec_lock;
58652 +
58653 +extern spinlock_t grsec_alert_lock;
58654 +extern unsigned long grsec_alert_wtime;
58655 +extern unsigned long grsec_alert_fyet;
58656 +
58657 +extern spinlock_t grsec_audit_lock;
58658 +
58659 +extern rwlock_t grsec_exec_file_lock;
58660 +
58661 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58662 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58663 + (tsk)->exec_file->f_vfsmnt) : "/")
58664 +
58665 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58666 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58667 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58668 +
58669 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58670 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58671 + (tsk)->exec_file->f_vfsmnt) : "/")
58672 +
58673 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58674 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58675 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58676 +
58677 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58678 +
58679 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58680 +
58681 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58682 + (task)->pid, (cred)->uid, \
58683 + (cred)->euid, (cred)->gid, (cred)->egid, \
58684 + gr_parent_task_fullpath(task), \
58685 + (task)->real_parent->comm, (task)->real_parent->pid, \
58686 + (pcred)->uid, (pcred)->euid, \
58687 + (pcred)->gid, (pcred)->egid
58688 +
58689 +#define GR_CHROOT_CAPS {{ \
58690 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58691 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58692 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58693 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58694 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58695 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58696 +
58697 +#define security_learn(normal_msg,args...) \
58698 +({ \
58699 + read_lock(&grsec_exec_file_lock); \
58700 + gr_add_learn_entry(normal_msg "\n", ## args); \
58701 + read_unlock(&grsec_exec_file_lock); \
58702 +})
58703 +
58704 +enum {
58705 + GR_DO_AUDIT,
58706 + GR_DONT_AUDIT,
58707 + GR_DONT_AUDIT_GOOD
58708 +};
58709 +
58710 +enum {
58711 + GR_TTYSNIFF,
58712 + GR_RBAC,
58713 + GR_RBAC_STR,
58714 + GR_STR_RBAC,
58715 + GR_RBAC_MODE2,
58716 + GR_RBAC_MODE3,
58717 + GR_FILENAME,
58718 + GR_SYSCTL_HIDDEN,
58719 + GR_NOARGS,
58720 + GR_ONE_INT,
58721 + GR_ONE_INT_TWO_STR,
58722 + GR_ONE_STR,
58723 + GR_STR_INT,
58724 + GR_TWO_STR_INT,
58725 + GR_TWO_INT,
58726 + GR_TWO_U64,
58727 + GR_THREE_INT,
58728 + GR_FIVE_INT_TWO_STR,
58729 + GR_TWO_STR,
58730 + GR_THREE_STR,
58731 + GR_FOUR_STR,
58732 + GR_STR_FILENAME,
58733 + GR_FILENAME_STR,
58734 + GR_FILENAME_TWO_INT,
58735 + GR_FILENAME_TWO_INT_STR,
58736 + GR_TEXTREL,
58737 + GR_PTRACE,
58738 + GR_RESOURCE,
58739 + GR_CAP,
58740 + GR_SIG,
58741 + GR_SIG2,
58742 + GR_CRASH1,
58743 + GR_CRASH2,
58744 + GR_PSACCT,
58745 + GR_RWXMAP
58746 +};
58747 +
58748 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58749 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58750 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58751 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58752 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58753 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58754 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58755 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58756 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58757 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58758 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58759 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58760 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58761 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58762 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58763 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58764 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58765 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58766 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58767 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58768 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58769 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58770 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58771 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58772 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58773 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58774 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58775 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58776 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58777 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58778 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58779 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58780 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58781 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58782 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58783 +
58784 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58785 +
58786 +#endif
58787 +
58788 +#endif
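All the gr_log_* wrappers above collapse into gr_log_varargs(), whose argtypes selector tells the logger how to pull its va_list apart before the task description is appended. A heavily reduced user-space model of that dispatch is below; only two selectors are handled, and the rate limiting, locking and DEFAULTSECARGS suffix of the real logger are left out.

/* Cut-down model of the argtypes dispatch behind the gr_log_* macros.
 * Only GR_ONE_INT and GR_TWO_STR are handled; the real gr_log_varargs()
 * also rate-limits and appends the DEFAULTSECARGS task description. */
#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_INT, GR_TWO_STR };

static void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
{
        va_list ap;

        (void)audit;            /* the kernel picks alert vs. audit handling here */
        va_start(ap, argtypes);
        printf("grsec: ");
        switch (argtypes) {
        case GR_ONE_INT:
                printf(msg, va_arg(ap, unsigned int));
                break;
        case GR_TWO_STR: {
                const char *s1 = va_arg(ap, const char *);
                const char *s2 = va_arg(ap, const char *);

                printf(msg, s1, s2);
                break;
        }
        }
        printf("\n");
        va_end(ap);
}

#define gr_log_int(audit, msg, num)        gr_log_varargs(audit, msg, GR_ONE_INT, num)
#define gr_log_str_str(audit, msg, s1, s2) gr_log_varargs(audit, msg, GR_TWO_STR, s1, s2)

int main(void)
{
        gr_log_int(0, "change to uid %u denied for ", 1000u);
        gr_log_str_str(1, "mount of %.256s to %.256s by ", "/dev/sdb1", "/mnt");
        return 0;
}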
58789 diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58790 --- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58791 +++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58792 @@ -0,0 +1,108 @@
58793 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58794 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58795 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58796 +#define GR_STOPMOD_MSG "denied modification of module state by "
58797 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58798 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58799 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58800 +#define GR_IOPL_MSG "denied use of iopl() by "
58801 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58802 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58803 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58804 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58805 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58806 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58807 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58808 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58809 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58810 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58811 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58812 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58813 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58814 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58815 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58816 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58817 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58818 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58819 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58820 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58821 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58822 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58823 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58824 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58825 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58826 +#define GR_NPROC_MSG "denied overstep of process limit by "
58827 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58828 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58829 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58830 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58831 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58832 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58833 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58834 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58835 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58836 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58837 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58838 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58839 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58840 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58841 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58842 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58843 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58844 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58845 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58846 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
58847 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58848 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58849 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58850 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58851 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58852 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58853 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58854 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58855 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58856 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58857 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58858 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58859 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58860 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58861 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58862 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58863 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58864 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58865 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58866 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58867 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58868 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58869 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58870 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58871 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58872 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58873 +#define GR_TIME_MSG "time set by "
58874 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58875 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58876 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58877 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58878 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58879 +#define GR_BIND_MSG "denied bind() by "
58880 +#define GR_CONNECT_MSG "denied connect() by "
58881 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58882 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58883 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58884 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58885 +#define GR_CAP_ACL_MSG "use of %s denied for "
58886 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58887 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58888 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58889 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58890 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58891 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58892 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58893 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58894 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58895 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58896 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58897 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58898 +#define GR_VM86_MSG "denied use of vm86 by "
58899 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58900 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
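DEFAULTSECMSG is meant to be glued onto the end of the messages above and fed the DEFAULTSECARGS() expansion from grinternal.h: fourteen conversions, fourteen values (path, comm and pid plus the uid/euid and gid/egid pairs for the task and its parent). The pairing can be seen with ordinary printf() and made-up values; the real callers of course pass task and cred fields rather than literals.

/* How a grmsg.h format pairs with a DEFAULTSECARGS-shaped argument list;
 * every value below is invented. */
#include <stdio.h>

#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
#define GR_TIME_MSG   "time set by "

int main(void)
{
        /* roughly the line the kernel logger would build for
         * gr_log_noargs(..., GR_TIME_MSG) followed by the task description */
        printf(GR_TIME_MSG DEFAULTSECMSG "\n",
               "/usr/sbin/ntpd", "ntpd", 4242,         /* path, comm, pid */
               0u, 0u, 0u, 0u,                         /* uid/euid, gid/egid */
               "/sbin/init", "init", 1,                /* parent path, comm, pid */
               0u, 0u, 0u, 0u);                        /* parent credentials */
        return 0;
}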
58901 diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58902 --- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58903 +++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58904 @@ -0,0 +1,217 @@
58905 +#ifndef GR_SECURITY_H
58906 +#define GR_SECURITY_H
58907 +#include <linux/fs.h>
58908 +#include <linux/fs_struct.h>
58909 +#include <linux/binfmts.h>
58910 +#include <linux/gracl.h>
58911 +#include <linux/compat.h>
58912 +
58913 +/* notify of brain-dead configs */
58914 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58915 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58916 +#endif
58917 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58918 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58919 +#endif
58920 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58921 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58922 +#endif
58923 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58924 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58925 +#endif
58926 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58927 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58928 +#endif
58929 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58930 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
58931 +#endif
58932 +
58933 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58934 +void gr_handle_brute_check(void);
58935 +void gr_handle_kernel_exploit(void);
58936 +int gr_process_user_ban(void);
58937 +
58938 +char gr_roletype_to_char(void);
58939 +
58940 +int gr_acl_enable_at_secure(void);
58941 +
58942 +int gr_check_user_change(int real, int effective, int fs);
58943 +int gr_check_group_change(int real, int effective, int fs);
58944 +
58945 +void gr_del_task_from_ip_table(struct task_struct *p);
58946 +
58947 +int gr_pid_is_chrooted(struct task_struct *p);
58948 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58949 +int gr_handle_chroot_nice(void);
58950 +int gr_handle_chroot_sysctl(const int op);
58951 +int gr_handle_chroot_setpriority(struct task_struct *p,
58952 + const int niceval);
58953 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58954 +int gr_handle_chroot_chroot(const struct dentry *dentry,
58955 + const struct vfsmount *mnt);
58956 +int gr_handle_chroot_caps(struct path *path);
58957 +void gr_handle_chroot_chdir(struct path *path);
58958 +int gr_handle_chroot_chmod(const struct dentry *dentry,
58959 + const struct vfsmount *mnt, const int mode);
58960 +int gr_handle_chroot_mknod(const struct dentry *dentry,
58961 + const struct vfsmount *mnt, const int mode);
58962 +int gr_handle_chroot_mount(const struct dentry *dentry,
58963 + const struct vfsmount *mnt,
58964 + const char *dev_name);
58965 +int gr_handle_chroot_pivot(void);
58966 +int gr_handle_chroot_unix(const pid_t pid);
58967 +
58968 +int gr_handle_rawio(const struct inode *inode);
58969 +
58970 +void gr_handle_ioperm(void);
58971 +void gr_handle_iopl(void);
58972 +
58973 +int gr_tpe_allow(const struct file *file);
58974 +
58975 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58976 +void gr_clear_chroot_entries(struct task_struct *task);
58977 +
58978 +void gr_log_forkfail(const int retval);
58979 +void gr_log_timechange(void);
58980 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58981 +void gr_log_chdir(const struct dentry *dentry,
58982 + const struct vfsmount *mnt);
58983 +void gr_log_chroot_exec(const struct dentry *dentry,
58984 + const struct vfsmount *mnt);
58985 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58986 +#ifdef CONFIG_COMPAT
58987 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58988 +#endif
58989 +void gr_log_remount(const char *devname, const int retval);
58990 +void gr_log_unmount(const char *devname, const int retval);
58991 +void gr_log_mount(const char *from, const char *to, const int retval);
58992 +void gr_log_textrel(struct vm_area_struct *vma);
58993 +void gr_log_rwxmmap(struct file *file);
58994 +void gr_log_rwxmprotect(struct file *file);
58995 +
58996 +int gr_handle_follow_link(const struct inode *parent,
58997 + const struct inode *inode,
58998 + const struct dentry *dentry,
58999 + const struct vfsmount *mnt);
59000 +int gr_handle_fifo(const struct dentry *dentry,
59001 + const struct vfsmount *mnt,
59002 + const struct dentry *dir, const int flag,
59003 + const int acc_mode);
59004 +int gr_handle_hardlink(const struct dentry *dentry,
59005 + const struct vfsmount *mnt,
59006 + struct inode *inode,
59007 + const int mode, const char *to);
59008 +
59009 +int gr_is_capable(const int cap);
59010 +int gr_is_capable_nolog(const int cap);
59011 +void gr_learn_resource(const struct task_struct *task, const int limit,
59012 + const unsigned long wanted, const int gt);
59013 +void gr_copy_label(struct task_struct *tsk);
59014 +void gr_handle_crash(struct task_struct *task, const int sig);
59015 +int gr_handle_signal(const struct task_struct *p, const int sig);
59016 +int gr_check_crash_uid(const uid_t uid);
59017 +int gr_check_protected_task(const struct task_struct *task);
59018 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59019 +int gr_acl_handle_mmap(const struct file *file,
59020 + const unsigned long prot);
59021 +int gr_acl_handle_mprotect(const struct file *file,
59022 + const unsigned long prot);
59023 +int gr_check_hidden_task(const struct task_struct *tsk);
59024 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59025 + const struct vfsmount *mnt);
59026 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59027 + const struct vfsmount *mnt);
59028 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59029 + const struct vfsmount *mnt, const int fmode);
59030 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59031 + const struct vfsmount *mnt, mode_t mode);
59032 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59033 + const struct vfsmount *mnt, mode_t mode);
59034 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59035 + const struct vfsmount *mnt);
59036 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59037 + const struct vfsmount *mnt);
59038 +int gr_handle_ptrace(struct task_struct *task, const long request);
59039 +int gr_handle_proc_ptrace(struct task_struct *task);
59040 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59041 + const struct vfsmount *mnt);
59042 +int gr_check_crash_exec(const struct file *filp);
59043 +int gr_acl_is_enabled(void);
59044 +void gr_set_kernel_label(struct task_struct *task);
59045 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59046 + const gid_t gid);
59047 +int gr_set_proc_label(const struct dentry *dentry,
59048 + const struct vfsmount *mnt,
59049 + const int unsafe_share);
59050 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59051 + const struct vfsmount *mnt);
59052 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59053 + const struct vfsmount *mnt, const int fmode);
59054 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59055 + const struct dentry *p_dentry,
59056 + const struct vfsmount *p_mnt, const int fmode,
59057 + const int imode);
59058 +void gr_handle_create(const struct dentry *dentry,
59059 + const struct vfsmount *mnt);
59060 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59061 + const struct dentry *parent_dentry,
59062 + const struct vfsmount *parent_mnt,
59063 + const int mode);
59064 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59065 + const struct dentry *parent_dentry,
59066 + const struct vfsmount *parent_mnt);
59067 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59068 + const struct vfsmount *mnt);
59069 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59070 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59071 + const struct vfsmount *mnt);
59072 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59073 + const struct dentry *parent_dentry,
59074 + const struct vfsmount *parent_mnt,
59075 + const char *from);
59076 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59077 + const struct dentry *parent_dentry,
59078 + const struct vfsmount *parent_mnt,
59079 + const struct dentry *old_dentry,
59080 + const struct vfsmount *old_mnt, const char *to);
59081 +int gr_acl_handle_rename(struct dentry *new_dentry,
59082 + struct dentry *parent_dentry,
59083 + const struct vfsmount *parent_mnt,
59084 + struct dentry *old_dentry,
59085 + struct inode *old_parent_inode,
59086 + struct vfsmount *old_mnt, const char *newname);
59087 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59088 + struct dentry *old_dentry,
59089 + struct dentry *new_dentry,
59090 + struct vfsmount *mnt, const __u8 replace);
59091 +__u32 gr_check_link(const struct dentry *new_dentry,
59092 + const struct dentry *parent_dentry,
59093 + const struct vfsmount *parent_mnt,
59094 + const struct dentry *old_dentry,
59095 + const struct vfsmount *old_mnt);
59096 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59097 + const unsigned int namelen, const ino_t ino);
59098 +
59099 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59100 + const struct vfsmount *mnt);
59101 +void gr_acl_handle_exit(void);
59102 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59103 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59104 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59105 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59106 +void gr_audit_ptrace(struct task_struct *task);
59107 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59108 +
59109 +#ifdef CONFIG_GRKERNSEC
59110 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59111 +void gr_handle_vm86(void);
59112 +void gr_handle_mem_readwrite(u64 from, u64 to);
59113 +
59114 +extern int grsec_enable_dmesg;
59115 +extern int grsec_disable_privio;
59116 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59117 +extern int grsec_enable_chroot_findtask;
59118 +#endif
59119 +#endif
59120 +
59121 +#endif
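Most of the __u32 hooks declared above sit directly on VFS paths; a hypothetical call site would look roughly like the sketch below, under the assumption (not stated in this header) that a zero return means the RBAC system refused the access. The stub types and the refusal rule inside the stub are invented to keep the sketch self-contained.

/* Hypothetical call site for gr_acl_handle_open(), assuming a zero return
 * means the access was refused. */
#include <stdio.h>

struct dentry   { const char *name; };
struct vfsmount { const char *mnt;  };

static unsigned int gr_acl_handle_open(const struct dentry *d,
                                       const struct vfsmount *m, int fmode)
{
        (void)m;
        (void)fmode;
        return d->name[0] == 's' ? 0 : 1;       /* stub policy decision */
}

static int do_open(const struct dentry *d, const struct vfsmount *m, int fmode)
{
        if (!gr_acl_handle_open(d, m, fmode))
                return -13;                     /* -EACCES */
        return 0;                               /* continue with the real open */
}

int main(void)
{
        struct vfsmount m = { "/" };
        struct dentry ok = { "passwd" }, hidden = { "secret" };

        printf("%d %d\n", do_open(&ok, &m, 0), do_open(&hidden, &m, 0));
        return 0;
}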
59122 diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
59123 --- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59124 +++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59125 @@ -3,7 +3,7 @@
59126 struct cpustate_t {
59127 spinlock_t lock;
59128 int excl;
59129 - int open_count;
59130 + atomic_t open_count;
59131 unsigned char cached_val;
59132 int inited;
59133 unsigned long *set_addr;
59134 diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
59135 --- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59136 +++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59137 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59138 kunmap_atomic(kaddr, KM_USER0);
59139 }
59140
59141 +static inline void sanitize_highpage(struct page *page)
59142 +{
59143 + void *kaddr;
59144 + unsigned long flags;
59145 +
59146 + local_irq_save(flags);
59147 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59148 + clear_page(kaddr);
59149 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59150 + local_irq_restore(flags);
59151 +}
59152 +
59153 static inline void zero_user_segments(struct page *page,
59154 unsigned start1, unsigned end1,
59155 unsigned start2, unsigned end2)
59156 diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
59157 --- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59158 +++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59159 @@ -564,7 +564,7 @@ struct i2o_controller {
59160 struct i2o_device *exec; /* Executive */
59161 #if BITS_PER_LONG == 64
59162 spinlock_t context_list_lock; /* lock for context_list */
59163 - atomic_t context_list_counter; /* needed for unique contexts */
59164 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59165 struct list_head context_list; /* list of context id's
59166 and pointers */
59167 #endif
59168 diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
59169 --- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59170 +++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59171 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
59172 #define INIT_IDS
59173 #endif
59174
59175 +#ifdef CONFIG_X86
59176 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59177 +#else
59178 +#define INIT_TASK_THREAD_INFO
59179 +#endif
59180 +
59181 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59182 /*
59183 * Because of the reduced scope of CAP_SETPCAP when filesystem
59184 @@ -156,6 +162,7 @@ extern struct cred init_cred;
59185 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59186 .comm = "swapper", \
59187 .thread = INIT_THREAD, \
59188 + INIT_TASK_THREAD_INFO \
59189 .fs = &init_fs, \
59190 .files = &init_files, \
59191 .signal = &init_signals, \
59192 diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
59193 --- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59194 +++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59195 @@ -296,7 +296,7 @@ struct iommu_flush {
59196 u8 fm, u64 type);
59197 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59198 unsigned int size_order, u64 type);
59199 -};
59200 +} __no_const;
59201
59202 enum {
59203 SR_DMAR_FECTL_REG,
59204 diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
59205 --- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59206 +++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59207 @@ -363,7 +363,7 @@ enum
59208 /* map softirq index to softirq name. update 'softirq_to_name' in
59209 * kernel/softirq.c when adding a new softirq.
59210 */
59211 -extern char *softirq_to_name[NR_SOFTIRQS];
59212 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59213
59214 /* softirq mask and active fields moved to irq_cpustat_t in
59215 * asm/hardirq.h to get better cache usage. KAO
59216 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59217
59218 struct softirq_action
59219 {
59220 - void (*action)(struct softirq_action *);
59221 + void (*action)(void);
59222 };
59223
59224 asmlinkage void do_softirq(void);
59225 asmlinkage void __do_softirq(void);
59226 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59227 +extern void open_softirq(int nr, void (*action)(void));
59228 extern void softirq_init(void);
59229 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59230 extern void raise_softirq_irqoff(unsigned int nr);
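With the struct softirq_action * parameter dropped, every softirq handler becomes a plain void (*)(void) and open_softirq() stores exactly that. The user-space toy below mirrors the patched interface so the new handler shape is visible end to end; the vector size and the dispatch loop are simplifications, not the kernel's code.

/* User-space toy mirroring the patched softirq interface. */
#include <stdio.h>

#define NR_SOFTIRQS 10

struct softirq_action {
        void (*action)(void);           /* patched type: no softirq_action * argument */
};

static struct softirq_action softirq_vec[NR_SOFTIRQS];
static unsigned int pending;

static void open_softirq(int nr, void (*action)(void))
{
        softirq_vec[nr].action = action;
}

static void raise_softirq(unsigned int nr)
{
        pending |= 1u << nr;
}

static void do_softirq(void)
{
        unsigned int nr;

        for (nr = 0; nr < NR_SOFTIRQS; nr++)
                if ((pending & (1u << nr)) && softirq_vec[nr].action)
                        softirq_vec[nr].action();
        pending = 0;
}

/* a handler under the new prototype: it no longer receives its own
 * struct softirq_action pointer */
static void timer_softirq(void)
{
        puts("timer softirq ran");
}

int main(void)
{
        open_softirq(1, timer_softirq);
        raise_softirq(1);
        do_softirq();
        return 0;
}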
59231 diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
59232 --- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59233 +++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59234 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59235 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59236 bool boot)
59237 {
59238 +#ifdef CONFIG_CPUMASK_OFFSTACK
59239 gfp_t gfp = GFP_ATOMIC;
59240
59241 if (boot)
59242 gfp = GFP_NOWAIT;
59243
59244 -#ifdef CONFIG_CPUMASK_OFFSTACK
59245 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59246 return false;
59247
59248 diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
59249 --- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59250 +++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59251 @@ -15,7 +15,8 @@
59252
59253 struct module;
59254
59255 -#ifdef CONFIG_KALLSYMS
59256 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59257 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59258 /* Lookup the address for a symbol. Returns 0 if not found. */
59259 unsigned long kallsyms_lookup_name(const char *name);
59260
59261 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59262 /* Stupid that this does nothing, but I didn't create this mess. */
59263 #define __print_symbol(fmt, addr)
59264 #endif /*CONFIG_KALLSYMS*/
59265 +#else /* when included by kallsyms.c, vsnprintf.c, or
59266 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59267 +extern void __print_symbol(const char *fmt, unsigned long address);
59268 +extern int sprint_symbol(char *buffer, unsigned long address);
59269 +const char *kallsyms_lookup(unsigned long addr,
59270 + unsigned long *symbolsize,
59271 + unsigned long *offset,
59272 + char **modname, char *namebuf);
59273 +#endif
59274
59275 /* This macro allows us to keep printk typechecking */
59276 static void __check_printsym_format(const char *fmt, ...)
59277 diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59278 --- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59279 +++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59280 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59281
59282 extern int kgdb_connected;
59283
59284 -extern atomic_t kgdb_setting_breakpoint;
59285 -extern atomic_t kgdb_cpu_doing_single_step;
59286 +extern atomic_unchecked_t kgdb_setting_breakpoint;
59287 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59288
59289 extern struct task_struct *kgdb_usethread;
59290 extern struct task_struct *kgdb_contthread;
59291 @@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59292 * hardware debug registers.
59293 */
59294 struct kgdb_arch {
59295 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59296 - unsigned long flags;
59297 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59298 + const unsigned long flags;
59299
59300 int (*set_breakpoint)(unsigned long, char *);
59301 int (*remove_breakpoint)(unsigned long, char *);
59302 @@ -251,20 +251,20 @@ struct kgdb_arch {
59303 */
59304 struct kgdb_io {
59305 const char *name;
59306 - int (*read_char) (void);
59307 - void (*write_char) (u8);
59308 - void (*flush) (void);
59309 - int (*init) (void);
59310 - void (*pre_exception) (void);
59311 - void (*post_exception) (void);
59312 + int (* const read_char) (void);
59313 + void (* const write_char) (u8);
59314 + void (* const flush) (void);
59315 + int (* const init) (void);
59316 + void (* const pre_exception) (void);
59317 + void (* const post_exception) (void);
59318 };
59319
59320 -extern struct kgdb_arch arch_kgdb_ops;
59321 +extern const struct kgdb_arch arch_kgdb_ops;
59322
59323 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59324
59325 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59326 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59327 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59328 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59329
59330 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59331 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59332 diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59333 --- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59334 +++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59335 @@ -31,6 +31,8 @@
59336 * usually useless though. */
59337 extern int __request_module(bool wait, const char *name, ...) \
59338 __attribute__((format(printf, 2, 3)));
59339 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59340 + __attribute__((format(printf, 3, 4)));
59341 #define request_module(mod...) __request_module(true, mod)
59342 #define request_module_nowait(mod...) __request_module(false, mod)
59343 #define try_then_request_module(x, mod...) \
59344 diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59345 --- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59346 +++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59347 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59348
59349 struct kobj_type {
59350 void (*release)(struct kobject *kobj);
59351 - struct sysfs_ops *sysfs_ops;
59352 + const struct sysfs_ops *sysfs_ops;
59353 struct attribute **default_attrs;
59354 };
59355
59356 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
59357 };
59358
59359 struct kset_uevent_ops {
59360 - int (*filter)(struct kset *kset, struct kobject *kobj);
59361 - const char *(*name)(struct kset *kset, struct kobject *kobj);
59362 - int (*uevent)(struct kset *kset, struct kobject *kobj,
59363 + int (* const filter)(struct kset *kset, struct kobject *kobj);
59364 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
59365 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
59366 struct kobj_uevent_env *env);
59367 };
59368
59369 @@ -132,7 +132,7 @@ struct kobj_attribute {
59370 const char *buf, size_t count);
59371 };
59372
59373 -extern struct sysfs_ops kobj_sysfs_ops;
59374 +extern const struct sysfs_ops kobj_sysfs_ops;
59375
59376 /**
59377 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59378 @@ -155,14 +155,14 @@ struct kset {
59379 struct list_head list;
59380 spinlock_t list_lock;
59381 struct kobject kobj;
59382 - struct kset_uevent_ops *uevent_ops;
59383 + const struct kset_uevent_ops *uevent_ops;
59384 };
59385
59386 extern void kset_init(struct kset *kset);
59387 extern int __must_check kset_register(struct kset *kset);
59388 extern void kset_unregister(struct kset *kset);
59389 extern struct kset * __must_check kset_create_and_add(const char *name,
59390 - struct kset_uevent_ops *u,
59391 + const struct kset_uevent_ops *u,
59392 struct kobject *parent_kobj);
59393
59394 static inline struct kset *to_kset(struct kobject *kobj)
59395 diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59396 --- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59397 +++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59398 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59399 void vcpu_load(struct kvm_vcpu *vcpu);
59400 void vcpu_put(struct kvm_vcpu *vcpu);
59401
59402 -int kvm_init(void *opaque, unsigned int vcpu_size,
59403 +int kvm_init(const void *opaque, unsigned int vcpu_size,
59404 struct module *module);
59405 void kvm_exit(void);
59406
59407 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59408 struct kvm_guest_debug *dbg);
59409 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59410
59411 -int kvm_arch_init(void *opaque);
59412 +int kvm_arch_init(const void *opaque);
59413 void kvm_arch_exit(void);
59414
59415 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59416 diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59417 --- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59418 +++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59419 @@ -525,11 +525,11 @@ struct ata_ioports {
59420
59421 struct ata_host {
59422 spinlock_t lock;
59423 - struct device *dev;
59424 + struct device *dev;
59425 void __iomem * const *iomap;
59426 unsigned int n_ports;
59427 void *private_data;
59428 - struct ata_port_operations *ops;
59429 + const struct ata_port_operations *ops;
59430 unsigned long flags;
59431 #ifdef CONFIG_ATA_ACPI
59432 acpi_handle acpi_handle;
59433 @@ -710,7 +710,7 @@ struct ata_link {
59434
59435 struct ata_port {
59436 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59437 - struct ata_port_operations *ops;
59438 + const struct ata_port_operations *ops;
59439 spinlock_t *lock;
59440 /* Flags owned by the EH context. Only EH should touch these once the
59441 port is active */
59442 @@ -883,7 +883,7 @@ struct ata_port_operations {
59443 * ->inherits must be the last field and all the preceding
59444 * fields must be pointers.
59445 */
59446 - const struct ata_port_operations *inherits;
59447 + const struct ata_port_operations * const inherits;
59448 };
59449
59450 struct ata_port_info {
59451 @@ -892,7 +892,7 @@ struct ata_port_info {
59452 unsigned long pio_mask;
59453 unsigned long mwdma_mask;
59454 unsigned long udma_mask;
59455 - struct ata_port_operations *port_ops;
59456 + const struct ata_port_operations *port_ops;
59457 void *private_data;
59458 };
59459
59460 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59461 extern const unsigned long sata_deb_timing_hotplug[];
59462 extern const unsigned long sata_deb_timing_long[];
59463
59464 -extern struct ata_port_operations ata_dummy_port_ops;
59465 +extern const struct ata_port_operations ata_dummy_port_ops;
59466 extern const struct ata_port_info ata_dummy_port_info;
59467
59468 static inline const unsigned long *
59469 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59470 struct scsi_host_template *sht);
59471 extern void ata_host_detach(struct ata_host *host);
59472 extern void ata_host_init(struct ata_host *, struct device *,
59473 - unsigned long, struct ata_port_operations *);
59474 + unsigned long, const struct ata_port_operations *);
59475 extern int ata_scsi_detect(struct scsi_host_template *sht);
59476 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59477 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59478 diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59479 --- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59480 +++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59481 @@ -23,13 +23,13 @@ struct svc_rqst;
59482 * This is the set of functions for lockd->nfsd communication
59483 */
59484 struct nlmsvc_binding {
59485 - __be32 (*fopen)(struct svc_rqst *,
59486 + __be32 (* const fopen)(struct svc_rqst *,
59487 struct nfs_fh *,
59488 struct file **);
59489 - void (*fclose)(struct file *);
59490 + void (* const fclose)(struct file *);
59491 };
59492
59493 -extern struct nlmsvc_binding * nlmsvc_ops;
59494 +extern const struct nlmsvc_binding * nlmsvc_ops;
59495
59496 /*
59497 * Similar to nfs_client_initdata, but without the NFS-specific
59498 diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59499 --- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59500 +++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59501 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59502 int region);
59503 void * (*mca_transform_memory)(struct mca_device *,
59504 void *memory);
59505 -};
59506 +} __no_const;
59507
59508 struct mca_bus {
59509 u64 default_dma_mask;
59510 diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59511 --- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59512 +++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59513 @@ -108,7 +108,7 @@ struct memory_accessor {
59514 size_t count);
59515 ssize_t (*write)(struct memory_accessor *, const char *buf,
59516 off_t offset, size_t count);
59517 -};
59518 +} __no_const;
59519
59520 /*
59521 * Kernel text modification mutex, used for code patching. Users of this lock
59522 diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59523 --- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59524 +++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59525 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59526
59527 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59528 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59529 +
59530 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59531 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59532 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59533 +#else
59534 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59535 +#endif
59536 +
59537 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59538 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59539
59540 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59541 int set_page_dirty_lock(struct page *page);
59542 int clear_page_dirty_for_io(struct page *page);
59543
59544 -/* Is the vma a continuation of the stack vma above it? */
59545 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59546 -{
59547 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59548 -}
59549 -
59550 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59551 unsigned long old_addr, struct vm_area_struct *new_vma,
59552 unsigned long new_addr, unsigned long len);
59553 @@ -890,6 +891,8 @@ struct shrinker {
59554 extern void register_shrinker(struct shrinker *);
59555 extern void unregister_shrinker(struct shrinker *);
59556
59557 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
59558 +
59559 int vma_wants_writenotify(struct vm_area_struct *vma);
59560
59561 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59562 @@ -1162,6 +1165,7 @@ out:
59563 }
59564
59565 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59566 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59567
59568 extern unsigned long do_brk(unsigned long, unsigned long);
59569
59570 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59571 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59572 struct vm_area_struct **pprev);
59573
59574 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59575 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59576 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59577 +
59578 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59579 NULL if none. Assume start_addr < end_addr. */
59580 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59581 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59582 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59583 }
59584
59585 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59586 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59587 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59588 unsigned long pfn, unsigned long size, pgprot_t);
59589 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59590 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59591 extern int sysctl_memory_failure_early_kill;
59592 extern int sysctl_memory_failure_recovery;
59593 -extern atomic_long_t mce_bad_pages;
59594 +extern atomic_long_unchecked_t mce_bad_pages;
59595 +
59596 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59597 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59598 +#else
59599 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59600 +#endif
59601
59602 #endif /* __KERNEL__ */
59603 #endif /* _LINUX_MM_H */
59604 diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59605 --- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59606 +++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59607 @@ -186,6 +186,8 @@ struct vm_area_struct {
59608 #ifdef CONFIG_NUMA
59609 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59610 #endif
59611 +
59612 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59613 };
59614
59615 struct core_thread {
59616 @@ -287,6 +289,24 @@ struct mm_struct {
59617 #ifdef CONFIG_MMU_NOTIFIER
59618 struct mmu_notifier_mm *mmu_notifier_mm;
59619 #endif
59620 +
59621 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59622 + unsigned long pax_flags;
59623 +#endif
59624 +
59625 +#ifdef CONFIG_PAX_DLRESOLVE
59626 + unsigned long call_dl_resolve;
59627 +#endif
59628 +
59629 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59630 + unsigned long call_syscall;
59631 +#endif
59632 +
59633 +#ifdef CONFIG_PAX_ASLR
59634 + unsigned long delta_mmap; /* randomized offset */
59635 + unsigned long delta_stack; /* randomized offset */
59636 +#endif
59637 +
59638 };
59639
59640 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
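Note on the two ASLR fields added above: delta_mmap and delta_stack hold per-mm randomized offsets that the rest of the patch adds to the mmap and stack bases when randomization is active. A minimal userspace sketch of the idea; the entropy width, PAGE_SHIFT value and base address are illustrative assumptions, not the patch's per-arch values:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT      12
    #define DELTA_MMAP_LEN  16   /* assumed example entropy width; real values are per-arch */

    /* Turn LEN random bits into a page-aligned offset. */
    static unsigned long make_delta(unsigned long rnd, unsigned int len)
    {
            return (rnd & ((1UL << len) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
            unsigned long mmap_base = 0x40000000UL;          /* illustrative base */
            unsigned long delta_mmap = make_delta(random(), DELTA_MMAP_LEN);

            printf("randomized mmap base: %#lx\n", mmap_base + delta_mmap);
            return 0;
    }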
59641 diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59642 --- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59643 +++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59644 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59645 */
59646 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59647 ({ \
59648 - pte_t __pte; \
59649 + pte_t ___pte; \
59650 struct vm_area_struct *___vma = __vma; \
59651 unsigned long ___address = __address; \
59652 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59653 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59654 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59655 - __pte; \
59656 + ___pte; \
59657 })
59658
59659 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
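Note on the __pte -> ___pte rename: it is a macro-hygiene fix. A local named __pte inside the statement expression can capture a __pte appearing in the caller's arguments, silently changing what the expression computes; the nodemask.h hunk further down does the same for the any_online_node() macro's node variable. A standalone GCC-C illustration of the hazard, with made-up macro names:

    #include <stdio.h>

    /* Unhygienic: the macro's own local can capture the caller's argument. */
    #define DOUBLE_BAD(v)   ({ int __tmp = 2; (v) * __tmp; })
    /* Safer in practice: a name callers are very unlikely to use. */
    #define DOUBLE_GOOD(v)  ({ int ___tmp = 2; (v) * ___tmp; })

    int main(void)
    {
            int __tmp = 21;

            printf("%d\n", DOUBLE_BAD(__tmp));   /* prints 4: (v) binds to the macro's __tmp */
            printf("%d\n", DOUBLE_GOOD(__tmp));  /* prints 42, as intended */
            return 0;
    }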
59660 diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59661 --- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59662 +++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59663 @@ -350,7 +350,7 @@ struct zone {
59664 unsigned long flags; /* zone flags, see below */
59665
59666 /* Zone statistics */
59667 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59668 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59669
59670 /*
59671 * prev_priority holds the scanning priority for this zone. It is
59672 diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59673 --- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59674 +++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59675 @@ -12,7 +12,7 @@
59676 typedef unsigned long kernel_ulong_t;
59677 #endif
59678
59679 -#define PCI_ANY_ID (~0)
59680 +#define PCI_ANY_ID ((__u16)~0)
59681
59682 struct pci_device_id {
59683 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59684 @@ -131,7 +131,7 @@ struct usb_device_id {
59685 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59686 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59687
59688 -#define HID_ANY_ID (~0)
59689 +#define HID_ANY_ID (~0U)
59690
59691 struct hid_device_id {
59692 __u16 bus;
59693 diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59694 --- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59695 +++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59696 @@ -16,6 +16,7 @@
59697 #include <linux/kobject.h>
59698 #include <linux/moduleparam.h>
59699 #include <linux/tracepoint.h>
59700 +#include <linux/fs.h>
59701
59702 #include <asm/local.h>
59703 #include <asm/module.h>
59704 @@ -287,16 +288,16 @@ struct module
59705 int (*init)(void);
59706
59707 /* If this is non-NULL, vfree after init() returns */
59708 - void *module_init;
59709 + void *module_init_rx, *module_init_rw;
59710
59711 /* Here is the actual code + data, vfree'd on unload. */
59712 - void *module_core;
59713 + void *module_core_rx, *module_core_rw;
59714
59715 /* Here are the sizes of the init and core sections */
59716 - unsigned int init_size, core_size;
59717 + unsigned int init_size_rw, core_size_rw;
59718
59719 /* The size of the executable code in each section. */
59720 - unsigned int init_text_size, core_text_size;
59721 + unsigned int init_size_rx, core_size_rx;
59722
59723 /* Arch-specific module values */
59724 struct mod_arch_specific arch;
59725 @@ -345,6 +346,10 @@ struct module
59726 #ifdef CONFIG_EVENT_TRACING
59727 struct ftrace_event_call *trace_events;
59728 unsigned int num_trace_events;
59729 + struct file_operations trace_id;
59730 + struct file_operations trace_enable;
59731 + struct file_operations trace_format;
59732 + struct file_operations trace_filter;
59733 #endif
59734 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59735 unsigned long *ftrace_callsites;
59736 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59737 bool is_module_address(unsigned long addr);
59738 bool is_module_text_address(unsigned long addr);
59739
59740 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59741 +{
59742 +
59743 +#ifdef CONFIG_PAX_KERNEXEC
59744 + if (ktla_ktva(addr) >= (unsigned long)start &&
59745 + ktla_ktva(addr) < (unsigned long)start + size)
59746 + return 1;
59747 +#endif
59748 +
59749 + return ((void *)addr >= start && (void *)addr < start + size);
59750 +}
59751 +
59752 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59753 +{
59754 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59755 +}
59756 +
59757 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59758 +{
59759 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59760 +}
59761 +
59762 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59763 +{
59764 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59765 +}
59766 +
59767 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59768 +{
59769 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59770 +}
59771 +
59772 static inline int within_module_core(unsigned long addr, struct module *mod)
59773 {
59774 - return (unsigned long)mod->module_core <= addr &&
59775 - addr < (unsigned long)mod->module_core + mod->core_size;
59776 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59777 }
59778
59779 static inline int within_module_init(unsigned long addr, struct module *mod)
59780 {
59781 - return (unsigned long)mod->module_init <= addr &&
59782 - addr < (unsigned long)mod->module_init + mod->init_size;
59783 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59784 }
59785
59786 /* Search for module by name: must hold module_mutex. */
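Note on the module_core/module_init split above: a module's code and data now live in separate RX and RW allocations, so a single base+size pair no longer describes the module and the within_module_*() helpers test both halves (with a ktla_ktva() translation under KERNEXEC). A small userspace model of that two-range membership test; the names are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct region {
            const char *base;
            unsigned long size;
    };

    static bool in_region(const char *addr, struct region r)
    {
            return addr >= r.base && addr < r.base + r.size;
    }

    int main(void)
    {
            static char rx[4096], rw[4096];     /* stand-ins for module_core_rx/_rw */
            struct region core_rx = { rx, sizeof rx };
            struct region core_rw = { rw, sizeof rw };
            const char *p = rw + 100;

            /* "within module core" now means: inside either the RX or the RW half */
            printf("%d\n", in_region(p, core_rx) || in_region(p, core_rw));
            return 0;
    }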
59787 diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59788 --- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59789 +++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59790 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59791 sections. Returns NULL on failure. */
59792 void *module_alloc(unsigned long size);
59793
59794 +#ifdef CONFIG_PAX_KERNEXEC
59795 +void *module_alloc_exec(unsigned long size);
59796 +#else
59797 +#define module_alloc_exec(x) module_alloc(x)
59798 +#endif
59799 +
59800 /* Free memory returned from module_alloc. */
59801 void module_free(struct module *mod, void *module_region);
59802
59803 +#ifdef CONFIG_PAX_KERNEXEC
59804 +void module_free_exec(struct module *mod, void *module_region);
59805 +#else
59806 +#define module_free_exec(x, y) module_free((x), (y))
59807 +#endif
59808 +
59809 /* Apply the given relocation to the (simplified) ELF. Return -error
59810 or 0. */
59811 int apply_relocate(Elf_Shdr *sechdrs,
59812 diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59813 --- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59814 +++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59815 @@ -132,7 +132,7 @@ struct kparam_array
59816
59817 /* Actually copy string: maxlen param is usually sizeof(string). */
59818 #define module_param_string(name, string, len, perm) \
59819 - static const struct kparam_string __param_string_##name \
59820 + static const struct kparam_string __param_string_##name __used \
59821 = { len, string }; \
59822 __module_param_call(MODULE_PARAM_PREFIX, name, \
59823 param_set_copystring, param_get_string, \
59824 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59825
59826 /* Comma-separated array: *nump is set to number they actually specified. */
59827 #define module_param_array_named(name, array, type, nump, perm) \
59828 - static const struct kparam_array __param_arr_##name \
59829 + static const struct kparam_array __param_arr_##name __used \
59830 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59831 sizeof(array[0]), array }; \
59832 __module_param_call(MODULE_PARAM_PREFIX, name, \
59833 diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59834 --- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59835 +++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59836 @@ -51,7 +51,7 @@ struct mutex {
59837 spinlock_t wait_lock;
59838 struct list_head wait_list;
59839 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59840 - struct thread_info *owner;
59841 + struct task_struct *owner;
59842 #endif
59843 #ifdef CONFIG_DEBUG_MUTEXES
59844 const char *name;
59845 diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59846 --- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59847 +++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59848 @@ -22,7 +22,7 @@ struct nameidata {
59849 unsigned int flags;
59850 int last_type;
59851 unsigned depth;
59852 - char *saved_names[MAX_NESTED_LINKS + 1];
59853 + const char *saved_names[MAX_NESTED_LINKS + 1];
59854
59855 /* Intent data */
59856 union {
59857 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59858 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59859 extern void unlock_rename(struct dentry *, struct dentry *);
59860
59861 -static inline void nd_set_link(struct nameidata *nd, char *path)
59862 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59863 {
59864 nd->saved_names[nd->depth] = path;
59865 }
59866
59867 -static inline char *nd_get_link(struct nameidata *nd)
59868 +static inline const char *nd_get_link(const struct nameidata *nd)
59869 {
59870 return nd->saved_names[nd->depth];
59871 }
59872 diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59873 --- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59874 +++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59875 @@ -0,0 +1,9 @@
59876 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59877 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59878 +
59879 +struct xt_gradm_mtinfo {
59880 + __u16 flags;
59881 + __u16 invflags;
59882 +};
59883 +
59884 +#endif
59885 diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59886 --- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59887 +++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59888 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59889
59890 #define any_online_node(mask) \
59891 ({ \
59892 - int node; \
59893 - for_each_node_mask(node, (mask)) \
59894 - if (node_online(node)) \
59895 + int __node; \
59896 + for_each_node_mask(__node, (mask)) \
59897 + if (node_online(__node)) \
59898 break; \
59899 - node; \
59900 + __node; \
59901 })
59902
59903 #define num_online_nodes() num_node_state(N_ONLINE)
59904 diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59905 --- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59906 +++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59907 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59908 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59909 char const * name, ulong * val);
59910
59911 -/** Create a file for read-only access to an atomic_t. */
59912 +/** Create a file for read-only access to an atomic_unchecked_t. */
59913 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59914 - char const * name, atomic_t * val);
59915 + char const * name, atomic_unchecked_t * val);
59916
59917 /** create a directory */
59918 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59919 diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59920 --- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59921 +++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59922 @@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59923 if (((unsigned long)uaddr & PAGE_MASK) !=
59924 ((unsigned long)end & PAGE_MASK))
59925 ret = __get_user(c, end);
59926 + (void)c;
59927 }
59928 return ret;
59929 }
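Note on the added (void)c: __get_user() assigns to c purely to probe the page, so c is set but never read and compilers with -Wunused-but-set-variable warn about it; the cast to void documents that this is intentional. A tiny standalone equivalent, with a made-up GET() macro standing in for __get_user():

    /* Mimics __get_user(x, ptr): assigns *ptr to x and yields 0 on success. */
    #define GET(x, ptr) ({ (x) = *(ptr); 0; })

    static int fault_in(const int *p)
    {
            int c;
            int ret = GET(c, p);    /* only the access matters, not c's value */
            (void)c;                /* silences "variable set but not used" warnings */
            return ret;
    }

    int main(void)
    {
            int v = 123;
            return fault_in(&v);
    }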
59930 diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59931 --- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59932 +++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59933 @@ -476,7 +476,7 @@ struct hw_perf_event {
59934 struct hrtimer hrtimer;
59935 };
59936 };
59937 - atomic64_t prev_count;
59938 + atomic64_unchecked_t prev_count;
59939 u64 sample_period;
59940 u64 last_period;
59941 atomic64_t period_left;
59942 @@ -557,7 +557,7 @@ struct perf_event {
59943 const struct pmu *pmu;
59944
59945 enum perf_event_active_state state;
59946 - atomic64_t count;
59947 + atomic64_unchecked_t count;
59948
59949 /*
59950 * These are the total time in nanoseconds that the event
59951 @@ -595,8 +595,8 @@ struct perf_event {
59952 * These accumulate total time (in nanoseconds) that children
59953 * events have been enabled and running, respectively.
59954 */
59955 - atomic64_t child_total_time_enabled;
59956 - atomic64_t child_total_time_running;
59957 + atomic64_unchecked_t child_total_time_enabled;
59958 + atomic64_unchecked_t child_total_time_running;
59959
59960 /*
59961 * Protect attach/detach and child_list:
59962 diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59963 --- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59964 +++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59965 @@ -46,9 +46,9 @@ struct pipe_inode_info {
59966 wait_queue_head_t wait;
59967 unsigned int nrbufs, curbuf;
59968 struct page *tmp_page;
59969 - unsigned int readers;
59970 - unsigned int writers;
59971 - unsigned int waiting_writers;
59972 + atomic_t readers;
59973 + atomic_t writers;
59974 + atomic_t waiting_writers;
59975 unsigned int r_counter;
59976 unsigned int w_counter;
59977 struct fasync_struct *fasync_readers;
59978 diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
59979 --- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59980 +++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59981 @@ -19,8 +19,8 @@
59982 * under normal circumstances, used to verify that nobody uses
59983 * non-initialized list entries.
59984 */
59985 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59986 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59987 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59988 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59989
59990 /********** include/linux/timer.h **********/
59991 /*
59992 diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
59993 --- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59994 +++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59995 @@ -67,7 +67,7 @@ struct k_itimer {
59996 };
59997
59998 struct k_clock {
59999 - int res; /* in nanoseconds */
60000 + const int res; /* in nanoseconds */
60001 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
60002 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
60003 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
60004 diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
60005 --- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
60006 +++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
60007 @@ -110,7 +110,7 @@ struct preempt_ops {
60008 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60009 void (*sched_out)(struct preempt_notifier *notifier,
60010 struct task_struct *next);
60011 -};
60012 +} __no_const;
60013
60014 /**
60015 * preempt_notifier - key for installing preemption notifiers
60016 diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
60017 --- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
60018 +++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
60019 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60020 return proc_create_data(name, mode, parent, proc_fops, NULL);
60021 }
60022
60023 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60024 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60025 +{
60026 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60027 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60028 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60029 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60030 +#else
60031 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60032 +#endif
60033 +}
60034 +
60035 +
60036 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60037 mode_t mode, struct proc_dir_entry *base,
60038 read_proc_t *read_proc, void * data)
60039 @@ -256,7 +269,7 @@ union proc_op {
60040 int (*proc_show)(struct seq_file *m,
60041 struct pid_namespace *ns, struct pid *pid,
60042 struct task_struct *task);
60043 -};
60044 +} __no_const;
60045
60046 struct ctl_table_header;
60047 struct ctl_table;
60048 diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
60049 --- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60050 +++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60051 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60052 extern void exit_ptrace(struct task_struct *tracer);
60053 #define PTRACE_MODE_READ 1
60054 #define PTRACE_MODE_ATTACH 2
60055 -/* Returns 0 on success, -errno on denial. */
60056 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60057 /* Returns true on success, false on denial. */
60058 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60059 +/* Returns true on success, false on denial. */
60060 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60061
60062 static inline int ptrace_reparented(struct task_struct *child)
60063 {
60064 diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
60065 --- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
60066 +++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60067 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
60068 u32 random32(void);
60069 void srandom32(u32 seed);
60070
60071 +static inline unsigned long pax_get_random_long(void)
60072 +{
60073 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60074 +}
60075 +
60076 #endif /* __KERNEL___ */
60077
60078 #endif /* _LINUX_RANDOM_H */
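Note on pax_get_random_long(): it widens the 32-bit random32() generator to a long-sized value, using one call for the low word and, when long is 64-bit, a second call shifted into the high word. A standalone equivalent with libc's rand() standing in for random32():

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int rand32(void)            /* stand-in for the kernel's random32() */
    {
            return ((unsigned int)rand() << 16) ^ (unsigned int)rand();
    }

    static unsigned long get_random_long(void)
    {
            unsigned long r = rand32();

            if (sizeof(long) > 4)               /* add a high word where long is 64-bit */
                    r += (unsigned long)rand32() << 16 << 16;
            return r;
    }

    int main(void)
    {
            srand(1);
            printf("%#lx\n", get_random_long());
            return 0;
    }

The double << 16 avoids a shift-width warning when the code is compiled with a 32-bit long, where that branch is dead anyway.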
60079 diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
60080 --- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60081 +++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60082 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60083 * Architecture-specific implementations of sys_reboot commands.
60084 */
60085
60086 -extern void machine_restart(char *cmd);
60087 -extern void machine_halt(void);
60088 -extern void machine_power_off(void);
60089 +extern void machine_restart(char *cmd) __noreturn;
60090 +extern void machine_halt(void) __noreturn;
60091 +extern void machine_power_off(void) __noreturn;
60092
60093 extern void machine_shutdown(void);
60094 struct pt_regs;
60095 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60096 */
60097
60098 extern void kernel_restart_prepare(char *cmd);
60099 -extern void kernel_restart(char *cmd);
60100 -extern void kernel_halt(void);
60101 -extern void kernel_power_off(void);
60102 +extern void kernel_restart(char *cmd) __noreturn;
60103 +extern void kernel_halt(void) __noreturn;
60104 +extern void kernel_power_off(void) __noreturn;
60105
60106 void ctrl_alt_del(void);
60107
60108 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60109 * Emergency restart, callable from an interrupt handler.
60110 */
60111
60112 -extern void emergency_restart(void);
60113 +extern void emergency_restart(void) __noreturn;
60114 #include <asm/emergency-restart.h>
60115
60116 #endif
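Note on the __noreturn annotations: telling the compiler that the restart/halt/power-off entry points never return lets callers drop unreachable code and avoids spurious missing-return warnings. A standalone illustration:

    #include <stdlib.h>

    __attribute__((noreturn)) static void die(const char *msg)
    {
            (void)msg;
            exit(1);
    }

    static int pick(int x)
    {
            if (x > 0)
                    return x;
            die("non-positive");    /* known never to return ...                    */
    }                               /* ... so no "control reaches end" warning here */

    int main(void)
    {
            return pick(3) == 3 ? 0 : 1;
    }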
60117 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
60118 --- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60119 +++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60120 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60121 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60122
60123 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60124 -#define get_generation(s) atomic_read (&fs_generation(s))
60125 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60126 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60127 #define __fs_changed(gen,s) (gen != get_generation (s))
60128 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60129 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60130 */
60131
60132 struct item_operations {
60133 - int (*bytes_number) (struct item_head * ih, int block_size);
60134 - void (*decrement_key) (struct cpu_key *);
60135 - int (*is_left_mergeable) (struct reiserfs_key * ih,
60136 + int (* const bytes_number) (struct item_head * ih, int block_size);
60137 + void (* const decrement_key) (struct cpu_key *);
60138 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
60139 unsigned long bsize);
60140 - void (*print_item) (struct item_head *, char *item);
60141 - void (*check_item) (struct item_head *, char *item);
60142 + void (* const print_item) (struct item_head *, char *item);
60143 + void (* const check_item) (struct item_head *, char *item);
60144
60145 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60146 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60147 int is_affected, int insert_size);
60148 - int (*check_left) (struct virtual_item * vi, int free,
60149 + int (* const check_left) (struct virtual_item * vi, int free,
60150 int start_skip, int end_skip);
60151 - int (*check_right) (struct virtual_item * vi, int free);
60152 - int (*part_size) (struct virtual_item * vi, int from, int to);
60153 - int (*unit_num) (struct virtual_item * vi);
60154 - void (*print_vi) (struct virtual_item * vi);
60155 + int (* const check_right) (struct virtual_item * vi, int free);
60156 + int (* const part_size) (struct virtual_item * vi, int from, int to);
60157 + int (* const unit_num) (struct virtual_item * vi);
60158 + void (* const print_vi) (struct virtual_item * vi);
60159 };
60160
60161 -extern struct item_operations *item_ops[TYPE_ANY + 1];
60162 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60163
60164 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60165 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60166 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
60167 --- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60168 +++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60169 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60170 /* Comment? -Hans */
60171 wait_queue_head_t s_wait;
60172 /* To be obsoleted soon by per buffer seals.. -Hans */
60173 - atomic_t s_generation_counter; // increased by one every time the
60174 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60175 // tree gets re-balanced
60176 unsigned long s_properties; /* File system properties. Currently holds
60177 on-disk FS format */
60178 diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
60179 --- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60180 +++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60181 @@ -159,7 +159,7 @@ struct rchan_callbacks
60182 * The callback should return 0 if successful, negative if not.
60183 */
60184 int (*remove_buf_file)(struct dentry *dentry);
60185 -};
60186 +} __no_const;
60187
60188 /*
60189 * CONFIG_RELAY kernel API, kernel/relay.c
60190 diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
60191 --- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60192 +++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
60193 @@ -101,6 +101,7 @@ struct bio;
60194 struct fs_struct;
60195 struct bts_context;
60196 struct perf_event_context;
60197 +struct linux_binprm;
60198
60199 /*
60200 * List of flags we want to share for kernel threads,
60201 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60202 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60203 asmlinkage void __schedule(void);
60204 asmlinkage void schedule(void);
60205 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60206 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60207
60208 struct nsproxy;
60209 struct user_namespace;
60210 @@ -371,9 +372,12 @@ struct user_namespace;
60211 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60212
60213 extern int sysctl_max_map_count;
60214 +extern unsigned long sysctl_heap_stack_gap;
60215
60216 #include <linux/aio.h>
60217
60218 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60219 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60220 extern unsigned long
60221 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60222 unsigned long, unsigned long);
60223 @@ -666,6 +670,16 @@ struct signal_struct {
60224 struct tty_audit_buf *tty_audit_buf;
60225 #endif
60226
60227 +#ifdef CONFIG_GRKERNSEC
60228 + u32 curr_ip;
60229 + u32 saved_ip;
60230 + u32 gr_saddr;
60231 + u32 gr_daddr;
60232 + u16 gr_sport;
60233 + u16 gr_dport;
60234 + u8 used_accept:1;
60235 +#endif
60236 +
60237 int oom_adj; /* OOM kill score adjustment (bit shift) */
60238 };
60239
60240 @@ -723,6 +737,11 @@ struct user_struct {
60241 struct key *session_keyring; /* UID's default session keyring */
60242 #endif
60243
60244 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60245 + unsigned int banned;
60246 + unsigned long ban_expires;
60247 +#endif
60248 +
60249 /* Hash table maintenance information */
60250 struct hlist_node uidhash_node;
60251 uid_t uid;
60252 @@ -1328,8 +1347,8 @@ struct task_struct {
60253 struct list_head thread_group;
60254
60255 struct completion *vfork_done; /* for vfork() */
60256 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60257 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60258 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60259 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60260
60261 cputime_t utime, stime, utimescaled, stimescaled;
60262 cputime_t gtime;
60263 @@ -1343,16 +1362,6 @@ struct task_struct {
60264 struct task_cputime cputime_expires;
60265 struct list_head cpu_timers[3];
60266
60267 -/* process credentials */
60268 - const struct cred *real_cred; /* objective and real subjective task
60269 - * credentials (COW) */
60270 - const struct cred *cred; /* effective (overridable) subjective task
60271 - * credentials (COW) */
60272 - struct mutex cred_guard_mutex; /* guard against foreign influences on
60273 - * credential calculations
60274 - * (notably. ptrace) */
60275 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60276 -
60277 char comm[TASK_COMM_LEN]; /* executable name excluding path
60278 - access with [gs]et_task_comm (which lock
60279 it with task_lock())
60280 @@ -1369,6 +1378,10 @@ struct task_struct {
60281 #endif
60282 /* CPU-specific state of this task */
60283 struct thread_struct thread;
60284 +/* thread_info moved to task_struct */
60285 +#ifdef CONFIG_X86
60286 + struct thread_info tinfo;
60287 +#endif
60288 /* filesystem information */
60289 struct fs_struct *fs;
60290 /* open file information */
60291 @@ -1436,6 +1449,15 @@ struct task_struct {
60292 int hardirq_context;
60293 int softirq_context;
60294 #endif
60295 +
60296 +/* process credentials */
60297 + const struct cred *real_cred; /* objective and real subjective task
60298 + * credentials (COW) */
60299 + struct mutex cred_guard_mutex; /* guard against foreign influences on
60300 + * credential calculations
60301 + * (notably. ptrace) */
60302 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60303 +
60304 #ifdef CONFIG_LOCKDEP
60305 # define MAX_LOCK_DEPTH 48UL
60306 u64 curr_chain_key;
60307 @@ -1456,6 +1478,9 @@ struct task_struct {
60308
60309 struct backing_dev_info *backing_dev_info;
60310
60311 + const struct cred *cred; /* effective (overridable) subjective task
60312 + * credentials (COW) */
60313 +
60314 struct io_context *io_context;
60315
60316 unsigned long ptrace_message;
60317 @@ -1519,6 +1544,21 @@ struct task_struct {
60318 unsigned long default_timer_slack_ns;
60319
60320 struct list_head *scm_work_list;
60321 +
60322 +#ifdef CONFIG_GRKERNSEC
60323 + /* grsecurity */
60324 + struct dentry *gr_chroot_dentry;
60325 + struct acl_subject_label *acl;
60326 + struct acl_role_label *role;
60327 + struct file *exec_file;
60328 + u16 acl_role_id;
60329 + /* is this the task that authenticated to the special role */
60330 + u8 acl_sp_role;
60331 + u8 is_writable;
60332 + u8 brute;
60333 + u8 gr_is_chrooted;
60334 +#endif
60335 +
60336 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60337 /* Index of current stored adress in ret_stack */
60338 int curr_ret_stack;
60339 @@ -1542,6 +1582,57 @@ struct task_struct {
60340 #endif /* CONFIG_TRACING */
60341 };
60342
60343 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60344 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60345 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60346 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60347 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60348 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60349 +
60350 +#ifdef CONFIG_PAX_SOFTMODE
60351 +extern int pax_softmode;
60352 +#endif
60353 +
60354 +extern int pax_check_flags(unsigned long *);
60355 +
60356 +/* if tsk != current then task_lock must be held on it */
60357 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60358 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60359 +{
60360 + if (likely(tsk->mm))
60361 + return tsk->mm->pax_flags;
60362 + else
60363 + return 0UL;
60364 +}
60365 +
60366 +/* if tsk != current then task_lock must be held on it */
60367 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60368 +{
60369 + if (likely(tsk->mm)) {
60370 + tsk->mm->pax_flags = flags;
60371 + return 0;
60372 + }
60373 + return -EINVAL;
60374 +}
60375 +#endif
60376 +
60377 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60378 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60379 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60380 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60381 +#endif
60382 +
60383 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60384 +extern void pax_report_insns(void *pc, void *sp);
60385 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60386 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60387 +
60388 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60389 +extern void pax_track_stack(void);
60390 +#else
60391 +static inline void pax_track_stack(void) {}
60392 +#endif
60393 +
60394 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60395 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60396
60397 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60398 #define PF_DUMPCORE 0x00000200 /* dumped core */
60399 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60400 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60401 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60402 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60403 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60404 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60405 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60406 @@ -1978,7 +2069,9 @@ void yield(void);
60407 extern struct exec_domain default_exec_domain;
60408
60409 union thread_union {
60410 +#ifndef CONFIG_X86
60411 struct thread_info thread_info;
60412 +#endif
60413 unsigned long stack[THREAD_SIZE/sizeof(long)];
60414 };
60415
60416 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60417 */
60418
60419 extern struct task_struct *find_task_by_vpid(pid_t nr);
60420 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60421 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60422 struct pid_namespace *ns);
60423
60424 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60425 extern void exit_itimers(struct signal_struct *);
60426 extern void flush_itimer_signals(void);
60427
60428 -extern NORET_TYPE void do_group_exit(int);
60429 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60430
60431 extern void daemonize(const char *, ...);
60432 extern int allow_signal(int);
60433 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60434
60435 #endif
60436
60437 -static inline int object_is_on_stack(void *obj)
60438 +static inline int object_starts_on_stack(void *obj)
60439 {
60440 - void *stack = task_stack_page(current);
60441 + const void *stack = task_stack_page(current);
60442
60443 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60444 }
60445
60446 +#ifdef CONFIG_PAX_USERCOPY
60447 +extern int object_is_on_stack(const void *obj, unsigned long len);
60448 +#endif
60449 +
60450 extern void thread_info_cache_init(void);
60451
60452 #ifdef CONFIG_DEBUG_STACK_USAGE
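Note on check_heap_stack_gap()/sysctl_heap_stack_gap: this pair replaces the bare "!vma || addr + len <= vma->vm_start" test at the arch_get_unmapped_area() call sites so that a configurable gap is kept between a new mapping and a stack that grows down toward it. The real policy lives in the mm/ part of the patch; the sketch below is only a plausible shape of such a check, with the default gap and the VM_GROWSDOWN-only condition as assumptions:

    #include <stdbool.h>
    #include <stdio.h>

    struct vm_area_struct {                     /* simplified stand-in */
            unsigned long vm_start;
            unsigned long vm_flags;
    };
    #define VM_GROWSDOWN 0x0100

    unsigned long sysctl_heap_stack_gap = 64UL << 12;   /* assumed default: 64 pages */

    static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long len)
    {
            if (!vma)
                    return true;
            if (addr + len > vma->vm_start)     /* overlaps the next vma: reject */
                    return false;
            if (vma->vm_flags & VM_GROWSDOWN)   /* keep a gap below stacks */
                    return vma->vm_start - (addr + len) >= sysctl_heap_stack_gap;
            return true;
    }

    int main(void)
    {
            struct vm_area_struct stack = { 0x60000000UL, VM_GROWSDOWN };

            /* Ends flush against the stack vma: rejected because the gap is zero. */
            printf("%d\n", check_heap_stack_gap(&stack, 0x5fff0000UL, 0x10000));
            return 0;
    }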
60453 diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60454 --- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60455 +++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60456 @@ -42,7 +42,8 @@ struct screen_info {
60457 __u16 pages; /* 0x32 */
60458 __u16 vesa_attributes; /* 0x34 */
60459 __u32 capabilities; /* 0x36 */
60460 - __u8 _reserved[6]; /* 0x3a */
60461 + __u16 vesapm_size; /* 0x3a */
60462 + __u8 _reserved[4]; /* 0x3c */
60463 } __attribute__((packed));
60464
60465 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60466 diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60467 --- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60468 +++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60469 @@ -34,6 +34,7 @@
60470 #include <linux/key.h>
60471 #include <linux/xfrm.h>
60472 #include <linux/gfp.h>
60473 +#include <linux/grsecurity.h>
60474 #include <net/flow.h>
60475
60476 /* Maximum number of letters for an LSM name string */
60477 diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60478 --- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60479 +++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60480 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60481 pid_t shm_cprid;
60482 pid_t shm_lprid;
60483 struct user_struct *mlock_user;
60484 +#ifdef CONFIG_GRKERNSEC
60485 + time_t shm_createtime;
60486 + pid_t shm_lapid;
60487 +#endif
60488 };
60489
60490 /* shm_mode upper byte flags */
60491 diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60492 --- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60493 +++ linux-2.6.32.45/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
60494 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
60495 */
60496 static inline int skb_queue_empty(const struct sk_buff_head *list)
60497 {
60498 - return list->next == (struct sk_buff *)list;
60499 + return list->next == (const struct sk_buff *)list;
60500 }
60501
60502 /**
60503 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
60504 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60505 const struct sk_buff *skb)
60506 {
60507 - return (skb->next == (struct sk_buff *) list);
60508 + return (skb->next == (const struct sk_buff *) list);
60509 }
60510
60511 /**
60512 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
60513 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60514 const struct sk_buff *skb)
60515 {
60516 - return (skb->prev == (struct sk_buff *) list);
60517 + return (skb->prev == (const struct sk_buff *) list);
60518 }
60519
60520 /**
60521 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
60522 * headroom, you should not reduce this.
60523 */
60524 #ifndef NET_SKB_PAD
60525 -#define NET_SKB_PAD 32
60526 +#define NET_SKB_PAD (_AC(32,UL))
60527 #endif
60528
60529 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60530 diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60531 --- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60532 +++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60533 @@ -69,10 +69,10 @@ struct kmem_cache {
60534 unsigned long node_allocs;
60535 unsigned long node_frees;
60536 unsigned long node_overflow;
60537 - atomic_t allochit;
60538 - atomic_t allocmiss;
60539 - atomic_t freehit;
60540 - atomic_t freemiss;
60541 + atomic_unchecked_t allochit;
60542 + atomic_unchecked_t allocmiss;
60543 + atomic_unchecked_t freehit;
60544 + atomic_unchecked_t freemiss;
60545
60546 /*
60547 * If debugging is enabled, then the allocator can add additional
60548 diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60549 --- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60550 +++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60551 @@ -11,12 +11,20 @@
60552
60553 #include <linux/gfp.h>
60554 #include <linux/types.h>
60555 +#include <linux/err.h>
60556
60557 /*
60558 * Flags to pass to kmem_cache_create().
60559 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60560 */
60561 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60562 +
60563 +#ifdef CONFIG_PAX_USERCOPY
60564 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60565 +#else
60566 +#define SLAB_USERCOPY 0x00000000UL
60567 +#endif
60568 +
60569 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60570 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60571 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60572 @@ -82,10 +90,13 @@
60573 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60574 * Both make kfree a no-op.
60575 */
60576 -#define ZERO_SIZE_PTR ((void *)16)
60577 +#define ZERO_SIZE_PTR \
60578 +({ \
60579 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60580 + (void *)(-MAX_ERRNO-1L); \
60581 +})
60582
60583 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60584 - (unsigned long)ZERO_SIZE_PTR)
60585 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60586
60587 /*
60588 * struct kmem_cache related prototypes
60589 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60590 void kfree(const void *);
60591 void kzfree(const void *);
60592 size_t ksize(const void *);
60593 +void check_object_size(const void *ptr, unsigned long n, bool to);
60594
60595 /*
60596 * Allocator specific definitions. These are mainly used to establish optimized
60597 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60598
60599 void __init kmem_cache_init_late(void);
60600
60601 +#define kmalloc(x, y) \
60602 +({ \
60603 + void *___retval; \
60604 + intoverflow_t ___x = (intoverflow_t)x; \
60605 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60606 + ___retval = NULL; \
60607 + else \
60608 + ___retval = kmalloc((size_t)___x, (y)); \
60609 + ___retval; \
60610 +})
60611 +
60612 +#define kmalloc_node(x, y, z) \
60613 +({ \
60614 + void *___retval; \
60615 + intoverflow_t ___x = (intoverflow_t)x; \
60616 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60617 + ___retval = NULL; \
60618 + else \
60619 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60620 + ___retval; \
60621 +})
60622 +
60623 +#define kzalloc(x, y) \
60624 +({ \
60625 + void *___retval; \
60626 + intoverflow_t ___x = (intoverflow_t)x; \
60627 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60628 + ___retval = NULL; \
60629 + else \
60630 + ___retval = kzalloc((size_t)___x, (y)); \
60631 + ___retval; \
60632 +})
60633 +
60634 #endif /* _LINUX_SLAB_H */
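Note on the kmalloc()/kmalloc_node()/kzalloc() wrapper macros added above: they evaluate the requested size in the wider intoverflow_t type first, so a product that would silently wrap in a 32-bit size_t is caught and turned into a NULL return (with a warning) instead of an undersized allocation. A userspace model of the same idea on an assumed 32-bit size type:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint64_t intoverflow_t;             /* wider than the modeled size type */
    #define SIZE_LIMIT UINT32_MAX               /* stands in for ULONG_MAX on 32-bit */

    static void *checked_alloc(intoverflow_t request)
    {
            if (request > SIZE_LIMIT) {         /* would have wrapped in 32 bits */
                    fprintf(stderr, "allocation size overflow\n");
                    return NULL;
            }
            return malloc((size_t)request);
    }

    int main(void)
    {
            uint32_t nmemb = 5, size = UINT32_MAX / 2;

            /* 5 * (UINT32_MAX / 2) wraps as a 32-bit product; as intoverflow_t it does not. */
            void *p = checked_alloc((intoverflow_t)nmemb * size);
            printf("p = %p\n", p);
            free(p);
            return 0;
    }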
60635 diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60636 --- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60637 +++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60638 @@ -86,7 +86,7 @@ struct kmem_cache {
60639 struct kmem_cache_order_objects max;
60640 struct kmem_cache_order_objects min;
60641 gfp_t allocflags; /* gfp flags to use on each alloc */
60642 - int refcount; /* Refcount for slab cache destroy */
60643 + atomic_t refcount; /* Refcount for slab cache destroy */
60644 void (*ctor)(void *);
60645 int inuse; /* Offset to metadata */
60646 int align; /* Alignment */
60647 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60648 #endif
60649
60650 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60651 -void *__kmalloc(size_t size, gfp_t flags);
60652 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60653
60654 #ifdef CONFIG_KMEMTRACE
60655 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60656 diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60657 --- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60658 +++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60659 @@ -61,7 +61,7 @@ struct sonet_stats {
60660 #include <asm/atomic.h>
60661
60662 struct k_sonet_stats {
60663 -#define __HANDLE_ITEM(i) atomic_t i
60664 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60665 __SONET_ITEMS
60666 #undef __HANDLE_ITEM
60667 };
60668 diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60669 --- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60670 +++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60671 @@ -125,7 +125,7 @@ struct cache_detail {
60672 */
60673 struct cache_req {
60674 struct cache_deferred_req *(*defer)(struct cache_req *req);
60675 -};
60676 +} __no_const;
60677 /* this must be embedded in a deferred_request that is being
60678 * delayed awaiting cache-fill
60679 */
60680 diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60681 --- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60682 +++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60683 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60684 {
60685 switch (sap->sa_family) {
60686 case AF_INET:
60687 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60688 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60689 case AF_INET6:
60690 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60691 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60692 }
60693 return 0;
60694 }
60695 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60696 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60697 const struct sockaddr *src)
60698 {
60699 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60700 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60701 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60702
60703 dsin->sin_family = ssin->sin_family;
60704 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60705 if (sa->sa_family != AF_INET6)
60706 return 0;
60707
60708 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60709 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60710 }
60711
60712 #endif /* __KERNEL__ */
60713 diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60714 --- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60715 +++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60716 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60717 extern unsigned int svcrdma_max_requests;
60718 extern unsigned int svcrdma_max_req_size;
60719
60720 -extern atomic_t rdma_stat_recv;
60721 -extern atomic_t rdma_stat_read;
60722 -extern atomic_t rdma_stat_write;
60723 -extern atomic_t rdma_stat_sq_starve;
60724 -extern atomic_t rdma_stat_rq_starve;
60725 -extern atomic_t rdma_stat_rq_poll;
60726 -extern atomic_t rdma_stat_rq_prod;
60727 -extern atomic_t rdma_stat_sq_poll;
60728 -extern atomic_t rdma_stat_sq_prod;
60729 +extern atomic_unchecked_t rdma_stat_recv;
60730 +extern atomic_unchecked_t rdma_stat_read;
60731 +extern atomic_unchecked_t rdma_stat_write;
60732 +extern atomic_unchecked_t rdma_stat_sq_starve;
60733 +extern atomic_unchecked_t rdma_stat_rq_starve;
60734 +extern atomic_unchecked_t rdma_stat_rq_poll;
60735 +extern atomic_unchecked_t rdma_stat_rq_prod;
60736 +extern atomic_unchecked_t rdma_stat_sq_poll;
60737 +extern atomic_unchecked_t rdma_stat_sq_prod;
60738
60739 #define RPCRDMA_VERSION 1
60740
60741 diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60742 --- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60743 +++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60744 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60745 * which require special recovery actions in that situation.
60746 */
60747 struct platform_suspend_ops {
60748 - int (*valid)(suspend_state_t state);
60749 - int (*begin)(suspend_state_t state);
60750 - int (*prepare)(void);
60751 - int (*prepare_late)(void);
60752 - int (*enter)(suspend_state_t state);
60753 - void (*wake)(void);
60754 - void (*finish)(void);
60755 - void (*end)(void);
60756 - void (*recover)(void);
60757 + int (* const valid)(suspend_state_t state);
60758 + int (* const begin)(suspend_state_t state);
60759 + int (* const prepare)(void);
60760 + int (* const prepare_late)(void);
60761 + int (* const enter)(suspend_state_t state);
60762 + void (* const wake)(void);
60763 + void (* const finish)(void);
60764 + void (* const end)(void);
60765 + void (* const recover)(void);
60766 };
60767
60768 #ifdef CONFIG_SUSPEND
60769 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
60770 * suspend_set_ops - set platform dependent suspend operations
60771 * @ops: The new suspend operations to set.
60772 */
60773 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
60774 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60775 extern int suspend_valid_only_mem(suspend_state_t state);
60776
60777 /**
60778 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60779 #else /* !CONFIG_SUSPEND */
60780 #define suspend_valid_only_mem NULL
60781
60782 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60783 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60784 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60785 #endif /* !CONFIG_SUSPEND */
60786
60787 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60788 * platforms which require special recovery actions in that situation.
60789 */
60790 struct platform_hibernation_ops {
60791 - int (*begin)(void);
60792 - void (*end)(void);
60793 - int (*pre_snapshot)(void);
60794 - void (*finish)(void);
60795 - int (*prepare)(void);
60796 - int (*enter)(void);
60797 - void (*leave)(void);
60798 - int (*pre_restore)(void);
60799 - void (*restore_cleanup)(void);
60800 - void (*recover)(void);
60801 + int (* const begin)(void);
60802 + void (* const end)(void);
60803 + int (* const pre_snapshot)(void);
60804 + void (* const finish)(void);
60805 + int (* const prepare)(void);
60806 + int (* const enter)(void);
60807 + void (* const leave)(void);
60808 + int (* const pre_restore)(void);
60809 + void (* const restore_cleanup)(void);
60810 + void (* const recover)(void);
60811 };
60812
60813 #ifdef CONFIG_HIBERNATION
60814 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60815 extern void swsusp_unset_page_free(struct page *);
60816 extern unsigned long get_safe_page(gfp_t gfp_mask);
60817
60818 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60819 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60820 extern int hibernate(void);
60821 extern bool system_entering_hibernation(void);
60822 #else /* CONFIG_HIBERNATION */
60823 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60824 static inline void swsusp_set_page_free(struct page *p) {}
60825 static inline void swsusp_unset_page_free(struct page *p) {}
60826
60827 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60828 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60829 static inline int hibernate(void) { return -ENOSYS; }
60830 static inline bool system_entering_hibernation(void) { return false; }
60831 #endif /* CONFIG_HIBERNATION */
60832 diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60833 --- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60834 +++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60835 @@ -164,7 +164,11 @@ enum
60836 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60837 };
60838
60839 -
60840 +#ifdef CONFIG_PAX_SOFTMODE
60841 +enum {
60842 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60843 +};
60844 +#endif
60845
60846 /* CTL_VM names: */
60847 enum
60848 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60849
60850 extern int proc_dostring(struct ctl_table *, int,
60851 void __user *, size_t *, loff_t *);
60852 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60853 + void __user *, size_t *, loff_t *);
60854 extern int proc_dointvec(struct ctl_table *, int,
60855 void __user *, size_t *, loff_t *);
60856 extern int proc_dointvec_minmax(struct ctl_table *, int,
60857 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60858
60859 extern ctl_handler sysctl_data;
60860 extern ctl_handler sysctl_string;
60861 +extern ctl_handler sysctl_string_modpriv;
60862 extern ctl_handler sysctl_intvec;
60863 extern ctl_handler sysctl_jiffies;
60864 extern ctl_handler sysctl_ms_jiffies;
60865 diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60866 --- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60867 +++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60868 @@ -75,8 +75,8 @@ struct bin_attribute {
60869 };
60870
60871 struct sysfs_ops {
60872 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
60873 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60874 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60875 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60876 };
60877
60878 struct sysfs_dirent;
60879 diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60880 --- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60881 +++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60882 @@ -23,7 +23,7 @@ struct restart_block {
60883 };
60884 /* For futex_wait and futex_wait_requeue_pi */
60885 struct {
60886 - u32 *uaddr;
60887 + u32 __user *uaddr;
60888 u32 val;
60889 u32 flags;
60890 u32 bitset;
60891 diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60892 --- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60893 +++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60894 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60895 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60896 extern void tty_ldisc_enable(struct tty_struct *tty);
60897
60898 -
60899 /* n_tty.c */
60900 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60901
60902 diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60903 --- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60904 +++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60905 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60906
60907 struct module *owner;
60908
60909 - int refcount;
60910 + atomic_t refcount;
60911 };
60912
60913 struct tty_ldisc {
60914 diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60915 --- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60916 +++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60917 @@ -191,10 +191,26 @@ typedef struct {
60918 volatile int counter;
60919 } atomic_t;
60920
60921 +#ifdef CONFIG_PAX_REFCOUNT
60922 +typedef struct {
60923 + volatile int counter;
60924 +} atomic_unchecked_t;
60925 +#else
60926 +typedef atomic_t atomic_unchecked_t;
60927 +#endif
60928 +
60929 #ifdef CONFIG_64BIT
60930 typedef struct {
60931 volatile long counter;
60932 } atomic64_t;
60933 +
60934 +#ifdef CONFIG_PAX_REFCOUNT
60935 +typedef struct {
60936 + volatile long counter;
60937 +} atomic64_unchecked_t;
60938 +#else
60939 +typedef atomic64_t atomic64_unchecked_t;
60940 +#endif
60941 #endif
60942
60943 struct ustat {
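Note on atomic_unchecked_t/atomic64_unchecked_t: under PAX_REFCOUNT the ordinary atomic_t operations gain overflow detection, so counters that are allowed to wrap, mostly statistics such as the vm_stat, rdma_stat_* and slab hit/miss counters converted throughout this patch, are moved to the *_unchecked_t variants, which keep the plain semantics. A userspace model of the distinction using a GCC/Clang overflow builtin; the kernel implementation is arch-specific assembly, not this:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { volatile int counter; } atomic_t;            /* overflow-checked under PaX */
    typedef struct { volatile int counter; } atomic_unchecked_t;  /* plain counter              */

    static void atomic_inc(atomic_t *v)
    {
            int next;

            if (__builtin_add_overflow(v->counter, 1, &next)) {
                    fprintf(stderr, "refcount overflow detected\n");
                    return;     /* model only: the real handler reports via pax_report_refcount_overflow() */
            }
            v->counter = next;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            v->counter = v->counter + 1;        /* no overflow policing: just a counter */
    }

    int main(void)
    {
            atomic_t ref = { INT_MAX };
            atomic_unchecked_t stat = { 7 };

            atomic_inc(&ref);                   /* caught by the checked variant */
            atomic_inc_unchecked(&stat);
            printf("ref=%d stat=%d\n", ref.counter, stat.counter);
            return 0;
    }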
60944 diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60945 --- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60946 +++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60947 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60948 long ret; \
60949 mm_segment_t old_fs = get_fs(); \
60950 \
60951 - set_fs(KERNEL_DS); \
60952 pagefault_disable(); \
60953 + set_fs(KERNEL_DS); \
60954 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60955 - pagefault_enable(); \
60956 set_fs(old_fs); \
60957 + pagefault_enable(); \
60958 ret; \
60959 })
60960
60961 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60962 * Safely read from address @src to the buffer at @dst. If a kernel fault
60963 * happens, handle that and return -EFAULT.
60964 */
60965 -extern long probe_kernel_read(void *dst, void *src, size_t size);
60966 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
60967
60968 /*
60969 * probe_kernel_write(): safely attempt to write to a location
60970 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60971 * Safely write to address @dst from the buffer at @src. If a kernel fault
60972 * happens, handle that and return -EFAULT.
60973 */
60974 -extern long probe_kernel_write(void *dst, void *src, size_t size);
60975 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
60976
60977 #endif /* __LINUX_UACCESS_H__ */
60978 diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
60979 --- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60980 +++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60981 @@ -6,32 +6,32 @@
60982
60983 static inline u16 get_unaligned_le16(const void *p)
60984 {
60985 - return le16_to_cpup((__le16 *)p);
60986 + return le16_to_cpup((const __le16 *)p);
60987 }
60988
60989 static inline u32 get_unaligned_le32(const void *p)
60990 {
60991 - return le32_to_cpup((__le32 *)p);
60992 + return le32_to_cpup((const __le32 *)p);
60993 }
60994
60995 static inline u64 get_unaligned_le64(const void *p)
60996 {
60997 - return le64_to_cpup((__le64 *)p);
60998 + return le64_to_cpup((const __le64 *)p);
60999 }
61000
61001 static inline u16 get_unaligned_be16(const void *p)
61002 {
61003 - return be16_to_cpup((__be16 *)p);
61004 + return be16_to_cpup((const __be16 *)p);
61005 }
61006
61007 static inline u32 get_unaligned_be32(const void *p)
61008 {
61009 - return be32_to_cpup((__be32 *)p);
61010 + return be32_to_cpup((const __be32 *)p);
61011 }
61012
61013 static inline u64 get_unaligned_be64(const void *p)
61014 {
61015 - return be64_to_cpup((__be64 *)p);
61016 + return be64_to_cpup((const __be64 *)p);
61017 }
61018
61019 static inline void put_unaligned_le16(u16 val, void *p)
61020 diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
61021 --- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
61022 +++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
61023 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61024 #define VM_MAP 0x00000004 /* vmap()ed pages */
61025 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61026 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61027 +
61028 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61029 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61030 +#endif
61031 +
61032 /* bits [20..32] reserved for arch specific ioremap internals */
61033
61034 /*
61035 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
61036
61037 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
61038
61039 +#define vmalloc(x) \
61040 +({ \
61041 + void *___retval; \
61042 + intoverflow_t ___x = (intoverflow_t)x; \
61043 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61044 + ___retval = NULL; \
61045 + else \
61046 + ___retval = vmalloc((unsigned long)___x); \
61047 + ___retval; \
61048 +})
61049 +
61050 +#define __vmalloc(x, y, z) \
61051 +({ \
61052 + void *___retval; \
61053 + intoverflow_t ___x = (intoverflow_t)x; \
61054 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61055 + ___retval = NULL; \
61056 + else \
61057 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61058 + ___retval; \
61059 +})
61060 +
61061 +#define vmalloc_user(x) \
61062 +({ \
61063 + void *___retval; \
61064 + intoverflow_t ___x = (intoverflow_t)x; \
61065 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61066 + ___retval = NULL; \
61067 + else \
61068 + ___retval = vmalloc_user((unsigned long)___x); \
61069 + ___retval; \
61070 +})
61071 +
61072 +#define vmalloc_exec(x) \
61073 +({ \
61074 + void *___retval; \
61075 + intoverflow_t ___x = (intoverflow_t)x; \
61076 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61077 + ___retval = NULL; \
61078 + else \
61079 + ___retval = vmalloc_exec((unsigned long)___x); \
61080 + ___retval; \
61081 +})
61082 +
61083 +#define vmalloc_node(x, y) \
61084 +({ \
61085 + void *___retval; \
61086 + intoverflow_t ___x = (intoverflow_t)x; \
61087 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61088 + ___retval = NULL; \
61089 + else \
61090 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61091 + ___retval; \
61092 +})
61093 +
61094 +#define vmalloc_32(x) \
61095 +({ \
61096 + void *___retval; \
61097 + intoverflow_t ___x = (intoverflow_t)x; \
61098 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61099 + ___retval = NULL; \
61100 + else \
61101 + ___retval = vmalloc_32((unsigned long)___x); \
61102 + ___retval; \
61103 +})
61104 +
61105 +#define vmalloc_32_user(x) \
61106 +({ \
61107 + void *___retval; \
61108 + intoverflow_t ___x = (intoverflow_t)x; \
61109 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61110 + ___retval = NULL; \
61111 + else \
61112 + ___retval = vmalloc_32_user((unsigned long)___x);\
61113 + ___retval; \
61114 +})
61115 +
61116 #endif /* _LINUX_VMALLOC_H */
61117 diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
61118 --- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61119 +++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61120 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61121 /*
61122 * Zone based page accounting with per cpu differentials.
61123 */
61124 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61125 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61126
61127 static inline void zone_page_state_add(long x, struct zone *zone,
61128 enum zone_stat_item item)
61129 {
61130 - atomic_long_add(x, &zone->vm_stat[item]);
61131 - atomic_long_add(x, &vm_stat[item]);
61132 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61133 + atomic_long_add_unchecked(x, &vm_stat[item]);
61134 }
61135
61136 static inline unsigned long global_page_state(enum zone_stat_item item)
61137 {
61138 - long x = atomic_long_read(&vm_stat[item]);
61139 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61140 #ifdef CONFIG_SMP
61141 if (x < 0)
61142 x = 0;
61143 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
61144 static inline unsigned long zone_page_state(struct zone *zone,
61145 enum zone_stat_item item)
61146 {
61147 - long x = atomic_long_read(&zone->vm_stat[item]);
61148 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61149 #ifdef CONFIG_SMP
61150 if (x < 0)
61151 x = 0;
61152 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61153 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61154 enum zone_stat_item item)
61155 {
61156 - long x = atomic_long_read(&zone->vm_stat[item]);
61157 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61158
61159 #ifdef CONFIG_SMP
61160 int cpu;
61161 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61162
61163 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61164 {
61165 - atomic_long_inc(&zone->vm_stat[item]);
61166 - atomic_long_inc(&vm_stat[item]);
61167 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61168 + atomic_long_inc_unchecked(&vm_stat[item]);
61169 }
61170
61171 static inline void __inc_zone_page_state(struct page *page,
61172 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61173
61174 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61175 {
61176 - atomic_long_dec(&zone->vm_stat[item]);
61177 - atomic_long_dec(&vm_stat[item]);
61178 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61179 + atomic_long_dec_unchecked(&vm_stat[item]);
61180 }
61181
61182 static inline void __dec_zone_page_state(struct page *page,
61183 diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
61184 --- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61185 +++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61186 @@ -34,7 +34,7 @@ struct v4l2_device;
61187 #define V4L2_FL_UNREGISTERED (0)
61188
61189 struct v4l2_file_operations {
61190 - struct module *owner;
61191 + struct module * const owner;
61192 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61193 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61194 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61195 diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
61196 --- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61197 +++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61198 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61199 this function returns 0. If the name ends with a digit (e.g. cx18),
61200 then the name will be set to cx18-0 since cx180 looks really odd. */
61201 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61202 - atomic_t *instance);
61203 + atomic_unchecked_t *instance);
61204
61205 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61206 Since the parent disappears this ensures that v4l2_dev doesn't have an
61207 diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
61208 --- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61209 +++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61210 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61211 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61212 u8 dir, flow_resolve_t resolver);
61213 extern void flow_cache_flush(void);
61214 -extern atomic_t flow_cache_genid;
61215 +extern atomic_unchecked_t flow_cache_genid;
61216
61217 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61218 {
61219 diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61220 --- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61221 +++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61222 @@ -24,7 +24,7 @@ struct inet_peer
61223 __u32 dtime; /* the time of last use of not
61224 * referenced entries */
61225 atomic_t refcnt;
61226 - atomic_t rid; /* Frag reception counter */
61227 + atomic_unchecked_t rid; /* Frag reception counter */
61228 __u32 tcp_ts;
61229 unsigned long tcp_ts_stamp;
61230 };
61231 diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61232 --- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61233 +++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61234 @@ -365,7 +365,7 @@ struct ip_vs_conn {
61235 struct ip_vs_conn *control; /* Master control connection */
61236 atomic_t n_control; /* Number of controlled ones */
61237 struct ip_vs_dest *dest; /* real server */
61238 - atomic_t in_pkts; /* incoming packet counter */
61239 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61240
61241 /* packet transmitter for different forwarding methods. If it
61242 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61243 @@ -466,7 +466,7 @@ struct ip_vs_dest {
61244 union nf_inet_addr addr; /* IP address of the server */
61245 __be16 port; /* port number of the server */
61246 volatile unsigned flags; /* dest status flags */
61247 - atomic_t conn_flags; /* flags to copy to conn */
61248 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61249 atomic_t weight; /* server weight */
61250
61251 atomic_t refcnt; /* reference counter */
61252 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61253 --- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61254 +++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61255 @@ -51,7 +51,7 @@ typedef struct {
61256 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61257 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61258 struct ircomm_info *);
61259 -} call_t;
61260 +} __no_const call_t;
61261
61262 struct ircomm_cb {
61263 irda_queue_t queue;
61264 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61265 --- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61266 +++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61267 @@ -35,6 +35,7 @@
61268 #include <linux/termios.h>
61269 #include <linux/timer.h>
61270 #include <linux/tty.h> /* struct tty_struct */
61271 +#include <asm/local.h>
61272
61273 #include <net/irda/irias_object.h>
61274 #include <net/irda/ircomm_core.h>
61275 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61276 unsigned short close_delay;
61277 unsigned short closing_wait; /* time to wait before closing */
61278
61279 - int open_count;
61280 - int blocked_open; /* # of blocked opens */
61281 + local_t open_count;
61282 + local_t blocked_open; /* # of blocked opens */
61283
61284 /* Protect concurent access to :
61285 * o self->open_count
61286 diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61287 --- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61288 +++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61289 @@ -87,7 +87,7 @@ struct iucv_sock {
61290 struct iucv_sock_list {
61291 struct hlist_head head;
61292 rwlock_t lock;
61293 - atomic_t autobind_name;
61294 + atomic_unchecked_t autobind_name;
61295 };
61296
61297 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61298 diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61299 --- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61300 +++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61301 @@ -95,7 +95,7 @@ struct lapb_cb {
61302 struct sk_buff_head write_queue;
61303 struct sk_buff_head ack_queue;
61304 unsigned char window;
61305 - struct lapb_register_struct callbacks;
61306 + struct lapb_register_struct *callbacks;
61307
61308 /* FRMR control information */
61309 struct lapb_frame frmr_data;
61310 diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61311 --- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61312 +++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61313 @@ -125,12 +125,12 @@ struct neighbour
61314 struct neigh_ops
61315 {
61316 int family;
61317 - void (*solicit)(struct neighbour *, struct sk_buff*);
61318 - void (*error_report)(struct neighbour *, struct sk_buff*);
61319 - int (*output)(struct sk_buff*);
61320 - int (*connected_output)(struct sk_buff*);
61321 - int (*hh_output)(struct sk_buff*);
61322 - int (*queue_xmit)(struct sk_buff*);
61323 + void (* const solicit)(struct neighbour *, struct sk_buff*);
61324 + void (* const error_report)(struct neighbour *, struct sk_buff*);
61325 + int (* const output)(struct sk_buff*);
61326 + int (* const connected_output)(struct sk_buff*);
61327 + int (* const hh_output)(struct sk_buff*);
61328 + int (* const queue_xmit)(struct sk_buff*);
61329 };
61330
61331 struct pneigh_entry
61332 diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61333 --- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61334 +++ linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
61335 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61336 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61337 {
61338 if (mark)
61339 - skb_trim(skb, (unsigned char *) mark - skb->data);
61340 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61341 }
61342
61343 /**
61344 diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61345 --- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61346 +++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61347 @@ -54,7 +54,7 @@ struct netns_ipv4 {
61348 int current_rt_cache_rebuild_count;
61349
61350 struct timer_list rt_secret_timer;
61351 - atomic_t rt_genid;
61352 + atomic_unchecked_t rt_genid;
61353
61354 #ifdef CONFIG_IP_MROUTE
61355 struct sock *mroute_sk;
61356 diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61357 --- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61358 +++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61359 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61360
61361 #else /* SCTP_DEBUG */
61362
61363 -#define SCTP_DEBUG_PRINTK(whatever...)
61364 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61365 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61366 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61367 #define SCTP_ENABLE_DEBUG
61368 #define SCTP_DISABLE_DEBUG
61369 #define SCTP_ASSERT(expr, str, func)
61370 diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61371 --- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61372 +++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61373 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61374 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61375 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61376 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61377 - __be16 dport);
61378 + __be16 dport);
61379 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61380 __be16 sport, __be16 dport);
61381 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61382 - __be16 sport, __be16 dport);
61383 + __be16 sport, __be16 dport);
61384 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61385 - __be16 sport, __be16 dport);
61386 + __be16 sport, __be16 dport);
61387 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61388 - __be16 sport, __be16 dport);
61389 + __be16 sport, __be16 dport);
61390
61391 #endif /* _NET_SECURE_SEQ */
61392 diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61393 --- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61394 +++ linux-2.6.32.45/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
61395 @@ -272,7 +272,7 @@ struct sock {
61396 rwlock_t sk_callback_lock;
61397 int sk_err,
61398 sk_err_soft;
61399 - atomic_t sk_drops;
61400 + atomic_unchecked_t sk_drops;
61401 unsigned short sk_ack_backlog;
61402 unsigned short sk_max_ack_backlog;
61403 __u32 sk_priority;
61404 diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61405 --- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61406 +++ linux-2.6.32.45/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61407 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61408 struct tcp_seq_afinfo {
61409 char *name;
61410 sa_family_t family;
61411 + /* cannot be const */
61412 struct file_operations seq_fops;
61413 struct seq_operations seq_ops;
61414 };
61415 diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61416 --- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61417 +++ linux-2.6.32.45/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61418 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61419 char *name;
61420 sa_family_t family;
61421 struct udp_table *udp_table;
61422 + /* cannot be const */
61423 struct file_operations seq_fops;
61424 struct seq_operations seq_ops;
61425 };
61426 diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61427 --- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61428 +++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61429 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
61430 int backlog);
61431
61432 int (*destroy_listen)(struct iw_cm_id *cm_id);
61433 -};
61434 +} __no_const;
61435
61436 /**
61437 * iw_create_cm_id - Create an IW CM identifier.
61438 diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61439 --- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61440 +++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61441 @@ -156,9 +156,9 @@ struct scsi_device {
61442 unsigned int max_device_blocked; /* what device_blocked counts down from */
61443 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61444
61445 - atomic_t iorequest_cnt;
61446 - atomic_t iodone_cnt;
61447 - atomic_t ioerr_cnt;
61448 + atomic_unchecked_t iorequest_cnt;
61449 + atomic_unchecked_t iodone_cnt;
61450 + atomic_unchecked_t ioerr_cnt;
61451
61452 struct device sdev_gendev,
61453 sdev_dev;
61454 diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61455 --- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61456 +++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61457 @@ -663,9 +663,9 @@ struct fc_function_template {
61458 int (*bsg_timeout)(struct fc_bsg_job *);
61459
61460 /* allocation lengths for host-specific data */
61461 - u32 dd_fcrport_size;
61462 - u32 dd_fcvport_size;
61463 - u32 dd_bsg_size;
61464 + const u32 dd_fcrport_size;
61465 + const u32 dd_fcvport_size;
61466 + const u32 dd_bsg_size;
61467
61468 /*
61469 * The driver sets these to tell the transport class it
61470 @@ -675,39 +675,39 @@ struct fc_function_template {
61471 */
61472
61473 /* remote port fixed attributes */
61474 - unsigned long show_rport_maxframe_size:1;
61475 - unsigned long show_rport_supported_classes:1;
61476 - unsigned long show_rport_dev_loss_tmo:1;
61477 + const unsigned long show_rport_maxframe_size:1;
61478 + const unsigned long show_rport_supported_classes:1;
61479 + const unsigned long show_rport_dev_loss_tmo:1;
61480
61481 /*
61482 * target dynamic attributes
61483 * These should all be "1" if the driver uses the remote port
61484 * add/delete functions (so attributes reflect rport values).
61485 */
61486 - unsigned long show_starget_node_name:1;
61487 - unsigned long show_starget_port_name:1;
61488 - unsigned long show_starget_port_id:1;
61489 + const unsigned long show_starget_node_name:1;
61490 + const unsigned long show_starget_port_name:1;
61491 + const unsigned long show_starget_port_id:1;
61492
61493 /* host fixed attributes */
61494 - unsigned long show_host_node_name:1;
61495 - unsigned long show_host_port_name:1;
61496 - unsigned long show_host_permanent_port_name:1;
61497 - unsigned long show_host_supported_classes:1;
61498 - unsigned long show_host_supported_fc4s:1;
61499 - unsigned long show_host_supported_speeds:1;
61500 - unsigned long show_host_maxframe_size:1;
61501 - unsigned long show_host_serial_number:1;
61502 + const unsigned long show_host_node_name:1;
61503 + const unsigned long show_host_port_name:1;
61504 + const unsigned long show_host_permanent_port_name:1;
61505 + const unsigned long show_host_supported_classes:1;
61506 + const unsigned long show_host_supported_fc4s:1;
61507 + const unsigned long show_host_supported_speeds:1;
61508 + const unsigned long show_host_maxframe_size:1;
61509 + const unsigned long show_host_serial_number:1;
61510 /* host dynamic attributes */
61511 - unsigned long show_host_port_id:1;
61512 - unsigned long show_host_port_type:1;
61513 - unsigned long show_host_port_state:1;
61514 - unsigned long show_host_active_fc4s:1;
61515 - unsigned long show_host_speed:1;
61516 - unsigned long show_host_fabric_name:1;
61517 - unsigned long show_host_symbolic_name:1;
61518 - unsigned long show_host_system_hostname:1;
61519 + const unsigned long show_host_port_id:1;
61520 + const unsigned long show_host_port_type:1;
61521 + const unsigned long show_host_port_state:1;
61522 + const unsigned long show_host_active_fc4s:1;
61523 + const unsigned long show_host_speed:1;
61524 + const unsigned long show_host_fabric_name:1;
61525 + const unsigned long show_host_symbolic_name:1;
61526 + const unsigned long show_host_system_hostname:1;
61527
61528 - unsigned long disable_target_scan:1;
61529 + const unsigned long disable_target_scan:1;
61530 };
61531
61532
61533 diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61534 --- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61535 +++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61536 @@ -419,15 +419,15 @@
61537 struct snd_ac97;
61538
61539 struct snd_ac97_build_ops {
61540 - int (*build_3d) (struct snd_ac97 *ac97);
61541 - int (*build_specific) (struct snd_ac97 *ac97);
61542 - int (*build_spdif) (struct snd_ac97 *ac97);
61543 - int (*build_post_spdif) (struct snd_ac97 *ac97);
61544 + int (* const build_3d) (struct snd_ac97 *ac97);
61545 + int (* const build_specific) (struct snd_ac97 *ac97);
61546 + int (* const build_spdif) (struct snd_ac97 *ac97);
61547 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
61548 #ifdef CONFIG_PM
61549 - void (*suspend) (struct snd_ac97 *ac97);
61550 - void (*resume) (struct snd_ac97 *ac97);
61551 + void (* const suspend) (struct snd_ac97 *ac97);
61552 + void (* const resume) (struct snd_ac97 *ac97);
61553 #endif
61554 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61555 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61556 };
61557
61558 struct snd_ac97_bus_ops {
61559 @@ -477,7 +477,7 @@ struct snd_ac97_template {
61560
61561 struct snd_ac97 {
61562 /* -- lowlevel (hardware) driver specific -- */
61563 - struct snd_ac97_build_ops * build_ops;
61564 + const struct snd_ac97_build_ops * build_ops;
61565 void *private_data;
61566 void (*private_free) (struct snd_ac97 *ac97);
61567 /* --- */
61568 diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61569 --- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61570 +++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61571 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61572 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61573 unsigned char val);
61574 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61575 -};
61576 +} __no_const;
61577
61578 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61579
61580 diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61581 --- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61582 +++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61583 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61584 struct snd_hwdep_dsp_status *status);
61585 int (*dsp_load)(struct snd_hwdep *hw,
61586 struct snd_hwdep_dsp_image *image);
61587 -};
61588 +} __no_const;
61589
61590 struct snd_hwdep {
61591 struct snd_card *card;
61592 diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61593 --- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61594 +++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61595 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61596 struct snd_info_buffer *buffer);
61597 void (*write)(struct snd_info_entry *entry,
61598 struct snd_info_buffer *buffer);
61599 -};
61600 +} __no_const;
61601
61602 struct snd_info_entry_ops {
61603 int (*open)(struct snd_info_entry *entry,
61604 diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61605 --- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61606 +++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61607 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61608 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61609 int (*csp_stop) (struct snd_sb_csp * p);
61610 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61611 -};
61612 +} __no_const;
61613
61614 /*
61615 * CSP private data
61616 diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61617 --- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61618 +++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61619 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61620 spinlock_t reg_lock;
61621 spinlock_t voice_lock;
61622 wait_queue_head_t interrupt_sleep;
61623 - atomic_t interrupt_sleep_count;
61624 + atomic_unchecked_t interrupt_sleep_count;
61625 struct snd_info_entry *proc_entry;
61626 const struct firmware *dsp_microcode;
61627 const struct firmware *controller_microcode;
61628 diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61629 --- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61630 +++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61631 @@ -34,7 +34,7 @@
61632 */
61633 TRACE_EVENT(irq_handler_entry,
61634
61635 - TP_PROTO(int irq, struct irqaction *action),
61636 + TP_PROTO(int irq, const struct irqaction *action),
61637
61638 TP_ARGS(irq, action),
61639
61640 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61641 */
61642 TRACE_EVENT(irq_handler_exit,
61643
61644 - TP_PROTO(int irq, struct irqaction *action, int ret),
61645 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61646
61647 TP_ARGS(irq, action, ret),
61648
61649 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61650 */
61651 TRACE_EVENT(softirq_entry,
61652
61653 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61654 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61655
61656 TP_ARGS(h, vec),
61657
61658 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61659 */
61660 TRACE_EVENT(softirq_exit,
61661
61662 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61663 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61664
61665 TP_ARGS(h, vec),
61666
61667 diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61668 --- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61669 +++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61670 @@ -177,6 +177,7 @@ struct uvesafb_par {
61671 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61672 u8 pmi_setpal; /* PMI for palette changes */
61673 u16 *pmi_base; /* protected mode interface location */
61674 + u8 *pmi_code; /* protected mode code location */
61675 void *pmi_start;
61676 void *pmi_pal;
61677 u8 *vbe_state_orig; /*
61678 diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61679 --- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61680 +++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61681 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61682
61683 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61684 {
61685 - int err = sys_mount(name, "/root", fs, flags, data);
61686 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61687 if (err)
61688 return err;
61689
61690 - sys_chdir("/root");
61691 + sys_chdir((__force const char __user *)"/root");
61692 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61693 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61694 current->fs->pwd.mnt->mnt_sb->s_type->name,
61695 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61696 va_start(args, fmt);
61697 vsprintf(buf, fmt, args);
61698 va_end(args);
61699 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61700 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61701 if (fd >= 0) {
61702 sys_ioctl(fd, FDEJECT, 0);
61703 sys_close(fd);
61704 }
61705 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61706 - fd = sys_open("/dev/console", O_RDWR, 0);
61707 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61708 if (fd >= 0) {
61709 sys_ioctl(fd, TCGETS, (long)&termios);
61710 termios.c_lflag &= ~ICANON;
61711 sys_ioctl(fd, TCSETSF, (long)&termios);
61712 - sys_read(fd, &c, 1);
61713 + sys_read(fd, (char __user *)&c, 1);
61714 termios.c_lflag |= ICANON;
61715 sys_ioctl(fd, TCSETSF, (long)&termios);
61716 sys_close(fd);
61717 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61718 mount_root();
61719 out:
61720 devtmpfs_mount("dev");
61721 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61722 - sys_chroot(".");
61723 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61724 + sys_chroot((__force char __user *)".");
61725 }
61726 diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61727 --- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61728 +++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61729 @@ -15,15 +15,15 @@ extern int root_mountflags;
61730
61731 static inline int create_dev(char *name, dev_t dev)
61732 {
61733 - sys_unlink(name);
61734 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61735 + sys_unlink((__force char __user *)name);
61736 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61737 }
61738
61739 #if BITS_PER_LONG == 32
61740 static inline u32 bstat(char *name)
61741 {
61742 struct stat64 stat;
61743 - if (sys_stat64(name, &stat) != 0)
61744 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61745 return 0;
61746 if (!S_ISBLK(stat.st_mode))
61747 return 0;
61748 diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61749 --- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61750 +++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61751 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61752 sys_close(old_fd);sys_close(root_fd);
61753 sys_close(0);sys_close(1);sys_close(2);
61754 sys_setsid();
61755 - (void) sys_open("/dev/console",O_RDWR,0);
61756 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61757 (void) sys_dup(0);
61758 (void) sys_dup(0);
61759 return kernel_execve(shell, argv, envp_init);
61760 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61761 create_dev("/dev/root.old", Root_RAM0);
61762 /* mount initrd on rootfs' /root */
61763 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61764 - sys_mkdir("/old", 0700);
61765 - root_fd = sys_open("/", 0, 0);
61766 - old_fd = sys_open("/old", 0, 0);
61767 + sys_mkdir((__force const char __user *)"/old", 0700);
61768 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
61769 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61770 /* move initrd over / and chdir/chroot in initrd root */
61771 - sys_chdir("/root");
61772 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61773 - sys_chroot(".");
61774 + sys_chdir((__force const char __user *)"/root");
61775 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61776 + sys_chroot((__force const char __user *)".");
61777
61778 /*
61779 * In case that a resume from disk is carried out by linuxrc or one of
61780 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61781
61782 /* move initrd to rootfs' /old */
61783 sys_fchdir(old_fd);
61784 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61785 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61786 /* switch root and cwd back to / of rootfs */
61787 sys_fchdir(root_fd);
61788 - sys_chroot(".");
61789 + sys_chroot((__force const char __user *)".");
61790 sys_close(old_fd);
61791 sys_close(root_fd);
61792
61793 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61794 - sys_chdir("/old");
61795 + sys_chdir((__force const char __user *)"/old");
61796 return;
61797 }
61798
61799 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61800 mount_root();
61801
61802 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61803 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61804 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61805 if (!error)
61806 printk("okay\n");
61807 else {
61808 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61809 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61810 if (error == -ENOENT)
61811 printk("/initrd does not exist. Ignored.\n");
61812 else
61813 printk("failed\n");
61814 printk(KERN_NOTICE "Unmounting old root\n");
61815 - sys_umount("/old", MNT_DETACH);
61816 + sys_umount((__force char __user *)"/old", MNT_DETACH);
61817 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61818 if (fd < 0) {
61819 error = fd;
61820 @@ -119,11 +119,11 @@ int __init initrd_load(void)
61821 * mounted in the normal path.
61822 */
61823 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61824 - sys_unlink("/initrd.image");
61825 + sys_unlink((__force const char __user *)"/initrd.image");
61826 handle_initrd();
61827 return 1;
61828 }
61829 }
61830 - sys_unlink("/initrd.image");
61831 + sys_unlink((__force const char __user *)"/initrd.image");
61832 return 0;
61833 }
61834 diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61835 --- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61836 +++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61837 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61838 partitioned ? "_d" : "", minor,
61839 md_setup_args[ent].device_names);
61840
61841 - fd = sys_open(name, 0, 0);
61842 + fd = sys_open((__force char __user *)name, 0, 0);
61843 if (fd < 0) {
61844 printk(KERN_ERR "md: open failed - cannot start "
61845 "array %s\n", name);
61846 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61847 * array without it
61848 */
61849 sys_close(fd);
61850 - fd = sys_open(name, 0, 0);
61851 + fd = sys_open((__force char __user *)name, 0, 0);
61852 sys_ioctl(fd, BLKRRPART, 0);
61853 }
61854 sys_close(fd);
61855 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61856
61857 wait_for_device_probe();
61858
61859 - fd = sys_open("/dev/md0", 0, 0);
61860 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61861 if (fd >= 0) {
61862 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61863 sys_close(fd);
61864 diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61865 --- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61866 +++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61867 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61868 }
61869 }
61870
61871 -static long __init do_utime(char __user *filename, time_t mtime)
61872 +static long __init do_utime(__force char __user *filename, time_t mtime)
61873 {
61874 struct timespec t[2];
61875
61876 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61877 struct dir_entry *de, *tmp;
61878 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61879 list_del(&de->list);
61880 - do_utime(de->name, de->mtime);
61881 + do_utime((__force char __user *)de->name, de->mtime);
61882 kfree(de->name);
61883 kfree(de);
61884 }
61885 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61886 if (nlink >= 2) {
61887 char *old = find_link(major, minor, ino, mode, collected);
61888 if (old)
61889 - return (sys_link(old, collected) < 0) ? -1 : 1;
61890 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61891 }
61892 return 0;
61893 }
61894 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61895 {
61896 struct stat st;
61897
61898 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61899 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61900 if (S_ISDIR(st.st_mode))
61901 - sys_rmdir(path);
61902 + sys_rmdir((__force char __user *)path);
61903 else
61904 - sys_unlink(path);
61905 + sys_unlink((__force char __user *)path);
61906 }
61907 }
61908
61909 @@ -305,7 +305,7 @@ static int __init do_name(void)
61910 int openflags = O_WRONLY|O_CREAT;
61911 if (ml != 1)
61912 openflags |= O_TRUNC;
61913 - wfd = sys_open(collected, openflags, mode);
61914 + wfd = sys_open((__force char __user *)collected, openflags, mode);
61915
61916 if (wfd >= 0) {
61917 sys_fchown(wfd, uid, gid);
61918 @@ -317,17 +317,17 @@ static int __init do_name(void)
61919 }
61920 }
61921 } else if (S_ISDIR(mode)) {
61922 - sys_mkdir(collected, mode);
61923 - sys_chown(collected, uid, gid);
61924 - sys_chmod(collected, mode);
61925 + sys_mkdir((__force char __user *)collected, mode);
61926 + sys_chown((__force char __user *)collected, uid, gid);
61927 + sys_chmod((__force char __user *)collected, mode);
61928 dir_add(collected, mtime);
61929 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61930 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61931 if (maybe_link() == 0) {
61932 - sys_mknod(collected, mode, rdev);
61933 - sys_chown(collected, uid, gid);
61934 - sys_chmod(collected, mode);
61935 - do_utime(collected, mtime);
61936 + sys_mknod((__force char __user *)collected, mode, rdev);
61937 + sys_chown((__force char __user *)collected, uid, gid);
61938 + sys_chmod((__force char __user *)collected, mode);
61939 + do_utime((__force char __user *)collected, mtime);
61940 }
61941 }
61942 return 0;
61943 @@ -336,15 +336,15 @@ static int __init do_name(void)
61944 static int __init do_copy(void)
61945 {
61946 if (count >= body_len) {
61947 - sys_write(wfd, victim, body_len);
61948 + sys_write(wfd, (__force char __user *)victim, body_len);
61949 sys_close(wfd);
61950 - do_utime(vcollected, mtime);
61951 + do_utime((__force char __user *)vcollected, mtime);
61952 kfree(vcollected);
61953 eat(body_len);
61954 state = SkipIt;
61955 return 0;
61956 } else {
61957 - sys_write(wfd, victim, count);
61958 + sys_write(wfd, (__force char __user *)victim, count);
61959 body_len -= count;
61960 eat(count);
61961 return 1;
61962 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
61963 {
61964 collected[N_ALIGN(name_len) + body_len] = '\0';
61965 clean_path(collected, 0);
61966 - sys_symlink(collected + N_ALIGN(name_len), collected);
61967 - sys_lchown(collected, uid, gid);
61968 - do_utime(collected, mtime);
61969 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61970 + sys_lchown((__force char __user *)collected, uid, gid);
61971 + do_utime((__force char __user *)collected, mtime);
61972 state = SkipIt;
61973 next_state = Reset;
61974 return 0;
61975 diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
61976 --- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61977 +++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61978 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61979
61980 config COMPAT_BRK
61981 bool "Disable heap randomization"
61982 - default y
61983 + default n
61984 help
61985 Randomizing heap placement makes heap exploits harder, but it
61986 also breaks ancient binaries (including anything libc5 based).
61987 diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
61988 --- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
61989 +++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
61990 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61991 #ifdef CONFIG_TC
61992 extern void tc_init(void);
61993 #endif
61994 +extern void grsecurity_init(void);
61995
61996 enum system_states system_state __read_mostly;
61997 EXPORT_SYMBOL(system_state);
61998 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61999
62000 __setup("reset_devices", set_reset_devices);
62001
62002 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62003 +extern char pax_enter_kernel_user[];
62004 +extern char pax_exit_kernel_user[];
62005 +extern pgdval_t clone_pgd_mask;
62006 +#endif
62007 +
62008 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62009 +static int __init setup_pax_nouderef(char *str)
62010 +{
62011 +#ifdef CONFIG_X86_32
62012 + unsigned int cpu;
62013 + struct desc_struct *gdt;
62014 +
62015 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62016 + gdt = get_cpu_gdt_table(cpu);
62017 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62018 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62019 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62020 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62021 + }
62022 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62023 +#else
62024 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62025 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62026 + clone_pgd_mask = ~(pgdval_t)0UL;
62027 +#endif
62028 +
62029 + return 0;
62030 +}
62031 +early_param("pax_nouderef", setup_pax_nouderef);
62032 +#endif
62033 +
62034 +#ifdef CONFIG_PAX_SOFTMODE
62035 +int pax_softmode;
62036 +
62037 +static int __init setup_pax_softmode(char *str)
62038 +{
62039 + get_option(&str, &pax_softmode);
62040 + return 1;
62041 +}
62042 +__setup("pax_softmode=", setup_pax_softmode);
62043 +#endif
62044 +
62045 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62046 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62047 static const char *panic_later, *panic_param;
62048 @@ -705,52 +749,53 @@ int initcall_debug;
62049 core_param(initcall_debug, initcall_debug, bool, 0644);
62050
62051 static char msgbuf[64];
62052 -static struct boot_trace_call call;
62053 -static struct boot_trace_ret ret;
62054 +static struct boot_trace_call trace_call;
62055 +static struct boot_trace_ret trace_ret;
62056
62057 int do_one_initcall(initcall_t fn)
62058 {
62059 int count = preempt_count();
62060 ktime_t calltime, delta, rettime;
62061 + const char *msg1 = "", *msg2 = "";
62062
62063 if (initcall_debug) {
62064 - call.caller = task_pid_nr(current);
62065 - printk("calling %pF @ %i\n", fn, call.caller);
62066 + trace_call.caller = task_pid_nr(current);
62067 + printk("calling %pF @ %i\n", fn, trace_call.caller);
62068 calltime = ktime_get();
62069 - trace_boot_call(&call, fn);
62070 + trace_boot_call(&trace_call, fn);
62071 enable_boot_trace();
62072 }
62073
62074 - ret.result = fn();
62075 + trace_ret.result = fn();
62076
62077 if (initcall_debug) {
62078 disable_boot_trace();
62079 rettime = ktime_get();
62080 delta = ktime_sub(rettime, calltime);
62081 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62082 - trace_boot_ret(&ret, fn);
62083 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62084 + trace_boot_ret(&trace_ret, fn);
62085 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62086 - ret.result, ret.duration);
62087 + trace_ret.result, trace_ret.duration);
62088 }
62089
62090 msgbuf[0] = 0;
62091
62092 - if (ret.result && ret.result != -ENODEV && initcall_debug)
62093 - sprintf(msgbuf, "error code %d ", ret.result);
62094 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62095 + sprintf(msgbuf, "error code %d ", trace_ret.result);
62096
62097 if (preempt_count() != count) {
62098 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62099 + msg1 = " preemption imbalance";
62100 preempt_count() = count;
62101 }
62102 if (irqs_disabled()) {
62103 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62104 + msg2 = " disabled interrupts";
62105 local_irq_enable();
62106 }
62107 - if (msgbuf[0]) {
62108 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62109 + if (msgbuf[0] || *msg1 || *msg2) {
62110 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62111 }
62112
62113 - return ret.result;
62114 + return trace_ret.result;
62115 }
62116
62117
62118 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62119 if (!ramdisk_execute_command)
62120 ramdisk_execute_command = "/init";
62121
62122 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62123 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62124 ramdisk_execute_command = NULL;
62125 prepare_namespace();
62126 }
62127
62128 + grsecurity_init();
62129 +
62130 /*
62131 * Ok, we have completed the initial bootup, and
62132 * we're essentially up and running. Get rid of the
62133 diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
62134 --- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62135 +++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62136 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62137 {
62138 int err;
62139
62140 - err = sys_mkdir("/dev", 0755);
62141 + err = sys_mkdir((const char __user *)"/dev", 0755);
62142 if (err < 0)
62143 goto out;
62144
62145 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62146 if (err < 0)
62147 goto out;
62148
62149 - err = sys_mkdir("/root", 0700);
62150 + err = sys_mkdir((const char __user *)"/root", 0700);
62151 if (err < 0)
62152 goto out;
62153
62154 diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
62155 --- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62156 +++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62157 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62158 mq_bytes = (mq_msg_tblsz +
62159 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62160
62161 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62162 spin_lock(&mq_lock);
62163 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62164 u->mq_bytes + mq_bytes >
62165 diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
62166 --- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62167 +++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62168 @@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62169 return security_msg_queue_associate(msq, msgflg);
62170 }
62171
62172 +static struct ipc_ops msg_ops = {
62173 + .getnew = newque,
62174 + .associate = msg_security,
62175 + .more_checks = NULL
62176 +};
62177 +
62178 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62179 {
62180 struct ipc_namespace *ns;
62181 - struct ipc_ops msg_ops;
62182 struct ipc_params msg_params;
62183
62184 ns = current->nsproxy->ipc_ns;
62185
62186 - msg_ops.getnew = newque;
62187 - msg_ops.associate = msg_security;
62188 - msg_ops.more_checks = NULL;
62189 -
62190 msg_params.key = key;
62191 msg_params.flg = msgflg;
62192
62193 diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62194 --- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62195 +++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62196 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62197 return 0;
62198 }
62199
62200 +static struct ipc_ops sem_ops = {
62201 + .getnew = newary,
62202 + .associate = sem_security,
62203 + .more_checks = sem_more_checks
62204 +};
62205 +
62206 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62207 {
62208 struct ipc_namespace *ns;
62209 - struct ipc_ops sem_ops;
62210 struct ipc_params sem_params;
62211
62212 ns = current->nsproxy->ipc_ns;
62213 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62214 if (nsems < 0 || nsems > ns->sc_semmsl)
62215 return -EINVAL;
62216
62217 - sem_ops.getnew = newary;
62218 - sem_ops.associate = sem_security;
62219 - sem_ops.more_checks = sem_more_checks;
62220 -
62221 sem_params.key = key;
62222 sem_params.flg = semflg;
62223 sem_params.u.nsems = nsems;
62224 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62225 ushort* sem_io = fast_sem_io;
62226 int nsems;
62227
62228 + pax_track_stack();
62229 +
62230 sma = sem_lock_check(ns, semid);
62231 if (IS_ERR(sma))
62232 return PTR_ERR(sma);
62233 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62234 unsigned long jiffies_left = 0;
62235 struct ipc_namespace *ns;
62236
62237 + pax_track_stack();
62238 +
62239 ns = current->nsproxy->ipc_ns;
62240
62241 if (nsops < 1 || semid < 0)
62242 diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62243 --- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62244 +++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62245 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62246 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62247 #endif
62248
62249 +#ifdef CONFIG_GRKERNSEC
62250 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62251 + const time_t shm_createtime, const uid_t cuid,
62252 + const int shmid);
62253 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62254 + const time_t shm_createtime);
62255 +#endif
62256 +
62257 void shm_init_ns(struct ipc_namespace *ns)
62258 {
62259 ns->shm_ctlmax = SHMMAX;
62260 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62261 shp->shm_lprid = 0;
62262 shp->shm_atim = shp->shm_dtim = 0;
62263 shp->shm_ctim = get_seconds();
62264 +#ifdef CONFIG_GRKERNSEC
62265 + {
62266 + struct timespec timeval;
62267 + do_posix_clock_monotonic_gettime(&timeval);
62268 +
62269 + shp->shm_createtime = timeval.tv_sec;
62270 + }
62271 +#endif
62272 shp->shm_segsz = size;
62273 shp->shm_nattch = 0;
62274 shp->shm_file = file;
62275 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62276 return 0;
62277 }
62278
62279 +static struct ipc_ops shm_ops = {
62280 + .getnew = newseg,
62281 + .associate = shm_security,
62282 + .more_checks = shm_more_checks
62283 +};
62284 +
62285 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62286 {
62287 struct ipc_namespace *ns;
62288 - struct ipc_ops shm_ops;
62289 struct ipc_params shm_params;
62290
62291 ns = current->nsproxy->ipc_ns;
62292
62293 - shm_ops.getnew = newseg;
62294 - shm_ops.associate = shm_security;
62295 - shm_ops.more_checks = shm_more_checks;
62296 -
62297 shm_params.key = key;
62298 shm_params.flg = shmflg;
62299 shm_params.u.size = size;
62300 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62301 if (err)
62302 goto out_unlock;
62303
62304 +#ifdef CONFIG_GRKERNSEC
62305 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62306 + shp->shm_perm.cuid, shmid) ||
62307 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62308 + err = -EACCES;
62309 + goto out_unlock;
62310 + }
62311 +#endif
62312 +
62313 path.dentry = dget(shp->shm_file->f_path.dentry);
62314 path.mnt = shp->shm_file->f_path.mnt;
62315 shp->shm_nattch++;
62316 +#ifdef CONFIG_GRKERNSEC
62317 + shp->shm_lapid = current->pid;
62318 +#endif
62319 size = i_size_read(path.dentry->d_inode);
62320 shm_unlock(shp);
62321
62322 diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62323 --- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62324 +++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62325 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62326 */
62327 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62328 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62329 - file->f_op->write(file, (char *)&ac,
62330 + file->f_op->write(file, (__force char __user *)&ac,
62331 sizeof(acct_t), &file->f_pos);
62332 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62333 set_fs(fs);
62334 diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62335 --- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62336 +++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62337 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62338 3) suppressed due to audit_rate_limit
62339 4) suppressed due to audit_backlog_limit
62340 */
62341 -static atomic_t audit_lost = ATOMIC_INIT(0);
62342 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62343
62344 /* The netlink socket. */
62345 static struct sock *audit_sock;
62346 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62347 unsigned long now;
62348 int print;
62349
62350 - atomic_inc(&audit_lost);
62351 + atomic_inc_unchecked(&audit_lost);
62352
62353 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62354
62355 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62356 printk(KERN_WARNING
62357 "audit: audit_lost=%d audit_rate_limit=%d "
62358 "audit_backlog_limit=%d\n",
62359 - atomic_read(&audit_lost),
62360 + atomic_read_unchecked(&audit_lost),
62361 audit_rate_limit,
62362 audit_backlog_limit);
62363 audit_panic(message);
62364 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62365 status_set.pid = audit_pid;
62366 status_set.rate_limit = audit_rate_limit;
62367 status_set.backlog_limit = audit_backlog_limit;
62368 - status_set.lost = atomic_read(&audit_lost);
62369 + status_set.lost = atomic_read_unchecked(&audit_lost);
62370 status_set.backlog = skb_queue_len(&audit_skb_queue);
62371 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62372 &status_set, sizeof(status_set));
62373 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62374 spin_unlock_irq(&tsk->sighand->siglock);
62375 }
62376 read_unlock(&tasklist_lock);
62377 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62378 - &s, sizeof(s));
62379 +
62380 + if (!err)
62381 + audit_send_reply(NETLINK_CB(skb).pid, seq,
62382 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62383 break;
62384 }
62385 case AUDIT_TTY_SET: {
62386 diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62387 --- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62388 +++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62389 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62390 }
62391
62392 /* global counter which is incremented every time something logs in */
62393 -static atomic_t session_id = ATOMIC_INIT(0);
62394 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62395
62396 /**
62397 * audit_set_loginuid - set a task's audit_context loginuid
62398 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62399 */
62400 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62401 {
62402 - unsigned int sessionid = atomic_inc_return(&session_id);
62403 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62404 struct audit_context *context = task->audit_context;
62405
62406 if (context && context->in_syscall) {
62407 diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62408 --- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62409 +++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62410 @@ -305,10 +305,26 @@ int capable(int cap)
62411 BUG();
62412 }
62413
62414 - if (security_capable(cap) == 0) {
62415 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62416 current->flags |= PF_SUPERPRIV;
62417 return 1;
62418 }
62419 return 0;
62420 }
62421 +
62422 +int capable_nolog(int cap)
62423 +{
62424 + if (unlikely(!cap_valid(cap))) {
62425 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62426 + BUG();
62427 + }
62428 +
62429 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62430 + current->flags |= PF_SUPERPRIV;
62431 + return 1;
62432 + }
62433 + return 0;
62434 +}
62435 +
62436 EXPORT_SYMBOL(capable);
62437 +EXPORT_SYMBOL(capable_nolog);
62438 diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62439 --- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62440 +++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62441 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62442 struct hlist_head *hhead;
62443 struct cg_cgroup_link *link;
62444
62445 + pax_track_stack();
62446 +
62447 /* First see if we already have a cgroup group that matches
62448 * the desired set */
62449 read_lock(&css_set_lock);
62450 diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62451 --- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62452 +++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62453 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62454 struct proc_dir_entry *entry;
62455
62456 /* create the current config file */
62457 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62458 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62459 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62460 + &ikconfig_file_ops);
62461 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62462 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62463 + &ikconfig_file_ops);
62464 +#endif
62465 +#else
62466 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62467 &ikconfig_file_ops);
62468 +#endif
62469 +
62470 if (!entry)
62471 return -ENOMEM;
62472
62473 diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62474 --- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62475 +++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62476 @@ -19,7 +19,7 @@
62477 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62478 static DEFINE_MUTEX(cpu_add_remove_lock);
62479
62480 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62481 +static RAW_NOTIFIER_HEAD(cpu_chain);
62482
62483 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62484 * Should always be manipulated under cpu_add_remove_lock
62485 diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62486 --- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62487 +++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62488 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62489 */
62490 void __put_cred(struct cred *cred)
62491 {
62492 + pax_track_stack();
62493 +
62494 kdebug("__put_cred(%p{%d,%d})", cred,
62495 atomic_read(&cred->usage),
62496 read_cred_subscribers(cred));
62497 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62498 {
62499 struct cred *cred;
62500
62501 + pax_track_stack();
62502 +
62503 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62504 atomic_read(&tsk->cred->usage),
62505 read_cred_subscribers(tsk->cred));
62506 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62507 {
62508 const struct cred *cred;
62509
62510 + pax_track_stack();
62511 +
62512 rcu_read_lock();
62513
62514 do {
62515 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62516 {
62517 struct cred *new;
62518
62519 + pax_track_stack();
62520 +
62521 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62522 if (!new)
62523 return NULL;
62524 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62525 const struct cred *old;
62526 struct cred *new;
62527
62528 + pax_track_stack();
62529 +
62530 validate_process_creds();
62531
62532 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62533 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62534 struct thread_group_cred *tgcred = NULL;
62535 struct cred *new;
62536
62537 + pax_track_stack();
62538 +
62539 #ifdef CONFIG_KEYS
62540 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62541 if (!tgcred)
62542 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62543 struct cred *new;
62544 int ret;
62545
62546 + pax_track_stack();
62547 +
62548 mutex_init(&p->cred_guard_mutex);
62549
62550 if (
62551 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62552 struct task_struct *task = current;
62553 const struct cred *old = task->real_cred;
62554
62555 + pax_track_stack();
62556 +
62557 kdebug("commit_creds(%p{%d,%d})", new,
62558 atomic_read(&new->usage),
62559 read_cred_subscribers(new));
62560 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62561
62562 get_cred(new); /* we will require a ref for the subj creds too */
62563
62564 + gr_set_role_label(task, new->uid, new->gid);
62565 +
62566 /* dumpability changes */
62567 if (old->euid != new->euid ||
62568 old->egid != new->egid ||
62569 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62570 key_fsgid_changed(task);
62571
62572 /* do it
62573 - * - What if a process setreuid()'s and this brings the
62574 - * new uid over his NPROC rlimit? We can check this now
62575 - * cheaply with the new uid cache, so if it matters
62576 - * we should be checking for it. -DaveM
62577 + * RLIMIT_NPROC limits on user->processes have already been checked
62578 + * in set_user().
62579 */
62580 alter_cred_subscribers(new, 2);
62581 if (new->user != old->user)
62582 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62583 */
62584 void abort_creds(struct cred *new)
62585 {
62586 + pax_track_stack();
62587 +
62588 kdebug("abort_creds(%p{%d,%d})", new,
62589 atomic_read(&new->usage),
62590 read_cred_subscribers(new));
62591 @@ -629,6 +647,8 @@ const struct cred *override_creds(const
62592 {
62593 const struct cred *old = current->cred;
62594
62595 + pax_track_stack();
62596 +
62597 kdebug("override_creds(%p{%d,%d})", new,
62598 atomic_read(&new->usage),
62599 read_cred_subscribers(new));
62600 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62601 {
62602 const struct cred *override = current->cred;
62603
62604 + pax_track_stack();
62605 +
62606 kdebug("revert_creds(%p{%d,%d})", old,
62607 atomic_read(&old->usage),
62608 read_cred_subscribers(old));
62609 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62610 const struct cred *old;
62611 struct cred *new;
62612
62613 + pax_track_stack();
62614 +
62615 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62616 if (!new)
62617 return NULL;
62618 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62619 */
62620 int set_security_override(struct cred *new, u32 secid)
62621 {
62622 + pax_track_stack();
62623 +
62624 return security_kernel_act_as(new, secid);
62625 }
62626 EXPORT_SYMBOL(set_security_override);
62627 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62628 u32 secid;
62629 int ret;
62630
62631 + pax_track_stack();
62632 +
62633 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62634 if (ret < 0)
62635 return ret;
62636 diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62637 --- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62638 +++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62639 @@ -55,6 +55,10 @@
62640 #include <asm/pgtable.h>
62641 #include <asm/mmu_context.h>
62642
62643 +#ifdef CONFIG_GRKERNSEC
62644 +extern rwlock_t grsec_exec_file_lock;
62645 +#endif
62646 +
62647 static void exit_mm(struct task_struct * tsk);
62648
62649 static void __unhash_process(struct task_struct *p)
62650 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62651 struct task_struct *leader;
62652 int zap_leader;
62653 repeat:
62654 +#ifdef CONFIG_NET
62655 + gr_del_task_from_ip_table(p);
62656 +#endif
62657 +
62658 tracehook_prepare_release_task(p);
62659 /* don't need to get the RCU readlock here - the process is dead and
62660 * can't be modifying its own credentials */
62661 @@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62662 {
62663 write_lock_irq(&tasklist_lock);
62664
62665 +#ifdef CONFIG_GRKERNSEC
62666 + write_lock(&grsec_exec_file_lock);
62667 + if (current->exec_file) {
62668 + fput(current->exec_file);
62669 + current->exec_file = NULL;
62670 + }
62671 + write_unlock(&grsec_exec_file_lock);
62672 +#endif
62673 +
62674 ptrace_unlink(current);
62675 /* Reparent to init */
62676 current->real_parent = current->parent = kthreadd_task;
62677 list_move_tail(&current->sibling, &current->real_parent->children);
62678
62679 + gr_set_kernel_label(current);
62680 +
62681 /* Set the exit signal to SIGCHLD so we signal init on exit */
62682 current->exit_signal = SIGCHLD;
62683
62684 @@ -397,7 +416,7 @@ int allow_signal(int sig)
62685 * know it'll be handled, so that they don't get converted to
62686 * SIGKILL or just silently dropped.
62687 */
62688 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62689 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62690 recalc_sigpending();
62691 spin_unlock_irq(&current->sighand->siglock);
62692 return 0;
62693 @@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62694 vsnprintf(current->comm, sizeof(current->comm), name, args);
62695 va_end(args);
62696
62697 +#ifdef CONFIG_GRKERNSEC
62698 + write_lock(&grsec_exec_file_lock);
62699 + if (current->exec_file) {
62700 + fput(current->exec_file);
62701 + current->exec_file = NULL;
62702 + }
62703 + write_unlock(&grsec_exec_file_lock);
62704 +#endif
62705 +
62706 + gr_set_kernel_label(current);
62707 +
62708 /*
62709 * If we were started as result of loading a module, close all of the
62710 * user space pages. We don't need them, and if we didn't close them
62711 @@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62712 struct task_struct *tsk = current;
62713 int group_dead;
62714
62715 - profile_task_exit(tsk);
62716 -
62717 - WARN_ON(atomic_read(&tsk->fs_excl));
62718 -
62719 + /*
62720 + * Check this first since set_fs() below depends on
62721 + * current_thread_info(), which we better not access when we're in
62722 + * interrupt context. Other than that, we want to do the set_fs()
62723 + * as early as possible.
62724 + */
62725 if (unlikely(in_interrupt()))
62726 panic("Aiee, killing interrupt handler!");
62727 - if (unlikely(!tsk->pid))
62728 - panic("Attempted to kill the idle task!");
62729
62730 /*
62731 - * If do_exit is called because this processes oopsed, it's possible
62732 + * If do_exit is called because this processes Oops'ed, it's possible
62733 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62734 * continuing. Amongst other possible reasons, this is to prevent
62735 * mm_release()->clear_child_tid() from writing to a user-controlled
62736 @@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62737 */
62738 set_fs(USER_DS);
62739
62740 + profile_task_exit(tsk);
62741 +
62742 + WARN_ON(atomic_read(&tsk->fs_excl));
62743 +
62744 + if (unlikely(!tsk->pid))
62745 + panic("Attempted to kill the idle task!");
62746 +
62747 tracehook_report_exit(&code);
62748
62749 validate_creds_for_do_exit(tsk);
62750 @@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62751 tsk->exit_code = code;
62752 taskstats_exit(tsk, group_dead);
62753
62754 + gr_acl_handle_psacct(tsk, code);
62755 + gr_acl_handle_exit();
62756 +
62757 exit_mm(tsk);
62758
62759 if (group_dead)
62760 @@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62761
62762 if (unlikely(wo->wo_flags & WNOWAIT)) {
62763 int exit_code = p->exit_code;
62764 - int why, status;
62765 + int why;
62766
62767 get_task_struct(p);
62768 read_unlock(&tasklist_lock);
62769 diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62770 --- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62771 +++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62772 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62773 *stackend = STACK_END_MAGIC; /* for overflow detection */
62774
62775 #ifdef CONFIG_CC_STACKPROTECTOR
62776 - tsk->stack_canary = get_random_int();
62777 + tsk->stack_canary = pax_get_random_long();
62778 #endif
62779
62780 /* One for us, one for whoever does the "release_task()" (usually parent) */
62781 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62782 mm->locked_vm = 0;
62783 mm->mmap = NULL;
62784 mm->mmap_cache = NULL;
62785 - mm->free_area_cache = oldmm->mmap_base;
62786 - mm->cached_hole_size = ~0UL;
62787 + mm->free_area_cache = oldmm->free_area_cache;
62788 + mm->cached_hole_size = oldmm->cached_hole_size;
62789 mm->map_count = 0;
62790 cpumask_clear(mm_cpumask(mm));
62791 mm->mm_rb = RB_ROOT;
62792 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62793 tmp->vm_flags &= ~VM_LOCKED;
62794 tmp->vm_mm = mm;
62795 tmp->vm_next = tmp->vm_prev = NULL;
62796 + tmp->vm_mirror = NULL;
62797 anon_vma_link(tmp);
62798 file = tmp->vm_file;
62799 if (file) {
62800 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62801 if (retval)
62802 goto out;
62803 }
62804 +
62805 +#ifdef CONFIG_PAX_SEGMEXEC
62806 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62807 + struct vm_area_struct *mpnt_m;
62808 +
62809 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62810 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62811 +
62812 + if (!mpnt->vm_mirror)
62813 + continue;
62814 +
62815 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62816 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62817 + mpnt->vm_mirror = mpnt_m;
62818 + } else {
62819 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62820 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62821 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62822 + mpnt->vm_mirror->vm_mirror = mpnt;
62823 + }
62824 + }
62825 + BUG_ON(mpnt_m);
62826 + }
62827 +#endif
62828 +
62829 /* a new mm has just been created */
62830 arch_dup_mmap(oldmm, mm);
62831 retval = 0;
62832 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62833 write_unlock(&fs->lock);
62834 return -EAGAIN;
62835 }
62836 - fs->users++;
62837 + atomic_inc(&fs->users);
62838 write_unlock(&fs->lock);
62839 return 0;
62840 }
62841 tsk->fs = copy_fs_struct(fs);
62842 if (!tsk->fs)
62843 return -ENOMEM;
62844 + gr_set_chroot_entries(tsk, &tsk->fs->root);
62845 return 0;
62846 }
62847
62848 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62849 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62850 #endif
62851 retval = -EAGAIN;
62852 +
62853 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62854 +
62855 if (atomic_read(&p->real_cred->user->processes) >=
62856 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62857 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62858 - p->real_cred->user != INIT_USER)
62859 + if (p->real_cred->user != INIT_USER &&
62860 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62861 goto bad_fork_free;
62862 }
62863 + current->flags &= ~PF_NPROC_EXCEEDED;
62864
62865 retval = copy_creds(p, clone_flags);
62866 if (retval < 0)
62867 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62868 goto bad_fork_free_pid;
62869 }
62870
62871 + gr_copy_label(p);
62872 +
62873 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62874 /*
62875 * Clear TID on mm_release()?
62876 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62877 bad_fork_free:
62878 free_task(p);
62879 fork_out:
62880 + gr_log_forkfail(retval);
62881 +
62882 return ERR_PTR(retval);
62883 }
62884
62885 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62886 if (clone_flags & CLONE_PARENT_SETTID)
62887 put_user(nr, parent_tidptr);
62888
62889 + gr_handle_brute_check();
62890 +
62891 if (clone_flags & CLONE_VFORK) {
62892 p->vfork_done = &vfork;
62893 init_completion(&vfork);
62894 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62895 return 0;
62896
62897 /* don't need lock here; in the worst case we'll do useless copy */
62898 - if (fs->users == 1)
62899 + if (atomic_read(&fs->users) == 1)
62900 return 0;
62901
62902 *new_fsp = copy_fs_struct(fs);
62903 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62904 fs = current->fs;
62905 write_lock(&fs->lock);
62906 current->fs = new_fs;
62907 - if (--fs->users)
62908 + gr_set_chroot_entries(current, &current->fs->root);
62909 + if (atomic_dec_return(&fs->users))
62910 new_fs = NULL;
62911 else
62912 new_fs = fs;
62913 diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62914 --- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62915 +++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62916 @@ -54,6 +54,7 @@
62917 #include <linux/mount.h>
62918 #include <linux/pagemap.h>
62919 #include <linux/syscalls.h>
62920 +#include <linux/ptrace.h>
62921 #include <linux/signal.h>
62922 #include <linux/module.h>
62923 #include <linux/magic.h>
62924 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62925 struct page *page;
62926 int err;
62927
62928 +#ifdef CONFIG_PAX_SEGMEXEC
62929 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62930 + return -EFAULT;
62931 +#endif
62932 +
62933 /*
62934 * The futex address must be "naturally" aligned.
62935 */
62936 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62937 struct futex_q q;
62938 int ret;
62939
62940 + pax_track_stack();
62941 +
62942 if (!bitset)
62943 return -EINVAL;
62944
62945 @@ -1841,7 +1849,7 @@ retry:
62946
62947 restart = &current_thread_info()->restart_block;
62948 restart->fn = futex_wait_restart;
62949 - restart->futex.uaddr = (u32 *)uaddr;
62950 + restart->futex.uaddr = uaddr;
62951 restart->futex.val = val;
62952 restart->futex.time = abs_time->tv64;
62953 restart->futex.bitset = bitset;
62954 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62955 struct futex_q q;
62956 int res, ret;
62957
62958 + pax_track_stack();
62959 +
62960 if (!bitset)
62961 return -EINVAL;
62962
62963 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62964 {
62965 struct robust_list_head __user *head;
62966 unsigned long ret;
62967 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62968 const struct cred *cred = current_cred(), *pcred;
62969 +#endif
62970
62971 if (!futex_cmpxchg_enabled)
62972 return -ENOSYS;
62973 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62974 if (!p)
62975 goto err_unlock;
62976 ret = -EPERM;
62977 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62978 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62979 + goto err_unlock;
62980 +#else
62981 pcred = __task_cred(p);
62982 if (cred->euid != pcred->euid &&
62983 cred->euid != pcred->uid &&
62984 !capable(CAP_SYS_PTRACE))
62985 goto err_unlock;
62986 +#endif
62987 head = p->robust_list;
62988 rcu_read_unlock();
62989 }
62990 @@ -2459,7 +2476,7 @@ retry:
62991 */
62992 static inline int fetch_robust_entry(struct robust_list __user **entry,
62993 struct robust_list __user * __user *head,
62994 - int *pi)
62995 + unsigned int *pi)
62996 {
62997 unsigned long uentry;
62998
62999 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
63000 {
63001 u32 curval;
63002 int i;
63003 + mm_segment_t oldfs;
63004
63005 /*
63006 * This will fail and we want it. Some arch implementations do
63007 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
63008 * implementation, the non functional ones will return
63009 * -ENOSYS.
63010 */
63011 + oldfs = get_fs();
63012 + set_fs(USER_DS);
63013 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
63014 + set_fs(oldfs);
63015 if (curval == -EFAULT)
63016 futex_cmpxchg_enabled = 1;
63017
63018 diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
63019 --- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
63020 +++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
63021 @@ -10,6 +10,7 @@
63022 #include <linux/compat.h>
63023 #include <linux/nsproxy.h>
63024 #include <linux/futex.h>
63025 +#include <linux/ptrace.h>
63026
63027 #include <asm/uaccess.h>
63028
63029 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
63030 {
63031 struct compat_robust_list_head __user *head;
63032 unsigned long ret;
63033 - const struct cred *cred = current_cred(), *pcred;
63034 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63035 + const struct cred *cred = current_cred();
63036 + const struct cred *pcred;
63037 +#endif
63038
63039 if (!futex_cmpxchg_enabled)
63040 return -ENOSYS;
63041 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
63042 if (!p)
63043 goto err_unlock;
63044 ret = -EPERM;
63045 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63046 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63047 + goto err_unlock;
63048 +#else
63049 pcred = __task_cred(p);
63050 if (cred->euid != pcred->euid &&
63051 cred->euid != pcred->uid &&
63052 !capable(CAP_SYS_PTRACE))
63053 goto err_unlock;
63054 +#endif
63055 head = p->compat_robust_list;
63056 read_unlock(&tasklist_lock);
63057 }
63058 diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
63059 --- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63060 +++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63061 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63062 }
63063
63064 #ifdef CONFIG_MODULES
63065 -static inline int within(void *addr, void *start, unsigned long size)
63066 -{
63067 - return ((addr >= start) && (addr < start + size));
63068 -}
63069 -
63070 /* Update list and generate events when modules are unloaded. */
63071 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63072 void *data)
63073 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63074 prev = NULL;
63075 /* Remove entries located in module from linked list. */
63076 for (info = gcov_info_head; info; info = info->next) {
63077 - if (within(info, mod->module_core, mod->core_size)) {
63078 + if (within_module_core_rw((unsigned long)info, mod)) {
63079 if (prev)
63080 prev->next = info->next;
63081 else
63082 diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
63083 --- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63084 +++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63085 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63086 local_irq_restore(flags);
63087 }
63088
63089 -static void run_hrtimer_softirq(struct softirq_action *h)
63090 +static void run_hrtimer_softirq(void)
63091 {
63092 hrtimer_peek_ahead_timers();
63093 }
63094 diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
63095 --- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63096 +++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63097 @@ -11,6 +11,9 @@
63098 * Changed the compression method from stem compression to "table lookup"
63099 * compression (see scripts/kallsyms.c for a more complete description)
63100 */
63101 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63102 +#define __INCLUDED_BY_HIDESYM 1
63103 +#endif
63104 #include <linux/kallsyms.h>
63105 #include <linux/module.h>
63106 #include <linux/init.h>
63107 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63108
63109 static inline int is_kernel_inittext(unsigned long addr)
63110 {
63111 + if (system_state != SYSTEM_BOOTING)
63112 + return 0;
63113 +
63114 if (addr >= (unsigned long)_sinittext
63115 && addr <= (unsigned long)_einittext)
63116 return 1;
63117 return 0;
63118 }
63119
63120 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63121 +#ifdef CONFIG_MODULES
63122 +static inline int is_module_text(unsigned long addr)
63123 +{
63124 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63125 + return 1;
63126 +
63127 + addr = ktla_ktva(addr);
63128 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63129 +}
63130 +#else
63131 +static inline int is_module_text(unsigned long addr)
63132 +{
63133 + return 0;
63134 +}
63135 +#endif
63136 +#endif
63137 +
63138 static inline int is_kernel_text(unsigned long addr)
63139 {
63140 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63141 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63142
63143 static inline int is_kernel(unsigned long addr)
63144 {
63145 +
63146 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63147 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63148 + return 1;
63149 +
63150 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63151 +#else
63152 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63153 +#endif
63154 +
63155 return 1;
63156 return in_gate_area_no_task(addr);
63157 }
63158
63159 static int is_ksym_addr(unsigned long addr)
63160 {
63161 +
63162 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63163 + if (is_module_text(addr))
63164 + return 0;
63165 +#endif
63166 +
63167 if (all_var)
63168 return is_kernel(addr);
63169
63170 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63171
63172 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63173 {
63174 - iter->name[0] = '\0';
63175 iter->nameoff = get_symbol_offset(new_pos);
63176 iter->pos = new_pos;
63177 }
63178 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63179 {
63180 struct kallsym_iter *iter = m->private;
63181
63182 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63183 + if (current_uid())
63184 + return 0;
63185 +#endif
63186 +
63187 /* Some debugging symbols have no name. Ignore them. */
63188 if (!iter->name[0])
63189 return 0;
63190 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63191 struct kallsym_iter *iter;
63192 int ret;
63193
63194 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63195 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63196 if (!iter)
63197 return -ENOMEM;
63198 reset_iter(iter, 0);
63199 diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63200 --- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63201 +++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63202 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63203 /* Guard for recursive entry */
63204 static int exception_level;
63205
63206 -static struct kgdb_io *kgdb_io_ops;
63207 +static const struct kgdb_io *kgdb_io_ops;
63208 static DEFINE_SPINLOCK(kgdb_registration_lock);
63209
63210 /* kgdb console driver is loaded */
63211 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63212 */
63213 static atomic_t passive_cpu_wait[NR_CPUS];
63214 static atomic_t cpu_in_kgdb[NR_CPUS];
63215 -atomic_t kgdb_setting_breakpoint;
63216 +atomic_unchecked_t kgdb_setting_breakpoint;
63217
63218 struct task_struct *kgdb_usethread;
63219 struct task_struct *kgdb_contthread;
63220 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63221 sizeof(unsigned long)];
63222
63223 /* to keep track of the CPU which is doing the single stepping*/
63224 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63225 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63226
63227 /*
63228 * If you are debugging a problem where roundup (the collection of
63229 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63230 return 0;
63231 if (kgdb_connected)
63232 return 1;
63233 - if (atomic_read(&kgdb_setting_breakpoint))
63234 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63235 return 1;
63236 if (print_wait)
63237 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63238 @@ -1426,8 +1426,8 @@ acquirelock:
63239 * instance of the exception handler wanted to come into the
63240 * debugger on a different CPU via a single step
63241 */
63242 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63243 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63244 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63245 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63246
63247 atomic_set(&kgdb_active, -1);
63248 touch_softlockup_watchdog();
63249 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63250 *
63251 * Register it with the KGDB core.
63252 */
63253 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63254 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63255 {
63256 int err;
63257
63258 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63259 *
63260 * Unregister it with the KGDB core.
63261 */
63262 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63263 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63264 {
63265 BUG_ON(kgdb_connected);
63266
63267 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63268 */
63269 void kgdb_breakpoint(void)
63270 {
63271 - atomic_set(&kgdb_setting_breakpoint, 1);
63272 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63273 wmb(); /* Sync point before breakpoint */
63274 arch_kgdb_breakpoint();
63275 wmb(); /* Sync point after breakpoint */
63276 - atomic_set(&kgdb_setting_breakpoint, 0);
63277 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63278 }
63279 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63280
63281 diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63282 --- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63283 +++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63284 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63285 * If module auto-loading support is disabled then this function
63286 * becomes a no-operation.
63287 */
63288 -int __request_module(bool wait, const char *fmt, ...)
63289 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63290 {
63291 - va_list args;
63292 char module_name[MODULE_NAME_LEN];
63293 unsigned int max_modprobes;
63294 int ret;
63295 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63296 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63297 static char *envp[] = { "HOME=/",
63298 "TERM=linux",
63299 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63300 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63301 if (ret)
63302 return ret;
63303
63304 - va_start(args, fmt);
63305 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63306 - va_end(args);
63307 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63308 if (ret >= MODULE_NAME_LEN)
63309 return -ENAMETOOLONG;
63310
63311 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63312 + if (!current_uid()) {
63313 + /* hack to workaround consolekit/udisks stupidity */
63314 + read_lock(&tasklist_lock);
63315 + if (!strcmp(current->comm, "mount") &&
63316 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63317 + read_unlock(&tasklist_lock);
63318 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63319 + return -EPERM;
63320 + }
63321 + read_unlock(&tasklist_lock);
63322 + }
63323 +#endif
63324 +
63325 /* If modprobe needs a service that is in a module, we get a recursive
63326 * loop. Limit the number of running kmod threads to max_threads/2 or
63327 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63328 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63329 atomic_dec(&kmod_concurrent);
63330 return ret;
63331 }
63332 +
63333 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63334 +{
63335 + va_list args;
63336 + int ret;
63337 +
63338 + va_start(args, fmt);
63339 + ret = ____request_module(wait, module_param, fmt, args);
63340 + va_end(args);
63341 +
63342 + return ret;
63343 +}
63344 +
63345 +int __request_module(bool wait, const char *fmt, ...)
63346 +{
63347 + va_list args;
63348 + int ret;
63349 +
63350 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63351 + if (current_uid()) {
63352 + char module_param[MODULE_NAME_LEN];
63353 +
63354 + memset(module_param, 0, sizeof(module_param));
63355 +
63356 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63357 +
63358 + va_start(args, fmt);
63359 + ret = ____request_module(wait, module_param, fmt, args);
63360 + va_end(args);
63361 +
63362 + return ret;
63363 + }
63364 +#endif
63365 +
63366 + va_start(args, fmt);
63367 + ret = ____request_module(wait, NULL, fmt, args);
63368 + va_end(args);
63369 +
63370 + return ret;
63371 +}
63372 +
63373 +
63374 EXPORT_SYMBOL(__request_module);
63375 #endif /* CONFIG_MODULES */
63376
63377 diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63378 --- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63379 +++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63380 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63381 * kernel image and loaded module images reside. This is required
63382 * so x86_64 can correctly handle the %rip-relative fixups.
63383 */
63384 - kip->insns = module_alloc(PAGE_SIZE);
63385 + kip->insns = module_alloc_exec(PAGE_SIZE);
63386 if (!kip->insns) {
63387 kfree(kip);
63388 return NULL;
63389 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63390 */
63391 if (!list_is_singular(&kprobe_insn_pages)) {
63392 list_del(&kip->list);
63393 - module_free(NULL, kip->insns);
63394 + module_free_exec(NULL, kip->insns);
63395 kfree(kip);
63396 }
63397 return 1;
63398 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63399 {
63400 int i, err = 0;
63401 unsigned long offset = 0, size = 0;
63402 - char *modname, namebuf[128];
63403 + char *modname, namebuf[KSYM_NAME_LEN];
63404 const char *symbol_name;
63405 void *addr;
63406 struct kprobe_blackpoint *kb;
63407 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63408 const char *sym = NULL;
63409 unsigned int i = *(loff_t *) v;
63410 unsigned long offset = 0;
63411 - char *modname, namebuf[128];
63412 + char *modname, namebuf[KSYM_NAME_LEN];
63413
63414 head = &kprobe_table[i];
63415 preempt_disable();
63416 diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63417 --- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63418 +++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63419 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63420 /*
63421 * Various lockdep statistics:
63422 */
63423 -atomic_t chain_lookup_hits;
63424 -atomic_t chain_lookup_misses;
63425 -atomic_t hardirqs_on_events;
63426 -atomic_t hardirqs_off_events;
63427 -atomic_t redundant_hardirqs_on;
63428 -atomic_t redundant_hardirqs_off;
63429 -atomic_t softirqs_on_events;
63430 -atomic_t softirqs_off_events;
63431 -atomic_t redundant_softirqs_on;
63432 -atomic_t redundant_softirqs_off;
63433 -atomic_t nr_unused_locks;
63434 -atomic_t nr_cyclic_checks;
63435 -atomic_t nr_find_usage_forwards_checks;
63436 -atomic_t nr_find_usage_backwards_checks;
63437 +atomic_unchecked_t chain_lookup_hits;
63438 +atomic_unchecked_t chain_lookup_misses;
63439 +atomic_unchecked_t hardirqs_on_events;
63440 +atomic_unchecked_t hardirqs_off_events;
63441 +atomic_unchecked_t redundant_hardirqs_on;
63442 +atomic_unchecked_t redundant_hardirqs_off;
63443 +atomic_unchecked_t softirqs_on_events;
63444 +atomic_unchecked_t softirqs_off_events;
63445 +atomic_unchecked_t redundant_softirqs_on;
63446 +atomic_unchecked_t redundant_softirqs_off;
63447 +atomic_unchecked_t nr_unused_locks;
63448 +atomic_unchecked_t nr_cyclic_checks;
63449 +atomic_unchecked_t nr_find_usage_forwards_checks;
63450 +atomic_unchecked_t nr_find_usage_backwards_checks;
63451 #endif
63452
63453 /*
63454 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
63455 int i;
63456 #endif
63457
63458 +#ifdef CONFIG_PAX_KERNEXEC
63459 + start = ktla_ktva(start);
63460 +#endif
63461 +
63462 /*
63463 * static variable?
63464 */
63465 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
63466 */
63467 for_each_possible_cpu(i) {
63468 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63469 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63470 - + per_cpu_offset(i);
63471 + end = start + PERCPU_ENOUGH_ROOM;
63472
63473 if ((addr >= start) && (addr < end))
63474 return 1;
63475 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63476 if (!static_obj(lock->key)) {
63477 debug_locks_off();
63478 printk("INFO: trying to register non-static key.\n");
63479 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63480 printk("the code is fine but needs lockdep annotation.\n");
63481 printk("turning off the locking correctness validator.\n");
63482 dump_stack();
63483 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63484 if (!class)
63485 return 0;
63486 }
63487 - debug_atomic_inc((atomic_t *)&class->ops);
63488 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63489 if (very_verbose(class)) {
63490 printk("\nacquire class [%p] %s", class->key, class->name);
63491 if (class->name_version > 1)
63492 diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63493 --- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63494 +++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63495 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63496 /*
63497 * Various lockdep statistics:
63498 */
63499 -extern atomic_t chain_lookup_hits;
63500 -extern atomic_t chain_lookup_misses;
63501 -extern atomic_t hardirqs_on_events;
63502 -extern atomic_t hardirqs_off_events;
63503 -extern atomic_t redundant_hardirqs_on;
63504 -extern atomic_t redundant_hardirqs_off;
63505 -extern atomic_t softirqs_on_events;
63506 -extern atomic_t softirqs_off_events;
63507 -extern atomic_t redundant_softirqs_on;
63508 -extern atomic_t redundant_softirqs_off;
63509 -extern atomic_t nr_unused_locks;
63510 -extern atomic_t nr_cyclic_checks;
63511 -extern atomic_t nr_cyclic_check_recursions;
63512 -extern atomic_t nr_find_usage_forwards_checks;
63513 -extern atomic_t nr_find_usage_forwards_recursions;
63514 -extern atomic_t nr_find_usage_backwards_checks;
63515 -extern atomic_t nr_find_usage_backwards_recursions;
63516 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
63517 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
63518 -# define debug_atomic_read(ptr) atomic_read(ptr)
63519 +extern atomic_unchecked_t chain_lookup_hits;
63520 +extern atomic_unchecked_t chain_lookup_misses;
63521 +extern atomic_unchecked_t hardirqs_on_events;
63522 +extern atomic_unchecked_t hardirqs_off_events;
63523 +extern atomic_unchecked_t redundant_hardirqs_on;
63524 +extern atomic_unchecked_t redundant_hardirqs_off;
63525 +extern atomic_unchecked_t softirqs_on_events;
63526 +extern atomic_unchecked_t softirqs_off_events;
63527 +extern atomic_unchecked_t redundant_softirqs_on;
63528 +extern atomic_unchecked_t redundant_softirqs_off;
63529 +extern atomic_unchecked_t nr_unused_locks;
63530 +extern atomic_unchecked_t nr_cyclic_checks;
63531 +extern atomic_unchecked_t nr_cyclic_check_recursions;
63532 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
63533 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63534 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
63535 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63536 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63537 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63538 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63539 #else
63540 # define debug_atomic_inc(ptr) do { } while (0)
63541 # define debug_atomic_dec(ptr) do { } while (0)
63542 diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63543 --- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63544 +++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63545 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63546
63547 static void print_name(struct seq_file *m, struct lock_class *class)
63548 {
63549 - char str[128];
63550 + char str[KSYM_NAME_LEN];
63551 const char *name = class->name;
63552
63553 if (!name) {
63554 diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63555 --- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63556 +++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63557 @@ -55,6 +55,7 @@
63558 #include <linux/async.h>
63559 #include <linux/percpu.h>
63560 #include <linux/kmemleak.h>
63561 +#include <linux/grsecurity.h>
63562
63563 #define CREATE_TRACE_POINTS
63564 #include <trace/events/module.h>
63565 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63566 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63567
63568 /* Bounds of module allocation, for speeding __module_address */
63569 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63570 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63571 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63572
63573 int register_module_notifier(struct notifier_block * nb)
63574 {
63575 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63576 return true;
63577
63578 list_for_each_entry_rcu(mod, &modules, list) {
63579 - struct symsearch arr[] = {
63580 + struct symsearch modarr[] = {
63581 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63582 NOT_GPL_ONLY, false },
63583 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63584 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63585 #endif
63586 };
63587
63588 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63589 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63590 return true;
63591 }
63592 return false;
63593 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63594 void *ptr;
63595 int cpu;
63596
63597 - if (align > PAGE_SIZE) {
63598 + if (align-1 >= PAGE_SIZE) {
63599 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63600 name, align, PAGE_SIZE);
63601 align = PAGE_SIZE;
63602 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63603 * /sys/module/foo/sections stuff
63604 * J. Corbet <corbet@lwn.net>
63605 */
63606 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63607 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63608
63609 static inline bool sect_empty(const Elf_Shdr *sect)
63610 {
63611 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63612 destroy_params(mod->kp, mod->num_kp);
63613
63614 /* This may be NULL, but that's OK */
63615 - module_free(mod, mod->module_init);
63616 + module_free(mod, mod->module_init_rw);
63617 + module_free_exec(mod, mod->module_init_rx);
63618 kfree(mod->args);
63619 if (mod->percpu)
63620 percpu_modfree(mod->percpu);
63621 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63622 percpu_modfree(mod->refptr);
63623 #endif
63624 /* Free lock-classes: */
63625 - lockdep_free_key_range(mod->module_core, mod->core_size);
63626 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63627 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63628
63629 /* Finally, free the core (containing the module structure) */
63630 - module_free(mod, mod->module_core);
63631 + module_free_exec(mod, mod->module_core_rx);
63632 + module_free(mod, mod->module_core_rw);
63633
63634 #ifdef CONFIG_MPU
63635 update_protections(current->mm);
63636 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63637 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63638 int ret = 0;
63639 const struct kernel_symbol *ksym;
63640 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63641 + int is_fs_load = 0;
63642 + int register_filesystem_found = 0;
63643 + char *p;
63644 +
63645 + p = strstr(mod->args, "grsec_modharden_fs");
63646 +
63647 + if (p) {
63648 + char *endptr = p + strlen("grsec_modharden_fs");
63649 + /* copy \0 as well */
63650 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63651 + is_fs_load = 1;
63652 + }
63653 +#endif
63654 +
63655
63656 for (i = 1; i < n; i++) {
63657 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63658 + const char *name = strtab + sym[i].st_name;
63659 +
63660 + /* it's a real shame this will never get ripped and copied
63661 + upstream! ;(
63662 + */
63663 + if (is_fs_load && !strcmp(name, "register_filesystem"))
63664 + register_filesystem_found = 1;
63665 +#endif
63666 switch (sym[i].st_shndx) {
63667 case SHN_COMMON:
63668 /* We compiled with -fno-common. These are not
63669 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63670 strtab + sym[i].st_name, mod);
63671 /* Ok if resolved. */
63672 if (ksym) {
63673 + pax_open_kernel();
63674 sym[i].st_value = ksym->value;
63675 + pax_close_kernel();
63676 break;
63677 }
63678
63679 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63680 secbase = (unsigned long)mod->percpu;
63681 else
63682 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63683 + pax_open_kernel();
63684 sym[i].st_value += secbase;
63685 + pax_close_kernel();
63686 break;
63687 }
63688 }
63689
63690 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63691 + if (is_fs_load && !register_filesystem_found) {
63692 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63693 + ret = -EPERM;
63694 + }
63695 +#endif
63696 +
63697 return ret;
63698 }
63699
63700 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63701 || s->sh_entsize != ~0UL
63702 || strstarts(secstrings + s->sh_name, ".init"))
63703 continue;
63704 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63705 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63706 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63707 + else
63708 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63709 DEBUGP("\t%s\n", secstrings + s->sh_name);
63710 }
63711 - if (m == 0)
63712 - mod->core_text_size = mod->core_size;
63713 }
63714
63715 DEBUGP("Init section allocation order:\n");
63716 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63717 || s->sh_entsize != ~0UL
63718 || !strstarts(secstrings + s->sh_name, ".init"))
63719 continue;
63720 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63721 - | INIT_OFFSET_MASK);
63722 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63723 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63724 + else
63725 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63726 + s->sh_entsize |= INIT_OFFSET_MASK;
63727 DEBUGP("\t%s\n", secstrings + s->sh_name);
63728 }
63729 - if (m == 0)
63730 - mod->init_text_size = mod->init_size;
63731 }
63732 }
63733
63734 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63735
63736 /* As per nm */
63737 static char elf_type(const Elf_Sym *sym,
63738 - Elf_Shdr *sechdrs,
63739 - const char *secstrings,
63740 - struct module *mod)
63741 + const Elf_Shdr *sechdrs,
63742 + const char *secstrings)
63743 {
63744 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63745 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63746 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63747
63748 /* Put symbol section at end of init part of module. */
63749 symsect->sh_flags |= SHF_ALLOC;
63750 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63751 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63752 symindex) | INIT_OFFSET_MASK;
63753 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63754
63755 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63756 }
63757
63758 /* Append room for core symbols at end of core part. */
63759 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63760 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63761 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63762 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63763
63764 /* Put string table section at end of init part of module. */
63765 strsect->sh_flags |= SHF_ALLOC;
63766 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63767 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63768 strindex) | INIT_OFFSET_MASK;
63769 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63770
63771 /* Append room for core symbols' strings at end of core part. */
63772 - *pstroffs = mod->core_size;
63773 + *pstroffs = mod->core_size_rx;
63774 __set_bit(0, strmap);
63775 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63776 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63777
63778 return symoffs;
63779 }
63780 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63781 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63782 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63783
63784 + pax_open_kernel();
63785 +
63786 /* Set types up while we still have access to sections. */
63787 for (i = 0; i < mod->num_symtab; i++)
63788 mod->symtab[i].st_info
63789 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63790 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
63791
63792 - mod->core_symtab = dst = mod->module_core + symoffs;
63793 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
63794 src = mod->symtab;
63795 *dst = *src;
63796 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63797 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63798 }
63799 mod->core_num_syms = ndst;
63800
63801 - mod->core_strtab = s = mod->module_core + stroffs;
63802 + mod->core_strtab = s = mod->module_core_rx + stroffs;
63803 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63804 if (test_bit(i, strmap))
63805 *++s = mod->strtab[i];
63806 +
63807 + pax_close_kernel();
63808 }
63809 #else
63810 static inline unsigned long layout_symtab(struct module *mod,
63811 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63812 #endif
63813 }
63814
63815 -static void *module_alloc_update_bounds(unsigned long size)
63816 +static void *module_alloc_update_bounds_rw(unsigned long size)
63817 {
63818 void *ret = module_alloc(size);
63819
63820 if (ret) {
63821 /* Update module bounds. */
63822 - if ((unsigned long)ret < module_addr_min)
63823 - module_addr_min = (unsigned long)ret;
63824 - if ((unsigned long)ret + size > module_addr_max)
63825 - module_addr_max = (unsigned long)ret + size;
63826 + if ((unsigned long)ret < module_addr_min_rw)
63827 + module_addr_min_rw = (unsigned long)ret;
63828 + if ((unsigned long)ret + size > module_addr_max_rw)
63829 + module_addr_max_rw = (unsigned long)ret + size;
63830 + }
63831 + return ret;
63832 +}
63833 +
63834 +static void *module_alloc_update_bounds_rx(unsigned long size)
63835 +{
63836 + void *ret = module_alloc_exec(size);
63837 +
63838 + if (ret) {
63839 + /* Update module bounds. */
63840 + if ((unsigned long)ret < module_addr_min_rx)
63841 + module_addr_min_rx = (unsigned long)ret;
63842 + if ((unsigned long)ret + size > module_addr_max_rx)
63843 + module_addr_max_rx = (unsigned long)ret + size;
63844 }
63845 return ret;
63846 }
63847 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63848 unsigned int i;
63849
63850 /* only scan the sections containing data */
63851 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63852 - (unsigned long)mod->module_core,
63853 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63854 + (unsigned long)mod->module_core_rw,
63855 sizeof(struct module), GFP_KERNEL);
63856
63857 for (i = 1; i < hdr->e_shnum; i++) {
63858 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63859 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63860 continue;
63861
63862 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63863 - (unsigned long)mod->module_core,
63864 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63865 + (unsigned long)mod->module_core_rw,
63866 sechdrs[i].sh_size, GFP_KERNEL);
63867 }
63868 }
63869 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63870 secstrings, &stroffs, strmap);
63871
63872 /* Do the allocs. */
63873 - ptr = module_alloc_update_bounds(mod->core_size);
63874 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63875 /*
63876 * The pointer to this block is stored in the module structure
63877 * which is inside the block. Just mark it as not being a
63878 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63879 err = -ENOMEM;
63880 goto free_percpu;
63881 }
63882 - memset(ptr, 0, mod->core_size);
63883 - mod->module_core = ptr;
63884 + memset(ptr, 0, mod->core_size_rw);
63885 + mod->module_core_rw = ptr;
63886
63887 - ptr = module_alloc_update_bounds(mod->init_size);
63888 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63889 /*
63890 * The pointer to this block is stored in the module structure
63891 * which is inside the block. This block doesn't need to be
63892 * scanned as it contains data and code that will be freed
63893 * after the module is initialized.
63894 */
63895 - kmemleak_ignore(ptr);
63896 - if (!ptr && mod->init_size) {
63897 + kmemleak_not_leak(ptr);
63898 + if (!ptr && mod->init_size_rw) {
63899 + err = -ENOMEM;
63900 + goto free_core_rw;
63901 + }
63902 + memset(ptr, 0, mod->init_size_rw);
63903 + mod->module_init_rw = ptr;
63904 +
63905 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63906 + kmemleak_not_leak(ptr);
63907 + if (!ptr) {
63908 err = -ENOMEM;
63909 - goto free_core;
63910 + goto free_init_rw;
63911 }
63912 - memset(ptr, 0, mod->init_size);
63913 - mod->module_init = ptr;
63914 +
63915 + pax_open_kernel();
63916 + memset(ptr, 0, mod->core_size_rx);
63917 + pax_close_kernel();
63918 + mod->module_core_rx = ptr;
63919 +
63920 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63921 + kmemleak_not_leak(ptr);
63922 + if (!ptr && mod->init_size_rx) {
63923 + err = -ENOMEM;
63924 + goto free_core_rx;
63925 + }
63926 +
63927 + pax_open_kernel();
63928 + memset(ptr, 0, mod->init_size_rx);
63929 + pax_close_kernel();
63930 + mod->module_init_rx = ptr;
63931
63932 /* Transfer each section which specifies SHF_ALLOC */
63933 DEBUGP("final section addresses:\n");
63934 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63935 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63936 continue;
63937
63938 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63939 - dest = mod->module_init
63940 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63941 - else
63942 - dest = mod->module_core + sechdrs[i].sh_entsize;
63943 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63944 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63945 + dest = mod->module_init_rw
63946 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63947 + else
63948 + dest = mod->module_init_rx
63949 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63950 + } else {
63951 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63952 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63953 + else
63954 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63955 + }
63956 +
63957 + if (sechdrs[i].sh_type != SHT_NOBITS) {
63958
63959 - if (sechdrs[i].sh_type != SHT_NOBITS)
63960 - memcpy(dest, (void *)sechdrs[i].sh_addr,
63961 - sechdrs[i].sh_size);
63962 +#ifdef CONFIG_PAX_KERNEXEC
63963 +#ifdef CONFIG_X86_64
63964 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63965 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63966 +#endif
63967 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63968 + pax_open_kernel();
63969 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63970 + pax_close_kernel();
63971 + } else
63972 +#endif
63973 +
63974 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63975 + }
63976 /* Update sh_addr to point to copy in image. */
63977 - sechdrs[i].sh_addr = (unsigned long)dest;
63978 +
63979 +#ifdef CONFIG_PAX_KERNEXEC
63980 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63981 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63982 + else
63983 +#endif
63984 +
63985 + sechdrs[i].sh_addr = (unsigned long)dest;
63986 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63987 }
63988 /* Module has been moved. */
63989 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63990 mod->name);
63991 if (!mod->refptr) {
63992 err = -ENOMEM;
63993 - goto free_init;
63994 + goto free_init_rx;
63995 }
63996 #endif
63997 /* Now we've moved module, initialize linked lists, etc. */
63998 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63999 /* Set up MODINFO_ATTR fields */
64000 setup_modinfo(mod, sechdrs, infoindex);
64001
64002 + mod->args = args;
64003 +
64004 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64005 + {
64006 + char *p, *p2;
64007 +
64008 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64009 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64010 + err = -EPERM;
64011 + goto cleanup;
64012 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64013 + p += strlen("grsec_modharden_normal");
64014 + p2 = strstr(p, "_");
64015 + if (p2) {
64016 + *p2 = '\0';
64017 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64018 + *p2 = '_';
64019 + }
64020 + err = -EPERM;
64021 + goto cleanup;
64022 + }
64023 + }
64024 +#endif
64025 +
64026 +
64027 /* Fix up syms, so that st_value is a pointer to location. */
64028 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
64029 mod);
64030 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
64031
64032 /* Now do relocations. */
64033 for (i = 1; i < hdr->e_shnum; i++) {
64034 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
64035 unsigned int info = sechdrs[i].sh_info;
64036 + strtab = (char *)sechdrs[strindex].sh_addr;
64037
64038 /* Not a valid relocation section? */
64039 if (info >= hdr->e_shnum)
64040 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
64041 * Do it before processing of module parameters, so the module
64042 * can provide parameter accessor functions of its own.
64043 */
64044 - if (mod->module_init)
64045 - flush_icache_range((unsigned long)mod->module_init,
64046 - (unsigned long)mod->module_init
64047 - + mod->init_size);
64048 - flush_icache_range((unsigned long)mod->module_core,
64049 - (unsigned long)mod->module_core + mod->core_size);
64050 + if (mod->module_init_rx)
64051 + flush_icache_range((unsigned long)mod->module_init_rx,
64052 + (unsigned long)mod->module_init_rx
64053 + + mod->init_size_rx);
64054 + flush_icache_range((unsigned long)mod->module_core_rx,
64055 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64056
64057 set_fs(old_fs);
64058
64059 - mod->args = args;
64060 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64061 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64062 mod->name);
64063 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64064 free_unload:
64065 module_unload_free(mod);
64066 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64067 + free_init_rx:
64068 percpu_modfree(mod->refptr);
64069 - free_init:
64070 #endif
64071 - module_free(mod, mod->module_init);
64072 - free_core:
64073 - module_free(mod, mod->module_core);
64074 + module_free_exec(mod, mod->module_init_rx);
64075 + free_core_rx:
64076 + module_free_exec(mod, mod->module_core_rx);
64077 + free_init_rw:
64078 + module_free(mod, mod->module_init_rw);
64079 + free_core_rw:
64080 + module_free(mod, mod->module_core_rw);
64081 /* mod will be freed with core. Don't access it beyond this line! */
64082 free_percpu:
64083 if (percpu)
64084 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64085 mod->symtab = mod->core_symtab;
64086 mod->strtab = mod->core_strtab;
64087 #endif
64088 - module_free(mod, mod->module_init);
64089 - mod->module_init = NULL;
64090 - mod->init_size = 0;
64091 - mod->init_text_size = 0;
64092 + module_free(mod, mod->module_init_rw);
64093 + module_free_exec(mod, mod->module_init_rx);
64094 + mod->module_init_rw = NULL;
64095 + mod->module_init_rx = NULL;
64096 + mod->init_size_rw = 0;
64097 + mod->init_size_rx = 0;
64098 mutex_unlock(&module_mutex);
64099
64100 return 0;
64101 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64102 unsigned long nextval;
64103
64104 /* At worse, next value is at end of module */
64105 - if (within_module_init(addr, mod))
64106 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64107 + if (within_module_init_rx(addr, mod))
64108 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64109 + else if (within_module_init_rw(addr, mod))
64110 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64111 + else if (within_module_core_rx(addr, mod))
64112 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64113 + else if (within_module_core_rw(addr, mod))
64114 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64115 else
64116 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64117 + return NULL;
64118
64119 /* Scan for closest preceeding symbol, and next symbol. (ELF
64120 starts real symbols at 1). */
64121 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64122 char buf[8];
64123
64124 seq_printf(m, "%s %u",
64125 - mod->name, mod->init_size + mod->core_size);
64126 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64127 print_unload_info(m, mod);
64128
64129 /* Informative for users. */
64130 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64131 mod->state == MODULE_STATE_COMING ? "Loading":
64132 "Live");
64133 /* Used by oprofile and other similar tools. */
64134 - seq_printf(m, " 0x%p", mod->module_core);
64135 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64136
64137 /* Taints info */
64138 if (mod->taints)
64139 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
64140
64141 static int __init proc_modules_init(void)
64142 {
64143 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64144 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64145 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64146 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64147 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64148 +#else
64149 proc_create("modules", 0, NULL, &proc_modules_operations);
64150 +#endif
64151 +#else
64152 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64153 +#endif
64154 return 0;
64155 }
64156 module_init(proc_modules_init);
64157 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64158 {
64159 struct module *mod;
64160
64161 - if (addr < module_addr_min || addr > module_addr_max)
64162 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64163 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64164 return NULL;
64165
64166 list_for_each_entry_rcu(mod, &modules, list)
64167 - if (within_module_core(addr, mod)
64168 - || within_module_init(addr, mod))
64169 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64170 return mod;
64171 return NULL;
64172 }
64173 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64174 */
64175 struct module *__module_text_address(unsigned long addr)
64176 {
64177 - struct module *mod = __module_address(addr);
64178 + struct module *mod;
64179 +
64180 +#ifdef CONFIG_X86_32
64181 + addr = ktla_ktva(addr);
64182 +#endif
64183 +
64184 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64185 + return NULL;
64186 +
64187 + mod = __module_address(addr);
64188 +
64189 if (mod) {
64190 /* Make sure it's within the text section. */
64191 - if (!within(addr, mod->module_init, mod->init_text_size)
64192 - && !within(addr, mod->module_core, mod->core_text_size))
64193 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64194 mod = NULL;
64195 }
64196 return mod;
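
The kernel/module.c changes above split each module into separate RW (data) and RX (code) regions: the RX side is allocated with module_alloc_exec(), written to only between pax_open_kernel() and pax_close_kernel(), and only non-writable SHF_ALLOC sections are copied into it, so module text is never writable once the module is live. The following is a minimal userspace sketch of the same copy-then-protect (W^X) idea, assuming an x86-64 Linux host; the machine-code stub, the function-pointer cast and all names are purely illustrative and are not kernel code.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	/* "rx region": start out writable so the code can be staged, the
	 * userspace analogue of the pax_open_kernel()/memcpy()/
	 * pax_close_kernel() sequence in the hunks above. */
	unsigned char *rx = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (rx == MAP_FAILED)
		return 1;

	/* x86-64 machine code for: mov eax, 42; ret */
	static const unsigned char stub[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
	memcpy(rx, stub, sizeof(stub));

	/* Drop write permission before the region is ever executed. */
	if (mprotect(rx, len, PROT_READ | PROT_EXEC) != 0)
		return 1;

	/* Data-to-function-pointer cast: fine on Linux, illustrative only. */
	int (*fn)(void) = (int (*)(void))rx;
	printf("stub returned %d\n", fn());

	munmap(rx, len);
	return 0;
}

The point mirrored from the patch is that the mapping is writable only while the code is staged and loses PROT_WRITE before the first execution, so it is never writable and executable at the same time.
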
64197 diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64198 --- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64199 +++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64200 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64201 */
64202
64203 for (;;) {
64204 - struct thread_info *owner;
64205 + struct task_struct *owner;
64206
64207 /*
64208 * If we own the BKL, then don't spin. The owner of
64209 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64210 spin_lock_mutex(&lock->wait_lock, flags);
64211
64212 debug_mutex_lock_common(lock, &waiter);
64213 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64214 + debug_mutex_add_waiter(lock, &waiter, task);
64215
64216 /* add waiting tasks to the end of the waitqueue (FIFO): */
64217 list_add_tail(&waiter.list, &lock->wait_list);
64218 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64219 * TASK_UNINTERRUPTIBLE case.)
64220 */
64221 if (unlikely(signal_pending_state(state, task))) {
64222 - mutex_remove_waiter(lock, &waiter,
64223 - task_thread_info(task));
64224 + mutex_remove_waiter(lock, &waiter, task);
64225 mutex_release(&lock->dep_map, 1, ip);
64226 spin_unlock_mutex(&lock->wait_lock, flags);
64227
64228 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64229 done:
64230 lock_acquired(&lock->dep_map, ip);
64231 /* got the lock - rejoice! */
64232 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64233 + mutex_remove_waiter(lock, &waiter, task);
64234 mutex_set_owner(lock);
64235
64236 /* set it to 0 if there are no waiters left: */
64237 diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64238 --- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64239 +++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64240 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64241 }
64242
64243 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64244 - struct thread_info *ti)
64245 + struct task_struct *task)
64246 {
64247 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64248
64249 /* Mark the current thread as blocked on the lock: */
64250 - ti->task->blocked_on = waiter;
64251 + task->blocked_on = waiter;
64252 }
64253
64254 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64255 - struct thread_info *ti)
64256 + struct task_struct *task)
64257 {
64258 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64259 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64260 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64261 - ti->task->blocked_on = NULL;
64262 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64263 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64264 + task->blocked_on = NULL;
64265
64266 list_del_init(&waiter->list);
64267 waiter->task = NULL;
64268 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64269 return;
64270
64271 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64272 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64273 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
64274 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64275 mutex_clear_owner(lock);
64276 }
64277 diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64278 --- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64279 +++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64280 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64281 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64282 extern void debug_mutex_add_waiter(struct mutex *lock,
64283 struct mutex_waiter *waiter,
64284 - struct thread_info *ti);
64285 + struct task_struct *task);
64286 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64287 - struct thread_info *ti);
64288 + struct task_struct *task);
64289 extern void debug_mutex_unlock(struct mutex *lock);
64290 extern void debug_mutex_init(struct mutex *lock, const char *name,
64291 struct lock_class_key *key);
64292
64293 static inline void mutex_set_owner(struct mutex *lock)
64294 {
64295 - lock->owner = current_thread_info();
64296 + lock->owner = current;
64297 }
64298
64299 static inline void mutex_clear_owner(struct mutex *lock)
64300 diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64301 --- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64302 +++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64303 @@ -19,7 +19,7 @@
64304 #ifdef CONFIG_SMP
64305 static inline void mutex_set_owner(struct mutex *lock)
64306 {
64307 - lock->owner = current_thread_info();
64308 + lock->owner = current;
64309 }
64310
64311 static inline void mutex_clear_owner(struct mutex *lock)
64312 diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64313 --- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64314 +++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64315 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64316 const char *board;
64317
64318 printk(KERN_WARNING "------------[ cut here ]------------\n");
64319 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64320 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64321 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64322 if (board)
64323 printk(KERN_WARNING "Hardware name: %s\n", board);
64324 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64325 */
64326 void __stack_chk_fail(void)
64327 {
64328 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64329 + dump_stack();
64330 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64331 __builtin_return_address(0));
64332 }
64333 EXPORT_SYMBOL(__stack_chk_fail);
64334 diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64335 --- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64336 +++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64337 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64338 return ret;
64339 }
64340
64341 -static struct sysfs_ops module_sysfs_ops = {
64342 +static const struct sysfs_ops module_sysfs_ops = {
64343 .show = module_attr_show,
64344 .store = module_attr_store,
64345 };
64346 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64347 return 0;
64348 }
64349
64350 -static struct kset_uevent_ops module_uevent_ops = {
64351 +static const struct kset_uevent_ops module_uevent_ops = {
64352 .filter = uevent_filter,
64353 };
64354
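
The kernel/params.c change above marks module_sysfs_ops and module_uevent_ops const, the same constification applied to hibernation_ops and suspend_ops further down: an ops table that is never modified after build time can live in read-only storage, so a stray or malicious write to its function pointers faults (or fails to compile) instead of silently redirecting control flow. A small compilable sketch of the effect; the names are illustrative.

#include <stdio.h>

struct ops_example {
	void (*show)(void);
};

static void show_impl(void)
{
	puts("show called");
}

/* const places the table in .rodata; "ops_table.show = evil;" no longer compiles */
static const struct ops_example ops_table = { .show = show_impl };

int main(void)
{
	ops_table.show();
	return 0;
}
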
64355 diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64356 --- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64357 +++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64358 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64359 */
64360 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64361
64362 -static atomic64_t perf_event_id;
64363 +static atomic64_unchecked_t perf_event_id;
64364
64365 /*
64366 * Lock for (sysadmin-configurable) event reservations:
64367 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64368 * In order to keep per-task stats reliable we need to flip the event
64369 * values when we flip the contexts.
64370 */
64371 - value = atomic64_read(&next_event->count);
64372 - value = atomic64_xchg(&event->count, value);
64373 - atomic64_set(&next_event->count, value);
64374 + value = atomic64_read_unchecked(&next_event->count);
64375 + value = atomic64_xchg_unchecked(&event->count, value);
64376 + atomic64_set_unchecked(&next_event->count, value);
64377
64378 swap(event->total_time_enabled, next_event->total_time_enabled);
64379 swap(event->total_time_running, next_event->total_time_running);
64380 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64381 update_event_times(event);
64382 }
64383
64384 - return atomic64_read(&event->count);
64385 + return atomic64_read_unchecked(&event->count);
64386 }
64387
64388 /*
64389 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64390 values[n++] = 1 + leader->nr_siblings;
64391 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64392 values[n++] = leader->total_time_enabled +
64393 - atomic64_read(&leader->child_total_time_enabled);
64394 + atomic64_read_unchecked(&leader->child_total_time_enabled);
64395 }
64396 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64397 values[n++] = leader->total_time_running +
64398 - atomic64_read(&leader->child_total_time_running);
64399 + atomic64_read_unchecked(&leader->child_total_time_running);
64400 }
64401
64402 size = n * sizeof(u64);
64403 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64404 values[n++] = perf_event_read_value(event);
64405 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64406 values[n++] = event->total_time_enabled +
64407 - atomic64_read(&event->child_total_time_enabled);
64408 + atomic64_read_unchecked(&event->child_total_time_enabled);
64409 }
64410 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64411 values[n++] = event->total_time_running +
64412 - atomic64_read(&event->child_total_time_running);
64413 + atomic64_read_unchecked(&event->child_total_time_running);
64414 }
64415 if (read_format & PERF_FORMAT_ID)
64416 values[n++] = primary_event_id(event);
64417 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64418 static void perf_event_reset(struct perf_event *event)
64419 {
64420 (void)perf_event_read(event);
64421 - atomic64_set(&event->count, 0);
64422 + atomic64_set_unchecked(&event->count, 0);
64423 perf_event_update_userpage(event);
64424 }
64425
64426 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64427 ++userpg->lock;
64428 barrier();
64429 userpg->index = perf_event_index(event);
64430 - userpg->offset = atomic64_read(&event->count);
64431 + userpg->offset = atomic64_read_unchecked(&event->count);
64432 if (event->state == PERF_EVENT_STATE_ACTIVE)
64433 - userpg->offset -= atomic64_read(&event->hw.prev_count);
64434 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64435
64436 userpg->time_enabled = event->total_time_enabled +
64437 - atomic64_read(&event->child_total_time_enabled);
64438 + atomic64_read_unchecked(&event->child_total_time_enabled);
64439
64440 userpg->time_running = event->total_time_running +
64441 - atomic64_read(&event->child_total_time_running);
64442 + atomic64_read_unchecked(&event->child_total_time_running);
64443
64444 barrier();
64445 ++userpg->lock;
64446 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64447 u64 values[4];
64448 int n = 0;
64449
64450 - values[n++] = atomic64_read(&event->count);
64451 + values[n++] = atomic64_read_unchecked(&event->count);
64452 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64453 values[n++] = event->total_time_enabled +
64454 - atomic64_read(&event->child_total_time_enabled);
64455 + atomic64_read_unchecked(&event->child_total_time_enabled);
64456 }
64457 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64458 values[n++] = event->total_time_running +
64459 - atomic64_read(&event->child_total_time_running);
64460 + atomic64_read_unchecked(&event->child_total_time_running);
64461 }
64462 if (read_format & PERF_FORMAT_ID)
64463 values[n++] = primary_event_id(event);
64464 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64465 if (leader != event)
64466 leader->pmu->read(leader);
64467
64468 - values[n++] = atomic64_read(&leader->count);
64469 + values[n++] = atomic64_read_unchecked(&leader->count);
64470 if (read_format & PERF_FORMAT_ID)
64471 values[n++] = primary_event_id(leader);
64472
64473 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64474 if (sub != event)
64475 sub->pmu->read(sub);
64476
64477 - values[n++] = atomic64_read(&sub->count);
64478 + values[n++] = atomic64_read_unchecked(&sub->count);
64479 if (read_format & PERF_FORMAT_ID)
64480 values[n++] = primary_event_id(sub);
64481
64482 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64483 {
64484 struct hw_perf_event *hwc = &event->hw;
64485
64486 - atomic64_add(nr, &event->count);
64487 + atomic64_add_unchecked(nr, &event->count);
64488
64489 if (!hwc->sample_period)
64490 return;
64491 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64492 u64 now;
64493
64494 now = cpu_clock(cpu);
64495 - prev = atomic64_read(&event->hw.prev_count);
64496 - atomic64_set(&event->hw.prev_count, now);
64497 - atomic64_add(now - prev, &event->count);
64498 + prev = atomic64_read_unchecked(&event->hw.prev_count);
64499 + atomic64_set_unchecked(&event->hw.prev_count, now);
64500 + atomic64_add_unchecked(now - prev, &event->count);
64501 }
64502
64503 static int cpu_clock_perf_event_enable(struct perf_event *event)
64504 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64505 struct hw_perf_event *hwc = &event->hw;
64506 int cpu = raw_smp_processor_id();
64507
64508 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64509 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64510 perf_swevent_start_hrtimer(event);
64511
64512 return 0;
64513 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64514 u64 prev;
64515 s64 delta;
64516
64517 - prev = atomic64_xchg(&event->hw.prev_count, now);
64518 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64519 delta = now - prev;
64520 - atomic64_add(delta, &event->count);
64521 + atomic64_add_unchecked(delta, &event->count);
64522 }
64523
64524 static int task_clock_perf_event_enable(struct perf_event *event)
64525 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64526
64527 now = event->ctx->time;
64528
64529 - atomic64_set(&hwc->prev_count, now);
64530 + atomic64_set_unchecked(&hwc->prev_count, now);
64531
64532 perf_swevent_start_hrtimer(event);
64533
64534 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64535 event->parent = parent_event;
64536
64537 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64538 - event->id = atomic64_inc_return(&perf_event_id);
64539 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64540
64541 event->state = PERF_EVENT_STATE_INACTIVE;
64542
64543 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64544 if (child_event->attr.inherit_stat)
64545 perf_event_read_event(child_event, child);
64546
64547 - child_val = atomic64_read(&child_event->count);
64548 + child_val = atomic64_read_unchecked(&child_event->count);
64549
64550 /*
64551 * Add back the child's count to the parent's count:
64552 */
64553 - atomic64_add(child_val, &parent_event->count);
64554 - atomic64_add(child_event->total_time_enabled,
64555 + atomic64_add_unchecked(child_val, &parent_event->count);
64556 + atomic64_add_unchecked(child_event->total_time_enabled,
64557 &parent_event->child_total_time_enabled);
64558 - atomic64_add(child_event->total_time_running,
64559 + atomic64_add_unchecked(child_event->total_time_running,
64560 &parent_event->child_total_time_running);
64561
64562 /*
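
The kernel/perf_event.c hunks above convert the event counters from atomic64_t to atomic64_unchecked_t. By the naming, the _unchecked variants are plain wrapping counters that opt out of whatever overflow detection the hardened atomic helpers gain, which fits statistics and id counters where wrap-around is not a reference-count bug. A compilable sketch of that shape using GCC/Clang __atomic builtins; the _example type and helpers are assumptions for illustration, not the PaX definitions.

#include <stdio.h>

typedef struct {
	long long counter;
} atomic64_unchecked_example_t;

static inline long long atomic64_read_unchecked_example(const atomic64_unchecked_example_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic64_add_unchecked_example(long long i, atomic64_unchecked_example_t *v)
{
	/* no overflow/saturation check here, by design */
	__atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

static inline long long atomic64_inc_return_unchecked_example(atomic64_unchecked_example_t *v)
{
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic64_unchecked_example_t id = { 0 };

	atomic64_add_unchecked_example(5, &id);
	printf("next id: %lld\n", atomic64_inc_return_unchecked_example(&id));
	printf("value:   %lld\n", atomic64_read_unchecked_example(&id));
	return 0;
}
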
64563 diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64564 --- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64565 +++ linux-2.6.32.45/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
64566 @@ -33,6 +33,7 @@
64567 #include <linux/rculist.h>
64568 #include <linux/bootmem.h>
64569 #include <linux/hash.h>
64570 +#include <linux/security.h>
64571 #include <linux/pid_namespace.h>
64572 #include <linux/init_task.h>
64573 #include <linux/syscalls.h>
64574 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64575
64576 int pid_max = PID_MAX_DEFAULT;
64577
64578 -#define RESERVED_PIDS 300
64579 +#define RESERVED_PIDS 500
64580
64581 int pid_max_min = RESERVED_PIDS + 1;
64582 int pid_max_max = PID_MAX_LIMIT;
64583 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64584 */
64585 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64586 {
64587 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64588 + struct task_struct *task;
64589 +
64590 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64591 +
64592 + if (gr_pid_is_chrooted(task))
64593 + return NULL;
64594 +
64595 + return task;
64596 }
64597
64598 struct task_struct *find_task_by_vpid(pid_t vnr)
64599 @@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
64600 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64601 }
64602
64603 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64604 +{
64605 + struct task_struct *task;
64606 +
64607 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64608 +}
64609 +
64610 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64611 {
64612 struct pid *pid;
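
The kernel/pid.c hunk above routes find_task_by_pid_ns() through gr_pid_is_chrooted(), so a chrooted caller no longer sees tasks it should not, while find_task_by_vpid_unrestricted() keeps an unfiltered lookup for trusted internal callers. A simulated userspace sketch of that filtered/unrestricted split; the task table, the policy check and every name here are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_task {
	int pid;
	bool visible_to_caller;	/* stands in for the gr_pid_is_chrooted() policy */
};

static struct fake_task table[] = {
	{ 1, false }, { 100, true }, { 200, false },
};

static struct fake_task *lookup_raw(int pid)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].pid == pid)
			return &table[i];
	return NULL;
}

static struct fake_task *find_task_example(int pid)
{
	struct fake_task *task = lookup_raw(pid);

	if (task && !task->visible_to_caller)	/* policy filter */
		return NULL;
	return task;
}

static struct fake_task *find_task_unrestricted_example(int pid)
{
	return lookup_raw(pid);			/* no policy filter */
}

int main(void)
{
	printf("filtered lookup of pid 200:     %p\n", (void *)find_task_example(200));
	printf("unrestricted lookup of pid 200: %p\n", (void *)find_task_unrestricted_example(200));
	return 0;
}
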
64613 diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64614 --- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64615 +++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64616 @@ -6,6 +6,7 @@
64617 #include <linux/posix-timers.h>
64618 #include <linux/errno.h>
64619 #include <linux/math64.h>
64620 +#include <linux/security.h>
64621 #include <asm/uaccess.h>
64622 #include <linux/kernel_stat.h>
64623 #include <trace/events/timer.h>
64624 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64625
64626 static __init int init_posix_cpu_timers(void)
64627 {
64628 - struct k_clock process = {
64629 + static struct k_clock process = {
64630 .clock_getres = process_cpu_clock_getres,
64631 .clock_get = process_cpu_clock_get,
64632 .clock_set = do_posix_clock_nosettime,
64633 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64634 .nsleep = process_cpu_nsleep,
64635 .nsleep_restart = process_cpu_nsleep_restart,
64636 };
64637 - struct k_clock thread = {
64638 + static struct k_clock thread = {
64639 .clock_getres = thread_cpu_clock_getres,
64640 .clock_get = thread_cpu_clock_get,
64641 .clock_set = do_posix_clock_nosettime,
64642 diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64643 --- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64644 +++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
64645 @@ -42,6 +42,7 @@
64646 #include <linux/compiler.h>
64647 #include <linux/idr.h>
64648 #include <linux/posix-timers.h>
64649 +#include <linux/grsecurity.h>
64650 #include <linux/syscalls.h>
64651 #include <linux/wait.h>
64652 #include <linux/workqueue.h>
64653 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64654 * which we beg off on and pass to do_sys_settimeofday().
64655 */
64656
64657 -static struct k_clock posix_clocks[MAX_CLOCKS];
64658 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64659
64660 /*
64661 * These ones are defined below.
64662 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64663 */
64664 #define CLOCK_DISPATCH(clock, call, arglist) \
64665 ((clock) < 0 ? posix_cpu_##call arglist : \
64666 - (posix_clocks[clock].call != NULL \
64667 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64668 + (posix_clocks[clock]->call != NULL \
64669 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64670
64671 /*
64672 * Default clock hook functions when the struct k_clock passed
64673 @@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64674 struct timespec *tp)
64675 {
64676 tp->tv_sec = 0;
64677 - tp->tv_nsec = posix_clocks[which_clock].res;
64678 + tp->tv_nsec = posix_clocks[which_clock]->res;
64679 return 0;
64680 }
64681
64682 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64683 return 0;
64684 if ((unsigned) which_clock >= MAX_CLOCKS)
64685 return 1;
64686 - if (posix_clocks[which_clock].clock_getres != NULL)
64687 + if (!posix_clocks[which_clock])
64688 return 0;
64689 - if (posix_clocks[which_clock].res != 0)
64690 + if (posix_clocks[which_clock]->clock_getres != NULL)
64691 + return 0;
64692 + if (posix_clocks[which_clock]->res != 0)
64693 return 0;
64694 return 1;
64695 }
64696 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64697 */
64698 static __init int init_posix_timers(void)
64699 {
64700 - struct k_clock clock_realtime = {
64701 + static struct k_clock clock_realtime = {
64702 .clock_getres = hrtimer_get_res,
64703 };
64704 - struct k_clock clock_monotonic = {
64705 + static struct k_clock clock_monotonic = {
64706 .clock_getres = hrtimer_get_res,
64707 .clock_get = posix_ktime_get_ts,
64708 .clock_set = do_posix_clock_nosettime,
64709 };
64710 - struct k_clock clock_monotonic_raw = {
64711 + static struct k_clock clock_monotonic_raw = {
64712 .clock_getres = hrtimer_get_res,
64713 .clock_get = posix_get_monotonic_raw,
64714 .clock_set = do_posix_clock_nosettime,
64715 .timer_create = no_timer_create,
64716 .nsleep = no_nsleep,
64717 };
64718 - struct k_clock clock_realtime_coarse = {
64719 + static struct k_clock clock_realtime_coarse = {
64720 .clock_getres = posix_get_coarse_res,
64721 .clock_get = posix_get_realtime_coarse,
64722 .clock_set = do_posix_clock_nosettime,
64723 .timer_create = no_timer_create,
64724 .nsleep = no_nsleep,
64725 };
64726 - struct k_clock clock_monotonic_coarse = {
64727 + static struct k_clock clock_monotonic_coarse = {
64728 .clock_getres = posix_get_coarse_res,
64729 .clock_get = posix_get_monotonic_coarse,
64730 .clock_set = do_posix_clock_nosettime,
64731 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64732 .nsleep = no_nsleep,
64733 };
64734
64735 + pax_track_stack();
64736 +
64737 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64738 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64739 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64740 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64741 return;
64742 }
64743
64744 - posix_clocks[clock_id] = *new_clock;
64745 + posix_clocks[clock_id] = new_clock;
64746 }
64747 EXPORT_SYMBOL_GPL(register_posix_clock);
64748
64749 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64750 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64751 return -EFAULT;
64752
64753 + /* only the CLOCK_REALTIME clock can be set, all other clocks
64754 + have their clock_set fptr set to a nosettime dummy function
64755 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64756 + call common_clock_set, which calls do_sys_settimeofday, which
64757 + we hook
64758 + */
64759 +
64760 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64761 }
64762
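
The posix-cpu-timers.c and posix-timers.c hunks above make every registered k_clock static and turn posix_clocks[] into an array of pointers, so register_posix_clock() stores a pointer to an object with static storage duration instead of copying a caller's struct into a writable global table. A small compilable sketch of that registration pattern; all names are illustrative.

#include <stdio.h>

struct clock_ops_example {
	int (*getres)(void);
};

static int dummy_getres(void)
{
	return 1;
}

#define MAX_CLOCKS_EXAMPLE 4
static const struct clock_ops_example *clock_table[MAX_CLOCKS_EXAMPLE];

static void register_clock_example(int id, const struct clock_ops_example *ops)
{
	if (id >= 0 && id < MAX_CLOCKS_EXAMPLE)
		clock_table[id] = ops;	/* store the pointer, do not copy the struct */
}

int main(void)
{
	static const struct clock_ops_example monotonic = { .getres = dummy_getres };

	register_clock_example(0, &monotonic);
	printf("getres -> %d\n", clock_table[0]->getres());
	return 0;
}
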
64763 diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64764 --- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64765 +++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64766 @@ -48,14 +48,14 @@ enum {
64767
64768 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64769
64770 -static struct platform_hibernation_ops *hibernation_ops;
64771 +static const struct platform_hibernation_ops *hibernation_ops;
64772
64773 /**
64774 * hibernation_set_ops - set the global hibernate operations
64775 * @ops: the hibernation operations to use in subsequent hibernation transitions
64776 */
64777
64778 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
64779 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64780 {
64781 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64782 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64783 diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64784 --- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64785 +++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64786 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64787 .enable_mask = SYSRQ_ENABLE_BOOT,
64788 };
64789
64790 -static int pm_sysrq_init(void)
64791 +static int __init pm_sysrq_init(void)
64792 {
64793 register_sysrq_key('o', &sysrq_poweroff_op);
64794 return 0;
64795 diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64796 --- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64797 +++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64798 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64799 struct timeval start, end;
64800 u64 elapsed_csecs64;
64801 unsigned int elapsed_csecs;
64802 + bool timedout = false;
64803
64804 do_gettimeofday(&start);
64805
64806 end_time = jiffies + TIMEOUT;
64807 do {
64808 todo = 0;
64809 + if (time_after(jiffies, end_time))
64810 + timedout = true;
64811 read_lock(&tasklist_lock);
64812 do_each_thread(g, p) {
64813 if (frozen(p) || !freezeable(p))
64814 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64815 * It is "frozen enough". If the task does wake
64816 * up, it will immediately call try_to_freeze.
64817 */
64818 - if (!task_is_stopped_or_traced(p) &&
64819 - !freezer_should_skip(p))
64820 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64821 todo++;
64822 + if (timedout) {
64823 + printk(KERN_ERR "Task refusing to freeze:\n");
64824 + sched_show_task(p);
64825 + }
64826 + }
64827 } while_each_thread(g, p);
64828 read_unlock(&tasklist_lock);
64829 yield(); /* Yield is okay here */
64830 - if (time_after(jiffies, end_time))
64831 - break;
64832 - } while (todo);
64833 + } while (todo && !timedout);
64834
64835 do_gettimeofday(&end);
64836 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
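
The kernel/power/process.c hunk above reworks try_to_freeze_tasks() so that, instead of breaking out silently when the timeout expires, the loop runs one final pass with timedout set and prints every task still refusing to freeze before giving up. A standalone sketch of that loop structure with a simulated task list and clock; all names and the fake scheduler behaviour are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_task {
	const char *name;
	bool frozen;
};

int main(void)
{
	struct fake_task tasks[] = {
		{ "worker-a", false }, { "worker-b", true }, { "stuck-task", false },
	};
	int ticks = 0, deadline = 3;
	int todo;
	bool timedout = false;

	do {
		todo = 0;
		if (ticks >= deadline)
			timedout = true;
		for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
			/* pretend worker-a freezes after one pass, stuck-task never does */
			if (ticks >= 1 && i == 0)
				tasks[i].frozen = true;
			if (!tasks[i].frozen) {
				todo++;
				if (timedout)
					printf("Task refusing to freeze: %s\n", tasks[i].name);
			}
		}
		ticks++;
	} while (todo && !timedout);

	return todo ? 1 : 0;
}
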
64837 diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64838 --- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64839 +++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64840 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64841 [PM_SUSPEND_MEM] = "mem",
64842 };
64843
64844 -static struct platform_suspend_ops *suspend_ops;
64845 +static const struct platform_suspend_ops *suspend_ops;
64846
64847 /**
64848 * suspend_set_ops - Set the global suspend method table.
64849 * @ops: Pointer to ops structure.
64850 */
64851 -void suspend_set_ops(struct platform_suspend_ops *ops)
64852 +void suspend_set_ops(const struct platform_suspend_ops *ops)
64853 {
64854 mutex_lock(&pm_mutex);
64855 suspend_ops = ops;
64856 diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64857 --- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64858 +++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64859 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64860 char c;
64861 int error = 0;
64862
64863 +#ifdef CONFIG_GRKERNSEC_DMESG
64864 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64865 + return -EPERM;
64866 +#endif
64867 +
64868 error = security_syslog(type);
64869 if (error)
64870 return error;
64871 diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64872 --- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64873 +++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64874 @@ -39,7 +39,7 @@ struct profile_hit {
64875 /* Oprofile timer tick hook */
64876 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64877
64878 -static atomic_t *prof_buffer;
64879 +static atomic_unchecked_t *prof_buffer;
64880 static unsigned long prof_len, prof_shift;
64881
64882 int prof_on __read_mostly;
64883 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64884 hits[i].pc = 0;
64885 continue;
64886 }
64887 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64888 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64889 hits[i].hits = hits[i].pc = 0;
64890 }
64891 }
64892 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64893 * Add the current hit(s) and flush the write-queue out
64894 * to the global buffer:
64895 */
64896 - atomic_add(nr_hits, &prof_buffer[pc]);
64897 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64898 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64899 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64900 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64901 hits[i].pc = hits[i].hits = 0;
64902 }
64903 out:
64904 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64905 if (prof_on != type || !prof_buffer)
64906 return;
64907 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64908 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64909 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64910 }
64911 #endif /* !CONFIG_SMP */
64912 EXPORT_SYMBOL_GPL(profile_hits);
64913 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64914 return -EFAULT;
64915 buf++; p++; count--; read++;
64916 }
64917 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64918 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64919 if (copy_to_user(buf, (void *)pnt, count))
64920 return -EFAULT;
64921 read += count;
64922 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64923 }
64924 #endif
64925 profile_discard_flip_buffers();
64926 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64927 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64928 return count;
64929 }
64930
64931 diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64932 --- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64933 +++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64934 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64935 return ret;
64936 }
64937
64938 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64939 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64940 + unsigned int log)
64941 {
64942 const struct cred *cred = current_cred(), *tcred;
64943
64944 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64945 cred->gid != tcred->egid ||
64946 cred->gid != tcred->sgid ||
64947 cred->gid != tcred->gid) &&
64948 - !capable(CAP_SYS_PTRACE)) {
64949 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64950 + (log && !capable(CAP_SYS_PTRACE)))
64951 + ) {
64952 rcu_read_unlock();
64953 return -EPERM;
64954 }
64955 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64956 smp_rmb();
64957 if (task->mm)
64958 dumpable = get_dumpable(task->mm);
64959 - if (!dumpable && !capable(CAP_SYS_PTRACE))
64960 + if (!dumpable &&
64961 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64962 + (log && !capable(CAP_SYS_PTRACE))))
64963 return -EPERM;
64964
64965 return security_ptrace_access_check(task, mode);
64966 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64967 {
64968 int err;
64969 task_lock(task);
64970 - err = __ptrace_may_access(task, mode);
64971 + err = __ptrace_may_access(task, mode, 0);
64972 + task_unlock(task);
64973 + return !err;
64974 +}
64975 +
64976 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64977 +{
64978 + int err;
64979 + task_lock(task);
64980 + err = __ptrace_may_access(task, mode, 1);
64981 task_unlock(task);
64982 return !err;
64983 }
64984 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64985 goto out;
64986
64987 task_lock(task);
64988 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64989 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64990 task_unlock(task);
64991 if (retval)
64992 goto unlock_creds;
64993 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64994 goto unlock_tasklist;
64995
64996 task->ptrace = PT_PTRACED;
64997 - if (capable(CAP_SYS_PTRACE))
64998 + if (capable_nolog(CAP_SYS_PTRACE))
64999 task->ptrace |= PT_PTRACE_CAP;
65000
65001 __ptrace_link(task, current);
65002 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
65003 {
65004 int copied = 0;
65005
65006 + pax_track_stack();
65007 +
65008 while (len > 0) {
65009 char buf[128];
65010 int this_len, retval;
65011 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
65012 {
65013 int copied = 0;
65014
65015 + pax_track_stack();
65016 +
65017 while (len > 0) {
65018 char buf[128];
65019 int this_len, retval;
65020 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
65021 int ret = -EIO;
65022 siginfo_t siginfo;
65023
65024 + pax_track_stack();
65025 +
65026 switch (request) {
65027 case PTRACE_PEEKTEXT:
65028 case PTRACE_PEEKDATA:
65029 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
65030 ret = ptrace_setoptions(child, data);
65031 break;
65032 case PTRACE_GETEVENTMSG:
65033 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
65034 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
65035 break;
65036
65037 case PTRACE_GETSIGINFO:
65038 ret = ptrace_getsiginfo(child, &siginfo);
65039 if (!ret)
65040 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
65041 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
65042 &siginfo);
65043 break;
65044
65045 case PTRACE_SETSIGINFO:
65046 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
65047 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
65048 sizeof siginfo))
65049 ret = -EFAULT;
65050 else
65051 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65052 goto out;
65053 }
65054
65055 + if (gr_handle_ptrace(child, request)) {
65056 + ret = -EPERM;
65057 + goto out_put_task_struct;
65058 + }
65059 +
65060 if (request == PTRACE_ATTACH) {
65061 ret = ptrace_attach(child);
65062 /*
65063 * Some architectures need to do book-keeping after
65064 * a ptrace attach.
65065 */
65066 - if (!ret)
65067 + if (!ret) {
65068 arch_ptrace_attach(child);
65069 + gr_audit_ptrace(child);
65070 + }
65071 goto out_put_task_struct;
65072 }
65073
65074 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65075 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65076 if (copied != sizeof(tmp))
65077 return -EIO;
65078 - return put_user(tmp, (unsigned long __user *)data);
65079 + return put_user(tmp, (__force unsigned long __user *)data);
65080 }
65081
65082 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65083 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65084 siginfo_t siginfo;
65085 int ret;
65086
65087 + pax_track_stack();
65088 +
65089 switch (request) {
65090 case PTRACE_PEEKTEXT:
65091 case PTRACE_PEEKDATA:
65092 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65093 goto out;
65094 }
65095
65096 + if (gr_handle_ptrace(child, request)) {
65097 + ret = -EPERM;
65098 + goto out_put_task_struct;
65099 + }
65100 +
65101 if (request == PTRACE_ATTACH) {
65102 ret = ptrace_attach(child);
65103 /*
65104 * Some architectures need to do book-keeping after
65105 * a ptrace attach.
65106 */
65107 - if (!ret)
65108 + if (!ret) {
65109 arch_ptrace_attach(child);
65110 + gr_audit_ptrace(child);
65111 + }
65112 goto out_put_task_struct;
65113 }
65114
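
The kernel/ptrace.c hunks above give __ptrace_may_access() a log flag so capability failures are audited only on the paths that want auditing (ptrace_may_access_log(), ptrace_attach()), while existing callers keep a quiet check via capable_nolog(). A sketch of that two-entry-point pattern around a shared core; the stubbed capability check and all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool have_capability(bool log)
{
	bool ok = false;	/* pretend the caller lacks the capability */

	if (!ok && log)
		fprintf(stderr, "audit: capability check failed\n");
	return ok;
}

static int may_access_example(int target_uid, int my_uid, bool log)
{
	if (my_uid == target_uid)
		return 0;
	return have_capability(log) ? 0 : -1;	/* -EPERM analogue */
}

static int may_access_quiet(int target_uid, int my_uid)
{
	return may_access_example(target_uid, my_uid, false);
}

static int may_access_logged(int target_uid, int my_uid)
{
	return may_access_example(target_uid, my_uid, true);
}

int main(void)
{
	printf("quiet:  %d\n", may_access_quiet(0, 1000));
	printf("logged: %d\n", may_access_logged(0, 1000));
	return 0;
}
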
65115 diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
65116 --- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65117 +++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65118 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65119 { 0 };
65120 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65121 { 0 };
65122 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65123 -static atomic_t n_rcu_torture_alloc;
65124 -static atomic_t n_rcu_torture_alloc_fail;
65125 -static atomic_t n_rcu_torture_free;
65126 -static atomic_t n_rcu_torture_mberror;
65127 -static atomic_t n_rcu_torture_error;
65128 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65129 +static atomic_unchecked_t n_rcu_torture_alloc;
65130 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65131 +static atomic_unchecked_t n_rcu_torture_free;
65132 +static atomic_unchecked_t n_rcu_torture_mberror;
65133 +static atomic_unchecked_t n_rcu_torture_error;
65134 static long n_rcu_torture_timers;
65135 static struct list_head rcu_torture_removed;
65136 static cpumask_var_t shuffle_tmp_mask;
65137 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65138
65139 spin_lock_bh(&rcu_torture_lock);
65140 if (list_empty(&rcu_torture_freelist)) {
65141 - atomic_inc(&n_rcu_torture_alloc_fail);
65142 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65143 spin_unlock_bh(&rcu_torture_lock);
65144 return NULL;
65145 }
65146 - atomic_inc(&n_rcu_torture_alloc);
65147 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65148 p = rcu_torture_freelist.next;
65149 list_del_init(p);
65150 spin_unlock_bh(&rcu_torture_lock);
65151 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65152 static void
65153 rcu_torture_free(struct rcu_torture *p)
65154 {
65155 - atomic_inc(&n_rcu_torture_free);
65156 + atomic_inc_unchecked(&n_rcu_torture_free);
65157 spin_lock_bh(&rcu_torture_lock);
65158 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65159 spin_unlock_bh(&rcu_torture_lock);
65160 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65161 i = rp->rtort_pipe_count;
65162 if (i > RCU_TORTURE_PIPE_LEN)
65163 i = RCU_TORTURE_PIPE_LEN;
65164 - atomic_inc(&rcu_torture_wcount[i]);
65165 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65166 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65167 rp->rtort_mbtest = 0;
65168 rcu_torture_free(rp);
65169 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65170 i = rp->rtort_pipe_count;
65171 if (i > RCU_TORTURE_PIPE_LEN)
65172 i = RCU_TORTURE_PIPE_LEN;
65173 - atomic_inc(&rcu_torture_wcount[i]);
65174 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65175 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65176 rp->rtort_mbtest = 0;
65177 list_del(&rp->rtort_free);
65178 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65179 i = old_rp->rtort_pipe_count;
65180 if (i > RCU_TORTURE_PIPE_LEN)
65181 i = RCU_TORTURE_PIPE_LEN;
65182 - atomic_inc(&rcu_torture_wcount[i]);
65183 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65184 old_rp->rtort_pipe_count++;
65185 cur_ops->deferred_free(old_rp);
65186 }
65187 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65188 return;
65189 }
65190 if (p->rtort_mbtest == 0)
65191 - atomic_inc(&n_rcu_torture_mberror);
65192 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65193 spin_lock(&rand_lock);
65194 cur_ops->read_delay(&rand);
65195 n_rcu_torture_timers++;
65196 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65197 continue;
65198 }
65199 if (p->rtort_mbtest == 0)
65200 - atomic_inc(&n_rcu_torture_mberror);
65201 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65202 cur_ops->read_delay(&rand);
65203 preempt_disable();
65204 pipe_count = p->rtort_pipe_count;
65205 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65206 rcu_torture_current,
65207 rcu_torture_current_version,
65208 list_empty(&rcu_torture_freelist),
65209 - atomic_read(&n_rcu_torture_alloc),
65210 - atomic_read(&n_rcu_torture_alloc_fail),
65211 - atomic_read(&n_rcu_torture_free),
65212 - atomic_read(&n_rcu_torture_mberror),
65213 + atomic_read_unchecked(&n_rcu_torture_alloc),
65214 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65215 + atomic_read_unchecked(&n_rcu_torture_free),
65216 + atomic_read_unchecked(&n_rcu_torture_mberror),
65217 n_rcu_torture_timers);
65218 - if (atomic_read(&n_rcu_torture_mberror) != 0)
65219 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65220 cnt += sprintf(&page[cnt], " !!!");
65221 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65222 if (i > 1) {
65223 cnt += sprintf(&page[cnt], "!!! ");
65224 - atomic_inc(&n_rcu_torture_error);
65225 + atomic_inc_unchecked(&n_rcu_torture_error);
65226 WARN_ON_ONCE(1);
65227 }
65228 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65229 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65230 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65231 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65232 cnt += sprintf(&page[cnt], " %d",
65233 - atomic_read(&rcu_torture_wcount[i]));
65234 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65235 }
65236 cnt += sprintf(&page[cnt], "\n");
65237 if (cur_ops->stats)
65238 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65239
65240 if (cur_ops->cleanup)
65241 cur_ops->cleanup();
65242 - if (atomic_read(&n_rcu_torture_error))
65243 + if (atomic_read_unchecked(&n_rcu_torture_error))
65244 rcu_torture_print_module_parms("End of test: FAILURE");
65245 else
65246 rcu_torture_print_module_parms("End of test: SUCCESS");
65247 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65248
65249 rcu_torture_current = NULL;
65250 rcu_torture_current_version = 0;
65251 - atomic_set(&n_rcu_torture_alloc, 0);
65252 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65253 - atomic_set(&n_rcu_torture_free, 0);
65254 - atomic_set(&n_rcu_torture_mberror, 0);
65255 - atomic_set(&n_rcu_torture_error, 0);
65256 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65257 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65258 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65259 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65260 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65261 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65262 - atomic_set(&rcu_torture_wcount[i], 0);
65263 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65264 for_each_possible_cpu(cpu) {
65265 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65266 per_cpu(rcu_torture_count, cpu)[i] = 0;
65267 diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65268 --- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65269 +++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65270 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65271 /*
65272 * Do softirq processing for the current CPU.
65273 */
65274 -static void rcu_process_callbacks(struct softirq_action *unused)
65275 +static void rcu_process_callbacks(void)
65276 {
65277 /*
65278 * Memory references from any prior RCU read-side critical sections
65279 diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65280 --- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65281 +++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65282 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65283 */
65284 void __rcu_read_lock(void)
65285 {
65286 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65287 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65288 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65289 }
65290 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65291 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65292 struct task_struct *t = current;
65293
65294 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65295 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65296 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65297 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65298 rcu_read_unlock_special(t);
65299 }
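
The rcutree_plugin.h hunk above writes rcu_read_lock_nesting through ACCESS_ONCE_RW() instead of ACCESS_ONCE(). That only makes sense alongside a hardened ACCESS_ONCE() that adds const so it can be used only for reads, leaving the _RW form for intentional writes; the sketch below assumes that shape. The _EX macro names are assumptions for illustration, not the kernel's definitions.

#include <stdio.h>

#define ACCESS_ONCE_EX(x)	(*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW_EX(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int nesting = 0;

	ACCESS_ONCE_RW_EX(nesting)++;				/* writes go through the RW form */
	printf("nesting = %d\n", ACCESS_ONCE_EX(nesting));	/* read-only form */
	/* ACCESS_ONCE_EX(nesting)++; would not compile: the lvalue is const */
	return 0;
}
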
65300 diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65301 --- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65302 +++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65303 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65304 unsigned int flags,
65305 int *nonpad_ret)
65306 {
65307 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65308 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65309 struct rchan_buf *rbuf = in->private_data;
65310 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65311 uint64_t pos = (uint64_t) *ppos;
65312 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65313 .ops = &relay_pipe_buf_ops,
65314 .spd_release = relay_page_release,
65315 };
65316 + ssize_t ret;
65317 +
65318 + pax_track_stack();
65319
65320 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65321 return 0;
65322 diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65323 --- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65324 +++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65325 @@ -132,8 +132,18 @@ static const struct file_operations proc
65326
65327 static int __init ioresources_init(void)
65328 {
65329 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65330 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65331 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65332 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65333 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65334 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65335 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65336 +#endif
65337 +#else
65338 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65339 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65340 +#endif
65341 return 0;
65342 }
65343 __initcall(ioresources_init);
65344 diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65345 --- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65346 +++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65347 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65348 */
65349 spin_lock_irqsave(&pendowner->pi_lock, flags);
65350
65351 - WARN_ON(!pendowner->pi_blocked_on);
65352 + BUG_ON(!pendowner->pi_blocked_on);
65353 WARN_ON(pendowner->pi_blocked_on != waiter);
65354 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65355
65356 diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65357 --- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65358 +++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65359 @@ -21,7 +21,7 @@
65360 #define MAX_RT_TEST_MUTEXES 8
65361
65362 static spinlock_t rttest_lock;
65363 -static atomic_t rttest_event;
65364 +static atomic_unchecked_t rttest_event;
65365
65366 struct test_thread_data {
65367 int opcode;
65368 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65369
65370 case RTTEST_LOCKCONT:
65371 td->mutexes[td->opdata] = 1;
65372 - td->event = atomic_add_return(1, &rttest_event);
65373 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65374 return 0;
65375
65376 case RTTEST_RESET:
65377 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65378 return 0;
65379
65380 case RTTEST_RESETEVENT:
65381 - atomic_set(&rttest_event, 0);
65382 + atomic_set_unchecked(&rttest_event, 0);
65383 return 0;
65384
65385 default:
65386 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65387 return ret;
65388
65389 td->mutexes[id] = 1;
65390 - td->event = atomic_add_return(1, &rttest_event);
65391 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65392 rt_mutex_lock(&mutexes[id]);
65393 - td->event = atomic_add_return(1, &rttest_event);
65394 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65395 td->mutexes[id] = 4;
65396 return 0;
65397
65398 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65399 return ret;
65400
65401 td->mutexes[id] = 1;
65402 - td->event = atomic_add_return(1, &rttest_event);
65403 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65404 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65405 - td->event = atomic_add_return(1, &rttest_event);
65406 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65407 td->mutexes[id] = ret ? 0 : 4;
65408 return ret ? -EINTR : 0;
65409
65410 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65411 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65412 return ret;
65413
65414 - td->event = atomic_add_return(1, &rttest_event);
65415 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65416 rt_mutex_unlock(&mutexes[id]);
65417 - td->event = atomic_add_return(1, &rttest_event);
65418 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65419 td->mutexes[id] = 0;
65420 return 0;
65421
65422 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65423 break;
65424
65425 td->mutexes[dat] = 2;
65426 - td->event = atomic_add_return(1, &rttest_event);
65427 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65428 break;
65429
65430 case RTTEST_LOCKBKL:
65431 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65432 return;
65433
65434 td->mutexes[dat] = 3;
65435 - td->event = atomic_add_return(1, &rttest_event);
65436 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65437 break;
65438
65439 case RTTEST_LOCKNOWAIT:
65440 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65441 return;
65442
65443 td->mutexes[dat] = 1;
65444 - td->event = atomic_add_return(1, &rttest_event);
65445 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65446 return;
65447
65448 case RTTEST_LOCKBKL:
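
The rtmutex-tester.c hunks above, and the similar ones later in timer_stats.c, blktrace.c, trace_mmiotrace.c and trace_workqueue.c, retype pure event/statistics counters from atomic_t to atomic_unchecked_t. As far as I can tell this is tied to PaX's REFCOUNT hardening, under which ordinary atomic_t arithmetic refuses to overflow; the _unchecked variants opt out where wraparound is harmless. The user-space analogy below shows the intended difference; the type and function names mirror the patch's API, but the bodies are illustrative assumptions, not the kernel implementation.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked flavour: treat signed overflow as fatal, roughly what REFCOUNT enforces */
static int atomic_add_return(int i, atomic_t *v)
{
    int old = __sync_fetch_and_add(&v->counter, i);
    if (i > 0 && old > INT_MAX - i)   /* the add just wrapped */
        abort();
    return old + i;
}

/* unchecked flavour: plain atomic add, fine for statistics that may wrap */
static int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
    return __sync_add_and_fetch(&v->counter, i);
}

int main(void)
{
    atomic_t refs = { 1 };
    atomic_unchecked_t rttest_event = { 0 };

    printf("refs  = %d\n", atomic_add_return(1, &refs));
    printf("event = %d\n", atomic_add_return_unchecked(1, &rttest_event));
    return 0;
}
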
65449 diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65450 --- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65451 +++ linux-2.6.32.45/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
65452 @@ -5043,7 +5043,7 @@ out:
65453 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65454 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65455 */
65456 -static void run_rebalance_domains(struct softirq_action *h)
65457 +static void run_rebalance_domains(void)
65458 {
65459 int this_cpu = smp_processor_id();
65460 struct rq *this_rq = cpu_rq(this_cpu);
65461 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
65462 struct rq *rq;
65463 int cpu;
65464
65465 + pax_track_stack();
65466 +
65467 need_resched:
65468 preempt_disable();
65469 cpu = smp_processor_id();
65470 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
65471 * Look out! "owner" is an entirely speculative pointer
65472 * access and not reliable.
65473 */
65474 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65475 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65476 {
65477 unsigned int cpu;
65478 struct rq *rq;
65479 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
65480 * DEBUG_PAGEALLOC could have unmapped it if
65481 * the mutex owner just released it and exited.
65482 */
65483 - if (probe_kernel_address(&owner->cpu, cpu))
65484 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65485 return 0;
65486 #else
65487 - cpu = owner->cpu;
65488 + cpu = task_thread_info(owner)->cpu;
65489 #endif
65490
65491 /*
65492 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
65493 /*
65494 * Is that owner really running on that cpu?
65495 */
65496 - if (task_thread_info(rq->curr) != owner || need_resched())
65497 + if (rq->curr != owner || need_resched())
65498 return 0;
65499
65500 cpu_relax();
65501 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
65502 /* convert nice value [19,-20] to rlimit style value [1,40] */
65503 int nice_rlim = 20 - nice;
65504
65505 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65506 +
65507 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65508 capable(CAP_SYS_NICE));
65509 }
65510 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65511 if (nice > 19)
65512 nice = 19;
65513
65514 - if (increment < 0 && !can_nice(current, nice))
65515 + if (increment < 0 && (!can_nice(current, nice) ||
65516 + gr_handle_chroot_nice()))
65517 return -EPERM;
65518
65519 retval = security_task_setnice(current, nice);
65520 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
65521 long power;
65522 int weight;
65523
65524 - WARN_ON(!sd || !sd->groups);
65525 + BUG_ON(!sd || !sd->groups);
65526
65527 if (cpu != group_first_cpu(sd->groups))
65528 return;
65529 diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65530 --- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65531 +++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65532 @@ -41,12 +41,12 @@
65533
65534 static struct kmem_cache *sigqueue_cachep;
65535
65536 -static void __user *sig_handler(struct task_struct *t, int sig)
65537 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65538 {
65539 return t->sighand->action[sig - 1].sa.sa_handler;
65540 }
65541
65542 -static int sig_handler_ignored(void __user *handler, int sig)
65543 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65544 {
65545 /* Is it explicitly or implicitly ignored? */
65546 return handler == SIG_IGN ||
65547 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65548 static int sig_task_ignored(struct task_struct *t, int sig,
65549 int from_ancestor_ns)
65550 {
65551 - void __user *handler;
65552 + __sighandler_t handler;
65553
65554 handler = sig_handler(t, sig);
65555
65556 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65557 */
65558 user = get_uid(__task_cred(t)->user);
65559 atomic_inc(&user->sigpending);
65560 +
65561 + if (!override_rlimit)
65562 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65563 if (override_rlimit ||
65564 atomic_read(&user->sigpending) <=
65565 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65566 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65567
65568 int unhandled_signal(struct task_struct *tsk, int sig)
65569 {
65570 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65571 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65572 if (is_global_init(tsk))
65573 return 1;
65574 if (handler != SIG_IGN && handler != SIG_DFL)
65575 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65576 }
65577 }
65578
65579 + /* allow glibc communication via tgkill to other threads in our
65580 + thread group */
65581 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65582 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65583 + && gr_handle_signal(t, sig))
65584 + return -EPERM;
65585 +
65586 return security_task_kill(t, info, sig, 0);
65587 }
65588
65589 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65590 return send_signal(sig, info, p, 1);
65591 }
65592
65593 -static int
65594 +int
65595 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65596 {
65597 return send_signal(sig, info, t, 0);
65598 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65599 unsigned long int flags;
65600 int ret, blocked, ignored;
65601 struct k_sigaction *action;
65602 + int is_unhandled = 0;
65603
65604 spin_lock_irqsave(&t->sighand->siglock, flags);
65605 action = &t->sighand->action[sig-1];
65606 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65607 }
65608 if (action->sa.sa_handler == SIG_DFL)
65609 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65610 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65611 + is_unhandled = 1;
65612 ret = specific_send_sig_info(sig, info, t);
65613 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65614
65615 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
65616 + normal operation */
65617 + if (is_unhandled) {
65618 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65619 + gr_handle_crash(t, sig);
65620 + }
65621 +
65622 return ret;
65623 }
65624
65625 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65626 {
65627 int ret = check_kill_permission(sig, info, p);
65628
65629 - if (!ret && sig)
65630 + if (!ret && sig) {
65631 ret = do_send_sig_info(sig, info, p, true);
65632 + if (!ret)
65633 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65634 + }
65635
65636 return ret;
65637 }
65638 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65639 {
65640 siginfo_t info;
65641
65642 + pax_track_stack();
65643 +
65644 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65645
65646 memset(&info, 0, sizeof info);
65647 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65648 int error = -ESRCH;
65649
65650 rcu_read_lock();
65651 - p = find_task_by_vpid(pid);
65652 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65653 + /* allow glibc communication via tgkill to other threads in our
65654 + thread group */
65655 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65656 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65657 + p = find_task_by_vpid_unrestricted(pid);
65658 + else
65659 +#endif
65660 + p = find_task_by_vpid(pid);
65661 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65662 error = check_kill_permission(sig, info, p);
65663 /*
65664 diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65665 --- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65666 +++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65667 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65668 }
65669 EXPORT_SYMBOL(smp_call_function);
65670
65671 -void ipi_call_lock(void)
65672 +void ipi_call_lock(void) __acquires(call_function.lock)
65673 {
65674 spin_lock(&call_function.lock);
65675 }
65676
65677 -void ipi_call_unlock(void)
65678 +void ipi_call_unlock(void) __releases(call_function.lock)
65679 {
65680 spin_unlock(&call_function.lock);
65681 }
65682
65683 -void ipi_call_lock_irq(void)
65684 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65685 {
65686 spin_lock_irq(&call_function.lock);
65687 }
65688
65689 -void ipi_call_unlock_irq(void)
65690 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65691 {
65692 spin_unlock_irq(&call_function.lock);
65693 }
65694 diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65695 --- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65696 +++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65697 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65698
65699 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65700
65701 -char *softirq_to_name[NR_SOFTIRQS] = {
65702 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65703 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65704 "TASKLET", "SCHED", "HRTIMER", "RCU"
65705 };
65706 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65707
65708 asmlinkage void __do_softirq(void)
65709 {
65710 - struct softirq_action *h;
65711 + const struct softirq_action *h;
65712 __u32 pending;
65713 int max_restart = MAX_SOFTIRQ_RESTART;
65714 int cpu;
65715 @@ -233,7 +233,7 @@ restart:
65716 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65717
65718 trace_softirq_entry(h, softirq_vec);
65719 - h->action(h);
65720 + h->action();
65721 trace_softirq_exit(h, softirq_vec);
65722 if (unlikely(prev_count != preempt_count())) {
65723 printk(KERN_ERR "huh, entered softirq %td %s %p"
65724 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65725 local_irq_restore(flags);
65726 }
65727
65728 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65729 +void open_softirq(int nr, void (*action)(void))
65730 {
65731 - softirq_vec[nr].action = action;
65732 + pax_open_kernel();
65733 + *(void **)&softirq_vec[nr].action = action;
65734 + pax_close_kernel();
65735 }
65736
65737 /*
65738 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65739
65740 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65741
65742 -static void tasklet_action(struct softirq_action *a)
65743 +static void tasklet_action(void)
65744 {
65745 struct tasklet_struct *list;
65746
65747 @@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65748 }
65749 }
65750
65751 -static void tasklet_hi_action(struct softirq_action *a)
65752 +static void tasklet_hi_action(void)
65753 {
65754 struct tasklet_struct *list;
65755
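
In the softirq.c hunks above the handlers lose their unused struct softirq_action * argument and, more importantly, open_softirq() now writes the handler pointer between pax_open_kernel() and pax_close_kernel(). That pairing suggests softirq_vec is kept read-only and must be opened only for the brief moment of registration. Below is a user-space sketch of the same pattern using mprotect; the helper names echo the patch, but the mechanism shown is only an analogy for updating a normally read-only function-pointer table, not how the kernel side actually works.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct softirq_action { void (*action)(void); };

static struct softirq_action *softirq_vec;   /* one page, normally read-only */
static size_t vec_bytes;

static void pax_open_kernel(void)  { mprotect(softirq_vec, vec_bytes, PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void) { mprotect(softirq_vec, vec_bytes, PROT_READ); }

static void open_softirq(int nr, void (*action)(void))
{
    pax_open_kernel();                 /* briefly make the vector writable */
    softirq_vec[nr].action = action;
    pax_close_kernel();                /* and seal it again */
}

static void run_timer_softirq(void) { puts("timer softirq ran"); }

int main(void)
{
    vec_bytes = (size_t)sysconf(_SC_PAGESIZE);
    softirq_vec = mmap(NULL, vec_bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (softirq_vec == MAP_FAILED)
        return 1;
    memset(softirq_vec, 0, vec_bytes);
    pax_close_kernel();                /* the table starts out read-only */

    open_softirq(1, run_timer_softirq);
    softirq_vec[1].action();           /* handlers are now called with no argument */
    return 0;
}
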
65756 diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65757 --- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65758 +++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65759 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65760 error = -EACCES;
65761 goto out;
65762 }
65763 +
65764 + if (gr_handle_chroot_setpriority(p, niceval)) {
65765 + error = -EACCES;
65766 + goto out;
65767 + }
65768 +
65769 no_nice = security_task_setnice(p, niceval);
65770 if (no_nice) {
65771 error = no_nice;
65772 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65773 !(user = find_user(who)))
65774 goto out_unlock; /* No processes for this user */
65775
65776 - do_each_thread(g, p)
65777 + do_each_thread(g, p) {
65778 if (__task_cred(p)->uid == who)
65779 error = set_one_prio(p, niceval, error);
65780 - while_each_thread(g, p);
65781 + } while_each_thread(g, p);
65782 if (who != cred->uid)
65783 free_uid(user); /* For find_user() */
65784 break;
65785 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65786 !(user = find_user(who)))
65787 goto out_unlock; /* No processes for this user */
65788
65789 - do_each_thread(g, p)
65790 + do_each_thread(g, p) {
65791 if (__task_cred(p)->uid == who) {
65792 niceval = 20 - task_nice(p);
65793 if (niceval > retval)
65794 retval = niceval;
65795 }
65796 - while_each_thread(g, p);
65797 + } while_each_thread(g, p);
65798 if (who != cred->uid)
65799 free_uid(user); /* for find_user() */
65800 break;
65801 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65802 goto error;
65803 }
65804
65805 + if (gr_check_group_change(new->gid, new->egid, -1))
65806 + goto error;
65807 +
65808 if (rgid != (gid_t) -1 ||
65809 (egid != (gid_t) -1 && egid != old->gid))
65810 new->sgid = new->egid;
65811 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65812 goto error;
65813
65814 retval = -EPERM;
65815 +
65816 + if (gr_check_group_change(gid, gid, gid))
65817 + goto error;
65818 +
65819 if (capable(CAP_SETGID))
65820 new->gid = new->egid = new->sgid = new->fsgid = gid;
65821 else if (gid == old->gid || gid == old->sgid)
65822 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65823 if (!new_user)
65824 return -EAGAIN;
65825
65826 + /*
65827 + * We don't fail in case of NPROC limit excess here because too many
65828 + * poorly written programs don't check set*uid() return code, assuming
65829 + * it never fails if called by root. We may still enforce NPROC limit
65830 + * for programs doing set*uid()+execve() by harmlessly deferring the
65831 + * failure to the execve() stage.
65832 + */
65833 if (atomic_read(&new_user->processes) >=
65834 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65835 - new_user != INIT_USER) {
65836 - free_uid(new_user);
65837 - return -EAGAIN;
65838 - }
65839 + new_user != INIT_USER)
65840 + current->flags |= PF_NPROC_EXCEEDED;
65841 + else
65842 + current->flags &= ~PF_NPROC_EXCEEDED;
65843
65844 free_uid(new->user);
65845 new->user = new_user;
65846 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65847 goto error;
65848 }
65849
65850 + if (gr_check_user_change(new->uid, new->euid, -1))
65851 + goto error;
65852 +
65853 if (new->uid != old->uid) {
65854 retval = set_user(new);
65855 if (retval < 0)
65856 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65857 goto error;
65858
65859 retval = -EPERM;
65860 +
65861 + if (gr_check_crash_uid(uid))
65862 + goto error;
65863 + if (gr_check_user_change(uid, uid, uid))
65864 + goto error;
65865 +
65866 if (capable(CAP_SETUID)) {
65867 new->suid = new->uid = uid;
65868 if (uid != old->uid) {
65869 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65870 goto error;
65871 }
65872
65873 + if (gr_check_user_change(ruid, euid, -1))
65874 + goto error;
65875 +
65876 if (ruid != (uid_t) -1) {
65877 new->uid = ruid;
65878 if (ruid != old->uid) {
65879 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65880 goto error;
65881 }
65882
65883 + if (gr_check_group_change(rgid, egid, -1))
65884 + goto error;
65885 +
65886 if (rgid != (gid_t) -1)
65887 new->gid = rgid;
65888 if (egid != (gid_t) -1)
65889 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65890 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65891 goto error;
65892
65893 + if (gr_check_user_change(-1, -1, uid))
65894 + goto error;
65895 +
65896 if (uid == old->uid || uid == old->euid ||
65897 uid == old->suid || uid == old->fsuid ||
65898 capable(CAP_SETUID)) {
65899 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65900 if (gid == old->gid || gid == old->egid ||
65901 gid == old->sgid || gid == old->fsgid ||
65902 capable(CAP_SETGID)) {
65903 + if (gr_check_group_change(-1, -1, gid))
65904 + goto error;
65905 +
65906 if (gid != old_fsgid) {
65907 new->fsgid = gid;
65908 goto change_okay;
65909 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65910 error = get_dumpable(me->mm);
65911 break;
65912 case PR_SET_DUMPABLE:
65913 - if (arg2 < 0 || arg2 > 1) {
65914 + if (arg2 > 1) {
65915 error = -EINVAL;
65916 break;
65917 }
65918 diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65919 --- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65920 +++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65921 @@ -63,6 +63,13 @@
65922 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65923
65924 #if defined(CONFIG_SYSCTL)
65925 +#include <linux/grsecurity.h>
65926 +#include <linux/grinternal.h>
65927 +
65928 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65929 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65930 + const int op);
65931 +extern int gr_handle_chroot_sysctl(const int op);
65932
65933 /* External variables not in a header file. */
65934 extern int C_A_D;
65935 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65936 static int proc_taint(struct ctl_table *table, int write,
65937 void __user *buffer, size_t *lenp, loff_t *ppos);
65938 #endif
65939 +extern ctl_table grsecurity_table[];
65940
65941 static struct ctl_table root_table[];
65942 static struct ctl_table_root sysctl_table_root;
65943 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65944 int sysctl_legacy_va_layout;
65945 #endif
65946
65947 +#ifdef CONFIG_PAX_SOFTMODE
65948 +static ctl_table pax_table[] = {
65949 + {
65950 + .ctl_name = CTL_UNNUMBERED,
65951 + .procname = "softmode",
65952 + .data = &pax_softmode,
65953 + .maxlen = sizeof(unsigned int),
65954 + .mode = 0600,
65955 + .proc_handler = &proc_dointvec,
65956 + },
65957 +
65958 + { .ctl_name = 0 }
65959 +};
65960 +#endif
65961 +
65962 extern int prove_locking;
65963 extern int lock_stat;
65964
65965 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65966 #endif
65967
65968 static struct ctl_table kern_table[] = {
65969 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65970 + {
65971 + .ctl_name = CTL_UNNUMBERED,
65972 + .procname = "grsecurity",
65973 + .mode = 0500,
65974 + .child = grsecurity_table,
65975 + },
65976 +#endif
65977 +
65978 +#ifdef CONFIG_PAX_SOFTMODE
65979 + {
65980 + .ctl_name = CTL_UNNUMBERED,
65981 + .procname = "pax",
65982 + .mode = 0500,
65983 + .child = pax_table,
65984 + },
65985 +#endif
65986 +
65987 {
65988 .ctl_name = CTL_UNNUMBERED,
65989 .procname = "sched_child_runs_first",
65990 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65991 .data = &modprobe_path,
65992 .maxlen = KMOD_PATH_LEN,
65993 .mode = 0644,
65994 - .proc_handler = &proc_dostring,
65995 - .strategy = &sysctl_string,
65996 + .proc_handler = &proc_dostring_modpriv,
65997 + .strategy = &sysctl_string_modpriv,
65998 },
65999 {
66000 .ctl_name = CTL_UNNUMBERED,
66001 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
66002 .mode = 0644,
66003 .proc_handler = &proc_dointvec
66004 },
66005 + {
66006 + .procname = "heap_stack_gap",
66007 + .data = &sysctl_heap_stack_gap,
66008 + .maxlen = sizeof(sysctl_heap_stack_gap),
66009 + .mode = 0644,
66010 + .proc_handler = proc_doulongvec_minmax,
66011 + },
66012 #else
66013 {
66014 .ctl_name = CTL_UNNUMBERED,
66015 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
66016 return 0;
66017 }
66018
66019 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
66020 +
66021 static int parse_table(int __user *name, int nlen,
66022 void __user *oldval, size_t __user *oldlenp,
66023 void __user *newval, size_t newlen,
66024 @@ -1821,7 +1871,7 @@ repeat:
66025 if (n == table->ctl_name) {
66026 int error;
66027 if (table->child) {
66028 - if (sysctl_perm(root, table, MAY_EXEC))
66029 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
66030 return -EPERM;
66031 name++;
66032 nlen--;
66033 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
66034 int error;
66035 int mode;
66036
66037 + if (table->parent != NULL && table->parent->procname != NULL &&
66038 + table->procname != NULL &&
66039 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66040 + return -EACCES;
66041 + if (gr_handle_chroot_sysctl(op))
66042 + return -EACCES;
66043 + error = gr_handle_sysctl(table, op);
66044 + if (error)
66045 + return error;
66046 +
66047 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66048 + if (error)
66049 + return error;
66050 +
66051 + if (root->permissions)
66052 + mode = root->permissions(root, current->nsproxy, table);
66053 + else
66054 + mode = table->mode;
66055 +
66056 + return test_perm(mode, op);
66057 +}
66058 +
66059 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66060 +{
66061 + int error;
66062 + int mode;
66063 +
66064 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66065 if (error)
66066 return error;
66067 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66068 buffer, lenp, ppos);
66069 }
66070
66071 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66072 + void __user *buffer, size_t *lenp, loff_t *ppos)
66073 +{
66074 + if (write && !capable(CAP_SYS_MODULE))
66075 + return -EPERM;
66076 +
66077 + return _proc_do_string(table->data, table->maxlen, write,
66078 + buffer, lenp, ppos);
66079 +}
66080 +
66081
66082 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66083 int *valp,
66084 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66085 vleft = table->maxlen / sizeof(unsigned long);
66086 left = *lenp;
66087
66088 - for (; left && vleft--; i++, min++, max++, first=0) {
66089 + for (; left && vleft--; i++, first=0) {
66090 if (write) {
66091 while (left) {
66092 char c;
66093 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66094 return -ENOSYS;
66095 }
66096
66097 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66098 + void __user *buffer, size_t *lenp, loff_t *ppos)
66099 +{
66100 + return -ENOSYS;
66101 +}
66102 +
66103 int proc_dointvec(struct ctl_table *table, int write,
66104 void __user *buffer, size_t *lenp, loff_t *ppos)
66105 {
66106 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66107 return 1;
66108 }
66109
66110 +int sysctl_string_modpriv(struct ctl_table *table,
66111 + void __user *oldval, size_t __user *oldlenp,
66112 + void __user *newval, size_t newlen)
66113 +{
66114 + if (newval && newlen && !capable(CAP_SYS_MODULE))
66115 + return -EPERM;
66116 +
66117 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
66118 +}
66119 +
66120 /*
66121 * This function makes sure that all of the integers in the vector
66122 * are between the minimum and maximum values given in the arrays
66123 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66124 return -ENOSYS;
66125 }
66126
66127 +int sysctl_string_modpriv(struct ctl_table *table,
66128 + void __user *oldval, size_t __user *oldlenp,
66129 + void __user *newval, size_t newlen)
66130 +{
66131 + return -ENOSYS;
66132 +}
66133 +
66134 int sysctl_intvec(struct ctl_table *table,
66135 void __user *oldval, size_t __user *oldlenp,
66136 void __user *newval, size_t newlen)
66137 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66138 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66139 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66140 EXPORT_SYMBOL(proc_dostring);
66141 +EXPORT_SYMBOL(proc_dostring_modpriv);
66142 EXPORT_SYMBOL(proc_doulongvec_minmax);
66143 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66144 EXPORT_SYMBOL(register_sysctl_table);
66145 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66146 EXPORT_SYMBOL(sysctl_jiffies);
66147 EXPORT_SYMBOL(sysctl_ms_jiffies);
66148 EXPORT_SYMBOL(sysctl_string);
66149 +EXPORT_SYMBOL(sysctl_string_modpriv);
66150 EXPORT_SYMBOL(sysctl_data);
66151 EXPORT_SYMBOL(unregister_sysctl_table);
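
Among the sysctl.c additions above, the vm_table entry exposes sysctl_heap_stack_gap, the knob consulted by the patch's mmap placement changes, which demand a configurable guard gap between a new mapping and the vma above it instead of letting them touch. A simplified sketch of that check follows; the full patch uses a helper along these lines (check_heap_stack_gap()), but the function body and the default gap value here are assumptions for illustration only, and the real logic also has to care about which neighbour is a downward-growing stack.

#include <stdio.h>

/* tunable: minimum free space demanded between a mapping and the next vma */
static unsigned long sysctl_heap_stack_gap = 64UL * 1024;

struct vm_area_struct { unsigned long vm_start, vm_end; };

/* non-zero if [addr, addr+len) plus the configured gap still fits below vma */
static int check_heap_stack_gap(const struct vm_area_struct *vma,
                                unsigned long addr, unsigned long len)
{
    if (!vma)
        return 1;                       /* no vma above the candidate range */
    return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
}

int main(void)
{
    struct vm_area_struct stack = { 0x60000000UL, 0x60100000UL };

    printf("tight fit: %d\n", check_heap_stack_gap(&stack, 0x5ffff000UL, 0x1000));
    printf("roomy fit: %d\n", check_heap_stack_gap(&stack, 0x50000000UL, 0x1000));
    return 0;
}
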
66152 diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
66153 --- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66154 +++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66155 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66156 } else {
66157 if ((table->strategy == sysctl_data) ||
66158 (table->strategy == sysctl_string) ||
66159 + (table->strategy == sysctl_string_modpriv) ||
66160 (table->strategy == sysctl_intvec) ||
66161 (table->strategy == sysctl_jiffies) ||
66162 (table->strategy == sysctl_ms_jiffies) ||
66163 (table->proc_handler == proc_dostring) ||
66164 + (table->proc_handler == proc_dostring_modpriv) ||
66165 (table->proc_handler == proc_dointvec) ||
66166 (table->proc_handler == proc_dointvec_minmax) ||
66167 (table->proc_handler == proc_dointvec_jiffies) ||
66168 diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
66169 --- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66170 +++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66171 @@ -26,9 +26,12 @@
66172 #include <linux/cgroup.h>
66173 #include <linux/fs.h>
66174 #include <linux/file.h>
66175 +#include <linux/grsecurity.h>
66176 #include <net/genetlink.h>
66177 #include <asm/atomic.h>
66178
66179 +extern int gr_is_taskstats_denied(int pid);
66180 +
66181 /*
66182 * Maximum length of a cpumask that can be specified in
66183 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66184 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66185 size_t size;
66186 cpumask_var_t mask;
66187
66188 + if (gr_is_taskstats_denied(current->pid))
66189 + return -EACCES;
66190 +
66191 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66192 return -ENOMEM;
66193
66194 diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66195 --- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66196 +++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66197 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66198 * then clear the broadcast bit.
66199 */
66200 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66201 - int cpu = smp_processor_id();
66202 + cpu = smp_processor_id();
66203
66204 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66205 tick_broadcast_clear_oneshot(cpu);
66206 diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66207 --- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66208 +++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66209 @@ -14,6 +14,7 @@
66210 #include <linux/init.h>
66211 #include <linux/mm.h>
66212 #include <linux/sched.h>
66213 +#include <linux/grsecurity.h>
66214 #include <linux/sysdev.h>
66215 #include <linux/clocksource.h>
66216 #include <linux/jiffies.h>
66217 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66218 */
66219 struct timespec ts = xtime;
66220 timespec_add_ns(&ts, nsec);
66221 - ACCESS_ONCE(xtime_cache) = ts;
66222 + ACCESS_ONCE_RW(xtime_cache) = ts;
66223 }
66224
66225 /* must hold xtime_lock */
66226 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66227 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66228 return -EINVAL;
66229
66230 + gr_log_timechange();
66231 +
66232 write_seqlock_irqsave(&xtime_lock, flags);
66233
66234 timekeeping_forward_now();
66235 diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66236 --- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66237 +++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66238 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66239
66240 static void print_name_offset(struct seq_file *m, void *sym)
66241 {
66242 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66243 + SEQ_printf(m, "<%p>", NULL);
66244 +#else
66245 char symname[KSYM_NAME_LEN];
66246
66247 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66248 SEQ_printf(m, "<%p>", sym);
66249 else
66250 SEQ_printf(m, "%s", symname);
66251 +#endif
66252 }
66253
66254 static void
66255 @@ -112,7 +116,11 @@ next_one:
66256 static void
66257 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66258 {
66259 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66260 + SEQ_printf(m, " .base: %p\n", NULL);
66261 +#else
66262 SEQ_printf(m, " .base: %p\n", base);
66263 +#endif
66264 SEQ_printf(m, " .index: %d\n",
66265 base->index);
66266 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66267 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66268 {
66269 struct proc_dir_entry *pe;
66270
66271 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66272 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66273 +#else
66274 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66275 +#endif
66276 if (!pe)
66277 return -ENOMEM;
66278 return 0;
66279 diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66280 --- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66281 +++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66282 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66283 static unsigned long nr_entries;
66284 static struct entry entries[MAX_ENTRIES];
66285
66286 -static atomic_t overflow_count;
66287 +static atomic_unchecked_t overflow_count;
66288
66289 /*
66290 * The entries are in a hash-table, for fast lookup:
66291 @@ -140,7 +140,7 @@ static void reset_entries(void)
66292 nr_entries = 0;
66293 memset(entries, 0, sizeof(entries));
66294 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66295 - atomic_set(&overflow_count, 0);
66296 + atomic_set_unchecked(&overflow_count, 0);
66297 }
66298
66299 static struct entry *alloc_entry(void)
66300 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66301 if (likely(entry))
66302 entry->count++;
66303 else
66304 - atomic_inc(&overflow_count);
66305 + atomic_inc_unchecked(&overflow_count);
66306
66307 out_unlock:
66308 spin_unlock_irqrestore(lock, flags);
66309 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66310
66311 static void print_name_offset(struct seq_file *m, unsigned long addr)
66312 {
66313 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66314 + seq_printf(m, "<%p>", NULL);
66315 +#else
66316 char symname[KSYM_NAME_LEN];
66317
66318 if (lookup_symbol_name(addr, symname) < 0)
66319 seq_printf(m, "<%p>", (void *)addr);
66320 else
66321 seq_printf(m, "%s", symname);
66322 +#endif
66323 }
66324
66325 static int tstats_show(struct seq_file *m, void *v)
66326 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66327
66328 seq_puts(m, "Timer Stats Version: v0.2\n");
66329 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66330 - if (atomic_read(&overflow_count))
66331 + if (atomic_read_unchecked(&overflow_count))
66332 seq_printf(m, "Overflow: %d entries\n",
66333 - atomic_read(&overflow_count));
66334 + atomic_read_unchecked(&overflow_count));
66335
66336 for (i = 0; i < nr_entries; i++) {
66337 entry = entries + i;
66338 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66339 {
66340 struct proc_dir_entry *pe;
66341
66342 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66343 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66344 +#else
66345 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66346 +#endif
66347 if (!pe)
66348 return -ENOMEM;
66349 return 0;
66350 diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66351 --- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66352 +++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66353 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66354 return error;
66355
66356 if (tz) {
66357 + /* we log in do_settimeofday called below, so don't log twice
66358 + */
66359 + if (!tv)
66360 + gr_log_timechange();
66361 +
66362 /* SMP safe, global irq locking makes it work. */
66363 sys_tz = *tz;
66364 update_vsyscall_tz();
66365 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66366 * Avoid unnecessary multiplications/divisions in the
66367 * two most common HZ cases:
66368 */
66369 -unsigned int inline jiffies_to_msecs(const unsigned long j)
66370 +inline unsigned int jiffies_to_msecs(const unsigned long j)
66371 {
66372 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66373 return (MSEC_PER_SEC / HZ) * j;
66374 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66375 }
66376 EXPORT_SYMBOL(jiffies_to_msecs);
66377
66378 -unsigned int inline jiffies_to_usecs(const unsigned long j)
66379 +inline unsigned int jiffies_to_usecs(const unsigned long j)
66380 {
66381 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66382 return (USEC_PER_SEC / HZ) * j;
66383 diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66384 --- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66385 +++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66386 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66387 /*
66388 * This function runs timers and the timer-tq in bottom half context.
66389 */
66390 -static void run_timer_softirq(struct softirq_action *h)
66391 +static void run_timer_softirq(void)
66392 {
66393 struct tvec_base *base = __get_cpu_var(tvec_bases);
66394
66395 diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66396 --- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66397 +++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66398 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66399 struct blk_trace *bt = filp->private_data;
66400 char buf[16];
66401
66402 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66403 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66404
66405 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66406 }
66407 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66408 return 1;
66409
66410 bt = buf->chan->private_data;
66411 - atomic_inc(&bt->dropped);
66412 + atomic_inc_unchecked(&bt->dropped);
66413 return 0;
66414 }
66415
66416 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66417
66418 bt->dir = dir;
66419 bt->dev = dev;
66420 - atomic_set(&bt->dropped, 0);
66421 + atomic_set_unchecked(&bt->dropped, 0);
66422
66423 ret = -EIO;
66424 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66425 diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66426 --- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66427 +++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66428 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66429
66430 ip = rec->ip;
66431
66432 + ret = ftrace_arch_code_modify_prepare();
66433 + FTRACE_WARN_ON(ret);
66434 + if (ret)
66435 + return 0;
66436 +
66437 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66438 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66439 if (ret) {
66440 ftrace_bug(ret, ip);
66441 rec->flags |= FTRACE_FL_FAILED;
66442 - return 0;
66443 }
66444 - return 1;
66445 + return ret ? 0 : 1;
66446 }
66447
66448 /*
66449 diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66450 --- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66451 +++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66452 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66453 * the reader page). But if the next page is a header page,
66454 * its flags will be non zero.
66455 */
66456 -static int inline
66457 +static inline int
66458 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66459 struct buffer_page *page, struct list_head *list)
66460 {
66461 diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66462 --- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66463 +++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66464 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66465 size_t rem;
66466 unsigned int i;
66467
66468 + pax_track_stack();
66469 +
66470 /* copy the tracer to avoid using a global lock all around */
66471 mutex_lock(&trace_types_lock);
66472 if (unlikely(old_tracer != current_trace && current_trace)) {
66473 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66474 int entries, size, i;
66475 size_t ret;
66476
66477 + pax_track_stack();
66478 +
66479 if (*ppos & (PAGE_SIZE - 1)) {
66480 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66481 return -EINVAL;
66482 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
66483 };
66484 #endif
66485
66486 -static struct dentry *d_tracer;
66487 -
66488 struct dentry *tracing_init_dentry(void)
66489 {
66490 + static struct dentry *d_tracer;
66491 static int once;
66492
66493 if (d_tracer)
66494 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66495 return d_tracer;
66496 }
66497
66498 -static struct dentry *d_percpu;
66499 -
66500 struct dentry *tracing_dentry_percpu(void)
66501 {
66502 + static struct dentry *d_percpu;
66503 static int once;
66504 struct dentry *d_tracer;
66505
66506 diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66507 --- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66508 +++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66509 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66510 * Modules must own their file_operations to keep up with
66511 * reference counting.
66512 */
66513 +
66514 struct ftrace_module_file_ops {
66515 struct list_head list;
66516 struct module *mod;
66517 - struct file_operations id;
66518 - struct file_operations enable;
66519 - struct file_operations format;
66520 - struct file_operations filter;
66521 };
66522
66523 static void remove_subsystem_dir(const char *name)
66524 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66525
66526 file_ops->mod = mod;
66527
66528 - file_ops->id = ftrace_event_id_fops;
66529 - file_ops->id.owner = mod;
66530 -
66531 - file_ops->enable = ftrace_enable_fops;
66532 - file_ops->enable.owner = mod;
66533 -
66534 - file_ops->filter = ftrace_event_filter_fops;
66535 - file_ops->filter.owner = mod;
66536 -
66537 - file_ops->format = ftrace_event_format_fops;
66538 - file_ops->format.owner = mod;
66539 + pax_open_kernel();
66540 + *(void **)&mod->trace_id.owner = mod;
66541 + *(void **)&mod->trace_enable.owner = mod;
66542 + *(void **)&mod->trace_filter.owner = mod;
66543 + *(void **)&mod->trace_format.owner = mod;
66544 + pax_close_kernel();
66545
66546 list_add(&file_ops->list, &ftrace_module_file_list);
66547
66548 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66549 call->mod = mod;
66550 list_add(&call->list, &ftrace_events);
66551 event_create_dir(call, d_events,
66552 - &file_ops->id, &file_ops->enable,
66553 - &file_ops->filter, &file_ops->format);
66554 + &mod->trace_id, &mod->trace_enable,
66555 + &mod->trace_filter, &mod->trace_format);
66556 }
66557 }
66558
66559 diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66560 --- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66561 +++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66562 @@ -23,7 +23,7 @@ struct header_iter {
66563 static struct trace_array *mmio_trace_array;
66564 static bool overrun_detected;
66565 static unsigned long prev_overruns;
66566 -static atomic_t dropped_count;
66567 +static atomic_unchecked_t dropped_count;
66568
66569 static void mmio_reset_data(struct trace_array *tr)
66570 {
66571 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66572
66573 static unsigned long count_overruns(struct trace_iterator *iter)
66574 {
66575 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66576 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66577 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66578
66579 if (over > prev_overruns)
66580 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66581 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66582 sizeof(*entry), 0, pc);
66583 if (!event) {
66584 - atomic_inc(&dropped_count);
66585 + atomic_inc_unchecked(&dropped_count);
66586 return;
66587 }
66588 entry = ring_buffer_event_data(event);
66589 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66590 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66591 sizeof(*entry), 0, pc);
66592 if (!event) {
66593 - atomic_inc(&dropped_count);
66594 + atomic_inc_unchecked(&dropped_count);
66595 return;
66596 }
66597 entry = ring_buffer_event_data(event);
66598 diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66599 --- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66600 +++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66601 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66602 return 0;
66603 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66604 if (!IS_ERR(p)) {
66605 - p = mangle_path(s->buffer + s->len, p, "\n");
66606 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66607 if (p) {
66608 s->len = p - s->buffer;
66609 return 1;
66610 diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66611 --- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66612 +++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66613 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66614 return;
66615
66616 /* we do not handle interrupt stacks yet */
66617 - if (!object_is_on_stack(&this_size))
66618 + if (!object_starts_on_stack(&this_size))
66619 return;
66620
66621 local_irq_save(flags);
66622 diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66623 --- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66624 +++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66625 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66626 int cpu;
66627 pid_t pid;
66628 /* Can be inserted from interrupt or user context, need to be atomic */
66629 - atomic_t inserted;
66630 + atomic_unchecked_t inserted;
66631 /*
66632 * Don't need to be atomic, works are serialized in a single workqueue thread
66633 * on a single CPU.
66634 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66635 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66636 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66637 if (node->pid == wq_thread->pid) {
66638 - atomic_inc(&node->inserted);
66639 + atomic_inc_unchecked(&node->inserted);
66640 goto found;
66641 }
66642 }
66643 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66644 tsk = get_pid_task(pid, PIDTYPE_PID);
66645 if (tsk) {
66646 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66647 - atomic_read(&cws->inserted), cws->executed,
66648 + atomic_read_unchecked(&cws->inserted), cws->executed,
66649 tsk->comm);
66650 put_task_struct(tsk);
66651 }
66652 diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66653 --- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66654 +++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66655 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66656 spin_lock_irq(&uidhash_lock);
66657 up = uid_hash_find(uid, hashent);
66658 if (up) {
66659 + put_user_ns(ns);
66660 key_put(new->uid_keyring);
66661 key_put(new->session_keyring);
66662 kmem_cache_free(uid_cachep, new);
66663 diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66664 --- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66665 +++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66666 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66667 return BUG_TRAP_TYPE_NONE;
66668
66669 bug = find_bug(bugaddr);
66670 + if (!bug)
66671 + return BUG_TRAP_TYPE_NONE;
66672
66673 printk(KERN_EMERG "------------[ cut here ]------------\n");
66674
66675 diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66676 --- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66677 +++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66678 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66679 if (limit > 4)
66680 return;
66681
66682 - is_on_stack = object_is_on_stack(addr);
66683 + is_on_stack = object_starts_on_stack(addr);
66684 if (is_on_stack == onstack)
66685 return;
66686
66687 diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66688 --- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66689 +++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66690 @@ -861,7 +861,7 @@ out:
66691
66692 static void check_for_stack(struct device *dev, void *addr)
66693 {
66694 - if (object_is_on_stack(addr))
66695 + if (object_starts_on_stack(addr))
66696 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66697 "stack [addr=%p]\n", addr);
66698 }
66699 diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66700 --- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66701 +++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66702 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66703 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66704
66705 /* if already at the top layer, we need to grow */
66706 - if (id >= 1 << (idp->layers * IDR_BITS)) {
66707 + if (id >= (1 << (idp->layers * IDR_BITS))) {
66708 *starting_id = id;
66709 return IDR_NEED_TO_GROW;
66710 }
66711 diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66712 --- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66713 +++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66714 @@ -266,7 +266,7 @@ static void free(void *where)
66715 malloc_ptr = free_mem_ptr;
66716 }
66717 #else
66718 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66719 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66720 #define free(a) kfree(a)
66721 #endif
66722
66723 diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66724 --- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66725 +++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66726 @@ -905,7 +905,7 @@ config LATENCYTOP
66727 select STACKTRACE
66728 select SCHEDSTATS
66729 select SCHED_DEBUG
66730 - depends on HAVE_LATENCYTOP_SUPPORT
66731 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66732 help
66733 Enable this option if you want to use the LatencyTOP tool
66734 to find out which userspace is blocking on what kernel operations.
66735 diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66736 --- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66737 +++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66738 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66739 return ret;
66740 }
66741
66742 -struct sysfs_ops kobj_sysfs_ops = {
66743 +const struct sysfs_ops kobj_sysfs_ops = {
66744 .show = kobj_attr_show,
66745 .store = kobj_attr_store,
66746 };
66747 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66748 * If the kset was not able to be created, NULL will be returned.
66749 */
66750 static struct kset *kset_create(const char *name,
66751 - struct kset_uevent_ops *uevent_ops,
66752 + const struct kset_uevent_ops *uevent_ops,
66753 struct kobject *parent_kobj)
66754 {
66755 struct kset *kset;
66756 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66757 * If the kset was not able to be created, NULL will be returned.
66758 */
66759 struct kset *kset_create_and_add(const char *name,
66760 - struct kset_uevent_ops *uevent_ops,
66761 + const struct kset_uevent_ops *uevent_ops,
66762 struct kobject *parent_kobj)
66763 {
66764 struct kset *kset;
66765 diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66766 --- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66767 +++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66768 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66769 const char *subsystem;
66770 struct kobject *top_kobj;
66771 struct kset *kset;
66772 - struct kset_uevent_ops *uevent_ops;
66773 + const struct kset_uevent_ops *uevent_ops;
66774 u64 seq;
66775 int i = 0;
66776 int retval = 0;
66777 diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66778 --- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66779 +++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66780 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66781 */
66782 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66783 {
66784 - WARN_ON(release == NULL);
66785 + BUG_ON(release == NULL);
66786 WARN_ON(release == (void (*)(struct kref *))kfree);
66787
66788 if (atomic_dec_and_test(&kref->refcount)) {
66789 diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66790 --- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66791 +++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66792 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66793 char *buf;
66794 int ret;
66795
66796 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66797 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66798 if (!buf)
66799 return -ENOMEM;
66800 memcpy(buf, s->from, s->to - s->from);
66801 diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66802 --- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66803 +++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66804 @@ -81,7 +81,7 @@ struct radix_tree_preload {
66805 int nr;
66806 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66807 };
66808 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66809 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66810
66811 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66812 {
66813 diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66814 --- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66815 +++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66816 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66817 */
66818 static inline u32 __seed(u32 x, u32 m)
66819 {
66820 - return (x < m) ? x + m : x;
66821 + return (x <= m) ? x + m + 1 : x;
66822 }
66823
66824 /**
66825 diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66826 --- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66827 +++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66828 @@ -16,6 +16,9 @@
66829 * - scnprintf and vscnprintf
66830 */
66831
66832 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66833 +#define __INCLUDED_BY_HIDESYM 1
66834 +#endif
66835 #include <stdarg.h>
66836 #include <linux/module.h>
66837 #include <linux/types.h>
66838 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66839 return buf;
66840 }
66841
66842 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66843 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66844 {
66845 int len, i;
66846
66847 if ((unsigned long)s < PAGE_SIZE)
66848 - s = "<NULL>";
66849 + s = "(null)";
66850
66851 len = strnlen(s, spec.precision);
66852
66853 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66854 unsigned long value = (unsigned long) ptr;
66855 #ifdef CONFIG_KALLSYMS
66856 char sym[KSYM_SYMBOL_LEN];
66857 - if (ext != 'f' && ext != 's')
66858 + if (ext != 'f' && ext != 's' && ext != 'a')
66859 sprint_symbol(sym, value);
66860 else
66861 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66862 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66863 * - 'f' For simple symbolic function names without offset
66864 * - 'S' For symbolic direct pointers with offset
66865 * - 's' For symbolic direct pointers without offset
66866 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66867 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66868 * - 'R' For a struct resource pointer, it prints the range of
66869 * addresses (not the name nor the flags)
66870 * - 'M' For a 6-byte MAC address, it prints the address in the
66871 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66872 struct printf_spec spec)
66873 {
66874 if (!ptr)
66875 - return string(buf, end, "(null)", spec);
66876 + return string(buf, end, "(nil)", spec);
66877
66878 switch (*fmt) {
66879 case 'F':
66880 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66881 case 's':
66882 /* Fallthrough */
66883 case 'S':
66884 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66885 + break;
66886 +#else
66887 + return symbol_string(buf, end, ptr, spec, *fmt);
66888 +#endif
66889 + case 'a':
66890 + /* Fallthrough */
66891 + case 'A':
66892 return symbol_string(buf, end, ptr, spec, *fmt);
66893 case 'R':
66894 return resource_string(buf, end, ptr, spec);
66895 @@ -1445,7 +1458,7 @@ do { \
66896 size_t len;
66897 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66898 || (unsigned long)save_str < PAGE_SIZE)
66899 - save_str = "<NULL>";
66900 + save_str = "(null)";
66901 len = strlen(save_str);
66902 if (str + len + 1 < end)
66903 memcpy(str, save_str, len + 1);
66904 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66905 typeof(type) value; \
66906 if (sizeof(type) == 8) { \
66907 args = PTR_ALIGN(args, sizeof(u32)); \
66908 - *(u32 *)&value = *(u32 *)args; \
66909 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66910 + *(u32 *)&value = *(const u32 *)args; \
66911 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66912 } else { \
66913 args = PTR_ALIGN(args, sizeof(type)); \
66914 - value = *(typeof(type) *)args; \
66915 + value = *(const typeof(type) *)args; \
66916 } \
66917 args += sizeof(type); \
66918 value; \
66919 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66920 const char *str_arg = args;
66921 size_t len = strlen(str_arg);
66922 args += len + 1;
66923 - str = string(str, end, (char *)str_arg, spec);
66924 + str = string(str, end, str_arg, spec);
66925 break;
66926 }
66927
66928 diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66929 --- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66930 +++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66931 @@ -0,0 +1 @@
66932 +-grsec
66933 diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66934 --- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66935 +++ linux-2.6.32.45/Makefile 2011-08-16 20:42:28.000000000 -0400
66936 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66937
66938 HOSTCC = gcc
66939 HOSTCXX = g++
66940 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66941 -HOSTCXXFLAGS = -O2
66942 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66943 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66944 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66945
66946 # Decide whether to build built-in, modular, or both.
66947 # Normally, just do built-in.
66948 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66949 KBUILD_CPPFLAGS := -D__KERNEL__
66950
66951 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66952 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
66953 -fno-strict-aliasing -fno-common \
66954 -Werror-implicit-function-declaration \
66955 -Wno-format-security \
66956 -fno-delete-null-pointer-checks
66957 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66958 KBUILD_AFLAGS := -D__ASSEMBLY__
66959
66960 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66961 @@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66962 # Rules shared between *config targets and build targets
66963
66964 # Basic helpers built in scripts/
66965 -PHONY += scripts_basic
66966 -scripts_basic:
66967 +PHONY += scripts_basic gcc-plugins
66968 +scripts_basic: gcc-plugins
66969 $(Q)$(MAKE) $(build)=scripts/basic
66970
66971 # To avoid any implicit rule to kick in, define an empty command.
66972 @@ -403,7 +406,7 @@ endif
66973 # of make so .config is not included in this case either (for *config).
66974
66975 no-dot-config-targets := clean mrproper distclean \
66976 - cscope TAGS tags help %docs check% \
66977 + cscope gtags TAGS tags help %docs check% \
66978 include/linux/version.h headers_% \
66979 kernelrelease kernelversion
66980
66981 @@ -526,6 +529,25 @@ else
66982 KBUILD_CFLAGS += -O2
66983 endif
66984
66985 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66986 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
66987 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
66988 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66989 +endif
66990 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66991 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66992 +gcc-plugins:
66993 + $(Q)$(MAKE) $(build)=tools/gcc
66994 +else
66995 +gcc-plugins:
66996 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66997 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66998 +else
66999 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67000 +endif
67001 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67002 +endif
67003 +
67004 include $(srctree)/arch/$(SRCARCH)/Makefile
67005
67006 ifneq ($(CONFIG_FRAME_WARN),0)
67007 @@ -644,7 +666,7 @@ export mod_strip_cmd
67008
67009
67010 ifeq ($(KBUILD_EXTMOD),)
67011 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67012 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67013
67014 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67015 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67016 @@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
67017 endif
67018
67019 # prepare2 creates a makefile if using a separate output directory
67020 -prepare2: prepare3 outputmakefile
67021 +prepare2: prepare3 outputmakefile gcc-plugins
67022
67023 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
67024 include/asm include/config/auto.conf
67025 @@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
67026 include/linux/autoconf.h include/linux/version.h \
67027 include/linux/utsrelease.h \
67028 include/linux/bounds.h include/asm*/asm-offsets.h \
67029 - Module.symvers Module.markers tags TAGS cscope*
67030 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
67031
67032 # clean - Delete most, but leave enough to build external modules
67033 #
67034 @@ -1289,6 +1311,7 @@ help:
67035 @echo ' modules_prepare - Set up for building external modules'
67036 @echo ' tags/TAGS - Generate tags file for editors'
67037 @echo ' cscope - Generate cscope index'
67038 + @echo ' gtags - Generate GNU GLOBAL index'
67039 @echo ' kernelrelease - Output the release version string'
67040 @echo ' kernelversion - Output the version stored in Makefile'
67041 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
67042 @@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
67043 $(call cmd,rmdirs)
67044 $(call cmd,rmfiles)
67045 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
67046 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
67047 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
67048 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67049 -o -name '*.gcno' \) -type f -print | xargs rm -f
67050
67051 @@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67052 quiet_cmd_tags = GEN $@
67053 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67054
67055 -tags TAGS cscope: FORCE
67056 +tags TAGS cscope gtags: FORCE
67057 $(call cmd,tags)
67058
67059 # Scripts to check various things for consistency
67060 diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
67061 --- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67062 +++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
67063 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
67064 list_add_tail_rcu(&wb->list, &bdi->wb_list);
67065 spin_unlock(&bdi->wb_lock);
67066
67067 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
67068 + tsk->flags |= PF_SWAPWRITE;
67069 set_freezable();
67070
67071 /*
67072 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67073 * Add the default flusher task that gets created for any bdi
67074 * that has dirty data pending writeout
67075 */
67076 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67077 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67078 {
67079 if (!bdi_cap_writeback_dirty(bdi))
67080 return;
67081 diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
67082 --- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67083 +++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67084 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67085 struct address_space *mapping = file->f_mapping;
67086
67087 if (!mapping->a_ops->readpage)
67088 - return -ENOEXEC;
67089 + return -ENODEV;
67090 file_accessed(file);
67091 vma->vm_ops = &generic_file_vm_ops;
67092 vma->vm_flags |= VM_CAN_NONLINEAR;
67093 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67094 *pos = i_size_read(inode);
67095
67096 if (limit != RLIM_INFINITY) {
67097 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67098 if (*pos >= limit) {
67099 send_sig(SIGXFSZ, current, 0);
67100 return -EFBIG;
67101 diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
67102 --- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67103 +++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67104 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67105 retry:
67106 vma = find_vma(mm, start);
67107
67108 +#ifdef CONFIG_PAX_SEGMEXEC
67109 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67110 + goto out;
67111 +#endif
67112 +
67113 /*
67114 * Make sure the vma is shared, that it supports prefaulting,
67115 * and that the remapped range is valid and fully within
67116 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67117 /*
67118 * drop PG_Mlocked flag for over-mapped range
67119 */
67120 - unsigned int saved_flags = vma->vm_flags;
67121 + unsigned long saved_flags = vma->vm_flags;
67122 munlock_vma_pages_range(vma, start, start + size);
67123 vma->vm_flags = saved_flags;
67124 }
67125 diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
67126 --- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67127 +++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67128 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67129 * So no dangers, even with speculative execution.
67130 */
67131 page = pte_page(pkmap_page_table[i]);
67132 + pax_open_kernel();
67133 pte_clear(&init_mm, (unsigned long)page_address(page),
67134 &pkmap_page_table[i]);
67135 -
67136 + pax_close_kernel();
67137 set_page_address(page, NULL);
67138 need_flush = 1;
67139 }
67140 @@ -177,9 +178,11 @@ start:
67141 }
67142 }
67143 vaddr = PKMAP_ADDR(last_pkmap_nr);
67144 +
67145 + pax_open_kernel();
67146 set_pte_at(&init_mm, vaddr,
67147 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67148 -
67149 + pax_close_kernel();
67150 pkmap_count[last_pkmap_nr] = 1;
67151 set_page_address(page, (void *)vaddr);
67152
67153 diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67154 --- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67155 +++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67156 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67157 return 1;
67158 }
67159
67160 +#ifdef CONFIG_PAX_SEGMEXEC
67161 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67162 +{
67163 + struct mm_struct *mm = vma->vm_mm;
67164 + struct vm_area_struct *vma_m;
67165 + unsigned long address_m;
67166 + pte_t *ptep_m;
67167 +
67168 + vma_m = pax_find_mirror_vma(vma);
67169 + if (!vma_m)
67170 + return;
67171 +
67172 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67173 + address_m = address + SEGMEXEC_TASK_SIZE;
67174 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67175 + get_page(page_m);
67176 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67177 +}
67178 +#endif
67179 +
67180 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67181 unsigned long address, pte_t *ptep, pte_t pte,
67182 struct page *pagecache_page)
67183 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
67184 huge_ptep_clear_flush(vma, address, ptep);
67185 set_huge_pte_at(mm, address, ptep,
67186 make_huge_pte(vma, new_page, 1));
67187 +
67188 +#ifdef CONFIG_PAX_SEGMEXEC
67189 + pax_mirror_huge_pte(vma, address, new_page);
67190 +#endif
67191 +
67192 /* Make the old page be freed below */
67193 new_page = old_page;
67194 }
67195 @@ -2135,6 +2160,10 @@ retry:
67196 && (vma->vm_flags & VM_SHARED)));
67197 set_huge_pte_at(mm, address, ptep, new_pte);
67198
67199 +#ifdef CONFIG_PAX_SEGMEXEC
67200 + pax_mirror_huge_pte(vma, address, page);
67201 +#endif
67202 +
67203 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67204 /* Optimization, do the COW without a second fault */
67205 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67206 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67207 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67208 struct hstate *h = hstate_vma(vma);
67209
67210 +#ifdef CONFIG_PAX_SEGMEXEC
67211 + struct vm_area_struct *vma_m;
67212 +
67213 + vma_m = pax_find_mirror_vma(vma);
67214 + if (vma_m) {
67215 + unsigned long address_m;
67216 +
67217 + if (vma->vm_start > vma_m->vm_start) {
67218 + address_m = address;
67219 + address -= SEGMEXEC_TASK_SIZE;
67220 + vma = vma_m;
67221 + h = hstate_vma(vma);
67222 + } else
67223 + address_m = address + SEGMEXEC_TASK_SIZE;
67224 +
67225 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67226 + return VM_FAULT_OOM;
67227 + address_m &= HPAGE_MASK;
67228 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67229 + }
67230 +#endif
67231 +
67232 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67233 if (!ptep)
67234 return VM_FAULT_OOM;
67235 diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67236 --- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67237 +++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67238 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67239 * in mm/page_alloc.c
67240 */
67241 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67242 +extern void free_compound_page(struct page *page);
67243 extern void prep_compound_page(struct page *page, unsigned long order);
67244
67245
67246 diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67247 --- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67248 +++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67249 @@ -228,7 +228,7 @@ config KSM
67250 config DEFAULT_MMAP_MIN_ADDR
67251 int "Low address space to protect from user allocation"
67252 depends on MMU
67253 - default 4096
67254 + default 65536
67255 help
67256 This is the portion of low virtual memory which should be protected
67257 from userspace allocation. Keeping a user from writing to low pages
67258 diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67259 --- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67260 +++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67261 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67262
67263 for (i = 0; i < object->trace_len; i++) {
67264 void *ptr = (void *)object->trace[i];
67265 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67266 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67267 }
67268 }
67269
67270 diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67271 --- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67272 +++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67273 @@ -14,7 +14,7 @@
67274 * Safely read from address @src to the buffer at @dst. If a kernel fault
67275 * happens, handle that and return -EFAULT.
67276 */
67277 -long probe_kernel_read(void *dst, void *src, size_t size)
67278 +long probe_kernel_read(void *dst, const void *src, size_t size)
67279 {
67280 long ret;
67281 mm_segment_t old_fs = get_fs();
67282 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67283 * Safely write to address @dst from the buffer at @src. If a kernel fault
67284 * happens, handle that and return -EFAULT.
67285 */
67286 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67287 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67288 {
67289 long ret;
67290 mm_segment_t old_fs = get_fs();
67291 diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67292 --- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67293 +++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67294 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67295 pgoff_t pgoff;
67296 unsigned long new_flags = vma->vm_flags;
67297
67298 +#ifdef CONFIG_PAX_SEGMEXEC
67299 + struct vm_area_struct *vma_m;
67300 +#endif
67301 +
67302 switch (behavior) {
67303 case MADV_NORMAL:
67304 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67305 @@ -103,6 +107,13 @@ success:
67306 /*
67307 * vm_flags is protected by the mmap_sem held in write mode.
67308 */
67309 +
67310 +#ifdef CONFIG_PAX_SEGMEXEC
67311 + vma_m = pax_find_mirror_vma(vma);
67312 + if (vma_m)
67313 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67314 +#endif
67315 +
67316 vma->vm_flags = new_flags;
67317
67318 out:
67319 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67320 struct vm_area_struct ** prev,
67321 unsigned long start, unsigned long end)
67322 {
67323 +
67324 +#ifdef CONFIG_PAX_SEGMEXEC
67325 + struct vm_area_struct *vma_m;
67326 +#endif
67327 +
67328 *prev = vma;
67329 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67330 return -EINVAL;
67331 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67332 zap_page_range(vma, start, end - start, &details);
67333 } else
67334 zap_page_range(vma, start, end - start, NULL);
67335 +
67336 +#ifdef CONFIG_PAX_SEGMEXEC
67337 + vma_m = pax_find_mirror_vma(vma);
67338 + if (vma_m) {
67339 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67340 + struct zap_details details = {
67341 + .nonlinear_vma = vma_m,
67342 + .last_index = ULONG_MAX,
67343 + };
67344 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67345 + } else
67346 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67347 + }
67348 +#endif
67349 +
67350 return 0;
67351 }
67352
67353 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67354 if (end < start)
67355 goto out;
67356
67357 +#ifdef CONFIG_PAX_SEGMEXEC
67358 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67359 + if (end > SEGMEXEC_TASK_SIZE)
67360 + goto out;
67361 + } else
67362 +#endif
67363 +
67364 + if (end > TASK_SIZE)
67365 + goto out;
67366 +
67367 error = 0;
67368 if (end == start)
67369 goto out;
67370 diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67371 --- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67372 +++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67373 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67374 return;
67375
67376 pmd = pmd_offset(pud, start);
67377 +
67378 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67379 pud_clear(pud);
67380 pmd_free_tlb(tlb, pmd, start);
67381 +#endif
67382 +
67383 }
67384
67385 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67386 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67387 if (end - 1 > ceiling - 1)
67388 return;
67389
67390 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67391 pud = pud_offset(pgd, start);
67392 pgd_clear(pgd);
67393 pud_free_tlb(tlb, pud, start);
67394 +#endif
67395 +
67396 }
67397
67398 /*
67399 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67400 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67401 i = 0;
67402
67403 - do {
67404 + while (nr_pages) {
67405 struct vm_area_struct *vma;
67406
67407 - vma = find_extend_vma(mm, start);
67408 + vma = find_vma(mm, start);
67409 if (!vma && in_gate_area(tsk, start)) {
67410 unsigned long pg = start & PAGE_MASK;
67411 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67412 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67413 continue;
67414 }
67415
67416 - if (!vma ||
67417 + if (!vma || start < vma->vm_start ||
67418 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67419 !(vm_flags & vma->vm_flags))
67420 return i ? : -EFAULT;
67421 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67422 start += PAGE_SIZE;
67423 nr_pages--;
67424 } while (nr_pages && start < vma->vm_end);
67425 - } while (nr_pages);
67426 + }
67427 return i;
67428 }
67429
67430 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67431 page_add_file_rmap(page);
67432 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67433
67434 +#ifdef CONFIG_PAX_SEGMEXEC
67435 + pax_mirror_file_pte(vma, addr, page, ptl);
67436 +#endif
67437 +
67438 retval = 0;
67439 pte_unmap_unlock(pte, ptl);
67440 return retval;
67441 @@ -1560,10 +1571,22 @@ out:
67442 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67443 struct page *page)
67444 {
67445 +
67446 +#ifdef CONFIG_PAX_SEGMEXEC
67447 + struct vm_area_struct *vma_m;
67448 +#endif
67449 +
67450 if (addr < vma->vm_start || addr >= vma->vm_end)
67451 return -EFAULT;
67452 if (!page_count(page))
67453 return -EINVAL;
67454 +
67455 +#ifdef CONFIG_PAX_SEGMEXEC
67456 + vma_m = pax_find_mirror_vma(vma);
67457 + if (vma_m)
67458 + vma_m->vm_flags |= VM_INSERTPAGE;
67459 +#endif
67460 +
67461 vma->vm_flags |= VM_INSERTPAGE;
67462 return insert_page(vma, addr, page, vma->vm_page_prot);
67463 }
67464 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67465 unsigned long pfn)
67466 {
67467 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67468 + BUG_ON(vma->vm_mirror);
67469
67470 if (addr < vma->vm_start || addr >= vma->vm_end)
67471 return -EFAULT;
67472 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67473 copy_user_highpage(dst, src, va, vma);
67474 }
67475
67476 +#ifdef CONFIG_PAX_SEGMEXEC
67477 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67478 +{
67479 + struct mm_struct *mm = vma->vm_mm;
67480 + spinlock_t *ptl;
67481 + pte_t *pte, entry;
67482 +
67483 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67484 + entry = *pte;
67485 + if (!pte_present(entry)) {
67486 + if (!pte_none(entry)) {
67487 + BUG_ON(pte_file(entry));
67488 + free_swap_and_cache(pte_to_swp_entry(entry));
67489 + pte_clear_not_present_full(mm, address, pte, 0);
67490 + }
67491 + } else {
67492 + struct page *page;
67493 +
67494 + flush_cache_page(vma, address, pte_pfn(entry));
67495 + entry = ptep_clear_flush(vma, address, pte);
67496 + BUG_ON(pte_dirty(entry));
67497 + page = vm_normal_page(vma, address, entry);
67498 + if (page) {
67499 + update_hiwater_rss(mm);
67500 + if (PageAnon(page))
67501 + dec_mm_counter(mm, anon_rss);
67502 + else
67503 + dec_mm_counter(mm, file_rss);
67504 + page_remove_rmap(page);
67505 + page_cache_release(page);
67506 + }
67507 + }
67508 + pte_unmap_unlock(pte, ptl);
67509 +}
67510 +
67511 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67512 + *
67513 + * the ptl of the lower mapped page is held on entry and is not released on exit
67514 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67515 + */
67516 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67517 +{
67518 + struct mm_struct *mm = vma->vm_mm;
67519 + unsigned long address_m;
67520 + spinlock_t *ptl_m;
67521 + struct vm_area_struct *vma_m;
67522 + pmd_t *pmd_m;
67523 + pte_t *pte_m, entry_m;
67524 +
67525 + BUG_ON(!page_m || !PageAnon(page_m));
67526 +
67527 + vma_m = pax_find_mirror_vma(vma);
67528 + if (!vma_m)
67529 + return;
67530 +
67531 + BUG_ON(!PageLocked(page_m));
67532 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67533 + address_m = address + SEGMEXEC_TASK_SIZE;
67534 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67535 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67536 + ptl_m = pte_lockptr(mm, pmd_m);
67537 + if (ptl != ptl_m) {
67538 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67539 + if (!pte_none(*pte_m))
67540 + goto out;
67541 + }
67542 +
67543 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67544 + page_cache_get(page_m);
67545 + page_add_anon_rmap(page_m, vma_m, address_m);
67546 + inc_mm_counter(mm, anon_rss);
67547 + set_pte_at(mm, address_m, pte_m, entry_m);
67548 + update_mmu_cache(vma_m, address_m, entry_m);
67549 +out:
67550 + if (ptl != ptl_m)
67551 + spin_unlock(ptl_m);
67552 + pte_unmap_nested(pte_m);
67553 + unlock_page(page_m);
67554 +}
67555 +
67556 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67557 +{
67558 + struct mm_struct *mm = vma->vm_mm;
67559 + unsigned long address_m;
67560 + spinlock_t *ptl_m;
67561 + struct vm_area_struct *vma_m;
67562 + pmd_t *pmd_m;
67563 + pte_t *pte_m, entry_m;
67564 +
67565 + BUG_ON(!page_m || PageAnon(page_m));
67566 +
67567 + vma_m = pax_find_mirror_vma(vma);
67568 + if (!vma_m)
67569 + return;
67570 +
67571 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67572 + address_m = address + SEGMEXEC_TASK_SIZE;
67573 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67574 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67575 + ptl_m = pte_lockptr(mm, pmd_m);
67576 + if (ptl != ptl_m) {
67577 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67578 + if (!pte_none(*pte_m))
67579 + goto out;
67580 + }
67581 +
67582 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67583 + page_cache_get(page_m);
67584 + page_add_file_rmap(page_m);
67585 + inc_mm_counter(mm, file_rss);
67586 + set_pte_at(mm, address_m, pte_m, entry_m);
67587 + update_mmu_cache(vma_m, address_m, entry_m);
67588 +out:
67589 + if (ptl != ptl_m)
67590 + spin_unlock(ptl_m);
67591 + pte_unmap_nested(pte_m);
67592 +}
67593 +
67594 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67595 +{
67596 + struct mm_struct *mm = vma->vm_mm;
67597 + unsigned long address_m;
67598 + spinlock_t *ptl_m;
67599 + struct vm_area_struct *vma_m;
67600 + pmd_t *pmd_m;
67601 + pte_t *pte_m, entry_m;
67602 +
67603 + vma_m = pax_find_mirror_vma(vma);
67604 + if (!vma_m)
67605 + return;
67606 +
67607 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67608 + address_m = address + SEGMEXEC_TASK_SIZE;
67609 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67610 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67611 + ptl_m = pte_lockptr(mm, pmd_m);
67612 + if (ptl != ptl_m) {
67613 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67614 + if (!pte_none(*pte_m))
67615 + goto out;
67616 + }
67617 +
67618 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67619 + set_pte_at(mm, address_m, pte_m, entry_m);
67620 +out:
67621 + if (ptl != ptl_m)
67622 + spin_unlock(ptl_m);
67623 + pte_unmap_nested(pte_m);
67624 +}
67625 +
67626 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67627 +{
67628 + struct page *page_m;
67629 + pte_t entry;
67630 +
67631 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67632 + goto out;
67633 +
67634 + entry = *pte;
67635 + page_m = vm_normal_page(vma, address, entry);
67636 + if (!page_m)
67637 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67638 + else if (PageAnon(page_m)) {
67639 + if (pax_find_mirror_vma(vma)) {
67640 + pte_unmap_unlock(pte, ptl);
67641 + lock_page(page_m);
67642 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67643 + if (pte_same(entry, *pte))
67644 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67645 + else
67646 + unlock_page(page_m);
67647 + }
67648 + } else
67649 + pax_mirror_file_pte(vma, address, page_m, ptl);
67650 +
67651 +out:
67652 + pte_unmap_unlock(pte, ptl);
67653 +}
67654 +#endif
67655 +
67656 /*
67657 * This routine handles present pages, when users try to write
67658 * to a shared page. It is done by copying the page to a new address
67659 @@ -2156,6 +2360,12 @@ gotten:
67660 */
67661 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67662 if (likely(pte_same(*page_table, orig_pte))) {
67663 +
67664 +#ifdef CONFIG_PAX_SEGMEXEC
67665 + if (pax_find_mirror_vma(vma))
67666 + BUG_ON(!trylock_page(new_page));
67667 +#endif
67668 +
67669 if (old_page) {
67670 if (!PageAnon(old_page)) {
67671 dec_mm_counter(mm, file_rss);
67672 @@ -2207,6 +2417,10 @@ gotten:
67673 page_remove_rmap(old_page);
67674 }
67675
67676 +#ifdef CONFIG_PAX_SEGMEXEC
67677 + pax_mirror_anon_pte(vma, address, new_page, ptl);
67678 +#endif
67679 +
67680 /* Free the old page.. */
67681 new_page = old_page;
67682 ret |= VM_FAULT_WRITE;
67683 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67684 swap_free(entry);
67685 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67686 try_to_free_swap(page);
67687 +
67688 +#ifdef CONFIG_PAX_SEGMEXEC
67689 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67690 +#endif
67691 +
67692 unlock_page(page);
67693
67694 if (flags & FAULT_FLAG_WRITE) {
67695 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67696
67697 /* No need to invalidate - it was non-present before */
67698 update_mmu_cache(vma, address, pte);
67699 +
67700 +#ifdef CONFIG_PAX_SEGMEXEC
67701 + pax_mirror_anon_pte(vma, address, page, ptl);
67702 +#endif
67703 +
67704 unlock:
67705 pte_unmap_unlock(page_table, ptl);
67706 out:
67707 @@ -2632,40 +2856,6 @@ out_release:
67708 }
67709
67710 /*
67711 - * This is like a special single-page "expand_{down|up}wards()",
67712 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
67713 - * doesn't hit another vma.
67714 - */
67715 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67716 -{
67717 - address &= PAGE_MASK;
67718 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67719 - struct vm_area_struct *prev = vma->vm_prev;
67720 -
67721 - /*
67722 - * Is there a mapping abutting this one below?
67723 - *
67724 - * That's only ok if it's the same stack mapping
67725 - * that has gotten split..
67726 - */
67727 - if (prev && prev->vm_end == address)
67728 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67729 -
67730 - expand_stack(vma, address - PAGE_SIZE);
67731 - }
67732 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67733 - struct vm_area_struct *next = vma->vm_next;
67734 -
67735 - /* As VM_GROWSDOWN but s/below/above/ */
67736 - if (next && next->vm_start == address + PAGE_SIZE)
67737 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67738 -
67739 - expand_upwards(vma, address + PAGE_SIZE);
67740 - }
67741 - return 0;
67742 -}
67743 -
67744 -/*
67745 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67746 * but allow concurrent faults), and pte mapped but not yet locked.
67747 * We return with mmap_sem still held, but pte unmapped and unlocked.
67748 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67749 unsigned long address, pte_t *page_table, pmd_t *pmd,
67750 unsigned int flags)
67751 {
67752 - struct page *page;
67753 + struct page *page = NULL;
67754 spinlock_t *ptl;
67755 pte_t entry;
67756
67757 - pte_unmap(page_table);
67758 -
67759 - /* Check if we need to add a guard page to the stack */
67760 - if (check_stack_guard_page(vma, address) < 0)
67761 - return VM_FAULT_SIGBUS;
67762 -
67763 - /* Use the zero-page for reads */
67764 if (!(flags & FAULT_FLAG_WRITE)) {
67765 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67766 vma->vm_page_prot));
67767 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67768 + ptl = pte_lockptr(mm, pmd);
67769 + spin_lock(ptl);
67770 if (!pte_none(*page_table))
67771 goto unlock;
67772 goto setpte;
67773 }
67774
67775 /* Allocate our own private page. */
67776 + pte_unmap(page_table);
67777 +
67778 if (unlikely(anon_vma_prepare(vma)))
67779 goto oom;
67780 page = alloc_zeroed_user_highpage_movable(vma, address);
67781 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67782 if (!pte_none(*page_table))
67783 goto release;
67784
67785 +#ifdef CONFIG_PAX_SEGMEXEC
67786 + if (pax_find_mirror_vma(vma))
67787 + BUG_ON(!trylock_page(page));
67788 +#endif
67789 +
67790 inc_mm_counter(mm, anon_rss);
67791 page_add_new_anon_rmap(page, vma, address);
67792 setpte:
67793 @@ -2720,6 +2911,12 @@ setpte:
67794
67795 /* No need to invalidate - it was non-present before */
67796 update_mmu_cache(vma, address, entry);
67797 +
67798 +#ifdef CONFIG_PAX_SEGMEXEC
67799 + if (page)
67800 + pax_mirror_anon_pte(vma, address, page, ptl);
67801 +#endif
67802 +
67803 unlock:
67804 pte_unmap_unlock(page_table, ptl);
67805 return 0;
67806 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67807 */
67808 /* Only go through if we didn't race with anybody else... */
67809 if (likely(pte_same(*page_table, orig_pte))) {
67810 +
67811 +#ifdef CONFIG_PAX_SEGMEXEC
67812 + if (anon && pax_find_mirror_vma(vma))
67813 + BUG_ON(!trylock_page(page));
67814 +#endif
67815 +
67816 flush_icache_page(vma, page);
67817 entry = mk_pte(page, vma->vm_page_prot);
67818 if (flags & FAULT_FLAG_WRITE)
67819 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67820
67821 /* no need to invalidate: a not-present page won't be cached */
67822 update_mmu_cache(vma, address, entry);
67823 +
67824 +#ifdef CONFIG_PAX_SEGMEXEC
67825 + if (anon)
67826 + pax_mirror_anon_pte(vma, address, page, ptl);
67827 + else
67828 + pax_mirror_file_pte(vma, address, page, ptl);
67829 +#endif
67830 +
67831 } else {
67832 if (charged)
67833 mem_cgroup_uncharge_page(page);
67834 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67835 if (flags & FAULT_FLAG_WRITE)
67836 flush_tlb_page(vma, address);
67837 }
67838 +
67839 +#ifdef CONFIG_PAX_SEGMEXEC
67840 + pax_mirror_pte(vma, address, pte, pmd, ptl);
67841 + return 0;
67842 +#endif
67843 +
67844 unlock:
67845 pte_unmap_unlock(pte, ptl);
67846 return 0;
67847 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67848 pmd_t *pmd;
67849 pte_t *pte;
67850
67851 +#ifdef CONFIG_PAX_SEGMEXEC
67852 + struct vm_area_struct *vma_m;
67853 +#endif
67854 +
67855 __set_current_state(TASK_RUNNING);
67856
67857 count_vm_event(PGFAULT);
67858 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67859 if (unlikely(is_vm_hugetlb_page(vma)))
67860 return hugetlb_fault(mm, vma, address, flags);
67861
67862 +#ifdef CONFIG_PAX_SEGMEXEC
67863 + vma_m = pax_find_mirror_vma(vma);
67864 + if (vma_m) {
67865 + unsigned long address_m;
67866 + pgd_t *pgd_m;
67867 + pud_t *pud_m;
67868 + pmd_t *pmd_m;
67869 +
67870 + if (vma->vm_start > vma_m->vm_start) {
67871 + address_m = address;
67872 + address -= SEGMEXEC_TASK_SIZE;
67873 + vma = vma_m;
67874 + } else
67875 + address_m = address + SEGMEXEC_TASK_SIZE;
67876 +
67877 + pgd_m = pgd_offset(mm, address_m);
67878 + pud_m = pud_alloc(mm, pgd_m, address_m);
67879 + if (!pud_m)
67880 + return VM_FAULT_OOM;
67881 + pmd_m = pmd_alloc(mm, pud_m, address_m);
67882 + if (!pmd_m)
67883 + return VM_FAULT_OOM;
67884 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67885 + return VM_FAULT_OOM;
67886 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67887 + }
67888 +#endif
67889 +
67890 pgd = pgd_offset(mm, address);
67891 pud = pud_alloc(mm, pgd, address);
67892 if (!pud)
67893 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67894 gate_vma.vm_start = FIXADDR_USER_START;
67895 gate_vma.vm_end = FIXADDR_USER_END;
67896 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67897 - gate_vma.vm_page_prot = __P101;
67898 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67899 /*
67900 * Make sure the vDSO gets into every core dump.
67901 * Dumping its contents makes post-mortem fully interpretable later
67902 diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67903 --- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67904 +++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67905 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67906
67907 int sysctl_memory_failure_recovery __read_mostly = 1;
67908
67909 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67910 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67911
67912 /*
67913 * Send all the processes who have the page mapped an ``action optional''
67914 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67915 return 0;
67916 }
67917
67918 - atomic_long_add(1, &mce_bad_pages);
67919 + atomic_long_add_unchecked(1, &mce_bad_pages);
67920
67921 /*
67922 * We need/can do nothing about count=0 pages.
67923 diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67924 --- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67925 +++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67926 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67927 struct vm_area_struct *next;
67928 int err;
67929
67930 +#ifdef CONFIG_PAX_SEGMEXEC
67931 + struct vm_area_struct *vma_m;
67932 +#endif
67933 +
67934 err = 0;
67935 for (; vma && vma->vm_start < end; vma = next) {
67936 next = vma->vm_next;
67937 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67938 err = policy_vma(vma, new);
67939 if (err)
67940 break;
67941 +
67942 +#ifdef CONFIG_PAX_SEGMEXEC
67943 + vma_m = pax_find_mirror_vma(vma);
67944 + if (vma_m) {
67945 + err = policy_vma(vma_m, new);
67946 + if (err)
67947 + break;
67948 + }
67949 +#endif
67950 +
67951 }
67952 return err;
67953 }
67954 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67955
67956 if (end < start)
67957 return -EINVAL;
67958 +
67959 +#ifdef CONFIG_PAX_SEGMEXEC
67960 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67961 + if (end > SEGMEXEC_TASK_SIZE)
67962 + return -EINVAL;
67963 + } else
67964 +#endif
67965 +
67966 + if (end > TASK_SIZE)
67967 + return -EINVAL;
67968 +
67969 if (end == start)
67970 return 0;
67971
67972 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67973 if (!mm)
67974 return -EINVAL;
67975
67976 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67977 + if (mm != current->mm &&
67978 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67979 + err = -EPERM;
67980 + goto out;
67981 + }
67982 +#endif
67983 +
67984 /*
67985 * Check if this process has the right to modify the specified
67986 * process. The right exists if the process has administrative
67987 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67988 rcu_read_lock();
67989 tcred = __task_cred(task);
67990 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67991 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67992 - !capable(CAP_SYS_NICE)) {
67993 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67994 rcu_read_unlock();
67995 err = -EPERM;
67996 goto out;
67997 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67998
67999 if (file) {
68000 seq_printf(m, " file=");
68001 - seq_path(m, &file->f_path, "\n\t= ");
68002 + seq_path(m, &file->f_path, "\n\t\\= ");
68003 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68004 seq_printf(m, " heap");
68005 } else if (vma->vm_start <= mm->start_stack &&
68006 diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
68007 --- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
68008 +++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
68009 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
68010 unsigned long chunk_start;
68011 int err;
68012
68013 + pax_track_stack();
68014 +
68015 task_nodes = cpuset_mems_allowed(task);
68016
68017 err = -ENOMEM;
68018 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68019 if (!mm)
68020 return -EINVAL;
68021
68022 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68023 + if (mm != current->mm &&
68024 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68025 + err = -EPERM;
68026 + goto out;
68027 + }
68028 +#endif
68029 +
68030 /*
68031 * Check if this process has the right to modify the specified
68032 * process. The right exists if the process has administrative
68033 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68034 rcu_read_lock();
68035 tcred = __task_cred(task);
68036 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68037 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68038 - !capable(CAP_SYS_NICE)) {
68039 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68040 rcu_read_unlock();
68041 err = -EPERM;
68042 goto out;
68043 diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
68044 --- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
68045 +++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
68046 @@ -13,6 +13,7 @@
68047 #include <linux/pagemap.h>
68048 #include <linux/mempolicy.h>
68049 #include <linux/syscalls.h>
68050 +#include <linux/security.h>
68051 #include <linux/sched.h>
68052 #include <linux/module.h>
68053 #include <linux/rmap.h>
68054 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68055 }
68056 }
68057
68058 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68059 -{
68060 - return (vma->vm_flags & VM_GROWSDOWN) &&
68061 - (vma->vm_start == addr) &&
68062 - !vma_stack_continue(vma->vm_prev, addr);
68063 -}
68064 -
68065 /**
68066 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68067 * @vma: target vma
68068 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68069 if (vma->vm_flags & VM_WRITE)
68070 gup_flags |= FOLL_WRITE;
68071
68072 - /* We don't try to access the guard page of a stack vma */
68073 - if (stack_guard_page(vma, start)) {
68074 - addr += PAGE_SIZE;
68075 - nr_pages--;
68076 - }
68077 -
68078 while (nr_pages > 0) {
68079 int i;
68080
68081 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68082 {
68083 unsigned long nstart, end, tmp;
68084 struct vm_area_struct * vma, * prev;
68085 - int error;
68086 + int error = -EINVAL;
68087
68088 len = PAGE_ALIGN(len);
68089 end = start + len;
68090 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68091 return -EINVAL;
68092 if (end == start)
68093 return 0;
68094 + if (end > TASK_SIZE)
68095 + return -EINVAL;
68096 +
68097 vma = find_vma_prev(current->mm, start, &prev);
68098 if (!vma || vma->vm_start > start)
68099 return -ENOMEM;
68100 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68101 for (nstart = start ; ; ) {
68102 unsigned int newflags;
68103
68104 +#ifdef CONFIG_PAX_SEGMEXEC
68105 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68106 + break;
68107 +#endif
68108 +
68109 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68110
68111 newflags = vma->vm_flags | VM_LOCKED;
68112 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68113 lock_limit >>= PAGE_SHIFT;
68114
68115 /* check against resource limits */
68116 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68117 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68118 error = do_mlock(start, len, 1);
68119 up_write(&current->mm->mmap_sem);
68120 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68121 static int do_mlockall(int flags)
68122 {
68123 struct vm_area_struct * vma, * prev = NULL;
68124 - unsigned int def_flags = 0;
68125
68126 if (flags & MCL_FUTURE)
68127 - def_flags = VM_LOCKED;
68128 - current->mm->def_flags = def_flags;
68129 + current->mm->def_flags |= VM_LOCKED;
68130 + else
68131 + current->mm->def_flags &= ~VM_LOCKED;
68132 if (flags == MCL_FUTURE)
68133 goto out;
68134
68135 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68136 - unsigned int newflags;
68137 + unsigned long newflags;
68138 +
68139 +#ifdef CONFIG_PAX_SEGMEXEC
68140 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68141 + break;
68142 +#endif
68143
68144 + BUG_ON(vma->vm_end > TASK_SIZE);
68145 newflags = vma->vm_flags | VM_LOCKED;
68146 if (!(flags & MCL_CURRENT))
68147 newflags &= ~VM_LOCKED;
68148 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68149 lock_limit >>= PAGE_SHIFT;
68150
68151 ret = -ENOMEM;
68152 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68153 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68154 capable(CAP_IPC_LOCK))
68155 ret = do_mlockall(flags);
68156 diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68157 --- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68158 +++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68159 @@ -45,6 +45,16 @@
68160 #define arch_rebalance_pgtables(addr, len) (addr)
68161 #endif
68162
68163 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68164 +{
68165 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68166 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68167 + up_read(&mm->mmap_sem);
68168 + BUG();
68169 + }
68170 +#endif
68171 +}
68172 +
68173 static void unmap_region(struct mm_struct *mm,
68174 struct vm_area_struct *vma, struct vm_area_struct *prev,
68175 unsigned long start, unsigned long end);
68176 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68177 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68178 *
68179 */
68180 -pgprot_t protection_map[16] = {
68181 +pgprot_t protection_map[16] __read_only = {
68182 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68183 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68184 };
68185
68186 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68187 {
68188 - return __pgprot(pgprot_val(protection_map[vm_flags &
68189 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68190 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68191 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68192 +
68193 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68194 + if (!nx_enabled &&
68195 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68196 + (vm_flags & (VM_READ | VM_WRITE)))
68197 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68198 +#endif
68199 +
68200 + return prot;
68201 }
68202 EXPORT_SYMBOL(vm_get_page_prot);
68203
68204 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68205 int sysctl_overcommit_ratio = 50; /* default is 50% */
68206 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68207 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68208 struct percpu_counter vm_committed_as;
68209
68210 /*
68211 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68212 struct vm_area_struct *next = vma->vm_next;
68213
68214 might_sleep();
68215 + BUG_ON(vma->vm_mirror);
68216 if (vma->vm_ops && vma->vm_ops->close)
68217 vma->vm_ops->close(vma);
68218 if (vma->vm_file) {
68219 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68220 * not page aligned -Ram Gupta
68221 */
68222 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68223 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68224 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68225 (mm->end_data - mm->start_data) > rlim)
68226 goto out;
68227 @@ -704,6 +726,12 @@ static int
68228 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68229 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68230 {
68231 +
68232 +#ifdef CONFIG_PAX_SEGMEXEC
68233 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68234 + return 0;
68235 +#endif
68236 +
68237 if (is_mergeable_vma(vma, file, vm_flags) &&
68238 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68239 if (vma->vm_pgoff == vm_pgoff)
68240 @@ -723,6 +751,12 @@ static int
68241 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68242 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68243 {
68244 +
68245 +#ifdef CONFIG_PAX_SEGMEXEC
68246 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68247 + return 0;
68248 +#endif
68249 +
68250 if (is_mergeable_vma(vma, file, vm_flags) &&
68251 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68252 pgoff_t vm_pglen;
68253 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68254 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68255 struct vm_area_struct *prev, unsigned long addr,
68256 unsigned long end, unsigned long vm_flags,
68257 - struct anon_vma *anon_vma, struct file *file,
68258 + struct anon_vma *anon_vma, struct file *file,
68259 pgoff_t pgoff, struct mempolicy *policy)
68260 {
68261 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68262 struct vm_area_struct *area, *next;
68263
68264 +#ifdef CONFIG_PAX_SEGMEXEC
68265 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68266 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68267 +
68268 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68269 +#endif
68270 +
68271 /*
68272 * We later require that vma->vm_flags == vm_flags,
68273 * so this tests vma->vm_flags & VM_SPECIAL, too.
68274 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68275 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68276 next = next->vm_next;
68277
68278 +#ifdef CONFIG_PAX_SEGMEXEC
68279 + if (prev)
68280 + prev_m = pax_find_mirror_vma(prev);
68281 + if (area)
68282 + area_m = pax_find_mirror_vma(area);
68283 + if (next)
68284 + next_m = pax_find_mirror_vma(next);
68285 +#endif
68286 +
68287 /*
68288 * Can it merge with the predecessor?
68289 */
68290 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68291 /* cases 1, 6 */
68292 vma_adjust(prev, prev->vm_start,
68293 next->vm_end, prev->vm_pgoff, NULL);
68294 - } else /* cases 2, 5, 7 */
68295 +
68296 +#ifdef CONFIG_PAX_SEGMEXEC
68297 + if (prev_m)
68298 + vma_adjust(prev_m, prev_m->vm_start,
68299 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68300 +#endif
68301 +
68302 + } else { /* cases 2, 5, 7 */
68303 vma_adjust(prev, prev->vm_start,
68304 end, prev->vm_pgoff, NULL);
68305 +
68306 +#ifdef CONFIG_PAX_SEGMEXEC
68307 + if (prev_m)
68308 + vma_adjust(prev_m, prev_m->vm_start,
68309 + end_m, prev_m->vm_pgoff, NULL);
68310 +#endif
68311 +
68312 + }
68313 return prev;
68314 }
68315
68316 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68317 mpol_equal(policy, vma_policy(next)) &&
68318 can_vma_merge_before(next, vm_flags,
68319 anon_vma, file, pgoff+pglen)) {
68320 - if (prev && addr < prev->vm_end) /* case 4 */
68321 + if (prev && addr < prev->vm_end) { /* case 4 */
68322 vma_adjust(prev, prev->vm_start,
68323 addr, prev->vm_pgoff, NULL);
68324 - else /* cases 3, 8 */
68325 +
68326 +#ifdef CONFIG_PAX_SEGMEXEC
68327 + if (prev_m)
68328 + vma_adjust(prev_m, prev_m->vm_start,
68329 + addr_m, prev_m->vm_pgoff, NULL);
68330 +#endif
68331 +
68332 + } else { /* cases 3, 8 */
68333 vma_adjust(area, addr, next->vm_end,
68334 next->vm_pgoff - pglen, NULL);
68335 +
68336 +#ifdef CONFIG_PAX_SEGMEXEC
68337 + if (area_m)
68338 + vma_adjust(area_m, addr_m, next_m->vm_end,
68339 + next_m->vm_pgoff - pglen, NULL);
68340 +#endif
68341 +
68342 + }
68343 return area;
68344 }
68345
68346 @@ -898,14 +978,11 @@ none:
68347 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68348 struct file *file, long pages)
68349 {
68350 - const unsigned long stack_flags
68351 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68352 -
68353 if (file) {
68354 mm->shared_vm += pages;
68355 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68356 mm->exec_vm += pages;
68357 - } else if (flags & stack_flags)
68358 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68359 mm->stack_vm += pages;
68360 if (flags & (VM_RESERVED|VM_IO))
68361 mm->reserved_vm += pages;
68362 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68363 * (the exception is when the underlying filesystem is noexec
68364 * mounted, in which case we dont add PROT_EXEC.)
68365 */
68366 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68367 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68368 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68369 prot |= PROT_EXEC;
68370
68371 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68372 /* Obtain the address to map to. we verify (or select) it and ensure
68373 * that it represents a valid section of the address space.
68374 */
68375 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68376 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68377 if (addr & ~PAGE_MASK)
68378 return addr;
68379
68380 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68381 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68382 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68383
68384 +#ifdef CONFIG_PAX_MPROTECT
68385 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68386 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68387 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68388 + gr_log_rwxmmap(file);
68389 +
68390 +#ifdef CONFIG_PAX_EMUPLT
68391 + vm_flags &= ~VM_EXEC;
68392 +#else
68393 + return -EPERM;
68394 +#endif
68395 +
68396 + }
68397 +
68398 + if (!(vm_flags & VM_EXEC))
68399 + vm_flags &= ~VM_MAYEXEC;
68400 +#else
68401 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68402 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68403 +#endif
68404 + else
68405 + vm_flags &= ~VM_MAYWRITE;
68406 + }
68407 +#endif
68408 +
68409 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68410 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68411 + vm_flags &= ~VM_PAGEEXEC;
68412 +#endif
68413 +
68414 if (flags & MAP_LOCKED)
68415 if (!can_do_mlock())
68416 return -EPERM;
68417 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68418 locked += mm->locked_vm;
68419 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68420 lock_limit >>= PAGE_SHIFT;
68421 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68422 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68423 return -EAGAIN;
68424 }
68425 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68426 if (error)
68427 return error;
68428
68429 + if (!gr_acl_handle_mmap(file, prot))
68430 + return -EACCES;
68431 +
68432 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68433 }
68434 EXPORT_SYMBOL(do_mmap_pgoff);
68435 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68436 */
68437 int vma_wants_writenotify(struct vm_area_struct *vma)
68438 {
68439 - unsigned int vm_flags = vma->vm_flags;
68440 + unsigned long vm_flags = vma->vm_flags;
68441
68442 /* If it was private or non-writable, the write bit is already clear */
68443 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68444 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68445 return 0;
68446
68447 /* The backer wishes to know when pages are first written to? */
68448 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68449 unsigned long charged = 0;
68450 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68451
68452 +#ifdef CONFIG_PAX_SEGMEXEC
68453 + struct vm_area_struct *vma_m = NULL;
68454 +#endif
68455 +
68456 + /*
68457 + * mm->mmap_sem is required to protect against another thread
68458 + * changing the mappings in case we sleep.
68459 + */
68460 + verify_mm_writelocked(mm);
68461 +
68462 /* Clear old maps */
68463 error = -ENOMEM;
68464 -munmap_back:
68465 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68466 if (vma && vma->vm_start < addr + len) {
68467 if (do_munmap(mm, addr, len))
68468 return -ENOMEM;
68469 - goto munmap_back;
68470 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68471 + BUG_ON(vma && vma->vm_start < addr + len);
68472 }
68473
68474 /* Check against address space limit. */
68475 @@ -1173,6 +1294,16 @@ munmap_back:
68476 goto unacct_error;
68477 }
68478
68479 +#ifdef CONFIG_PAX_SEGMEXEC
68480 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68481 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68482 + if (!vma_m) {
68483 + error = -ENOMEM;
68484 + goto free_vma;
68485 + }
68486 + }
68487 +#endif
68488 +
68489 vma->vm_mm = mm;
68490 vma->vm_start = addr;
68491 vma->vm_end = addr + len;
68492 @@ -1195,6 +1326,19 @@ munmap_back:
68493 error = file->f_op->mmap(file, vma);
68494 if (error)
68495 goto unmap_and_free_vma;
68496 +
68497 +#ifdef CONFIG_PAX_SEGMEXEC
68498 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68499 + added_exe_file_vma(mm);
68500 +#endif
68501 +
68502 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68503 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68504 + vma->vm_flags |= VM_PAGEEXEC;
68505 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68506 + }
68507 +#endif
68508 +
68509 if (vm_flags & VM_EXECUTABLE)
68510 added_exe_file_vma(mm);
68511
68512 @@ -1218,6 +1362,11 @@ munmap_back:
68513 vma_link(mm, vma, prev, rb_link, rb_parent);
68514 file = vma->vm_file;
68515
68516 +#ifdef CONFIG_PAX_SEGMEXEC
68517 + if (vma_m)
68518 + pax_mirror_vma(vma_m, vma);
68519 +#endif
68520 +
68521 /* Once vma denies write, undo our temporary denial count */
68522 if (correct_wcount)
68523 atomic_inc(&inode->i_writecount);
68524 @@ -1226,6 +1375,7 @@ out:
68525
68526 mm->total_vm += len >> PAGE_SHIFT;
68527 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68528 + track_exec_limit(mm, addr, addr + len, vm_flags);
68529 if (vm_flags & VM_LOCKED) {
68530 /*
68531 * makes pages present; downgrades, drops, reacquires mmap_sem
68532 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68533 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68534 charged = 0;
68535 free_vma:
68536 +
68537 +#ifdef CONFIG_PAX_SEGMEXEC
68538 + if (vma_m)
68539 + kmem_cache_free(vm_area_cachep, vma_m);
68540 +#endif
68541 +
68542 kmem_cache_free(vm_area_cachep, vma);
68543 unacct_error:
68544 if (charged)
68545 @@ -1255,6 +1411,44 @@ unacct_error:
68546 return error;
68547 }
68548
68549 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68550 +{
68551 + if (!vma) {
68552 +#ifdef CONFIG_STACK_GROWSUP
68553 + if (addr > sysctl_heap_stack_gap)
68554 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68555 + else
68556 + vma = find_vma(current->mm, 0);
68557 + if (vma && (vma->vm_flags & VM_GROWSUP))
68558 + return false;
68559 +#endif
68560 + return true;
68561 + }
68562 +
68563 + if (addr + len > vma->vm_start)
68564 + return false;
68565 +
68566 + if (vma->vm_flags & VM_GROWSDOWN)
68567 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68568 +#ifdef CONFIG_STACK_GROWSUP
68569 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68570 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68571 +#endif
68572 +
68573 + return true;
68574 +}
68575 +
68576 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68577 +{
68578 + if (vma->vm_start < len)
68579 + return -ENOMEM;
68580 + if (!(vma->vm_flags & VM_GROWSDOWN))
68581 + return vma->vm_start - len;
68582 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68583 + return vma->vm_start - len - sysctl_heap_stack_gap;
68584 + return -ENOMEM;
68585 +}
68586 +
68587 /* Get an address range which is currently unmapped.
68588 * For shmat() with addr=0.
68589 *
68590 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68591 if (flags & MAP_FIXED)
68592 return addr;
68593
68594 +#ifdef CONFIG_PAX_RANDMMAP
68595 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68596 +#endif
68597 +
68598 if (addr) {
68599 addr = PAGE_ALIGN(addr);
68600 - vma = find_vma(mm, addr);
68601 - if (TASK_SIZE - len >= addr &&
68602 - (!vma || addr + len <= vma->vm_start))
68603 - return addr;
68604 + if (TASK_SIZE - len >= addr) {
68605 + vma = find_vma(mm, addr);
68606 + if (check_heap_stack_gap(vma, addr, len))
68607 + return addr;
68608 + }
68609 }
68610 if (len > mm->cached_hole_size) {
68611 - start_addr = addr = mm->free_area_cache;
68612 + start_addr = addr = mm->free_area_cache;
68613 } else {
68614 - start_addr = addr = TASK_UNMAPPED_BASE;
68615 - mm->cached_hole_size = 0;
68616 + start_addr = addr = mm->mmap_base;
68617 + mm->cached_hole_size = 0;
68618 }
68619
68620 full_search:
68621 @@ -1303,34 +1502,40 @@ full_search:
68622 * Start a new search - just in case we missed
68623 * some holes.
68624 */
68625 - if (start_addr != TASK_UNMAPPED_BASE) {
68626 - addr = TASK_UNMAPPED_BASE;
68627 - start_addr = addr;
68628 + if (start_addr != mm->mmap_base) {
68629 + start_addr = addr = mm->mmap_base;
68630 mm->cached_hole_size = 0;
68631 goto full_search;
68632 }
68633 return -ENOMEM;
68634 }
68635 - if (!vma || addr + len <= vma->vm_start) {
68636 - /*
68637 - * Remember the place where we stopped the search:
68638 - */
68639 - mm->free_area_cache = addr + len;
68640 - return addr;
68641 - }
68642 + if (check_heap_stack_gap(vma, addr, len))
68643 + break;
68644 if (addr + mm->cached_hole_size < vma->vm_start)
68645 mm->cached_hole_size = vma->vm_start - addr;
68646 addr = vma->vm_end;
68647 }
68648 +
68649 + /*
68650 + * Remember the place where we stopped the search:
68651 + */
68652 + mm->free_area_cache = addr + len;
68653 + return addr;
68654 }
68655 #endif
68656
68657 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68658 {
68659 +
68660 +#ifdef CONFIG_PAX_SEGMEXEC
68661 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68662 + return;
68663 +#endif
68664 +
68665 /*
68666 * Is this a new hole at the lowest possible address?
68667 */
68668 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68669 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68670 mm->free_area_cache = addr;
68671 mm->cached_hole_size = ~0UL;
68672 }
68673 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68674 {
68675 struct vm_area_struct *vma;
68676 struct mm_struct *mm = current->mm;
68677 - unsigned long addr = addr0;
68678 + unsigned long base = mm->mmap_base, addr = addr0;
68679
68680 /* requested length too big for entire address space */
68681 if (len > TASK_SIZE)
68682 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68683 if (flags & MAP_FIXED)
68684 return addr;
68685
68686 +#ifdef CONFIG_PAX_RANDMMAP
68687 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68688 +#endif
68689 +
68690 /* requesting a specific address */
68691 if (addr) {
68692 addr = PAGE_ALIGN(addr);
68693 - vma = find_vma(mm, addr);
68694 - if (TASK_SIZE - len >= addr &&
68695 - (!vma || addr + len <= vma->vm_start))
68696 - return addr;
68697 + if (TASK_SIZE - len >= addr) {
68698 + vma = find_vma(mm, addr);
68699 + if (check_heap_stack_gap(vma, addr, len))
68700 + return addr;
68701 + }
68702 }
68703
68704 /* check if free_area_cache is useful for us */
68705 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68706 /* make sure it can fit in the remaining address space */
68707 if (addr > len) {
68708 vma = find_vma(mm, addr-len);
68709 - if (!vma || addr <= vma->vm_start)
68710 + if (check_heap_stack_gap(vma, addr - len, len))
68711 /* remember the address as a hint for next time */
68712 return (mm->free_area_cache = addr-len);
68713 }
68714 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68715 * return with success:
68716 */
68717 vma = find_vma(mm, addr);
68718 - if (!vma || addr+len <= vma->vm_start)
68719 + if (check_heap_stack_gap(vma, addr, len))
68720 /* remember the address as a hint for next time */
68721 return (mm->free_area_cache = addr);
68722
68723 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68724 mm->cached_hole_size = vma->vm_start - addr;
68725
68726 /* try just below the current vma->vm_start */
68727 - addr = vma->vm_start-len;
68728 - } while (len < vma->vm_start);
68729 + addr = skip_heap_stack_gap(vma, len);
68730 + } while (!IS_ERR_VALUE(addr));
68731
68732 bottomup:
68733 /*
68734 @@ -1414,13 +1624,21 @@ bottomup:
68735 * can happen with large stack limits and large mmap()
68736 * allocations.
68737 */
68738 + mm->mmap_base = TASK_UNMAPPED_BASE;
68739 +
68740 +#ifdef CONFIG_PAX_RANDMMAP
68741 + if (mm->pax_flags & MF_PAX_RANDMMAP)
68742 + mm->mmap_base += mm->delta_mmap;
68743 +#endif
68744 +
68745 + mm->free_area_cache = mm->mmap_base;
68746 mm->cached_hole_size = ~0UL;
68747 - mm->free_area_cache = TASK_UNMAPPED_BASE;
68748 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68749 /*
68750 * Restore the topdown base:
68751 */
68752 - mm->free_area_cache = mm->mmap_base;
68753 + mm->mmap_base = base;
68754 + mm->free_area_cache = base;
68755 mm->cached_hole_size = ~0UL;
68756
68757 return addr;
68758 @@ -1429,6 +1647,12 @@ bottomup:
68759
68760 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68761 {
68762 +
68763 +#ifdef CONFIG_PAX_SEGMEXEC
68764 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68765 + return;
68766 +#endif
68767 +
68768 /*
68769 * Is this a new hole at the highest possible address?
68770 */
68771 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68772 mm->free_area_cache = addr;
68773
68774 /* dont allow allocations above current base */
68775 - if (mm->free_area_cache > mm->mmap_base)
68776 + if (mm->free_area_cache > mm->mmap_base) {
68777 mm->free_area_cache = mm->mmap_base;
68778 + mm->cached_hole_size = ~0UL;
68779 + }
68780 }
68781
68782 unsigned long
68783 @@ -1545,6 +1771,27 @@ out:
68784 return prev ? prev->vm_next : vma;
68785 }
68786
68787 +#ifdef CONFIG_PAX_SEGMEXEC
68788 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68789 +{
68790 + struct vm_area_struct *vma_m;
68791 +
68792 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68793 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68794 + BUG_ON(vma->vm_mirror);
68795 + return NULL;
68796 + }
68797 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68798 + vma_m = vma->vm_mirror;
68799 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68800 + BUG_ON(vma->vm_file != vma_m->vm_file);
68801 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68802 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68803 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68804 + return vma_m;
68805 +}
68806 +#endif
68807 +
68808 /*
68809 * Verify that the stack growth is acceptable and
68810 * update accounting. This is shared with both the
68811 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68812 return -ENOMEM;
68813
68814 /* Stack limit test */
68815 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
68816 if (size > rlim[RLIMIT_STACK].rlim_cur)
68817 return -ENOMEM;
68818
68819 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68820 unsigned long limit;
68821 locked = mm->locked_vm + grow;
68822 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68823 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68824 if (locked > limit && !capable(CAP_IPC_LOCK))
68825 return -ENOMEM;
68826 }
68827 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68828 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68829 * vma is the last one with address > vma->vm_end. Have to extend vma.
68830 */
68831 +#ifndef CONFIG_IA64
68832 +static
68833 +#endif
68834 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68835 {
68836 int error;
68837 + bool locknext;
68838
68839 if (!(vma->vm_flags & VM_GROWSUP))
68840 return -EFAULT;
68841
68842 + /* Also guard against wrapping around to address 0. */
68843 + if (address < PAGE_ALIGN(address+1))
68844 + address = PAGE_ALIGN(address+1);
68845 + else
68846 + return -ENOMEM;
68847 +
68848 /*
68849 * We must make sure the anon_vma is allocated
68850 * so that the anon_vma locking is not a noop.
68851 */
68852 if (unlikely(anon_vma_prepare(vma)))
68853 return -ENOMEM;
68854 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68855 + if (locknext && anon_vma_prepare(vma->vm_next))
68856 + return -ENOMEM;
68857 anon_vma_lock(vma);
68858 + if (locknext)
68859 + anon_vma_lock(vma->vm_next);
68860
68861 /*
68862 * vma->vm_start/vm_end cannot change under us because the caller
68863 * is required to hold the mmap_sem in read mode. We need the
68864 - * anon_vma lock to serialize against concurrent expand_stacks.
68865 - * Also guard against wrapping around to address 0.
68866 + * anon_vma locks to serialize against concurrent expand_stacks
68867 + * and expand_upwards.
68868 */
68869 - if (address < PAGE_ALIGN(address+4))
68870 - address = PAGE_ALIGN(address+4);
68871 - else {
68872 - anon_vma_unlock(vma);
68873 - return -ENOMEM;
68874 - }
68875 error = 0;
68876
68877 /* Somebody else might have raced and expanded it already */
68878 - if (address > vma->vm_end) {
68879 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68880 + error = -ENOMEM;
68881 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68882 unsigned long size, grow;
68883
68884 size = address - vma->vm_start;
68885 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68886 if (!error)
68887 vma->vm_end = address;
68888 }
68889 + if (locknext)
68890 + anon_vma_unlock(vma->vm_next);
68891 anon_vma_unlock(vma);
68892 return error;
68893 }
68894 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68895 unsigned long address)
68896 {
68897 int error;
68898 + bool lockprev = false;
68899 + struct vm_area_struct *prev;
68900
68901 /*
68902 * We must make sure the anon_vma is allocated
68903 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68904 if (error)
68905 return error;
68906
68907 + prev = vma->vm_prev;
68908 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68909 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68910 +#endif
68911 + if (lockprev && anon_vma_prepare(prev))
68912 + return -ENOMEM;
68913 + if (lockprev)
68914 + anon_vma_lock(prev);
68915 +
68916 anon_vma_lock(vma);
68917
68918 /*
68919 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68920 */
68921
68922 /* Somebody else might have raced and expanded it already */
68923 - if (address < vma->vm_start) {
68924 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68925 + error = -ENOMEM;
68926 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68927 unsigned long size, grow;
68928
68929 +#ifdef CONFIG_PAX_SEGMEXEC
68930 + struct vm_area_struct *vma_m;
68931 +
68932 + vma_m = pax_find_mirror_vma(vma);
68933 +#endif
68934 +
68935 size = vma->vm_end - address;
68936 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68937
68938 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68939 if (!error) {
68940 vma->vm_start = address;
68941 vma->vm_pgoff -= grow;
68942 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68943 +
68944 +#ifdef CONFIG_PAX_SEGMEXEC
68945 + if (vma_m) {
68946 + vma_m->vm_start -= grow << PAGE_SHIFT;
68947 + vma_m->vm_pgoff -= grow;
68948 + }
68949 +#endif
68950 +
68951 }
68952 }
68953 anon_vma_unlock(vma);
68954 + if (lockprev)
68955 + anon_vma_unlock(prev);
68956 return error;
68957 }
68958
68959 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68960 do {
68961 long nrpages = vma_pages(vma);
68962
68963 +#ifdef CONFIG_PAX_SEGMEXEC
68964 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68965 + vma = remove_vma(vma);
68966 + continue;
68967 + }
68968 +#endif
68969 +
68970 mm->total_vm -= nrpages;
68971 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68972 vma = remove_vma(vma);
68973 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68974 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68975 vma->vm_prev = NULL;
68976 do {
68977 +
68978 +#ifdef CONFIG_PAX_SEGMEXEC
68979 + if (vma->vm_mirror) {
68980 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68981 + vma->vm_mirror->vm_mirror = NULL;
68982 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
68983 + vma->vm_mirror = NULL;
68984 + }
68985 +#endif
68986 +
68987 rb_erase(&vma->vm_rb, &mm->mm_rb);
68988 mm->map_count--;
68989 tail_vma = vma;
68990 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68991 struct mempolicy *pol;
68992 struct vm_area_struct *new;
68993
68994 +#ifdef CONFIG_PAX_SEGMEXEC
68995 + struct vm_area_struct *vma_m, *new_m = NULL;
68996 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68997 +#endif
68998 +
68999 if (is_vm_hugetlb_page(vma) && (addr &
69000 ~(huge_page_mask(hstate_vma(vma)))))
69001 return -EINVAL;
69002
69003 +#ifdef CONFIG_PAX_SEGMEXEC
69004 + vma_m = pax_find_mirror_vma(vma);
69005 +
69006 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69007 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69008 + if (mm->map_count >= sysctl_max_map_count-1)
69009 + return -ENOMEM;
69010 + } else
69011 +#endif
69012 +
69013 if (mm->map_count >= sysctl_max_map_count)
69014 return -ENOMEM;
69015
69016 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
69017 if (!new)
69018 return -ENOMEM;
69019
69020 +#ifdef CONFIG_PAX_SEGMEXEC
69021 + if (vma_m) {
69022 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69023 + if (!new_m) {
69024 + kmem_cache_free(vm_area_cachep, new);
69025 + return -ENOMEM;
69026 + }
69027 + }
69028 +#endif
69029 +
69030 /* most fields are the same, copy all, and then fixup */
69031 *new = *vma;
69032
69033 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
69034 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69035 }
69036
69037 +#ifdef CONFIG_PAX_SEGMEXEC
69038 + if (vma_m) {
69039 + *new_m = *vma_m;
69040 + new_m->vm_mirror = new;
69041 + new->vm_mirror = new_m;
69042 +
69043 + if (new_below)
69044 + new_m->vm_end = addr_m;
69045 + else {
69046 + new_m->vm_start = addr_m;
69047 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69048 + }
69049 + }
69050 +#endif
69051 +
69052 pol = mpol_dup(vma_policy(vma));
69053 if (IS_ERR(pol)) {
69054 +
69055 +#ifdef CONFIG_PAX_SEGMEXEC
69056 + if (new_m)
69057 + kmem_cache_free(vm_area_cachep, new_m);
69058 +#endif
69059 +
69060 kmem_cache_free(vm_area_cachep, new);
69061 return PTR_ERR(pol);
69062 }
69063 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69064 else
69065 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69066
69067 +#ifdef CONFIG_PAX_SEGMEXEC
69068 + if (vma_m) {
69069 + mpol_get(pol);
69070 + vma_set_policy(new_m, pol);
69071 +
69072 + if (new_m->vm_file) {
69073 + get_file(new_m->vm_file);
69074 + if (vma_m->vm_flags & VM_EXECUTABLE)
69075 + added_exe_file_vma(mm);
69076 + }
69077 +
69078 + if (new_m->vm_ops && new_m->vm_ops->open)
69079 + new_m->vm_ops->open(new_m);
69080 +
69081 + if (new_below)
69082 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69083 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69084 + else
69085 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69086 + }
69087 +#endif
69088 +
69089 return 0;
69090 }
69091
69092 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69093 * work. This now handles partial unmappings.
69094 * Jeremy Fitzhardinge <jeremy@goop.org>
69095 */
69096 +#ifdef CONFIG_PAX_SEGMEXEC
69097 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69098 +{
69099 + int ret = __do_munmap(mm, start, len);
69100 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69101 + return ret;
69102 +
69103 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69104 +}
69105 +
69106 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69107 +#else
69108 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69109 +#endif
69110 {
69111 unsigned long end;
69112 struct vm_area_struct *vma, *prev, *last;
69113
69114 + /*
69115 + * mm->mmap_sem is required to protect against another thread
69116 + * changing the mappings in case we sleep.
69117 + */
69118 + verify_mm_writelocked(mm);
69119 +
69120 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69121 return -EINVAL;
69122
69123 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69124 /* Fix up all other VM information */
69125 remove_vma_list(mm, vma);
69126
69127 + track_exec_limit(mm, start, end, 0UL);
69128 +
69129 return 0;
69130 }
69131
69132 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69133
69134 profile_munmap(addr);
69135
69136 +#ifdef CONFIG_PAX_SEGMEXEC
69137 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69138 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69139 + return -EINVAL;
69140 +#endif
69141 +
69142 down_write(&mm->mmap_sem);
69143 ret = do_munmap(mm, addr, len);
69144 up_write(&mm->mmap_sem);
69145 return ret;
69146 }
69147
69148 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69149 -{
69150 -#ifdef CONFIG_DEBUG_VM
69151 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69152 - WARN_ON(1);
69153 - up_read(&mm->mmap_sem);
69154 - }
69155 -#endif
69156 -}
69157 -
69158 /*
69159 * this is really a simplified "do_mmap". it only handles
69160 * anonymous maps. eventually we may be able to do some
69161 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69162 struct rb_node ** rb_link, * rb_parent;
69163 pgoff_t pgoff = addr >> PAGE_SHIFT;
69164 int error;
69165 + unsigned long charged;
69166
69167 len = PAGE_ALIGN(len);
69168 if (!len)
69169 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69170
69171 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69172
69173 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69174 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69175 + flags &= ~VM_EXEC;
69176 +
69177 +#ifdef CONFIG_PAX_MPROTECT
69178 + if (mm->pax_flags & MF_PAX_MPROTECT)
69179 + flags &= ~VM_MAYEXEC;
69180 +#endif
69181 +
69182 + }
69183 +#endif
69184 +
69185 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69186 if (error & ~PAGE_MASK)
69187 return error;
69188
69189 + charged = len >> PAGE_SHIFT;
69190 +
69191 /*
69192 * mlock MCL_FUTURE?
69193 */
69194 if (mm->def_flags & VM_LOCKED) {
69195 unsigned long locked, lock_limit;
69196 - locked = len >> PAGE_SHIFT;
69197 + locked = charged;
69198 locked += mm->locked_vm;
69199 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69200 lock_limit >>= PAGE_SHIFT;
69201 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69202 /*
69203 * Clear old maps. this also does some error checking for us
69204 */
69205 - munmap_back:
69206 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69207 if (vma && vma->vm_start < addr + len) {
69208 if (do_munmap(mm, addr, len))
69209 return -ENOMEM;
69210 - goto munmap_back;
69211 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69212 + BUG_ON(vma && vma->vm_start < addr + len);
69213 }
69214
69215 /* Check against address space limits *after* clearing old maps... */
69216 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69217 + if (!may_expand_vm(mm, charged))
69218 return -ENOMEM;
69219
69220 if (mm->map_count > sysctl_max_map_count)
69221 return -ENOMEM;
69222
69223 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69224 + if (security_vm_enough_memory(charged))
69225 return -ENOMEM;
69226
69227 /* Can we just expand an old private anonymous mapping? */
69228 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69229 */
69230 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69231 if (!vma) {
69232 - vm_unacct_memory(len >> PAGE_SHIFT);
69233 + vm_unacct_memory(charged);
69234 return -ENOMEM;
69235 }
69236
69237 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69238 vma->vm_page_prot = vm_get_page_prot(flags);
69239 vma_link(mm, vma, prev, rb_link, rb_parent);
69240 out:
69241 - mm->total_vm += len >> PAGE_SHIFT;
69242 + mm->total_vm += charged;
69243 if (flags & VM_LOCKED) {
69244 if (!mlock_vma_pages_range(vma, addr, addr + len))
69245 - mm->locked_vm += (len >> PAGE_SHIFT);
69246 + mm->locked_vm += charged;
69247 }
69248 + track_exec_limit(mm, addr, addr + len, flags);
69249 return addr;
69250 }
69251
69252 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69253 * Walk the list again, actually closing and freeing it,
69254 * with preemption enabled, without holding any MM locks.
69255 */
69256 - while (vma)
69257 + while (vma) {
69258 + vma->vm_mirror = NULL;
69259 vma = remove_vma(vma);
69260 + }
69261
69262 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69263 }
69264 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69265 struct vm_area_struct * __vma, * prev;
69266 struct rb_node ** rb_link, * rb_parent;
69267
69268 +#ifdef CONFIG_PAX_SEGMEXEC
69269 + struct vm_area_struct *vma_m = NULL;
69270 +#endif
69271 +
69272 /*
69273 * The vm_pgoff of a purely anonymous vma should be irrelevant
69274 * until its first write fault, when page's anon_vma and index
69275 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69276 if ((vma->vm_flags & VM_ACCOUNT) &&
69277 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69278 return -ENOMEM;
69279 +
69280 +#ifdef CONFIG_PAX_SEGMEXEC
69281 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69282 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69283 + if (!vma_m)
69284 + return -ENOMEM;
69285 + }
69286 +#endif
69287 +
69288 vma_link(mm, vma, prev, rb_link, rb_parent);
69289 +
69290 +#ifdef CONFIG_PAX_SEGMEXEC
69291 + if (vma_m)
69292 + pax_mirror_vma(vma_m, vma);
69293 +#endif
69294 +
69295 return 0;
69296 }
69297
69298 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69299 struct rb_node **rb_link, *rb_parent;
69300 struct mempolicy *pol;
69301
69302 + BUG_ON(vma->vm_mirror);
69303 +
69304 /*
69305 * If anonymous vma has not yet been faulted, update new pgoff
69306 * to match new location, to increase its chance of merging.
69307 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69308 return new_vma;
69309 }
69310
69311 +#ifdef CONFIG_PAX_SEGMEXEC
69312 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69313 +{
69314 + struct vm_area_struct *prev_m;
69315 + struct rb_node **rb_link_m, *rb_parent_m;
69316 + struct mempolicy *pol_m;
69317 +
69318 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69319 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69320 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69321 + *vma_m = *vma;
69322 + pol_m = vma_policy(vma_m);
69323 + mpol_get(pol_m);
69324 + vma_set_policy(vma_m, pol_m);
69325 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69326 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69327 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69328 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69329 + if (vma_m->vm_file)
69330 + get_file(vma_m->vm_file);
69331 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69332 + vma_m->vm_ops->open(vma_m);
69333 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69334 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69335 + vma_m->vm_mirror = vma;
69336 + vma->vm_mirror = vma_m;
69337 +}
69338 +#endif
69339 +
69340 /*
69341 * Return true if the calling process may expand its vm space by the passed
69342 * number of pages
69343 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69344 unsigned long lim;
69345
69346 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69347 -
69348 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69349 if (cur + npages > lim)
69350 return 0;
69351 return 1;
69352 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69353 vma->vm_start = addr;
69354 vma->vm_end = addr + len;
69355
69356 +#ifdef CONFIG_PAX_MPROTECT
69357 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69358 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69359 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69360 + return -EPERM;
69361 + if (!(vm_flags & VM_EXEC))
69362 + vm_flags &= ~VM_MAYEXEC;
69363 +#else
69364 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69365 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69366 +#endif
69367 + else
69368 + vm_flags &= ~VM_MAYWRITE;
69369 + }
69370 +#endif
69371 +
69372 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69373 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69374
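The mm/mmap.c changes above replace the open-coded "!vma || addr + len <= vma->vm_start" tests with check_heap_stack_gap(), which additionally refuses candidate mappings that would land within sysctl_heap_stack_gap bytes of a growable stack. A minimal user-space sketch of that test follows; the names (struct region, gap_ok) and the 64 KiB gap value are illustrative stand-ins, and the CONFIG_STACK_GROWSUP branch is omitted.

#include <assert.h>
#include <stdbool.h>

struct region {                          /* stand-in for struct vm_area_struct */
        unsigned long start;             /* vm_start                           */
        bool grows_down;                 /* VM_GROWSDOWN, i.e. a stack segment */
};

static const unsigned long heap_stack_gap = 64UL * 1024; /* illustrative value */

/*
 * Allow [addr, addr + len) only if it neither overlaps the next region
 * nor lands closer than heap_stack_gap below a downward-growing stack.
 */
static bool gap_ok(const struct region *next, unsigned long addr, unsigned long len)
{
        if (!next)
                return true;                           /* nothing above us */
        if (addr + len > next->start)
                return false;                          /* plain overlap    */
        if (next->grows_down)
                return heap_stack_gap <= next->start - addr - len;
        return true;
}

int main(void)
{
        struct region stack = { .start = 0x800000, .grows_down = true };

        assert(gap_ok(&stack, 0x700000, 0x10000));     /* well below: fine   */
        assert(!gap_ok(&stack, 0x7f8000, 0x4000));     /* only 16 KiB of gap */
        assert(!gap_ok(&stack, 0x7fc000, 0x8000));     /* overlaps the stack */
        return 0;
}

skip_heap_stack_gap() plays the matching role for the top-down allocator, stepping the search address an extra sysctl_heap_stack_gap below vm_start when the vma above is a downward-growing stack.
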
69375 diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69376 --- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69377 +++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69378 @@ -24,10 +24,16 @@
69379 #include <linux/mmu_notifier.h>
69380 #include <linux/migrate.h>
69381 #include <linux/perf_event.h>
69382 +
69383 +#ifdef CONFIG_PAX_MPROTECT
69384 +#include <linux/elf.h>
69385 +#endif
69386 +
69387 #include <asm/uaccess.h>
69388 #include <asm/pgtable.h>
69389 #include <asm/cacheflush.h>
69390 #include <asm/tlbflush.h>
69391 +#include <asm/mmu_context.h>
69392
69393 #ifndef pgprot_modify
69394 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69395 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
69396 flush_tlb_range(vma, start, end);
69397 }
69398
69399 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69400 +/* called while holding the mmap semaphore for writing, except during stack expansion */
69401 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69402 +{
69403 + unsigned long oldlimit, newlimit = 0UL;
69404 +
69405 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69406 + return;
69407 +
69408 + spin_lock(&mm->page_table_lock);
69409 + oldlimit = mm->context.user_cs_limit;
69410 + if ((prot & VM_EXEC) && oldlimit < end)
69411 + /* USER_CS limit moved up */
69412 + newlimit = end;
69413 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69414 + /* USER_CS limit moved down */
69415 + newlimit = start;
69416 +
69417 + if (newlimit) {
69418 + mm->context.user_cs_limit = newlimit;
69419 +
69420 +#ifdef CONFIG_SMP
69421 + wmb();
69422 + cpus_clear(mm->context.cpu_user_cs_mask);
69423 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69424 +#endif
69425 +
69426 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69427 + }
69428 + spin_unlock(&mm->page_table_lock);
69429 + if (newlimit == end) {
69430 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69431 +
69432 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69433 + if (is_vm_hugetlb_page(vma))
69434 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69435 + else
69436 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69437 + }
69438 +}
69439 +#endif
69440 +
69441 int
69442 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69443 unsigned long start, unsigned long end, unsigned long newflags)
69444 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69445 int error;
69446 int dirty_accountable = 0;
69447
69448 +#ifdef CONFIG_PAX_SEGMEXEC
69449 + struct vm_area_struct *vma_m = NULL;
69450 + unsigned long start_m, end_m;
69451 +
69452 + start_m = start + SEGMEXEC_TASK_SIZE;
69453 + end_m = end + SEGMEXEC_TASK_SIZE;
69454 +#endif
69455 +
69456 if (newflags == oldflags) {
69457 *pprev = vma;
69458 return 0;
69459 }
69460
69461 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69462 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69463 +
69464 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69465 + return -ENOMEM;
69466 +
69467 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69468 + return -ENOMEM;
69469 + }
69470 +
69471 /*
69472 * If we make a private mapping writable we increase our commit;
69473 * but (without finer accounting) cannot reduce our commit if we
69474 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69475 }
69476 }
69477
69478 +#ifdef CONFIG_PAX_SEGMEXEC
69479 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69480 + if (start != vma->vm_start) {
69481 + error = split_vma(mm, vma, start, 1);
69482 + if (error)
69483 + goto fail;
69484 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69485 + *pprev = (*pprev)->vm_next;
69486 + }
69487 +
69488 + if (end != vma->vm_end) {
69489 + error = split_vma(mm, vma, end, 0);
69490 + if (error)
69491 + goto fail;
69492 + }
69493 +
69494 + if (pax_find_mirror_vma(vma)) {
69495 + error = __do_munmap(mm, start_m, end_m - start_m);
69496 + if (error)
69497 + goto fail;
69498 + } else {
69499 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69500 + if (!vma_m) {
69501 + error = -ENOMEM;
69502 + goto fail;
69503 + }
69504 + vma->vm_flags = newflags;
69505 + pax_mirror_vma(vma_m, vma);
69506 + }
69507 + }
69508 +#endif
69509 +
69510 /*
69511 * First try to merge with previous and/or next vma.
69512 */
69513 @@ -195,9 +293,21 @@ success:
69514 * vm_flags and vm_page_prot are protected by the mmap_sem
69515 * held in write mode.
69516 */
69517 +
69518 +#ifdef CONFIG_PAX_SEGMEXEC
69519 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69520 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69521 +#endif
69522 +
69523 vma->vm_flags = newflags;
69524 +
69525 +#ifdef CONFIG_PAX_MPROTECT
69526 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69527 + mm->binfmt->handle_mprotect(vma, newflags);
69528 +#endif
69529 +
69530 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69531 - vm_get_page_prot(newflags));
69532 + vm_get_page_prot(vma->vm_flags));
69533
69534 if (vma_wants_writenotify(vma)) {
69535 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69536 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69537 end = start + len;
69538 if (end <= start)
69539 return -ENOMEM;
69540 +
69541 +#ifdef CONFIG_PAX_SEGMEXEC
69542 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69543 + if (end > SEGMEXEC_TASK_SIZE)
69544 + return -EINVAL;
69545 + } else
69546 +#endif
69547 +
69548 + if (end > TASK_SIZE)
69549 + return -EINVAL;
69550 +
69551 if (!arch_validate_prot(prot))
69552 return -EINVAL;
69553
69554 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69555 /*
69556 * Does the application expect PROT_READ to imply PROT_EXEC:
69557 */
69558 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69559 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69560 prot |= PROT_EXEC;
69561
69562 vm_flags = calc_vm_prot_bits(prot);
69563 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69564 if (start > vma->vm_start)
69565 prev = vma;
69566
69567 +#ifdef CONFIG_PAX_MPROTECT
69568 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69569 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69570 +#endif
69571 +
69572 for (nstart = start ; ; ) {
69573 unsigned long newflags;
69574
69575 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69576
69577 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69578 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69579 + if (prot & (PROT_WRITE | PROT_EXEC))
69580 + gr_log_rwxmprotect(vma->vm_file);
69581 +
69582 + error = -EACCES;
69583 + goto out;
69584 + }
69585 +
69586 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69587 error = -EACCES;
69588 goto out;
69589 }
69590 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69591 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69592 if (error)
69593 goto out;
69594 +
69595 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
69596 +
69597 nstart = tmp;
69598
69599 if (nstart < prev->vm_end)
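
track_exec_limit(), referenced throughout the mmap.c and mprotect.c hunks, maintains mm->context.user_cs_limit so that, on i386 without hardware NX, the user code segment limit sits at the top of the highest executable mapping and instruction fetches above it fault. A small sketch of the two update branches follows; exec_limit, the VM_EXEC value and the sample addresses are illustrative only.

#include <assert.h>

#define VM_EXEC 0x4UL                    /* illustrative flag value */

static unsigned long exec_limit;         /* stand-in for mm->context.user_cs_limit */

static void track_exec_limit_sketch(unsigned long start, unsigned long end,
                                    unsigned long prot)
{
        if ((prot & VM_EXEC) && exec_limit < end)
                exec_limit = end;                      /* limit moves up   */
        else if (!(prot & VM_EXEC) && start < exec_limit && exec_limit <= end)
                exec_limit = start;                    /* limit moves down */
}

int main(void)
{
        exec_limit = 0x08050000;                       /* e.g. top of the text segment */

        /* mapping an executable region above the limit raises it */
        track_exec_limit_sketch(0x40000000, 0x40010000, VM_EXEC);
        assert(exec_limit == 0x40010000);

        /* making that region non-executable lowers it back to its start */
        track_exec_limit_sketch(0x40000000, 0x40010000, 0);
        assert(exec_limit == 0x40000000);
        return 0;
}

In the actual hunk the new limit is then propagated with set_user_cs() and the cached per-CPU limits of other processors are invalidated via cpu_user_cs_mask.
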
69600 diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69601 --- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69602 +++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69603 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69604 continue;
69605 pte = ptep_clear_flush(vma, old_addr, old_pte);
69606 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69607 +
69608 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69609 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69610 + pte = pte_exprotect(pte);
69611 +#endif
69612 +
69613 set_pte_at(mm, new_addr, new_pte, pte);
69614 }
69615
69616 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69617 if (is_vm_hugetlb_page(vma))
69618 goto Einval;
69619
69620 +#ifdef CONFIG_PAX_SEGMEXEC
69621 + if (pax_find_mirror_vma(vma))
69622 + goto Einval;
69623 +#endif
69624 +
69625 /* We can't remap across vm area boundaries */
69626 if (old_len > vma->vm_end - addr)
69627 goto Efault;
69628 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69629 unsigned long ret = -EINVAL;
69630 unsigned long charged = 0;
69631 unsigned long map_flags;
69632 + unsigned long pax_task_size = TASK_SIZE;
69633
69634 if (new_addr & ~PAGE_MASK)
69635 goto out;
69636
69637 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69638 +#ifdef CONFIG_PAX_SEGMEXEC
69639 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69640 + pax_task_size = SEGMEXEC_TASK_SIZE;
69641 +#endif
69642 +
69643 + pax_task_size -= PAGE_SIZE;
69644 +
69645 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69646 goto out;
69647
69648 /* Check if the location we're moving into overlaps the
69649 * old location at all, and fail if it does.
69650 */
69651 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
69652 - goto out;
69653 -
69654 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
69655 + if (addr + old_len > new_addr && new_addr + new_len > addr)
69656 goto out;
69657
69658 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69659 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69660 struct vm_area_struct *vma;
69661 unsigned long ret = -EINVAL;
69662 unsigned long charged = 0;
69663 + unsigned long pax_task_size = TASK_SIZE;
69664
69665 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69666 goto out;
69667 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69668 if (!new_len)
69669 goto out;
69670
69671 +#ifdef CONFIG_PAX_SEGMEXEC
69672 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69673 + pax_task_size = SEGMEXEC_TASK_SIZE;
69674 +#endif
69675 +
69676 + pax_task_size -= PAGE_SIZE;
69677 +
69678 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69679 + old_len > pax_task_size || addr > pax_task_size-old_len)
69680 + goto out;
69681 +
69682 if (flags & MREMAP_FIXED) {
69683 if (flags & MREMAP_MAYMOVE)
69684 ret = mremap_to(addr, old_len, new_addr, new_len);
69685 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69686 addr + new_len);
69687 }
69688 ret = addr;
69689 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69690 goto out;
69691 }
69692 }
69693 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69694 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69695 if (ret)
69696 goto out;
69697 +
69698 + map_flags = vma->vm_flags;
69699 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69700 + if (!(ret & ~PAGE_MASK)) {
69701 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69702 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69703 + }
69704 }
69705 out:
69706 if (ret & ~PAGE_MASK)
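
In mremap_to() above, the two one-sided overlap tests are folded into the standard half-open interval check: the old and new ranges intersect exactly when addr + old_len > new_addr and new_addr + new_len > addr. The brute-force sketch below (illustrative, not part of the patch) confirms the rewritten predicate agrees with the original pair for non-zero lengths.

#include <assert.h>
#include <stdbool.h>

/* the two checks removed by the hunk */
static bool overlaps_old(unsigned a, unsigned la, unsigned b, unsigned lb)
{
        return (b <= a && b + lb > a) || (a <= b && a + la > b);
}

/* the single check that replaces them */
static bool overlaps_new(unsigned a, unsigned la, unsigned b, unsigned lb)
{
        return a + la > b && b + lb > a;
}

int main(void)
{
        for (unsigned a = 0; a < 16; a++)
                for (unsigned la = 1; la < 8; la++)
                        for (unsigned b = 0; b < 16; b++)
                                for (unsigned lb = 1; lb < 8; lb++)
                                        assert(overlaps_old(a, la, b, lb) ==
                                               overlaps_new(a, la, b, lb));
        return 0;
}
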
69707 diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69708 --- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69709 +++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69710 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69711 int sysctl_overcommit_ratio = 50; /* default is 50% */
69712 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69713 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69714 -int heap_stack_gap = 0;
69715
69716 atomic_long_t mmap_pages_allocated;
69717
69718 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69719 EXPORT_SYMBOL(find_vma);
69720
69721 /*
69722 - * find a VMA
69723 - * - we don't extend stack VMAs under NOMMU conditions
69724 - */
69725 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69726 -{
69727 - return find_vma(mm, addr);
69728 -}
69729 -
69730 -/*
69731 * expand a stack to a given address
69732 * - not supported under NOMMU conditions
69733 */
69734 diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69735 --- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69736 +++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69737 @@ -289,7 +289,7 @@ out:
69738 * This usage means that zero-order pages may not be compound.
69739 */
69740
69741 -static void free_compound_page(struct page *page)
69742 +void free_compound_page(struct page *page)
69743 {
69744 __free_pages_ok(page, compound_order(page));
69745 }
69746 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69747 int bad = 0;
69748 int wasMlocked = __TestClearPageMlocked(page);
69749
69750 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69751 + unsigned long index = 1UL << order;
69752 +#endif
69753 +
69754 kmemcheck_free_shadow(page, order);
69755
69756 for (i = 0 ; i < (1 << order) ; ++i)
69757 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69758 debug_check_no_obj_freed(page_address(page),
69759 PAGE_SIZE << order);
69760 }
69761 +
69762 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69763 + for (; index; --index)
69764 + sanitize_highpage(page + index - 1);
69765 +#endif
69766 +
69767 arch_free_page(page, order);
69768 kernel_map_pages(page, 1 << order, 0);
69769
69770 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69771 arch_alloc_page(page, order);
69772 kernel_map_pages(page, 1 << order, 1);
69773
69774 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
69775 if (gfp_flags & __GFP_ZERO)
69776 prep_zero_page(page, order, gfp_flags);
69777 +#endif
69778
69779 if (order && (gfp_flags & __GFP_COMP))
69780 prep_compound_page(page, order);
69781 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69782 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69783 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69784 }
69785 +
69786 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69787 + sanitize_highpage(page);
69788 +#endif
69789 +
69790 arch_free_page(page, 0);
69791 kernel_map_pages(page, 1, 0);
69792
69793 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
69794 int cpu;
69795 struct zone *zone;
69796
69797 + pax_track_stack();
69798 +
69799 for_each_populated_zone(zone) {
69800 show_node(zone);
69801 printk("%s per-cpu:\n", zone->name);
69802 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69803 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69804 }
69805 #else
69806 -static void inline setup_usemap(struct pglist_data *pgdat,
69807 +static inline void setup_usemap(struct pglist_data *pgdat,
69808 struct zone *zone, unsigned long zonesize) {}
69809 #endif /* CONFIG_SPARSEMEM */
69810
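The page_alloc.c hunks wire up CONFIG_PAX_MEMORY_SANITIZE: page contents are scrubbed on the free paths (sanitize_highpage() per page), so stale data never sits on the free lists, and the __GFP_ZERO pre-zeroing in prep_new_page() becomes redundant and is compiled out. The toy pool below illustrates the idea in user space; it is not the buddy allocator, and all names are invented for the sketch.

#include <assert.h>
#include <string.h>

#define POOL_PAGES 4
#define PAGE_SIZE  4096

static unsigned char pool[POOL_PAGES][PAGE_SIZE];
static int in_use[POOL_PAGES];

static void *page_alloc_sketch(void)
{
        for (int i = 0; i < POOL_PAGES; i++)
                if (!in_use[i]) {
                        in_use[i] = 1;
                        return pool[i];        /* already clean: scrubbed on free */
                }
        return NULL;                           /* pool exhausted */
}

static void page_free_sketch(void *p)
{
        memset(p, 0, PAGE_SIZE);               /* sanitize on free */
        for (int i = 0; i < POOL_PAGES; i++)
                if (p == pool[i])
                        in_use[i] = 0;
}

int main(void)
{
        unsigned char *p = page_alloc_sketch();
        memset(p, 0xAA, PAGE_SIZE);            /* pretend this held secrets */
        page_free_sketch(p);

        unsigned char *q = page_alloc_sketch();/* the page gets reused...   */
        for (int i = 0; i < PAGE_SIZE; i++)
                assert(q[i] == 0);             /* ...but nothing leaked     */
        return 0;
}
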
69811 diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69812 --- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69813 +++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69814 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69815 static unsigned int pcpu_last_unit_cpu __read_mostly;
69816
69817 /* the address of the first chunk which starts with the kernel static area */
69818 -void *pcpu_base_addr __read_mostly;
69819 +void *pcpu_base_addr __read_only;
69820 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69821
69822 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69823 diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69824 --- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69825 +++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69826 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69827 /* page_table_lock to protect against threads */
69828 spin_lock(&mm->page_table_lock);
69829 if (likely(!vma->anon_vma)) {
69830 +
69831 +#ifdef CONFIG_PAX_SEGMEXEC
69832 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69833 +
69834 + if (vma_m) {
69835 + BUG_ON(vma_m->anon_vma);
69836 + vma_m->anon_vma = anon_vma;
69837 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69838 + }
69839 +#endif
69840 +
69841 vma->anon_vma = anon_vma;
69842 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69843 allocated = NULL;
69844 diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69845 --- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69846 +++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69847 @@ -31,7 +31,7 @@
69848 #include <linux/swap.h>
69849 #include <linux/ima.h>
69850
69851 -static struct vfsmount *shm_mnt;
69852 +struct vfsmount *shm_mnt;
69853
69854 #ifdef CONFIG_SHMEM
69855 /*
69856 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69857 goto unlock;
69858 }
69859 entry = shmem_swp_entry(info, index, NULL);
69860 + if (!entry)
69861 + goto unlock;
69862 if (entry->val) {
69863 /*
69864 * The more uptodate page coming down from a stacked
69865 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69866 struct vm_area_struct pvma;
69867 struct page *page;
69868
69869 + pax_track_stack();
69870 +
69871 spol = mpol_cond_copy(&mpol,
69872 mpol_shared_policy_lookup(&info->policy, idx));
69873
69874 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69875
69876 info = SHMEM_I(inode);
69877 inode->i_size = len-1;
69878 - if (len <= (char *)inode - (char *)info) {
69879 + if (len <= (char *)inode - (char *)info && len <= 64) {
69880 /* do it inline */
69881 memcpy(info, symname, len);
69882 inode->i_op = &shmem_symlink_inline_operations;
69883 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69884 int err = -ENOMEM;
69885
69886 /* Round up to L1_CACHE_BYTES to resist false sharing */
69887 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69888 - L1_CACHE_BYTES), GFP_KERNEL);
69889 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69890 if (!sbinfo)
69891 return -ENOMEM;
69892
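
The shmem_symlink() hunk tightens the inline-target case: the link is stored inline only when it fits in the slack before the embedded inode and is also no longer than a hard 64-byte cap. A small sketch of that belt-and-braces bound follows; struct toy_info and store_symlink_inline() are illustrative, not kernel structures.

#include <assert.h>
#include <string.h>

struct toy_info {
        char inline_area[80];            /* slack before the embedded inode */
};

/* Store the target inline only if it fits the slack AND the 64-byte cap. */
static int store_symlink_inline(struct toy_info *info, const char *name, size_t len)
{
        if (len > sizeof(info->inline_area))
                return 0;                /* does not fit in the slack   */
        if (len > 64)
                return 0;                /* hard cap added by the patch */
        memcpy(info->inline_area, name, len);
        return 1;
}

int main(void)
{
        struct toy_info info;
        char long_name[72];

        memset(long_name, 'a', sizeof(long_name) - 1);
        long_name[sizeof(long_name) - 1] = '\0';

        assert(store_symlink_inline(&info, "short", 6));     /* 5 chars + NUL */
        assert(!store_symlink_inline(&info, long_name, sizeof(long_name)));
        return 0;
}
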
69893 diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69894 --- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69895 +++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69896 @@ -174,7 +174,7 @@
69897
69898 /* Legal flag mask for kmem_cache_create(). */
69899 #if DEBUG
69900 -# define CREATE_MASK (SLAB_RED_ZONE | \
69901 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69902 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69903 SLAB_CACHE_DMA | \
69904 SLAB_STORE_USER | \
69905 @@ -182,7 +182,7 @@
69906 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69907 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69908 #else
69909 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69910 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69911 SLAB_CACHE_DMA | \
69912 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69913 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69914 @@ -308,7 +308,7 @@ struct kmem_list3 {
69915 * Need this for bootstrapping a per node allocator.
69916 */
69917 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69918 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69919 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69920 #define CACHE_CACHE 0
69921 #define SIZE_AC MAX_NUMNODES
69922 #define SIZE_L3 (2 * MAX_NUMNODES)
69923 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69924 if ((x)->max_freeable < i) \
69925 (x)->max_freeable = i; \
69926 } while (0)
69927 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69928 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69929 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69930 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69931 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69932 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69933 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69934 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69935 #else
69936 #define STATS_INC_ACTIVE(x) do { } while (0)
69937 #define STATS_DEC_ACTIVE(x) do { } while (0)
69938 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69939 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69940 */
69941 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69942 - const struct slab *slab, void *obj)
69943 + const struct slab *slab, const void *obj)
69944 {
69945 u32 offset = (obj - slab->s_mem);
69946 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69947 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69948 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69949 sizes[INDEX_AC].cs_size,
69950 ARCH_KMALLOC_MINALIGN,
69951 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69952 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69953 NULL);
69954
69955 if (INDEX_AC != INDEX_L3) {
69956 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69957 kmem_cache_create(names[INDEX_L3].name,
69958 sizes[INDEX_L3].cs_size,
69959 ARCH_KMALLOC_MINALIGN,
69960 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69961 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69962 NULL);
69963 }
69964
69965 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69966 sizes->cs_cachep = kmem_cache_create(names->name,
69967 sizes->cs_size,
69968 ARCH_KMALLOC_MINALIGN,
69969 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69970 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69971 NULL);
69972 }
69973 #ifdef CONFIG_ZONE_DMA
69974 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69975 }
69976 /* cpu stats */
69977 {
69978 - unsigned long allochit = atomic_read(&cachep->allochit);
69979 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69980 - unsigned long freehit = atomic_read(&cachep->freehit);
69981 - unsigned long freemiss = atomic_read(&cachep->freemiss);
69982 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69983 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69984 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69985 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69986
69987 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69988 allochit, allocmiss, freehit, freemiss);
69989 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
69990
69991 static int __init slab_proc_init(void)
69992 {
69993 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69994 + mode_t gr_mode = S_IRUGO;
69995 +
69996 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69997 + gr_mode = S_IRUSR;
69998 +#endif
69999 +
70000 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70001 #ifdef CONFIG_DEBUG_SLAB_LEAK
70002 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70003 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70004 #endif
70005 return 0;
70006 }
70007 module_init(slab_proc_init);
70008 #endif
70009
70010 +void check_object_size(const void *ptr, unsigned long n, bool to)
70011 +{
70012 +
70013 +#ifdef CONFIG_PAX_USERCOPY
70014 + struct page *page;
70015 + struct kmem_cache *cachep = NULL;
70016 + struct slab *slabp;
70017 + unsigned int objnr;
70018 + unsigned long offset;
70019 +
70020 + if (!n)
70021 + return;
70022 +
70023 + if (ZERO_OR_NULL_PTR(ptr))
70024 + goto report;
70025 +
70026 + if (!virt_addr_valid(ptr))
70027 + return;
70028 +
70029 + page = virt_to_head_page(ptr);
70030 +
70031 + if (!PageSlab(page)) {
70032 + if (object_is_on_stack(ptr, n) == -1)
70033 + goto report;
70034 + return;
70035 + }
70036 +
70037 + cachep = page_get_cache(page);
70038 + if (!(cachep->flags & SLAB_USERCOPY))
70039 + goto report;
70040 +
70041 + slabp = page_get_slab(page);
70042 + objnr = obj_to_index(cachep, slabp, ptr);
70043 + BUG_ON(objnr >= cachep->num);
70044 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70045 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70046 + return;
70047 +
70048 +report:
70049 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70050 +#endif
70051 +
70052 +}
70053 +EXPORT_SYMBOL(check_object_size);
70054 +
70055 /**
70056 * ksize - get the actual amount of memory allocated for a given object
70057 * @objp: Pointer to the object
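
The slab.c hunks add the PAX_USERCOPY helper check_object_size(): before copying n bytes to or from user space, the object containing ptr is located via the slab metadata, the cache must carry SLAB_USERCOPY, and the copy must not run past the end of that object. The sketch below shows the bounds check in isolation; struct toy_object and copy_ok() are invented for illustration, while the real lookup uses page_get_cache(), page_get_slab() and obj_to_index() as in the hunk.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_object {
        char   *base;                    /* start of the allocated object */
        size_t  size;                    /* obj_size() of its cache       */
};

/* A usercopy of n bytes at ptr must stay inside the object holding ptr. */
static bool copy_ok(const struct toy_object *obj, const char *ptr, size_t n)
{
        size_t offset;

        if (ptr < obj->base || ptr >= obj->base + obj->size)
                return false;            /* ptr is not inside the object  */
        offset = (size_t)(ptr - obj->base);
        return n <= obj->size - offset;  /* copy may not run past its end */
}

int main(void)
{
        char buf[128];
        struct toy_object obj = { .base = buf, .size = sizeof(buf) };

        assert(copy_ok(&obj, buf + 16, 64));   /* well inside the object       */
        assert(!copy_ok(&obj, buf + 96, 64));  /* would spill 32 bytes past it */
        return 0;
}
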
70058 diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
70059 --- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70060 +++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70061 @@ -29,7 +29,7 @@
70062 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70063 * alloc_pages() directly, allocating compound pages so the page order
70064 * does not have to be separately tracked, and also stores the exact
70065 - * allocation size in page->private so that it can be used to accurately
70066 + * allocation size in slob_page->size so that it can be used to accurately
70067 * provide ksize(). These objects are detected in kfree() because slob_page()
70068 * is false for them.
70069 *
70070 @@ -58,6 +58,7 @@
70071 */
70072
70073 #include <linux/kernel.h>
70074 +#include <linux/sched.h>
70075 #include <linux/slab.h>
70076 #include <linux/mm.h>
70077 #include <linux/swap.h> /* struct reclaim_state */
70078 @@ -100,7 +101,8 @@ struct slob_page {
70079 unsigned long flags; /* mandatory */
70080 atomic_t _count; /* mandatory */
70081 slobidx_t units; /* free units left in page */
70082 - unsigned long pad[2];
70083 + unsigned long pad[1];
70084 + unsigned long size; /* size when >=PAGE_SIZE */
70085 slob_t *free; /* first free slob_t in page */
70086 struct list_head list; /* linked list of free pages */
70087 };
70088 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70089 */
70090 static inline int is_slob_page(struct slob_page *sp)
70091 {
70092 - return PageSlab((struct page *)sp);
70093 + return PageSlab((struct page *)sp) && !sp->size;
70094 }
70095
70096 static inline void set_slob_page(struct slob_page *sp)
70097 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70098
70099 static inline struct slob_page *slob_page(const void *addr)
70100 {
70101 - return (struct slob_page *)virt_to_page(addr);
70102 + return (struct slob_page *)virt_to_head_page(addr);
70103 }
70104
70105 /*
70106 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70107 /*
70108 * Return the size of a slob block.
70109 */
70110 -static slobidx_t slob_units(slob_t *s)
70111 +static slobidx_t slob_units(const slob_t *s)
70112 {
70113 if (s->units > 0)
70114 return s->units;
70115 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70116 /*
70117 * Return the next free slob block pointer after this one.
70118 */
70119 -static slob_t *slob_next(slob_t *s)
70120 +static slob_t *slob_next(const slob_t *s)
70121 {
70122 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70123 slobidx_t next;
70124 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70125 /*
70126 * Returns true if s is the last free block in its page.
70127 */
70128 -static int slob_last(slob_t *s)
70129 +static int slob_last(const slob_t *s)
70130 {
70131 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70132 }
70133 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70134 if (!page)
70135 return NULL;
70136
70137 + set_slob_page(page);
70138 return page_address(page);
70139 }
70140
70141 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70142 if (!b)
70143 return NULL;
70144 sp = slob_page(b);
70145 - set_slob_page(sp);
70146
70147 spin_lock_irqsave(&slob_lock, flags);
70148 sp->units = SLOB_UNITS(PAGE_SIZE);
70149 sp->free = b;
70150 + sp->size = 0;
70151 INIT_LIST_HEAD(&sp->list);
70152 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70153 set_slob_page_free(sp, slob_list);
70154 @@ -475,10 +478,9 @@ out:
70155 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70156 #endif
70157
70158 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70159 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70160 {
70161 - unsigned int *m;
70162 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70163 + slob_t *m;
70164 void *ret;
70165
70166 lockdep_trace_alloc(gfp);
70167 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70168
70169 if (!m)
70170 return NULL;
70171 - *m = size;
70172 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70173 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70174 + m[0].units = size;
70175 + m[1].units = align;
70176 ret = (void *)m + align;
70177
70178 trace_kmalloc_node(_RET_IP_, ret,
70179 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70180
70181 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70182 if (ret) {
70183 - struct page *page;
70184 - page = virt_to_page(ret);
70185 - page->private = size;
70186 + struct slob_page *sp;
70187 + sp = slob_page(ret);
70188 + sp->size = size;
70189 }
70190
70191 trace_kmalloc_node(_RET_IP_, ret,
70192 size, PAGE_SIZE << order, gfp, node);
70193 }
70194
70195 - kmemleak_alloc(ret, size, 1, gfp);
70196 + return ret;
70197 +}
70198 +
70199 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70200 +{
70201 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70202 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70203 +
70204 + if (!ZERO_OR_NULL_PTR(ret))
70205 + kmemleak_alloc(ret, size, 1, gfp);
70206 return ret;
70207 }
70208 EXPORT_SYMBOL(__kmalloc_node);
70209 @@ -528,13 +542,88 @@ void kfree(const void *block)
70210 sp = slob_page(block);
70211 if (is_slob_page(sp)) {
70212 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70213 - unsigned int *m = (unsigned int *)(block - align);
70214 - slob_free(m, *m + align);
70215 - } else
70216 + slob_t *m = (slob_t *)(block - align);
70217 + slob_free(m, m[0].units + align);
70218 + } else {
70219 + clear_slob_page(sp);
70220 + free_slob_page(sp);
70221 + sp->size = 0;
70222 put_page(&sp->page);
70223 + }
70224 }
70225 EXPORT_SYMBOL(kfree);
70226
70227 +void check_object_size(const void *ptr, unsigned long n, bool to)
70228 +{
70229 +
70230 +#ifdef CONFIG_PAX_USERCOPY
70231 + struct slob_page *sp;
70232 + const slob_t *free;
70233 + const void *base;
70234 + unsigned long flags;
70235 +
70236 + if (!n)
70237 + return;
70238 +
70239 + if (ZERO_OR_NULL_PTR(ptr))
70240 + goto report;
70241 +
70242 + if (!virt_addr_valid(ptr))
70243 + return;
70244 +
70245 + sp = slob_page(ptr);
70246 + if (!PageSlab((struct page*)sp)) {
70247 + if (object_is_on_stack(ptr, n) == -1)
70248 + goto report;
70249 + return;
70250 + }
70251 +
70252 + if (sp->size) {
70253 + base = page_address(&sp->page);
70254 + if (base <= ptr && n <= sp->size - (ptr - base))
70255 + return;
70256 + goto report;
70257 + }
70258 +
70259 + /* some tricky double walking to find the chunk */
70260 + spin_lock_irqsave(&slob_lock, flags);
70261 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70262 + free = sp->free;
70263 +
70264 + while (!slob_last(free) && (void *)free <= ptr) {
70265 + base = free + slob_units(free);
70266 + free = slob_next(free);
70267 + }
70268 +
70269 + while (base < (void *)free) {
70270 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70271 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70272 + int offset;
70273 +
70274 + if (ptr < base + align)
70275 + break;
70276 +
70277 + offset = ptr - base - align;
70278 + if (offset >= m) {
70279 + base += size;
70280 + continue;
70281 + }
70282 +
70283 + if (n > m - offset)
70284 + break;
70285 +
70286 + spin_unlock_irqrestore(&slob_lock, flags);
70287 + return;
70288 + }
70289 +
70290 + spin_unlock_irqrestore(&slob_lock, flags);
70291 +report:
70292 + pax_report_usercopy(ptr, n, to, NULL);
70293 +#endif
70294 +
70295 +}
70296 +EXPORT_SYMBOL(check_object_size);
70297 +
70298 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70299 size_t ksize(const void *block)
70300 {
70301 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
70302 sp = slob_page(block);
70303 if (is_slob_page(sp)) {
70304 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70305 - unsigned int *m = (unsigned int *)(block - align);
70306 - return SLOB_UNITS(*m) * SLOB_UNIT;
70307 + slob_t *m = (slob_t *)(block - align);
70308 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70309 } else
70310 - return sp->page.private;
70311 + return sp->size;
70312 }
70313 EXPORT_SYMBOL(ksize);
70314
70315 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70316 {
70317 struct kmem_cache *c;
70318
70319 +#ifdef CONFIG_PAX_USERCOPY
70320 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70321 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70322 +#else
70323 c = slob_alloc(sizeof(struct kmem_cache),
70324 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70325 +#endif
70326
70327 if (c) {
70328 c->name = name;
70329 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70330 {
70331 void *b;
70332
70333 +#ifdef CONFIG_PAX_USERCOPY
70334 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70335 +#else
70336 if (c->size < PAGE_SIZE) {
70337 b = slob_alloc(c->size, flags, c->align, node);
70338 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70339 SLOB_UNITS(c->size) * SLOB_UNIT,
70340 flags, node);
70341 } else {
70342 + struct slob_page *sp;
70343 +
70344 b = slob_new_pages(flags, get_order(c->size), node);
70345 + sp = slob_page(b);
70346 + sp->size = c->size;
70347 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70348 PAGE_SIZE << get_order(c->size),
70349 flags, node);
70350 }
70351 +#endif
70352
70353 if (c->ctor)
70354 c->ctor(b);
70355 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70356
70357 static void __kmem_cache_free(void *b, int size)
70358 {
70359 - if (size < PAGE_SIZE)
70360 + struct slob_page *sp = slob_page(b);
70361 +
70362 + if (is_slob_page(sp))
70363 slob_free(b, size);
70364 - else
70365 + else {
70366 + clear_slob_page(sp);
70367 + free_slob_page(sp);
70368 + sp->size = 0;
70369 slob_free_pages(b, get_order(size));
70370 + }
70371 }
70372
70373 static void kmem_rcu_free(struct rcu_head *head)
70374 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70375
70376 void kmem_cache_free(struct kmem_cache *c, void *b)
70377 {
70378 + int size = c->size;
70379 +
70380 +#ifdef CONFIG_PAX_USERCOPY
70381 + if (size + c->align < PAGE_SIZE) {
70382 + size += c->align;
70383 + b -= c->align;
70384 + }
70385 +#endif
70386 +
70387 kmemleak_free_recursive(b, c->flags);
70388 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70389 struct slob_rcu *slob_rcu;
70390 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70391 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70392 INIT_RCU_HEAD(&slob_rcu->head);
70393 - slob_rcu->size = c->size;
70394 + slob_rcu->size = size;
70395 call_rcu(&slob_rcu->head, kmem_rcu_free);
70396 } else {
70397 - __kmem_cache_free(b, c->size);
70398 + __kmem_cache_free(b, size);
70399 }
70400
70401 +#ifdef CONFIG_PAX_USERCOPY
70402 + trace_kfree(_RET_IP_, b);
70403 +#else
70404 trace_kmem_cache_free(_RET_IP_, b);
70405 +#endif
70406 +
70407 }
70408 EXPORT_SYMBOL(kmem_cache_free);
70409
70410 diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70411 --- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70412 +++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70413 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
70414 if (!t->addr)
70415 return;
70416
70417 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70418 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70419 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70420 }
70421
70422 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70423
70424 page = virt_to_head_page(x);
70425
70426 + BUG_ON(!PageSlab(page));
70427 +
70428 slab_free(s, page, x, _RET_IP_);
70429
70430 trace_kmem_cache_free(_RET_IP_, x);
70431 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
70432 * Merge control. If this is set then no merging of slab caches will occur.
70433 * (Could be removed. This was introduced to pacify the merge skeptics.)
70434 */
70435 -static int slub_nomerge;
70436 +static int slub_nomerge = 1;
70437
70438 /*
70439 * Calculate the order of allocation given an slab object size.
70440 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70441 * list to avoid pounding the page allocator excessively.
70442 */
70443 set_min_partial(s, ilog2(s->size));
70444 - s->refcount = 1;
70445 + atomic_set(&s->refcount, 1);
70446 #ifdef CONFIG_NUMA
70447 s->remote_node_defrag_ratio = 1000;
70448 #endif
70449 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70450 void kmem_cache_destroy(struct kmem_cache *s)
70451 {
70452 down_write(&slub_lock);
70453 - s->refcount--;
70454 - if (!s->refcount) {
70455 + if (atomic_dec_and_test(&s->refcount)) {
70456 list_del(&s->list);
70457 up_write(&slub_lock);
70458 if (kmem_cache_close(s)) {
70459 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70460 __setup("slub_nomerge", setup_slub_nomerge);
70461
70462 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70463 - const char *name, int size, gfp_t gfp_flags)
70464 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70465 {
70466 - unsigned int flags = 0;
70467 -
70468 if (gfp_flags & SLUB_DMA)
70469 - flags = SLAB_CACHE_DMA;
70470 + flags |= SLAB_CACHE_DMA;
70471
70472 /*
70473 * This function is called with IRQs disabled during early-boot on
70474 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70475 EXPORT_SYMBOL(__kmalloc_node);
70476 #endif
70477
70478 +void check_object_size(const void *ptr, unsigned long n, bool to)
70479 +{
70480 +
70481 +#ifdef CONFIG_PAX_USERCOPY
70482 + struct page *page;
70483 + struct kmem_cache *s = NULL;
70484 + unsigned long offset;
70485 +
70486 + if (!n)
70487 + return;
70488 +
70489 + if (ZERO_OR_NULL_PTR(ptr))
70490 + goto report;
70491 +
70492 + if (!virt_addr_valid(ptr))
70493 + return;
70494 +
70495 + page = get_object_page(ptr);
70496 +
70497 + if (!page) {
70498 + if (object_is_on_stack(ptr, n) == -1)
70499 + goto report;
70500 + return;
70501 + }
70502 +
70503 + s = page->slab;
70504 + if (!(s->flags & SLAB_USERCOPY))
70505 + goto report;
70506 +
70507 + offset = (ptr - page_address(page)) % s->size;
70508 + if (offset <= s->objsize && n <= s->objsize - offset)
70509 + return;
70510 +
70511 +report:
70512 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70513 +#endif
70514 +
70515 +}
70516 +EXPORT_SYMBOL(check_object_size);
70517 +
70518 size_t ksize(const void *object)
70519 {
70520 struct page *page;
70521 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70522 * kmem_cache_open for slab_state == DOWN.
70523 */
70524 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70525 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
70526 - kmalloc_caches[0].refcount = -1;
70527 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70528 + atomic_set(&kmalloc_caches[0].refcount, -1);
70529 caches++;
70530
70531 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70532 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70533 /* Caches that are not of the two-to-the-power-of size */
70534 if (KMALLOC_MIN_SIZE <= 32) {
70535 create_kmalloc_cache(&kmalloc_caches[1],
70536 - "kmalloc-96", 96, GFP_NOWAIT);
70537 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70538 caches++;
70539 }
70540 if (KMALLOC_MIN_SIZE <= 64) {
70541 create_kmalloc_cache(&kmalloc_caches[2],
70542 - "kmalloc-192", 192, GFP_NOWAIT);
70543 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70544 caches++;
70545 }
70546
70547 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70548 create_kmalloc_cache(&kmalloc_caches[i],
70549 - "kmalloc", 1 << i, GFP_NOWAIT);
70550 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70551 caches++;
70552 }
70553
70554 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70555 /*
70556 * We may have set a slab to be unmergeable during bootstrap.
70557 */
70558 - if (s->refcount < 0)
70559 + if (atomic_read(&s->refcount) < 0)
70560 return 1;
70561
70562 return 0;
70563 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70564 if (s) {
70565 int cpu;
70566
70567 - s->refcount++;
70568 + atomic_inc(&s->refcount);
70569 /*
70570 * Adjust the object sizes so that we clear
70571 * the complete object on kzalloc.
70572 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70573
70574 if (sysfs_slab_alias(s, name)) {
70575 down_write(&slub_lock);
70576 - s->refcount--;
70577 + atomic_dec(&s->refcount);
70578 up_write(&slub_lock);
70579 goto err;
70580 }
70581 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70582
70583 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70584 {
70585 - return sprintf(buf, "%d\n", s->refcount - 1);
70586 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70587 }
70588 SLAB_ATTR_RO(aliases);
70589
70590 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70591 kfree(s);
70592 }
70593
70594 -static struct sysfs_ops slab_sysfs_ops = {
70595 +static const struct sysfs_ops slab_sysfs_ops = {
70596 .show = slab_attr_show,
70597 .store = slab_attr_store,
70598 };
70599 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70600 return 0;
70601 }
70602
70603 -static struct kset_uevent_ops slab_uevent_ops = {
70604 +static const struct kset_uevent_ops slab_uevent_ops = {
70605 .filter = uevent_filter,
70606 };
70607
70608 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
70609
70610 static int __init slab_proc_init(void)
70611 {
70612 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70613 + mode_t gr_mode = S_IRUGO;
70614 +
70615 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70616 + gr_mode = S_IRUSR;
70617 +#endif
70618 +
70619 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70620 return 0;
70621 }
70622 module_init(slab_proc_init);
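
The slub.c portion makes three related changes: kmem_cache.refcount becomes an atomic_t so cache aliasing and destruction no longer race on a plain integer, cache merging is disabled by default (slub_nomerge = 1), presumably so that caches carrying the new SLAB_USERCOPY whitelist flag are not merged with caches that lack it, and check_object_size() locates the object by taking the pointer's offset into the slab page modulo the cache stride. A small sketch of that modulo test, with invented names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for the two kmem_cache fields the check uses. */
struct fake_slub_cache {
    size_t size;     /* stride between objects inside the slab page */
    size_t objsize;  /* usable payload size of one object           */
};

/* Mirror of: offset = (ptr - page_address(page)) % s->size;
 * accept iff offset <= s->objsize && n <= s->objsize - offset. */
static bool slub_copy_ok(const struct fake_slub_cache *s,
                         const char *page_base, const char *ptr, size_t n)
{
    size_t offset = (size_t)(ptr - page_base) % s->size;

    return offset <= s->objsize && n <= s->objsize - offset;
}

int main(void)
{
    struct fake_slub_cache s = { .size = 256, .objsize = 192 };
    char page[4096];

    printf("%d\n", slub_copy_ok(&s, page, page + 3 * 256 + 16, 100)); /* 1 */
    printf("%d\n", slub_copy_ok(&s, page, page + 3 * 256 + 16, 200)); /* 0 */
    return 0;
}
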
70623 diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70624 --- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70625 +++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70626 @@ -30,6 +30,7 @@
70627 #include <linux/notifier.h>
70628 #include <linux/backing-dev.h>
70629 #include <linux/memcontrol.h>
70630 +#include <linux/hugetlb.h>
70631
70632 #include "internal.h"
70633
70634 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70635 compound_page_dtor *dtor;
70636
70637 dtor = get_compound_page_dtor(page);
70638 + if (!PageHuge(page))
70639 + BUG_ON(dtor != free_compound_page);
70640 (*dtor)(page);
70641 }
70642 }
70643 diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70644 --- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70645 +++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70646 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70647 void arch_pick_mmap_layout(struct mm_struct *mm)
70648 {
70649 mm->mmap_base = TASK_UNMAPPED_BASE;
70650 +
70651 +#ifdef CONFIG_PAX_RANDMMAP
70652 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70653 + mm->mmap_base += mm->delta_mmap;
70654 +#endif
70655 +
70656 mm->get_unmapped_area = arch_get_unmapped_area;
70657 mm->unmap_area = arch_unmap_area;
70658 }
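
The mm/util.c hunk wires PAX_RANDMMAP into the generic layout: when a task carries MF_PAX_RANDMMAP, a per-mm random delta (mm->delta_mmap) is added to TASK_UNMAPPED_BASE, so each process starts its mappings at a shifted base. A toy illustration of the idea with placeholder constants only:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE_X 0x40000000UL  /* placeholder, not the real value  */
#define DELTA_MMAP_BITS      28            /* placeholder randomisation width  */
#define PAGE_MASK_X          (~0xFFFUL)    /* keep the delta page aligned      */

int main(void)
{
    unsigned long delta, mmap_base;

    srand((unsigned)time(NULL));
    /* Stand-in for mm->delta_mmap: a random, page-aligned offset below
     * 2^DELTA_MMAP_BITS added to the usual base. */
    delta = ((unsigned long)rand() & ((1UL << DELTA_MMAP_BITS) - 1)) & PAGE_MASK_X;
    mmap_base = TASK_UNMAPPED_BASE_X + delta;

    printf("mmap base: %#lx (delta %#lx)\n", mmap_base, delta);
    return 0;
}
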
70659 diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70660 --- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70661 +++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70662 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70663
70664 pte = pte_offset_kernel(pmd, addr);
70665 do {
70666 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70667 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70668 +
70669 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70670 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70671 + BUG_ON(!pte_exec(*pte));
70672 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70673 + continue;
70674 + }
70675 +#endif
70676 +
70677 + {
70678 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70679 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70680 + }
70681 } while (pte++, addr += PAGE_SIZE, addr != end);
70682 }
70683
70684 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70685 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70686 {
70687 pte_t *pte;
70688 + int ret = -ENOMEM;
70689
70690 /*
70691 * nr is a running index into the array which helps higher level
70692 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70693 pte = pte_alloc_kernel(pmd, addr);
70694 if (!pte)
70695 return -ENOMEM;
70696 +
70697 + pax_open_kernel();
70698 do {
70699 struct page *page = pages[*nr];
70700
70701 - if (WARN_ON(!pte_none(*pte)))
70702 - return -EBUSY;
70703 - if (WARN_ON(!page))
70704 - return -ENOMEM;
70705 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70706 + if (!(pgprot_val(prot) & _PAGE_NX))
70707 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70708 + else
70709 +#endif
70710 +
70711 + if (WARN_ON(!pte_none(*pte))) {
70712 + ret = -EBUSY;
70713 + goto out;
70714 + }
70715 + if (WARN_ON(!page)) {
70716 + ret = -ENOMEM;
70717 + goto out;
70718 + }
70719 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70720 (*nr)++;
70721 } while (pte++, addr += PAGE_SIZE, addr != end);
70722 - return 0;
70723 + ret = 0;
70724 +out:
70725 + pax_close_kernel();
70726 + return ret;
70727 }
70728
70729 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70730 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70731 * and fall back on vmalloc() if that fails. Others
70732 * just put it in the vmalloc space.
70733 */
70734 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70735 +#ifdef CONFIG_MODULES
70736 +#ifdef MODULES_VADDR
70737 unsigned long addr = (unsigned long)x;
70738 if (addr >= MODULES_VADDR && addr < MODULES_END)
70739 return 1;
70740 #endif
70741 +
70742 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70743 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70744 + return 1;
70745 +#endif
70746 +
70747 +#endif
70748 +
70749 return is_vmalloc_addr(x);
70750 }
70751
70752 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70753
70754 if (!pgd_none(*pgd)) {
70755 pud_t *pud = pud_offset(pgd, addr);
70756 +#ifdef CONFIG_X86
70757 + if (!pud_large(*pud))
70758 +#endif
70759 if (!pud_none(*pud)) {
70760 pmd_t *pmd = pmd_offset(pud, addr);
70761 +#ifdef CONFIG_X86
70762 + if (!pmd_large(*pmd))
70763 +#endif
70764 if (!pmd_none(*pmd)) {
70765 pte_t *ptep, pte;
70766
70767 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70768 struct rb_node *tmp;
70769
70770 while (*p) {
70771 - struct vmap_area *tmp;
70772 + struct vmap_area *varea;
70773
70774 parent = *p;
70775 - tmp = rb_entry(parent, struct vmap_area, rb_node);
70776 - if (va->va_start < tmp->va_end)
70777 + varea = rb_entry(parent, struct vmap_area, rb_node);
70778 + if (va->va_start < varea->va_end)
70779 p = &(*p)->rb_left;
70780 - else if (va->va_end > tmp->va_start)
70781 + else if (va->va_end > varea->va_start)
70782 p = &(*p)->rb_right;
70783 else
70784 BUG();
70785 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70786 struct vm_struct *area;
70787
70788 BUG_ON(in_interrupt());
70789 +
70790 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70791 + if (flags & VM_KERNEXEC) {
70792 + if (start != VMALLOC_START || end != VMALLOC_END)
70793 + return NULL;
70794 + start = (unsigned long)MODULES_EXEC_VADDR;
70795 + end = (unsigned long)MODULES_EXEC_END;
70796 + }
70797 +#endif
70798 +
70799 if (flags & VM_IOREMAP) {
70800 int bit = fls(size);
70801
70802 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70803 if (count > totalram_pages)
70804 return NULL;
70805
70806 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70807 + if (!(pgprot_val(prot) & _PAGE_NX))
70808 + flags |= VM_KERNEXEC;
70809 +#endif
70810 +
70811 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70812 __builtin_return_address(0));
70813 if (!area)
70814 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70815 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70816 return NULL;
70817
70818 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70819 + if (!(pgprot_val(prot) & _PAGE_NX))
70820 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70821 + node, gfp_mask, caller);
70822 + else
70823 +#endif
70824 +
70825 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70826 VMALLOC_END, node, gfp_mask, caller);
70827
70828 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70829 return addr;
70830 }
70831
70832 +#undef __vmalloc
70833 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70834 {
70835 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70836 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70837 * For tight control over page level allocator and protection flags
70838 * use __vmalloc() instead.
70839 */
70840 +#undef vmalloc
70841 void *vmalloc(unsigned long size)
70842 {
70843 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70844 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70845 * The resulting memory area is zeroed so it can be mapped to userspace
70846 * without leaking data.
70847 */
70848 +#undef vmalloc_user
70849 void *vmalloc_user(unsigned long size)
70850 {
70851 struct vm_struct *area;
70852 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70853 * For tight control over page level allocator and protection flags
70854 * use __vmalloc() instead.
70855 */
70856 +#undef vmalloc_node
70857 void *vmalloc_node(unsigned long size, int node)
70858 {
70859 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70860 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70861 * For tight control over page level allocator and protection flags
70862 * use __vmalloc() instead.
70863 */
70864 -
70865 +#undef vmalloc_exec
70866 void *vmalloc_exec(unsigned long size)
70867 {
70868 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70869 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70870 -1, __builtin_return_address(0));
70871 }
70872
70873 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70874 * Allocate enough 32bit PA addressable pages to cover @size from the
70875 * page level allocator and map them into contiguous kernel virtual space.
70876 */
70877 +#undef vmalloc_32
70878 void *vmalloc_32(unsigned long size)
70879 {
70880 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70881 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70882 * The resulting memory area is 32bit addressable and zeroed so it can be
70883 * mapped to userspace without leaking data.
70884 */
70885 +#undef vmalloc_32_user
70886 void *vmalloc_32_user(unsigned long size)
70887 {
70888 struct vm_struct *area;
70889 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70890 unsigned long uaddr = vma->vm_start;
70891 unsigned long usize = vma->vm_end - vma->vm_start;
70892
70893 + BUG_ON(vma->vm_mirror);
70894 +
70895 if ((PAGE_SIZE-1) & (unsigned long)addr)
70896 return -EINVAL;
70897
70898 diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70899 --- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70900 +++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70901 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70902 *
70903 * vm_stat contains the global counters
70904 */
70905 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70906 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70907 EXPORT_SYMBOL(vm_stat);
70908
70909 #ifdef CONFIG_SMP
70910 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70911 v = p->vm_stat_diff[i];
70912 p->vm_stat_diff[i] = 0;
70913 local_irq_restore(flags);
70914 - atomic_long_add(v, &zone->vm_stat[i]);
70915 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70916 global_diff[i] += v;
70917 #ifdef CONFIG_NUMA
70918 /* 3 seconds idle till flush */
70919 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70920
70921 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70922 if (global_diff[i])
70923 - atomic_long_add(global_diff[i], &vm_stat[i]);
70924 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70925 }
70926
70927 #endif
70928 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70929 start_cpu_timer(cpu);
70930 #endif
70931 #ifdef CONFIG_PROC_FS
70932 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70933 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70934 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70935 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70936 + {
70937 + mode_t gr_mode = S_IRUGO;
70938 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70939 + gr_mode = S_IRUSR;
70940 +#endif
70941 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70942 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70943 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70944 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70945 +#else
70946 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70947 +#endif
70948 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70949 + }
70950 #endif
70951 return 0;
70952 }
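
mm/vmstat.c switches the vm_stat counters and their updates to the atomic_long_unchecked_t variants. Under the PaX REFCOUNT hardening that this patch applies elsewhere, the ordinary atomic operations detect and refuse overflow; counters that are pure statistics and may legitimately wrap are therefore moved to the *_unchecked forms, which keep plain wrapping arithmetic. A toy model of the two behaviours, not the kernel implementation:

#include <limits.h>
#include <stdio.h>

/* Toy model only: a "checked" add that refuses to wrap past INT_MAX versus
 * an "unchecked" add that wraps, like the statistics counters converted above. */
static int checked_add(int *v, int delta)
{
    if (delta > 0 && *v > INT_MAX - delta)
        return -1;              /* would overflow: report instead of wrapping */
    *v += delta;
    return 0;
}

static void unchecked_add(unsigned int *v, unsigned int delta)
{
    *v += delta;                /* wrapping is acceptable for pure stats */
}

int main(void)
{
    int refcount = INT_MAX;
    unsigned int stat = UINT_MAX;

    printf("checked:   %s\n", checked_add(&refcount, 1) ? "overflow caught" : "ok");
    unchecked_add(&stat, 1);
    printf("unchecked: wrapped to %u\n", stat);
    return 0;
}
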
70953 diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70954 --- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70955 +++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70956 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70957 err = -EPERM;
70958 if (!capable(CAP_NET_ADMIN))
70959 break;
70960 - if ((args.u.name_type >= 0) &&
70961 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70962 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70963 struct vlan_net *vn;
70964
70965 vn = net_generic(net, vlan_net_id);
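
The vlan.c change drops the "args.u.name_type >= 0" half of the test: name_type is an unsigned field, so that comparison is always true and only provokes compiler warnings, leaving the upper-bound check against VLAN_NAME_TYPE_HIGHEST to do the actual work. A minimal illustration of the tautology:

#include <stdio.h>

int main(void)
{
    unsigned int name_type = 5;   /* imitates args.u.name_type        */
    unsigned int highest   = 4;   /* imitates VLAN_NAME_TYPE_HIGHEST  */

    /* "name_type >= 0" is always true for an unsigned value, so only the
     * upper-bound comparison actually filters anything. */
    if (name_type >= 0 && name_type < highest)
        printf("accepted\n");
    else
        printf("rejected\n");     /* rejected: the upper bound did the work */
    return 0;
}
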
70966 diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70967 --- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70968 +++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70969 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70970 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70971 return 1;
70972 atm_return(vcc,truesize);
70973 - atomic_inc(&vcc->stats->rx_drop);
70974 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70975 return 0;
70976 }
70977
70978 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70979 }
70980 }
70981 atm_return(vcc,guess);
70982 - atomic_inc(&vcc->stats->rx_drop);
70983 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70984 return NULL;
70985 }
70986
70987 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70988
70989 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70990 {
70991 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70992 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70993 __SONET_ITEMS
70994 #undef __HANDLE_ITEM
70995 }
70996 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70997
70998 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70999 {
71000 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
71001 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71002 __SONET_ITEMS
71003 #undef __HANDLE_ITEM
71004 }
71005 diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
71006 --- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
71007 +++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
71008 @@ -48,7 +48,7 @@ struct lane2_ops {
71009 const u8 *tlvs, u32 sizeoftlvs);
71010 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71011 const u8 *tlvs, u32 sizeoftlvs);
71012 -};
71013 +} __no_const;
71014
71015 /*
71016 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71017 diff -urNp linux-2.6.32.45/net/atm/mpc.c linux-2.6.32.45/net/atm/mpc.c
71018 --- linux-2.6.32.45/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
71019 +++ linux-2.6.32.45/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
71020 @@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
71021 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
71022 else {
71023 mpc->old_ops = dev->netdev_ops;
71024 - mpc->new_ops = *mpc->old_ops;
71025 - mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71026 + memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
71027 + *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71028 dev->netdev_ops = &mpc->new_ops;
71029 }
71030 }
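
In mpc.c the struct assignment that cloned the device's netdev_ops is replaced by memcpy() plus a pointer cast when installing the hooked ndo_start_xmit. This is presumably a consequence of the constification done elsewhere in the patch: once the ops type is treated as read-only by the compiler, its members cannot be assigned to directly, and the copy-and-patch has to go through casts. A safe userspace sketch of the same shape, where the const is only a view of an otherwise writable object:

#include <stdio.h>
#include <string.h>

/* Illustrative ops table; pretend a hardening plugin has made the type
 * effectively read-only, so members cannot be assigned to directly. */
struct ops {
    int (*start_xmit)(const char *msg);
};

static int real_xmit(const char *msg)   { printf("real:   %s\n", msg); return 0; }
static int hooked_xmit(const char *msg) { printf("hooked: %s\n", msg); return 0; }

static const struct ops old_ops = { .start_xmit = real_xmit };

/* The backing object stays writable; only the pointer we hand around is
 * const-qualified, so updates must strip the qualifier with casts, the
 * same shape as the memcpy()/cast pair in the hunk above. */
static struct ops new_ops_storage;
static const struct ops *new_ops = &new_ops_storage;

int main(void)
{
    memcpy((void *)new_ops, &old_ops, sizeof(*new_ops));
    ((struct ops *)new_ops)->start_xmit = hooked_xmit;

    new_ops->start_xmit("hello");   /* prints "hooked: hello" */
    return 0;
}
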
71031 diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
71032 --- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
71033 +++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
71034 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
71035 struct timeval now;
71036 struct k_message msg;
71037
71038 + pax_track_stack();
71039 +
71040 do_gettimeofday(&now);
71041
71042 write_lock_irq(&client->egress_lock);
71043 diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
71044 --- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
71045 +++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
71046 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
71047 const struct k_atm_aal_stats *stats)
71048 {
71049 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71050 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71051 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71052 - atomic_read(&stats->rx_drop));
71053 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71054 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71055 + atomic_read_unchecked(&stats->rx_drop));
71056 }
71057
71058 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71059 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71060 {
71061 struct sock *sk = sk_atm(vcc);
71062
71063 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71064 + seq_printf(seq, "%p ", NULL);
71065 +#else
71066 seq_printf(seq, "%p ", vcc);
71067 +#endif
71068 +
71069 if (!vcc->dev)
71070 seq_printf(seq, "Unassigned ");
71071 else
71072 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71073 {
71074 if (!vcc->dev)
71075 seq_printf(seq, sizeof(void *) == 4 ?
71076 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71077 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71078 +#else
71079 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71080 +#endif
71081 else
71082 seq_printf(seq, "%3d %3d %5d ",
71083 vcc->dev->number, vcc->vpi, vcc->vci);
71084 diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
71085 --- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71086 +++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71087 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71088 static void copy_aal_stats(struct k_atm_aal_stats *from,
71089 struct atm_aal_stats *to)
71090 {
71091 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71092 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71093 __AAL_STAT_ITEMS
71094 #undef __HANDLE_ITEM
71095 }
71096 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71097 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71098 struct atm_aal_stats *to)
71099 {
71100 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71101 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71102 __AAL_STAT_ITEMS
71103 #undef __HANDLE_ITEM
71104 }
71105 diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
71106 --- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71107 +++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71108 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71109 err = -ENOTCONN;
71110 break;
71111 }
71112 -
71113 + memset(&cinfo, 0, sizeof(cinfo));
71114 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71115 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71116
71117 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71118
71119 /* Reject if config buffer is too small. */
71120 len = cmd_len - sizeof(*req);
71121 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71122 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71123 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71124 l2cap_build_conf_rsp(sk, rsp,
71125 L2CAP_CONF_REJECT, flags), rsp);
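
The small l2cap.c change adds a signedness guard: len is computed as cmd_len - sizeof(*req), so a config request shorter than its fixed header yields a negative len; added to an already accumulated conf_len it can still satisfy the old buffer-size test, and the negative value then turns into an enormous size_t inside the following memcpy(). A self-contained illustration of the pitfall and the added guard, with all names invented:

#include <stdio.h>
#include <string.h>

#define HDR_LEN   4    /* plays the role of sizeof(struct l2cap_conf_req) */
#define CONF_BUF 64

/* Patched shape of the length handling: without the "len < 0" guard, a
 * command shorter than its header gives a negative len; the old sum check
 * can still pass, and (size_t)len then blows up the memcpy(). */
static int config_req(const unsigned char *cmd, int cmd_len,
                      unsigned char *conf_req, int *conf_len)
{
    int len = cmd_len - HDR_LEN;            /* may be negative */

    if (len < 0 || *conf_len + len > CONF_BUF)
        return -1;                          /* reject, as the patch now does */

    memcpy(conf_req + *conf_len, cmd + HDR_LEN, (size_t)len);
    *conf_len += len;
    return len;
}

int main(void)
{
    unsigned char conf_req[CONF_BUF];
    unsigned char cmd[16] = { 0 };
    int conf_len = 4;                       /* pretend earlier fragments arrived */

    printf("%d\n", config_req(cmd, 12, conf_req, &conf_len)); /*  8: normal case */
    printf("%d\n", config_req(cmd, 3, conf_req, &conf_len));  /* -1: short cmd   */
    return 0;
}
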
71126 diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
71127 --- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71128 +++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71129 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71130
71131 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71132
71133 + memset(&cinfo, 0, sizeof(cinfo));
71134 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71135 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71136
71137 diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
71138 --- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71139 +++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71140 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71141
71142 #ifdef CONFIG_SYSFS
71143 /* br_sysfs_if.c */
71144 -extern struct sysfs_ops brport_sysfs_ops;
71145 +extern const struct sysfs_ops brport_sysfs_ops;
71146 extern int br_sysfs_addif(struct net_bridge_port *p);
71147
71148 /* br_sysfs_br.c */
71149 diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
71150 --- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71151 +++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71152 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71153 char *envp[] = { NULL };
71154
71155 if (br->stp_enabled == BR_USER_STP) {
71156 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71157 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71158 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71159 br->dev->name, r);
71160
71161 diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71162 --- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71163 +++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71164 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71165 return ret;
71166 }
71167
71168 -struct sysfs_ops brport_sysfs_ops = {
71169 +const struct sysfs_ops brport_sysfs_ops = {
71170 .show = brport_show,
71171 .store = brport_store,
71172 };
71173 diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71174 --- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71175 +++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71176 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71177 unsigned int entries_size, nentries;
71178 char *entries;
71179
71180 + pax_track_stack();
71181 +
71182 if (cmd == EBT_SO_GET_ENTRIES) {
71183 entries_size = t->private->entries_size;
71184 nentries = t->private->nentries;
71185 diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71186 --- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71187 +++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71188 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71189 struct bcm_sock *bo = bcm_sk(sk);
71190 struct bcm_op *op;
71191
71192 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71193 + seq_printf(m, ">>> socket %p", NULL);
71194 + seq_printf(m, " / sk %p", NULL);
71195 + seq_printf(m, " / bo %p", NULL);
71196 +#else
71197 seq_printf(m, ">>> socket %p", sk->sk_socket);
71198 seq_printf(m, " / sk %p", sk);
71199 seq_printf(m, " / bo %p", bo);
71200 +#endif
71201 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71202 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71203 seq_printf(m, " <<<\n");
71204 diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71205 --- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71206 +++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71207 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71208 if (no_module && capable(CAP_NET_ADMIN))
71209 no_module = request_module("netdev-%s", name);
71210 if (no_module && capable(CAP_SYS_MODULE)) {
71211 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71212 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71213 +#else
71214 if (!request_module("%s", name))
71215 pr_err("Loading kernel module for a network device "
71216 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71217 "instead\n", name);
71218 +#endif
71219 }
71220 }
71221 EXPORT_SYMBOL(dev_load);
71222 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71223
71224 struct dev_gso_cb {
71225 void (*destructor)(struct sk_buff *skb);
71226 -};
71227 +} __no_const;
71228
71229 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71230
71231 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71232 }
71233 EXPORT_SYMBOL(netif_rx_ni);
71234
71235 -static void net_tx_action(struct softirq_action *h)
71236 +static void net_tx_action(void)
71237 {
71238 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71239
71240 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71241 EXPORT_SYMBOL(netif_napi_del);
71242
71243
71244 -static void net_rx_action(struct softirq_action *h)
71245 +static void net_rx_action(void)
71246 {
71247 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71248 unsigned long time_limit = jiffies + 2;
71249 diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71250 --- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71251 +++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71252 @@ -35,11 +35,11 @@ struct flow_cache_entry {
71253 atomic_t *object_ref;
71254 };
71255
71256 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71257 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71258
71259 static u32 flow_hash_shift;
71260 #define flow_hash_size (1 << flow_hash_shift)
71261 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71262 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71263
71264 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71265
71266 @@ -52,7 +52,7 @@ struct flow_percpu_info {
71267 u32 hash_rnd;
71268 int count;
71269 };
71270 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71271 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71272
71273 #define flow_hash_rnd_recalc(cpu) \
71274 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71275 @@ -69,7 +69,7 @@ struct flow_flush_info {
71276 atomic_t cpuleft;
71277 struct completion completion;
71278 };
71279 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71280 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71281
71282 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71283
71284 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71285 if (fle->family == family &&
71286 fle->dir == dir &&
71287 flow_key_compare(key, &fle->key) == 0) {
71288 - if (fle->genid == atomic_read(&flow_cache_genid)) {
71289 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71290 void *ret = fle->object;
71291
71292 if (ret)
71293 @@ -228,7 +228,7 @@ nocache:
71294 err = resolver(net, key, family, dir, &obj, &obj_ref);
71295
71296 if (fle && !err) {
71297 - fle->genid = atomic_read(&flow_cache_genid);
71298 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71299
71300 if (fle->object)
71301 atomic_dec(fle->object_ref);
71302 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71303
71304 fle = flow_table(cpu)[i];
71305 for (; fle; fle = fle->next) {
71306 - unsigned genid = atomic_read(&flow_cache_genid);
71307 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71308
71309 if (!fle->object || fle->genid == genid)
71310 continue;
71311 diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71312 --- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71313 +++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71314 @@ -57,7 +57,7 @@ struct rtnl_link
71315 {
71316 rtnl_doit_func doit;
71317 rtnl_dumpit_func dumpit;
71318 -};
71319 +} __no_const;
71320
71321 static DEFINE_MUTEX(rtnl_mutex);
71322
71323 diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71324 --- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71325 +++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71326 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71327 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71328
71329 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71330 - __be16 dport)
71331 + __be16 dport)
71332 {
71333 u32 secret[MD5_MESSAGE_BYTES / 4];
71334 u32 hash[MD5_DIGEST_WORDS];
71335 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71336 secret[i] = net_secret[i];
71337
71338 md5_transform(hash, secret);
71339 -
71340 return hash[0];
71341 }
71342 #endif
71343 diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71344 --- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71345 +++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71346 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71347 struct sk_buff *frag_iter;
71348 struct sock *sk = skb->sk;
71349
71350 + pax_track_stack();
71351 +
71352 /*
71353 * __skb_splice_bits() only fails if the output has no room left,
71354 * so no point in going over the frag_list for the error case.
71355 diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71356 --- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71357 +++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71358 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71359 break;
71360
71361 case SO_PEERCRED:
71362 + {
71363 + struct ucred peercred;
71364 if (len > sizeof(sk->sk_peercred))
71365 len = sizeof(sk->sk_peercred);
71366 - if (copy_to_user(optval, &sk->sk_peercred, len))
71367 + peercred = sk->sk_peercred;
71368 + if (copy_to_user(optval, &peercred, len))
71369 return -EFAULT;
71370 goto lenout;
71371 + }
71372
71373 case SO_PEERNAME:
71374 {
71375 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71376 */
71377 smp_wmb();
71378 atomic_set(&sk->sk_refcnt, 1);
71379 - atomic_set(&sk->sk_drops, 0);
71380 + atomic_set_unchecked(&sk->sk_drops, 0);
71381 }
71382 EXPORT_SYMBOL(sock_init_data);
71383
71384 diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71385 --- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71386 +++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71387 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71388
71389 if (len > *lenp) len = *lenp;
71390
71391 - if (copy_to_user(buffer, addr, len))
71392 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
71393 return -EFAULT;
71394
71395 *lenp = len;
71396 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71397
71398 if (len > *lenp) len = *lenp;
71399
71400 - if (copy_to_user(buffer, devname, len))
71401 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
71402 return -EFAULT;
71403
71404 *lenp = len;
71405 diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71406 --- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71407 +++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71408 @@ -4,7 +4,7 @@
71409
71410 config ECONET
71411 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71412 - depends on EXPERIMENTAL && INET
71413 + depends on EXPERIMENTAL && INET && BROKEN
71414 ---help---
71415 Econet is a fairly old and slow networking protocol mainly used by
71416 Acorn computers to access file and print servers. It uses native
71417 diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71418 --- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71419 +++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71420 @@ -318,7 +318,7 @@ out:
71421 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71422 {
71423 if (sock_queue_rcv_skb(sk, skb) < 0) {
71424 - atomic_inc(&sk->sk_drops);
71425 + atomic_inc_unchecked(&sk->sk_drops);
71426 kfree_skb(skb);
71427 return NET_RX_DROP;
71428 }
71429 diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71430 --- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71431 +++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71432 @@ -206,7 +206,7 @@ out:
71433 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71434 {
71435 if (sock_queue_rcv_skb(sk, skb) < 0) {
71436 - atomic_inc(&sk->sk_drops);
71437 + atomic_inc_unchecked(&sk->sk_drops);
71438 kfree_skb(skb);
71439 return NET_RX_DROP;
71440 }
71441 diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71442 --- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71443 +++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71444 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71445 r->idiag_retrans = 0;
71446
71447 r->id.idiag_if = sk->sk_bound_dev_if;
71448 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71449 + r->id.idiag_cookie[0] = 0;
71450 + r->id.idiag_cookie[1] = 0;
71451 +#else
71452 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71453 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71454 +#endif
71455
71456 r->id.idiag_sport = inet->sport;
71457 r->id.idiag_dport = inet->dport;
71458 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71459 r->idiag_family = tw->tw_family;
71460 r->idiag_retrans = 0;
71461 r->id.idiag_if = tw->tw_bound_dev_if;
71462 +
71463 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71464 + r->id.idiag_cookie[0] = 0;
71465 + r->id.idiag_cookie[1] = 0;
71466 +#else
71467 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71468 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71469 +#endif
71470 +
71471 r->id.idiag_sport = tw->tw_sport;
71472 r->id.idiag_dport = tw->tw_dport;
71473 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71474 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71475 if (sk == NULL)
71476 goto unlock;
71477
71478 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71479 err = -ESTALE;
71480 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71481 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71482 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71483 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71484 goto out;
71485 +#endif
71486
71487 err = -ENOMEM;
71488 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71489 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71490 r->idiag_retrans = req->retrans;
71491
71492 r->id.idiag_if = sk->sk_bound_dev_if;
71493 +
71494 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71495 + r->id.idiag_cookie[0] = 0;
71496 + r->id.idiag_cookie[1] = 0;
71497 +#else
71498 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71499 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71500 +#endif
71501
71502 tmo = req->expires - jiffies;
71503 if (tmo < 0)
71504 diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71505 --- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71506 +++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71507 @@ -18,12 +18,15 @@
71508 #include <linux/sched.h>
71509 #include <linux/slab.h>
71510 #include <linux/wait.h>
71511 +#include <linux/security.h>
71512
71513 #include <net/inet_connection_sock.h>
71514 #include <net/inet_hashtables.h>
71515 #include <net/secure_seq.h>
71516 #include <net/ip.h>
71517
71518 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71519 +
71520 /*
71521 * Allocate and initialize a new local port bind bucket.
71522 * The bindhash mutex for snum's hash chain must be held here.
71523 @@ -491,6 +494,8 @@ ok:
71524 }
71525 spin_unlock(&head->lock);
71526
71527 + gr_update_task_in_ip_table(current, inet_sk(sk));
71528 +
71529 if (tw) {
71530 inet_twsk_deschedule(tw, death_row);
71531 inet_twsk_put(tw);
71532 diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71533 --- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71534 +++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71535 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71536 struct inet_peer *p, *n;
71537 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71538
71539 + pax_track_stack();
71540 +
71541 /* Look up for the address quickly. */
71542 read_lock_bh(&peer_pool_lock);
71543 p = lookup(daddr, NULL);
71544 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71545 return NULL;
71546 n->v4daddr = daddr;
71547 atomic_set(&n->refcnt, 1);
71548 - atomic_set(&n->rid, 0);
71549 + atomic_set_unchecked(&n->rid, 0);
71550 n->ip_id_count = secure_ip_id(daddr);
71551 n->tcp_ts_stamp = 0;
71552
71553 diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71554 --- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71555 +++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71556 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71557 return 0;
71558
71559 start = qp->rid;
71560 - end = atomic_inc_return(&peer->rid);
71561 + end = atomic_inc_return_unchecked(&peer->rid);
71562 qp->rid = end;
71563
71564 rc = qp->q.fragments && (end - start) > max;
71565 diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71566 --- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71567 +++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71568 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71569 int val;
71570 int len;
71571
71572 + pax_track_stack();
71573 +
71574 if (level != SOL_IP)
71575 return -EOPNOTSUPP;
71576
71577 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71578 --- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71579 +++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71580 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71581 private = &tmp;
71582 }
71583 #endif
71584 + memset(&info, 0, sizeof(info));
71585 info.valid_hooks = t->valid_hooks;
71586 memcpy(info.hook_entry, private->hook_entry,
71587 sizeof(info.hook_entry));
71588 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71589 --- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71590 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71591 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71592 private = &tmp;
71593 }
71594 #endif
71595 + memset(&info, 0, sizeof(info));
71596 info.valid_hooks = t->valid_hooks;
71597 memcpy(info.hook_entry, private->hook_entry,
71598 sizeof(info.hook_entry));
71599 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71600 --- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71601 +++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71602 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71603
71604 *len = 0;
71605
71606 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71607 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71608 if (*octets == NULL) {
71609 if (net_ratelimit())
71610 printk("OOM in bsalg (%d)\n", __LINE__);
71611 diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71612 --- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71613 +++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71614 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71615 /* Charge it to the socket. */
71616
71617 if (sock_queue_rcv_skb(sk, skb) < 0) {
71618 - atomic_inc(&sk->sk_drops);
71619 + atomic_inc_unchecked(&sk->sk_drops);
71620 kfree_skb(skb);
71621 return NET_RX_DROP;
71622 }
71623 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71624 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71625 {
71626 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71627 - atomic_inc(&sk->sk_drops);
71628 + atomic_inc_unchecked(&sk->sk_drops);
71629 kfree_skb(skb);
71630 return NET_RX_DROP;
71631 }
71632 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71633
71634 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71635 {
71636 + struct icmp_filter filter;
71637 +
71638 + if (optlen < 0)
71639 + return -EINVAL;
71640 if (optlen > sizeof(struct icmp_filter))
71641 optlen = sizeof(struct icmp_filter);
71642 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71643 + if (copy_from_user(&filter, optval, optlen))
71644 return -EFAULT;
71645 + raw_sk(sk)->filter = filter;
71646 +
71647 return 0;
71648 }
71649
71650 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71651 {
71652 int len, ret = -EFAULT;
71653 + struct icmp_filter filter;
71654
71655 if (get_user(len, optlen))
71656 goto out;
71657 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71658 if (len > sizeof(struct icmp_filter))
71659 len = sizeof(struct icmp_filter);
71660 ret = -EFAULT;
71661 - if (put_user(len, optlen) ||
71662 - copy_to_user(optval, &raw_sk(sk)->filter, len))
71663 + filter = raw_sk(sk)->filter;
71664 + if (put_user(len, optlen) || len > sizeof filter ||
71665 + copy_to_user(optval, &filter, len))
71666 goto out;
71667 ret = 0;
71668 out: return ret;
71669 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71670 sk_wmem_alloc_get(sp),
71671 sk_rmem_alloc_get(sp),
71672 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71673 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71674 + atomic_read(&sp->sk_refcnt),
71675 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71676 + NULL,
71677 +#else
71678 + sp,
71679 +#endif
71680 + atomic_read_unchecked(&sp->sk_drops));
71681 }
71682
71683 static int raw_seq_show(struct seq_file *seq, void *v)
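The raw_seticmpfilter()/raw_geticmpfilter() hunks above bounce the ICMP filter through a stack-local struct instead of copying user data straight into the socket: a negative optlen is rejected, the length is clamped to the real object size, and only the fully validated local copy is assigned into raw_sk(sk)->filter (the read path mirrors this with an extra length bound before copy_to_user). A minimal sketch of the setsockopt side, with placeholder names and memcpy() standing in for copy_from_user():

    #include <errno.h>
    #include <string.h>

    struct filter_sketch   { unsigned int data; };
    struct raw_sock_sketch { struct filter_sketch filter; };

    static int set_filter(struct raw_sock_sketch *rs, const void *optval, int optlen)
    {
            struct filter_sketch tmp = { 0 };

            if (optlen < 0)                         /* signed length: refuse negatives        */
                    return -EINVAL;
            if ((size_t)optlen > sizeof(tmp))       /* clamp to the object actually written   */
                    optlen = sizeof(tmp);

            memcpy(&tmp, optval, optlen);           /* kernel: copy_from_user(&tmp, optval, optlen) */
            rs->filter = tmp;                       /* publish only the validated local copy        */
            return 0;
    }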
71684 diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71685 --- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71686 +++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71687 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71688
71689 static inline int rt_genid(struct net *net)
71690 {
71691 - return atomic_read(&net->ipv4.rt_genid);
71692 + return atomic_read_unchecked(&net->ipv4.rt_genid);
71693 }
71694
71695 #ifdef CONFIG_PROC_FS
71696 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71697 unsigned char shuffle;
71698
71699 get_random_bytes(&shuffle, sizeof(shuffle));
71700 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71701 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71702 }
71703
71704 /*
71705 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71706
71707 static __net_init int rt_secret_timer_init(struct net *net)
71708 {
71709 - atomic_set(&net->ipv4.rt_genid,
71710 + atomic_set_unchecked(&net->ipv4.rt_genid,
71711 (int) ((num_physpages ^ (num_physpages>>8)) ^
71712 (jiffies ^ (jiffies >> 7))));
71713
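Counters such as sk_drops above and the per-namespace rt_genid here are switched from atomic_t to atomic_unchecked_t with matching *_unchecked accessors. The likely reason (an assumption based on how the rest of this patch is structured, the definitions are not in this section) is PAX_REFCOUNT: ordinary atomic_t arithmetic gets overflow detection, so statistics and generation counters that are allowed to wrap must use the unchecked variants to avoid false positives. When that feature is compiled out, the unchecked operations would be expected to collapse back to the plain atomics, roughly:

    /* Assumed fallback when CONFIG_PAX_REFCOUNT is disabled; the real
     * definitions are provided per architecture elsewhere in this patch. */
    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic_t atomic_unchecked_t;
    #define atomic_read_unchecked(v)                atomic_read(v)
    #define atomic_set_unchecked(v, i)              atomic_set((v), (i))
    #define atomic_inc_unchecked(v)                 atomic_inc(v)
    #define atomic_add_unchecked(i, v)              atomic_add((i), (v))
    #define atomic_inc_return_unchecked(v)          atomic_inc_return(v)
    #define atomic_add_return_unchecked(i, v)       atomic_add_return((i), (v))
    #endif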
71714 diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71715 --- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71716 +++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71717 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71718 int val;
71719 int err = 0;
71720
71721 + pax_track_stack();
71722 +
71723 /* This is a string value all the others are int's */
71724 if (optname == TCP_CONGESTION) {
71725 char name[TCP_CA_NAME_MAX];
71726 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71727 struct tcp_sock *tp = tcp_sk(sk);
71728 int val, len;
71729
71730 + pax_track_stack();
71731 +
71732 if (get_user(len, optlen))
71733 return -EFAULT;
71734
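do_tcp_setsockopt()/do_tcp_getsockopt() gain a pax_track_stack() call at entry; the same hook is added throughout this patch to functions with large stack frames. Its definition is not in this section, so the sketch below is only a guess at its shape under CONFIG_PAX_MEMORY_STACKLEAK: record the deepest kernel stack pointer observed so the used portion of the stack can later be sanitized, and compile to nothing when the feature is off.

    #ifdef CONFIG_PAX_MEMORY_STACKLEAK
    void pax_track_stack(void)
    {
            unsigned long sp = (unsigned long)&sp;  /* address of a local ~= current stack pointer */

            /* "lowest_stack" is an assumed bookkeeping field, not taken from this section */
            if (sp < current->lowest_stack &&
                sp > (unsigned long)task_stack_page(current))
                    current->lowest_stack = sp;
    }
    #else
    #define pax_track_stack() do { } while (0)
    #endif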
71735 diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71736 --- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71737 +++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
71738 @@ -85,6 +85,9 @@
71739 int sysctl_tcp_tw_reuse __read_mostly;
71740 int sysctl_tcp_low_latency __read_mostly;
71741
71742 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71743 +extern int grsec_enable_blackhole;
71744 +#endif
71745
71746 #ifdef CONFIG_TCP_MD5SIG
71747 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71748 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71749 return 0;
71750
71751 reset:
71752 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71753 + if (!grsec_enable_blackhole)
71754 +#endif
71755 tcp_v4_send_reset(rsk, skb);
71756 discard:
71757 kfree_skb(skb);
71758 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71759 TCP_SKB_CB(skb)->sacked = 0;
71760
71761 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71762 - if (!sk)
71763 + if (!sk) {
71764 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71765 + ret = 1;
71766 +#endif
71767 goto no_tcp_socket;
71768 + }
71769
71770 process:
71771 - if (sk->sk_state == TCP_TIME_WAIT)
71772 + if (sk->sk_state == TCP_TIME_WAIT) {
71773 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71774 + ret = 2;
71775 +#endif
71776 goto do_time_wait;
71777 + }
71778
71779 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71780 goto discard_and_relse;
71781 @@ -1651,6 +1665,10 @@ no_tcp_socket:
71782 bad_packet:
71783 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71784 } else {
71785 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71786 + if (!grsec_enable_blackhole || (ret == 1 &&
71787 + (skb->dev->flags & IFF_LOOPBACK)))
71788 +#endif
71789 tcp_v4_send_reset(NULL, skb);
71790 }
71791
71792 @@ -2195,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
71793 int rc = 0;
71794 struct proc_dir_entry *p;
71795
71796 - afinfo->seq_fops.open = tcp_seq_open;
71797 - afinfo->seq_fops.read = seq_read;
71798 - afinfo->seq_fops.llseek = seq_lseek;
71799 - afinfo->seq_fops.release = seq_release_net;
71800 -
71801 - afinfo->seq_ops.start = tcp_seq_start;
71802 - afinfo->seq_ops.next = tcp_seq_next;
71803 - afinfo->seq_ops.stop = tcp_seq_stop;
71804 + *(void **)&afinfo->seq_fops.open = tcp_seq_open;
71805 + *(void **)&afinfo->seq_fops.read = seq_read;
71806 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
71807 + *(void **)&afinfo->seq_fops.release = seq_release_net;
71808 +
71809 + *(void **)&afinfo->seq_ops.start = tcp_seq_start;
71810 + *(void **)&afinfo->seq_ops.next = tcp_seq_next;
71811 + *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
71812
71813 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
71814 &afinfo->seq_fops, afinfo);
71815 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71816 0, /* non standard timer */
71817 0, /* open_requests have no inode */
71818 atomic_read(&sk->sk_refcnt),
71819 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71820 + NULL,
71821 +#else
71822 req,
71823 +#endif
71824 len);
71825 }
71826
71827 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71828 sock_i_uid(sk),
71829 icsk->icsk_probes_out,
71830 sock_i_ino(sk),
71831 - atomic_read(&sk->sk_refcnt), sk,
71832 + atomic_read(&sk->sk_refcnt),
71833 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71834 + NULL,
71835 +#else
71836 + sk,
71837 +#endif
71838 jiffies_to_clock_t(icsk->icsk_rto),
71839 jiffies_to_clock_t(icsk->icsk_ack.ato),
71840 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71841 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71842 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71843 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71844 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71845 - atomic_read(&tw->tw_refcnt), tw, len);
71846 + atomic_read(&tw->tw_refcnt),
71847 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71848 + NULL,
71849 +#else
71850 + tw,
71851 +#endif
71852 + len);
71853 }
71854
71855 #define TMPSZ 150
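The GRKERNSEC_BLACKHOLE hunks above gate outgoing TCP resets on the grsec_enable_blackhole knob declared at the top of the file: in tcp_v4_do_rcv() the reset path fires only when the feature is off, and in tcp_v4_rcv() resets for packets that found no socket (ret = 1) or a time-wait socket (ret = 2) are suppressed unless the feature is off or the no-socket packet arrived on a loopback device, so local tools keep seeing normal refusals. The rcv-path condition, folded into one predicate for readability (a restatement of the patch logic above, not code taken from it):

    static inline int blackhole_allows_reset(int ret, const struct sk_buff *skb)
    {
            if (!grsec_enable_blackhole)
                    return 1;       /* feature disabled: normal TCP behaviour          */
            if (ret == 1 && (skb->dev->flags & IFF_LOOPBACK))
                    return 1;       /* no-socket case on loopback: still send the RST  */
            return 0;               /* otherwise drop silently, appear "blackholed"    */
    }

The UDP hunks below apply the same idea before icmp_send(..., ICMP_PORT_UNREACH, ...).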
71856 diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71857 --- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71858 +++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71859 @@ -26,6 +26,10 @@
71860 #include <net/inet_common.h>
71861 #include <net/xfrm.h>
71862
71863 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71864 +extern int grsec_enable_blackhole;
71865 +#endif
71866 +
71867 #ifdef CONFIG_SYSCTL
71868 #define SYNC_INIT 0 /* let the user enable it */
71869 #else
71870 @@ -672,6 +676,10 @@ listen_overflow:
71871
71872 embryonic_reset:
71873 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71874 +
71875 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71876 + if (!grsec_enable_blackhole)
71877 +#endif
71878 if (!(flg & TCP_FLAG_RST))
71879 req->rsk_ops->send_reset(sk, skb);
71880
71881 diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71882 --- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71883 +++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71884 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71885 __u8 *md5_hash_location;
71886 int mss;
71887
71888 + pax_track_stack();
71889 +
71890 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71891 if (skb == NULL)
71892 return NULL;
71893 diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71894 --- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71895 +++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71896 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71897 if (cnt + width >= len)
71898 break;
71899
71900 - if (copy_to_user(buf + cnt, tbuf, width))
71901 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71902 return -EFAULT;
71903 cnt += width;
71904 }
71905 diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71906 --- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71907 +++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71908 @@ -21,6 +21,10 @@
71909 #include <linux/module.h>
71910 #include <net/tcp.h>
71911
71912 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71913 +extern int grsec_lastack_retries;
71914 +#endif
71915 +
71916 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71917 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71918 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71919 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71920 }
71921 }
71922
71923 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71924 + if ((sk->sk_state == TCP_LAST_ACK) &&
71925 + (grsec_lastack_retries > 0) &&
71926 + (grsec_lastack_retries < retry_until))
71927 + retry_until = grsec_lastack_retries;
71928 +#endif
71929 +
71930 if (retransmits_timed_out(sk, retry_until)) {
71931 /* Has it gone just too far? */
71932 tcp_write_err(sk);
71933 diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71934 --- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71935 +++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
71936 @@ -86,6 +86,7 @@
71937 #include <linux/types.h>
71938 #include <linux/fcntl.h>
71939 #include <linux/module.h>
71940 +#include <linux/security.h>
71941 #include <linux/socket.h>
71942 #include <linux/sockios.h>
71943 #include <linux/igmp.h>
71944 @@ -106,6 +107,10 @@
71945 #include <net/xfrm.h>
71946 #include "udp_impl.h"
71947
71948 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71949 +extern int grsec_enable_blackhole;
71950 +#endif
71951 +
71952 struct udp_table udp_table;
71953 EXPORT_SYMBOL(udp_table);
71954
71955 @@ -371,6 +376,9 @@ found:
71956 return s;
71957 }
71958
71959 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71960 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71961 +
71962 /*
71963 * This routine is called by the ICMP module when it gets some
71964 * sort of error condition. If err < 0 then the socket should
71965 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71966 dport = usin->sin_port;
71967 if (dport == 0)
71968 return -EINVAL;
71969 +
71970 + err = gr_search_udp_sendmsg(sk, usin);
71971 + if (err)
71972 + return err;
71973 } else {
71974 if (sk->sk_state != TCP_ESTABLISHED)
71975 return -EDESTADDRREQ;
71976 +
71977 + err = gr_search_udp_sendmsg(sk, NULL);
71978 + if (err)
71979 + return err;
71980 +
71981 daddr = inet->daddr;
71982 dport = inet->dport;
71983 /* Open fast path for connected socket.
71984 @@ -945,6 +962,10 @@ try_again:
71985 if (!skb)
71986 goto out;
71987
71988 + err = gr_search_udp_recvmsg(sk, skb);
71989 + if (err)
71990 + goto out_free;
71991 +
71992 ulen = skb->len - sizeof(struct udphdr);
71993 copied = len;
71994 if (copied > ulen)
71995 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71996 if (rc == -ENOMEM) {
71997 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71998 is_udplite);
71999 - atomic_inc(&sk->sk_drops);
72000 + atomic_inc_unchecked(&sk->sk_drops);
72001 }
72002 goto drop;
72003 }
72004 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72005 goto csum_error;
72006
72007 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72008 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72009 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72010 +#endif
72011 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72012
72013 /*
72014 @@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
72015 struct proc_dir_entry *p;
72016 int rc = 0;
72017
72018 - afinfo->seq_fops.open = udp_seq_open;
72019 - afinfo->seq_fops.read = seq_read;
72020 - afinfo->seq_fops.llseek = seq_lseek;
72021 - afinfo->seq_fops.release = seq_release_net;
72022 -
72023 - afinfo->seq_ops.start = udp_seq_start;
72024 - afinfo->seq_ops.next = udp_seq_next;
72025 - afinfo->seq_ops.stop = udp_seq_stop;
72026 + *(void **)&afinfo->seq_fops.open = udp_seq_open;
72027 + *(void **)&afinfo->seq_fops.read = seq_read;
72028 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72029 + *(void **)&afinfo->seq_fops.release = seq_release_net;
72030 +
72031 + *(void **)&afinfo->seq_ops.start = udp_seq_start;
72032 + *(void **)&afinfo->seq_ops.next = udp_seq_next;
72033 + *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
72034
72035 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72036 &afinfo->seq_fops, afinfo);
72037 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
72038 sk_wmem_alloc_get(sp),
72039 sk_rmem_alloc_get(sp),
72040 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72041 - atomic_read(&sp->sk_refcnt), sp,
72042 - atomic_read(&sp->sk_drops), len);
72043 + atomic_read(&sp->sk_refcnt),
72044 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72045 + NULL,
72046 +#else
72047 + sp,
72048 +#endif
72049 + atomic_read_unchecked(&sp->sk_drops), len);
72050 }
72051
72052 int udp4_seq_show(struct seq_file *seq, void *v)
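The tcp_proc_register() hunk earlier and the udp_proc_register() hunk here replace direct member assignments with writes through *(void **)& casts. This follows from constification applied elsewhere in the patch: the seq_fops/seq_ops embedded in the afinfo structures become const-qualified, so a plain assignment no longer compiles, and the one-time initialisation deliberately casts the qualifier away instead. A small standalone illustration of the idiom (names are made up):

    struct ops_sketch {
            int (*open)(void);
    };

    struct holder_sketch {
            const struct ops_sketch ops;            /* const after constification */
    };

    static int my_open(void) { return 0; }

    static void init_holder(struct holder_sketch *h)
    {
            /* h->ops.open = my_open;       -- rejected: assignment to read-only member */
            *(void **)&h->ops.open = my_open;       /* intentional one-time write through a cast */
    }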
72053 diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
72054 --- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72055 +++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72056 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72057 #ifdef CONFIG_XFRM
72058 {
72059 struct rt6_info *rt = (struct rt6_info *)dst;
72060 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72061 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72062 }
72063 #endif
72064 }
72065 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72066 #ifdef CONFIG_XFRM
72067 if (dst) {
72068 struct rt6_info *rt = (struct rt6_info *)dst;
72069 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72070 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72071 sk->sk_dst_cache = NULL;
72072 dst_release(dst);
72073 dst = NULL;
72074 diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
72075 --- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
72076 +++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72077 @@ -119,7 +119,7 @@ out:
72078 }
72079 EXPORT_SYMBOL(__inet6_lookup_established);
72080
72081 -static int inline compute_score(struct sock *sk, struct net *net,
72082 +static inline int compute_score(struct sock *sk, struct net *net,
72083 const unsigned short hnum,
72084 const struct in6_addr *daddr,
72085 const int dif)
72086 diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
72087 --- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72088 +++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72089 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72090 int val, valbool;
72091 int retv = -ENOPROTOOPT;
72092
72093 + pax_track_stack();
72094 +
72095 if (optval == NULL)
72096 val=0;
72097 else {
72098 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72099 int len;
72100 int val;
72101
72102 + pax_track_stack();
72103 +
72104 if (ip6_mroute_opt(optname))
72105 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72106
72107 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
72108 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72109 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72110 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72111 private = &tmp;
72112 }
72113 #endif
72114 + memset(&info, 0, sizeof(info));
72115 info.valid_hooks = t->valid_hooks;
72116 memcpy(info.hook_entry, private->hook_entry,
72117 sizeof(info.hook_entry));
72118 diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
72119 --- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72120 +++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
72121 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72122 {
72123 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72124 skb_checksum_complete(skb)) {
72125 - atomic_inc(&sk->sk_drops);
72126 + atomic_inc_unchecked(&sk->sk_drops);
72127 kfree_skb(skb);
72128 return NET_RX_DROP;
72129 }
72130
72131 /* Charge it to the socket. */
72132 if (sock_queue_rcv_skb(sk,skb)<0) {
72133 - atomic_inc(&sk->sk_drops);
72134 + atomic_inc_unchecked(&sk->sk_drops);
72135 kfree_skb(skb);
72136 return NET_RX_DROP;
72137 }
72138 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72139 struct raw6_sock *rp = raw6_sk(sk);
72140
72141 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72142 - atomic_inc(&sk->sk_drops);
72143 + atomic_inc_unchecked(&sk->sk_drops);
72144 kfree_skb(skb);
72145 return NET_RX_DROP;
72146 }
72147 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72148
72149 if (inet->hdrincl) {
72150 if (skb_checksum_complete(skb)) {
72151 - atomic_inc(&sk->sk_drops);
72152 + atomic_inc_unchecked(&sk->sk_drops);
72153 kfree_skb(skb);
72154 return NET_RX_DROP;
72155 }
72156 @@ -518,7 +518,7 @@ csum_copy_err:
72157 as some normal condition.
72158 */
72159 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72160 - atomic_inc(&sk->sk_drops);
72161 + atomic_inc_unchecked(&sk->sk_drops);
72162 goto out;
72163 }
72164
72165 @@ -600,7 +600,7 @@ out:
72166 return err;
72167 }
72168
72169 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72170 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72171 struct flowi *fl, struct rt6_info *rt,
72172 unsigned int flags)
72173 {
72174 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72175 u16 proto;
72176 int err;
72177
72178 + pax_track_stack();
72179 +
72180 /* Rough check on arithmetic overflow,
72181 better check is made in ip6_append_data().
72182 */
72183 @@ -916,12 +918,17 @@ do_confirm:
72184 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72185 char __user *optval, int optlen)
72186 {
72187 + struct icmp6_filter filter;
72188 +
72189 switch (optname) {
72190 case ICMPV6_FILTER:
72191 + if (optlen < 0)
72192 + return -EINVAL;
72193 if (optlen > sizeof(struct icmp6_filter))
72194 optlen = sizeof(struct icmp6_filter);
72195 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72196 + if (copy_from_user(&filter, optval, optlen))
72197 return -EFAULT;
72198 + raw6_sk(sk)->filter = filter;
72199 return 0;
72200 default:
72201 return -ENOPROTOOPT;
72202 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72203 char __user *optval, int __user *optlen)
72204 {
72205 int len;
72206 + struct icmp6_filter filter;
72207
72208 switch (optname) {
72209 case ICMPV6_FILTER:
72210 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72211 len = sizeof(struct icmp6_filter);
72212 if (put_user(len, optlen))
72213 return -EFAULT;
72214 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72215 + filter = raw6_sk(sk)->filter;
72216 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
72217 return -EFAULT;
72218 return 0;
72219 default:
72220 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72221 0, 0L, 0,
72222 sock_i_uid(sp), 0,
72223 sock_i_ino(sp),
72224 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72225 + atomic_read(&sp->sk_refcnt),
72226 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72227 + NULL,
72228 +#else
72229 + sp,
72230 +#endif
72231 + atomic_read_unchecked(&sp->sk_drops));
72232 }
72233
72234 static int raw6_seq_show(struct seq_file *seq, void *v)
72235 diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72236 --- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72237 +++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72238 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72239 }
72240 #endif
72241
72242 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72243 +extern int grsec_enable_blackhole;
72244 +#endif
72245 +
72246 static void tcp_v6_hash(struct sock *sk)
72247 {
72248 if (sk->sk_state != TCP_CLOSE) {
72249 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72250 return 0;
72251
72252 reset:
72253 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72254 + if (!grsec_enable_blackhole)
72255 +#endif
72256 tcp_v6_send_reset(sk, skb);
72257 discard:
72258 if (opt_skb)
72259 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72260 TCP_SKB_CB(skb)->sacked = 0;
72261
72262 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72263 - if (!sk)
72264 + if (!sk) {
72265 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72266 + ret = 1;
72267 +#endif
72268 goto no_tcp_socket;
72269 + }
72270
72271 process:
72272 - if (sk->sk_state == TCP_TIME_WAIT)
72273 + if (sk->sk_state == TCP_TIME_WAIT) {
72274 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72275 + ret = 2;
72276 +#endif
72277 goto do_time_wait;
72278 + }
72279
72280 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72281 goto discard_and_relse;
72282 @@ -1701,6 +1716,10 @@ no_tcp_socket:
72283 bad_packet:
72284 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72285 } else {
72286 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72287 + if (!grsec_enable_blackhole || (ret == 1 &&
72288 + (skb->dev->flags & IFF_LOOPBACK)))
72289 +#endif
72290 tcp_v6_send_reset(NULL, skb);
72291 }
72292
72293 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72294 uid,
72295 0, /* non standard timer */
72296 0, /* open_requests have no inode */
72297 - 0, req);
72298 + 0,
72299 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72300 + NULL
72301 +#else
72302 + req
72303 +#endif
72304 + );
72305 }
72306
72307 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72308 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72309 sock_i_uid(sp),
72310 icsk->icsk_probes_out,
72311 sock_i_ino(sp),
72312 - atomic_read(&sp->sk_refcnt), sp,
72313 + atomic_read(&sp->sk_refcnt),
72314 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72315 + NULL,
72316 +#else
72317 + sp,
72318 +#endif
72319 jiffies_to_clock_t(icsk->icsk_rto),
72320 jiffies_to_clock_t(icsk->icsk_ack.ato),
72321 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72322 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72323 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72324 tw->tw_substate, 0, 0,
72325 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72326 - atomic_read(&tw->tw_refcnt), tw);
72327 + atomic_read(&tw->tw_refcnt),
72328 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72329 + NULL
72330 +#else
72331 + tw
72332 +#endif
72333 + );
72334 }
72335
72336 static int tcp6_seq_show(struct seq_file *seq, void *v)
72337 diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72338 --- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72339 +++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72340 @@ -49,6 +49,10 @@
72341 #include <linux/seq_file.h>
72342 #include "udp_impl.h"
72343
72344 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72345 +extern int grsec_enable_blackhole;
72346 +#endif
72347 +
72348 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72349 {
72350 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72351 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72352 if (rc == -ENOMEM) {
72353 UDP6_INC_STATS_BH(sock_net(sk),
72354 UDP_MIB_RCVBUFERRORS, is_udplite);
72355 - atomic_inc(&sk->sk_drops);
72356 + atomic_inc_unchecked(&sk->sk_drops);
72357 }
72358 goto drop;
72359 }
72360 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72361 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72362 proto == IPPROTO_UDPLITE);
72363
72364 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72365 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72366 +#endif
72367 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72368
72369 kfree_skb(skb);
72370 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72371 0, 0L, 0,
72372 sock_i_uid(sp), 0,
72373 sock_i_ino(sp),
72374 - atomic_read(&sp->sk_refcnt), sp,
72375 - atomic_read(&sp->sk_drops));
72376 + atomic_read(&sp->sk_refcnt),
72377 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72378 + NULL,
72379 +#else
72380 + sp,
72381 +#endif
72382 + atomic_read_unchecked(&sp->sk_drops));
72383 }
72384
72385 int udp6_seq_show(struct seq_file *seq, void *v)
72386 diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72387 --- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72388 +++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72389 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72390 add_wait_queue(&self->open_wait, &wait);
72391
72392 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72393 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72394 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72395
72396 /* As far as I can see, we protect open_count - Jean II */
72397 spin_lock_irqsave(&self->spinlock, flags);
72398 if (!tty_hung_up_p(filp)) {
72399 extra_count = 1;
72400 - self->open_count--;
72401 + local_dec(&self->open_count);
72402 }
72403 spin_unlock_irqrestore(&self->spinlock, flags);
72404 - self->blocked_open++;
72405 + local_inc(&self->blocked_open);
72406
72407 while (1) {
72408 if (tty->termios->c_cflag & CBAUD) {
72409 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72410 }
72411
72412 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72413 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72414 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72415
72416 schedule();
72417 }
72418 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72419 if (extra_count) {
72420 /* ++ is not atomic, so this should be protected - Jean II */
72421 spin_lock_irqsave(&self->spinlock, flags);
72422 - self->open_count++;
72423 + local_inc(&self->open_count);
72424 spin_unlock_irqrestore(&self->spinlock, flags);
72425 }
72426 - self->blocked_open--;
72427 + local_dec(&self->blocked_open);
72428
72429 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72430 - __FILE__,__LINE__, tty->driver->name, self->open_count);
72431 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72432
72433 if (!retval)
72434 self->flags |= ASYNC_NORMAL_ACTIVE;
72435 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72436 }
72437 /* ++ is not atomic, so this should be protected - Jean II */
72438 spin_lock_irqsave(&self->spinlock, flags);
72439 - self->open_count++;
72440 + local_inc(&self->open_count);
72441
72442 tty->driver_data = self;
72443 self->tty = tty;
72444 spin_unlock_irqrestore(&self->spinlock, flags);
72445
72446 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72447 - self->line, self->open_count);
72448 + self->line, local_read(&self->open_count));
72449
72450 /* Not really used by us, but lets do it anyway */
72451 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72452 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72453 return;
72454 }
72455
72456 - if ((tty->count == 1) && (self->open_count != 1)) {
72457 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72458 /*
72459 * Uh, oh. tty->count is 1, which means that the tty
72460 * structure will be freed. state->count should always
72461 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72462 */
72463 IRDA_DEBUG(0, "%s(), bad serial port count; "
72464 "tty->count is 1, state->count is %d\n", __func__ ,
72465 - self->open_count);
72466 - self->open_count = 1;
72467 + local_read(&self->open_count));
72468 + local_set(&self->open_count, 1);
72469 }
72470
72471 - if (--self->open_count < 0) {
72472 + if (local_dec_return(&self->open_count) < 0) {
72473 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72474 - __func__, self->line, self->open_count);
72475 - self->open_count = 0;
72476 + __func__, self->line, local_read(&self->open_count));
72477 + local_set(&self->open_count, 0);
72478 }
72479 - if (self->open_count) {
72480 + if (local_read(&self->open_count)) {
72481 spin_unlock_irqrestore(&self->spinlock, flags);
72482
72483 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72484 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72485 tty->closing = 0;
72486 self->tty = NULL;
72487
72488 - if (self->blocked_open) {
72489 + if (local_read(&self->blocked_open)) {
72490 if (self->close_delay)
72491 schedule_timeout_interruptible(self->close_delay);
72492 wake_up_interruptible(&self->open_wait);
72493 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72494 spin_lock_irqsave(&self->spinlock, flags);
72495 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72496 self->tty = NULL;
72497 - self->open_count = 0;
72498 + local_set(&self->open_count, 0);
72499 spin_unlock_irqrestore(&self->spinlock, flags);
72500
72501 wake_up_interruptible(&self->open_wait);
72502 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72503 seq_putc(m, '\n');
72504
72505 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72506 - seq_printf(m, "Open count: %d\n", self->open_count);
72507 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72508 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72509 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72510
72511 diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72512 --- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72513 +++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72514 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72515
72516 write_lock_bh(&iucv_sk_list.lock);
72517
72518 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72519 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72520 while (__iucv_get_sock_by_name(name)) {
72521 sprintf(name, "%08x",
72522 - atomic_inc_return(&iucv_sk_list.autobind_name));
72523 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72524 }
72525
72526 write_unlock_bh(&iucv_sk_list.lock);
72527 diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72528 --- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72529 +++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72530 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72531 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72532 struct xfrm_kmaddress k;
72533
72534 + pax_track_stack();
72535 +
72536 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72537 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72538 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72539 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72540 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72541 else
72542 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72543 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72544 + NULL,
72545 +#else
72546 s,
72547 +#endif
72548 atomic_read(&s->sk_refcnt),
72549 sk_rmem_alloc_get(s),
72550 sk_wmem_alloc_get(s),
72551 diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72552 --- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72553 +++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72554 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72555 goto out;
72556
72557 lapb->dev = dev;
72558 - lapb->callbacks = *callbacks;
72559 + lapb->callbacks = callbacks;
72560
72561 __lapb_insert_cb(lapb);
72562
72563 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72564
72565 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72566 {
72567 - if (lapb->callbacks.connect_confirmation)
72568 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
72569 + if (lapb->callbacks->connect_confirmation)
72570 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
72571 }
72572
72573 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72574 {
72575 - if (lapb->callbacks.connect_indication)
72576 - lapb->callbacks.connect_indication(lapb->dev, reason);
72577 + if (lapb->callbacks->connect_indication)
72578 + lapb->callbacks->connect_indication(lapb->dev, reason);
72579 }
72580
72581 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72582 {
72583 - if (lapb->callbacks.disconnect_confirmation)
72584 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72585 + if (lapb->callbacks->disconnect_confirmation)
72586 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72587 }
72588
72589 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72590 {
72591 - if (lapb->callbacks.disconnect_indication)
72592 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
72593 + if (lapb->callbacks->disconnect_indication)
72594 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
72595 }
72596
72597 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72598 {
72599 - if (lapb->callbacks.data_indication)
72600 - return lapb->callbacks.data_indication(lapb->dev, skb);
72601 + if (lapb->callbacks->data_indication)
72602 + return lapb->callbacks->data_indication(lapb->dev, skb);
72603
72604 kfree_skb(skb);
72605 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72606 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72607 {
72608 int used = 0;
72609
72610 - if (lapb->callbacks.data_transmit) {
72611 - lapb->callbacks.data_transmit(lapb->dev, skb);
72612 + if (lapb->callbacks->data_transmit) {
72613 + lapb->callbacks->data_transmit(lapb->dev, skb);
72614 used = 1;
72615 }
72616
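lapb_register() above stops copying the caller's callback table into the control block and keeps a pointer to it instead, with every call site switching from '.' to '->'. That lets the driver's callback table itself be a const, read-only object (the header-side type change is elsewhere in the patch). A standalone sketch of the pattern with illustrative names:

    struct callbacks_sketch {
            void (*connected)(int reason);
    };

    struct cb_sketch {
            const struct callbacks_sketch *callbacks;   /* was: struct callbacks_sketch callbacks; */
    };

    static const struct callbacks_sketch my_callbacks = {
            .connected = 0,                             /* driver fills in real handlers */
    };

    static void register_sketch(struct cb_sketch *cb)
    {
            cb->callbacks = &my_callbacks;              /* reference, don't copy */
    }

    static void notify_connected(struct cb_sketch *cb, int reason)
    {
            if (cb->callbacks->connected)               /* '.' became '->' at call sites */
                    cb->callbacks->connected(reason);
    }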
72617 diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72618 --- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72619 +++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72620 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72621 return err;
72622 }
72623
72624 -struct cfg80211_ops mac80211_config_ops = {
72625 +const struct cfg80211_ops mac80211_config_ops = {
72626 .add_virtual_intf = ieee80211_add_iface,
72627 .del_virtual_intf = ieee80211_del_iface,
72628 .change_virtual_intf = ieee80211_change_iface,
72629 diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72630 --- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72631 +++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72632 @@ -4,6 +4,6 @@
72633 #ifndef __CFG_H
72634 #define __CFG_H
72635
72636 -extern struct cfg80211_ops mac80211_config_ops;
72637 +extern const struct cfg80211_ops mac80211_config_ops;
72638
72639 #endif /* __CFG_H */
72640 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72641 --- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72642 +++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72643 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72644 size_t count, loff_t *ppos)
72645 {
72646 struct ieee80211_key *key = file->private_data;
72647 - int i, res, bufsize = 2 * key->conf.keylen + 2;
72648 + int i, bufsize = 2 * key->conf.keylen + 2;
72649 char *buf = kmalloc(bufsize, GFP_KERNEL);
72650 char *p = buf;
72651 + ssize_t res;
72652 +
72653 + if (buf == NULL)
72654 + return -ENOMEM;
72655
72656 for (i = 0; i < key->conf.keylen; i++)
72657 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72658 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72659 --- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72660 +++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72661 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72662 int i;
72663 struct sta_info *sta = file->private_data;
72664
72665 + pax_track_stack();
72666 +
72667 spin_lock_bh(&sta->lock);
72668 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72669 sta->ampdu_mlme.dialog_token_allocator + 1);
72670 diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72671 --- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72672 +++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72673 @@ -25,6 +25,7 @@
72674 #include <linux/etherdevice.h>
72675 #include <net/cfg80211.h>
72676 #include <net/mac80211.h>
72677 +#include <asm/local.h>
72678 #include "key.h"
72679 #include "sta_info.h"
72680
72681 @@ -635,7 +636,7 @@ struct ieee80211_local {
72682 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72683 spinlock_t queue_stop_reason_lock;
72684
72685 - int open_count;
72686 + local_t open_count;
72687 int monitors, cooked_mntrs;
72688 /* number of interfaces with corresponding FIF_ flags */
72689 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
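Here, and in the ircomm_tty hunks earlier, plain int open/blocked counters are retyped as local_t (hence the new <asm/local.h> include) and every access goes through the local_* helpers, as the iface.c, main.c, pm.c, rate.c and util.c hunks below show. A quick reference for the mapping, using a standalone counter in kernel context:

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);      /* was: static int open_count; */

    static int counter_sketch(void)
    {
            local_inc(&open_count);                 /* open_count++                      */

            if (local_dec_return(&open_count) < 0)  /* --open_count, checking the result */
                    local_set(&open_count, 0);      /* open_count = 0                    */

            return local_read(&open_count) == 0;    /* open_count == 0                   */
    }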
72690 diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72691 --- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72692 +++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72693 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72694 break;
72695 }
72696
72697 - if (local->open_count == 0) {
72698 + if (local_read(&local->open_count) == 0) {
72699 res = drv_start(local);
72700 if (res)
72701 goto err_del_bss;
72702 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72703 * Validate the MAC address for this device.
72704 */
72705 if (!is_valid_ether_addr(dev->dev_addr)) {
72706 - if (!local->open_count)
72707 + if (!local_read(&local->open_count))
72708 drv_stop(local);
72709 return -EADDRNOTAVAIL;
72710 }
72711 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72712
72713 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72714
72715 - local->open_count++;
72716 + local_inc(&local->open_count);
72717 if (hw_reconf_flags) {
72718 ieee80211_hw_config(local, hw_reconf_flags);
72719 /*
72720 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72721 err_del_interface:
72722 drv_remove_interface(local, &conf);
72723 err_stop:
72724 - if (!local->open_count)
72725 + if (!local_read(&local->open_count))
72726 drv_stop(local);
72727 err_del_bss:
72728 sdata->bss = NULL;
72729 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72730 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72731 }
72732
72733 - local->open_count--;
72734 + local_dec(&local->open_count);
72735
72736 switch (sdata->vif.type) {
72737 case NL80211_IFTYPE_AP_VLAN:
72738 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72739
72740 ieee80211_recalc_ps(local, -1);
72741
72742 - if (local->open_count == 0) {
72743 + if (local_read(&local->open_count) == 0) {
72744 ieee80211_clear_tx_pending(local);
72745 ieee80211_stop_device(local);
72746
72747 diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72748 --- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72749 +++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72750 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72751 local->hw.conf.power_level = power;
72752 }
72753
72754 - if (changed && local->open_count) {
72755 + if (changed && local_read(&local->open_count)) {
72756 ret = drv_config(local, changed);
72757 /*
72758 * Goal:
72759 diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72760 --- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72761 +++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72762 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72763 bool have_higher_than_11mbit = false, newsta = false;
72764 u16 ap_ht_cap_flags;
72765
72766 + pax_track_stack();
72767 +
72768 /*
72769 * AssocResp and ReassocResp have identical structure, so process both
72770 * of them in this function.
72771 diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72772 --- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72773 +++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72774 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72775 }
72776
72777 /* stop hardware - this must stop RX */
72778 - if (local->open_count)
72779 + if (local_read(&local->open_count))
72780 ieee80211_stop_device(local);
72781
72782 local->suspended = true;
72783 diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72784 --- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72785 +++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72786 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72787 struct rate_control_ref *ref, *old;
72788
72789 ASSERT_RTNL();
72790 - if (local->open_count)
72791 + if (local_read(&local->open_count))
72792 return -EBUSY;
72793
72794 ref = rate_control_alloc(name, local);
72795 diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72796 --- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72797 +++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72798 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72799 return cpu_to_le16(dur);
72800 }
72801
72802 -static int inline is_ieee80211_device(struct ieee80211_local *local,
72803 +static inline int is_ieee80211_device(struct ieee80211_local *local,
72804 struct net_device *dev)
72805 {
72806 return local == wdev_priv(dev->ieee80211_ptr);
72807 diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72808 --- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72809 +++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72810 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72811 local->resuming = true;
72812
72813 /* restart hardware */
72814 - if (local->open_count) {
72815 + if (local_read(&local->open_count)) {
72816 /*
72817 * Upon resume hardware can sometimes be goofy due to
72818 * various platform / driver / bus issues, so restarting
72819 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72820 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72821 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72822 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
72823 .open = ip_vs_app_open,
72824 .read = seq_read,
72825 .llseek = seq_lseek,
72826 - .release = seq_release,
72827 + .release = seq_release_net,
72828 };
72829 #endif
72830
72831 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72832 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72833 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72834 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72835 /* if the connection is not template and is created
72836 * by sync, preserve the activity flag.
72837 */
72838 - cp->flags |= atomic_read(&dest->conn_flags) &
72839 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72840 (~IP_VS_CONN_F_INACTIVE);
72841 else
72842 - cp->flags |= atomic_read(&dest->conn_flags);
72843 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72844 cp->dest = dest;
72845
72846 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72847 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72848 atomic_set(&cp->refcnt, 1);
72849
72850 atomic_set(&cp->n_control, 0);
72851 - atomic_set(&cp->in_pkts, 0);
72852 + atomic_set_unchecked(&cp->in_pkts, 0);
72853
72854 atomic_inc(&ip_vs_conn_count);
72855 if (flags & IP_VS_CONN_F_NO_CPORT)
72856 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
72857 .open = ip_vs_conn_open,
72858 .read = seq_read,
72859 .llseek = seq_lseek,
72860 - .release = seq_release,
72861 + .release = seq_release_net,
72862 };
72863
72864 static const char *ip_vs_origin_name(unsigned flags)
72865 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
72866 .open = ip_vs_conn_sync_open,
72867 .read = seq_read,
72868 .llseek = seq_lseek,
72869 - .release = seq_release,
72870 + .release = seq_release_net,
72871 };
72872
72873 #endif
72874 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72875
72876 /* Don't drop the entry if its number of incoming packets is not
72877 located in [0, 8] */
72878 - i = atomic_read(&cp->in_pkts);
72879 + i = atomic_read_unchecked(&cp->in_pkts);
72880 if (i > 8 || i < 0) return 0;
72881
72882 if (!todrop_rate[i]) return 0;
72883 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72884 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72885 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72886 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72887 ret = cp->packet_xmit(skb, cp, pp);
72888 /* do not touch skb anymore */
72889
72890 - atomic_inc(&cp->in_pkts);
72891 + atomic_inc_unchecked(&cp->in_pkts);
72892 ip_vs_conn_put(cp);
72893 return ret;
72894 }
72895 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72896 * Sync connection if it is about to close to
72897 * encorage the standby servers to update the connections timeout
72898 */
72899 - pkts = atomic_add_return(1, &cp->in_pkts);
72900 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72901 if (af == AF_INET &&
72902 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72903 (((cp->protocol != IPPROTO_TCP ||
72904 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72905 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72906 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72907 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72908 ip_vs_rs_hash(dest);
72909 write_unlock_bh(&__ip_vs_rs_lock);
72910 }
72911 - atomic_set(&dest->conn_flags, conn_flags);
72912 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
72913
72914 /* bind the service */
72915 if (!dest->svc) {
72916 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72917 " %-7s %-6d %-10d %-10d\n",
72918 &dest->addr.in6,
72919 ntohs(dest->port),
72920 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72921 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72922 atomic_read(&dest->weight),
72923 atomic_read(&dest->activeconns),
72924 atomic_read(&dest->inactconns));
72925 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72926 "%-7s %-6d %-10d %-10d\n",
72927 ntohl(dest->addr.ip),
72928 ntohs(dest->port),
72929 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72930 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72931 atomic_read(&dest->weight),
72932 atomic_read(&dest->activeconns),
72933 atomic_read(&dest->inactconns));
72934 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72935 .open = ip_vs_info_open,
72936 .read = seq_read,
72937 .llseek = seq_lseek,
72938 - .release = seq_release_private,
72939 + .release = seq_release_net,
72940 };
72941
72942 #endif
72943 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72944 .open = ip_vs_stats_seq_open,
72945 .read = seq_read,
72946 .llseek = seq_lseek,
72947 - .release = single_release,
72948 + .release = single_release_net,
72949 };
72950
72951 #endif
72952 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72953
72954 entry.addr = dest->addr.ip;
72955 entry.port = dest->port;
72956 - entry.conn_flags = atomic_read(&dest->conn_flags);
72957 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72958 entry.weight = atomic_read(&dest->weight);
72959 entry.u_threshold = dest->u_threshold;
72960 entry.l_threshold = dest->l_threshold;
72961 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72962 unsigned char arg[128];
72963 int ret = 0;
72964
72965 + pax_track_stack();
72966 +
72967 if (!capable(CAP_NET_ADMIN))
72968 return -EPERM;
72969
72970 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72971 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72972
72973 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72974 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72975 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72976 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72977 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72978 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
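The .release changes in the ipvs /proc hunks above pair the open and release helpers: a file opened with seq_open_net() or single_open_net() takes a reference on its network namespace, so it should be released with seq_release_net() or single_release_net() so that reference is dropped again. A skeleton of a correctly paired fops, with an illustrative seq_operations:

    #include <linux/module.h>
    #include <linux/seq_file.h>
    #include <linux/seq_file_net.h>

    static const struct seq_operations sketch_seq_ops;     /* .start/.next/.stop/.show elided */

    static int sketch_open(struct inode *inode, struct file *file)
    {
            return seq_open_net(inode, file, &sketch_seq_ops,
                                sizeof(struct seq_net_private));
    }

    static const struct file_operations sketch_fops = {
            .owner   = THIS_MODULE,
            .open    = sketch_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release_net,     /* matches seq_open_net: also drops the netns ref */
    };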
72979 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
72980 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72981 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72982 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72983
72984 if (opt)
72985 memcpy(&cp->in_seq, opt, sizeof(*opt));
72986 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72987 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72988 cp->state = state;
72989 cp->old_state = cp->state;
72990 /*
72991 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
72992 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72993 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72994 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72995 else
72996 rc = NF_ACCEPT;
72997 /* do not touch skb anymore */
72998 - atomic_inc(&cp->in_pkts);
72999 + atomic_inc_unchecked(&cp->in_pkts);
73000 goto out;
73001 }
73002
73003 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73004 else
73005 rc = NF_ACCEPT;
73006 /* do not touch skb anymore */
73007 - atomic_inc(&cp->in_pkts);
73008 + atomic_inc_unchecked(&cp->in_pkts);
73009 goto out;
73010 }
73011
73012 diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
73013 --- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
73014 +++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
73015 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
73016
73017 To compile it as a module, choose M here. If unsure, say N.
73018
73019 +config NETFILTER_XT_MATCH_GRADM
73020 + tristate '"gradm" match support'
73021 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73022 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73023 + ---help---
73024 + The gradm match allows to match on grsecurity RBAC being enabled.
73025 + It is useful when iptables rules are applied early on bootup to
73026 + prevent connections to the machine (except from a trusted host)
73027 + while the RBAC system is disabled.
73028 +
73029 config NETFILTER_XT_MATCH_HASHLIMIT
73030 tristate '"hashlimit" match support'
73031 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
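The help text above describes the intended deployment: rules loaded early in boot that drop everything except a trusted host until the RBAC policy is enabled. A sketch of such a rule; the kernel match is added by this patch, while the iptables userspace extension (and therefore the exact option name, --disabled below) ships separately with the gradm tooling and is assumed here, as is the trusted address:

    # drop all input not coming from the trusted admin host while RBAC is still disabled
    iptables -I INPUT ! -s 192.168.1.10 -m gradm --disabled -j DROP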
73032 diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
73033 --- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
73034 +++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
73035 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
73036 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73037 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73038 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73039 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73040 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73041 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73042 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73043 diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
73044 --- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
73045 +++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
73046 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
73047 static int
73048 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73049 struct nf_conntrack_tuple *tuple,
73050 - enum ctattr_tuple type, u_int8_t l3num)
73051 + enum ctattr_type type, u_int8_t l3num)
73052 {
73053 struct nlattr *tb[CTA_TUPLE_MAX+1];
73054 int err;
73055 diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
73056 --- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73057 +++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73058 @@ -68,7 +68,7 @@ struct nfulnl_instance {
73059 };
73060
73061 static DEFINE_RWLOCK(instances_lock);
73062 -static atomic_t global_seq;
73063 +static atomic_unchecked_t global_seq;
73064
73065 #define INSTANCE_BUCKETS 16
73066 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73067 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73068 /* global sequence number */
73069 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73070 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73071 - htonl(atomic_inc_return(&global_seq)));
73072 + htonl(atomic_inc_return_unchecked(&global_seq)));
73073
73074 if (data_len) {
73075 struct nlattr *nla;
73076 diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
73077 --- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73078 +++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73079 @@ -0,0 +1,51 @@
73080 +/*
73081 + * gradm match for netfilter
73082 + * Copyright © Zbigniew Krzystolik, 2010
73083 + *
73084 + * This program is free software; you can redistribute it and/or modify
73085 + * it under the terms of the GNU General Public License; either version
73086 + * 2 or 3 as published by the Free Software Foundation.
73087 + */
73088 +#include <linux/module.h>
73089 +#include <linux/moduleparam.h>
73090 +#include <linux/skbuff.h>
73091 +#include <linux/netfilter/x_tables.h>
73092 +#include <linux/grsecurity.h>
73093 +#include <linux/netfilter/xt_gradm.h>
73094 +
73095 +static bool
73096 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
73097 +{
73098 + const struct xt_gradm_mtinfo *info = par->matchinfo;
73099 + bool retval = false;
73100 + if (gr_acl_is_enabled())
73101 + retval = true;
73102 + return retval ^ info->invflags;
73103 +}
73104 +
73105 +static struct xt_match gradm_mt_reg __read_mostly = {
73106 + .name = "gradm",
73107 + .revision = 0,
73108 + .family = NFPROTO_UNSPEC,
73109 + .match = gradm_mt,
73110 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
73111 + .me = THIS_MODULE,
73112 +};
73113 +
73114 +static int __init gradm_mt_init(void)
73115 +{
73116 + return xt_register_match(&gradm_mt_reg);
73117 +}
73118 +
73119 +static void __exit gradm_mt_exit(void)
73120 +{
73121 + xt_unregister_match(&gradm_mt_reg);
73122 +}
73123 +
73124 +module_init(gradm_mt_init);
73125 +module_exit(gradm_mt_exit);
73126 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
73127 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
73128 +MODULE_LICENSE("GPL");
73129 +MODULE_ALIAS("ipt_gradm");
73130 +MODULE_ALIAS("ip6t_gradm");
73131 diff -urNp linux-2.6.32.45/net/netlink/af_netlink.c linux-2.6.32.45/net/netlink/af_netlink.c
73132 --- linux-2.6.32.45/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
73133 +++ linux-2.6.32.45/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
73134 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
73135 sk->sk_error_report(sk);
73136 }
73137 }
73138 - atomic_inc(&sk->sk_drops);
73139 + atomic_inc_unchecked(&sk->sk_drops);
73140 }
73141
73142 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
73143 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
73144 struct netlink_sock *nlk = nlk_sk(s);
73145
73146 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
73147 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73148 + NULL,
73149 +#else
73150 s,
73151 +#endif
73152 s->sk_protocol,
73153 nlk->pid,
73154 nlk->groups ? (u32)nlk->groups[0] : 0,
73155 sk_rmem_alloc_get(s),
73156 sk_wmem_alloc_get(s),
73157 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73158 + NULL,
73159 +#else
73160 nlk->cb,
73161 +#endif
73162 atomic_read(&s->sk_refcnt),
73163 - atomic_read(&s->sk_drops)
73164 + atomic_read_unchecked(&s->sk_drops)
73165 );
73166
73167 }
73168 diff -urNp linux-2.6.32.45/net/netrom/af_netrom.c linux-2.6.32.45/net/netrom/af_netrom.c
73169 --- linux-2.6.32.45/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
73170 +++ linux-2.6.32.45/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
73171 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
73172 struct sock *sk = sock->sk;
73173 struct nr_sock *nr = nr_sk(sk);
73174
73175 + memset(sax, 0, sizeof(*sax));
73176 lock_sock(sk);
73177 if (peer != 0) {
73178 if (sk->sk_state != TCP_ESTABLISHED) {
73179 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
73180 *uaddr_len = sizeof(struct full_sockaddr_ax25);
73181 } else {
73182 sax->fsa_ax25.sax25_family = AF_NETROM;
73183 - sax->fsa_ax25.sax25_ndigis = 0;
73184 sax->fsa_ax25.sax25_call = nr->source_addr;
73185 *uaddr_len = sizeof(struct sockaddr_ax25);
73186 }
73187 diff -urNp linux-2.6.32.45/net/packet/af_packet.c linux-2.6.32.45/net/packet/af_packet.c
73188 --- linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
73189 +++ linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
73190 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
73191
73192 seq_printf(seq,
73193 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
73194 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73195 + NULL,
73196 +#else
73197 s,
73198 +#endif
73199 atomic_read(&s->sk_refcnt),
73200 s->sk_type,
73201 ntohs(po->num),
73202 diff -urNp linux-2.6.32.45/net/phonet/af_phonet.c linux-2.6.32.45/net/phonet/af_phonet.c
73203 --- linux-2.6.32.45/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
73204 +++ linux-2.6.32.45/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
73205 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73206 {
73207 struct phonet_protocol *pp;
73208
73209 - if (protocol >= PHONET_NPROTO)
73210 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73211 return NULL;
73212
73213 spin_lock(&proto_tab_lock);
73214 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
73215 {
73216 int err = 0;
73217
73218 - if (protocol >= PHONET_NPROTO)
73219 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73220 return -EINVAL;
73221
73222 err = proto_register(pp->prot, 1);
73223 diff -urNp linux-2.6.32.45/net/phonet/datagram.c linux-2.6.32.45/net/phonet/datagram.c
73224 --- linux-2.6.32.45/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
73225 +++ linux-2.6.32.45/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
73226 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
73227 if (err < 0) {
73228 kfree_skb(skb);
73229 if (err == -ENOMEM)
73230 - atomic_inc(&sk->sk_drops);
73231 + atomic_inc_unchecked(&sk->sk_drops);
73232 }
73233 return err ? NET_RX_DROP : NET_RX_SUCCESS;
73234 }
73235 diff -urNp linux-2.6.32.45/net/phonet/pep.c linux-2.6.32.45/net/phonet/pep.c
73236 --- linux-2.6.32.45/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
73237 +++ linux-2.6.32.45/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
73238 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
73239
73240 case PNS_PEP_CTRL_REQ:
73241 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
73242 - atomic_inc(&sk->sk_drops);
73243 + atomic_inc_unchecked(&sk->sk_drops);
73244 break;
73245 }
73246 __skb_pull(skb, 4);
73247 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
73248 if (!err)
73249 return 0;
73250 if (err == -ENOMEM)
73251 - atomic_inc(&sk->sk_drops);
73252 + atomic_inc_unchecked(&sk->sk_drops);
73253 break;
73254 }
73255
73256 if (pn->rx_credits == 0) {
73257 - atomic_inc(&sk->sk_drops);
73258 + atomic_inc_unchecked(&sk->sk_drops);
73259 err = -ENOBUFS;
73260 break;
73261 }
73262 diff -urNp linux-2.6.32.45/net/phonet/socket.c linux-2.6.32.45/net/phonet/socket.c
73263 --- linux-2.6.32.45/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
73264 +++ linux-2.6.32.45/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
73265 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
73266 sk->sk_state,
73267 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
73268 sock_i_uid(sk), sock_i_ino(sk),
73269 - atomic_read(&sk->sk_refcnt), sk,
73270 - atomic_read(&sk->sk_drops), &len);
73271 + atomic_read(&sk->sk_refcnt),
73272 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73273 + NULL,
73274 +#else
73275 + sk,
73276 +#endif
73277 + atomic_read_unchecked(&sk->sk_drops), &len);
73278 }
73279 seq_printf(seq, "%*s\n", 127 - len, "");
73280 return 0;
73281 diff -urNp linux-2.6.32.45/net/rds/cong.c linux-2.6.32.45/net/rds/cong.c
73282 --- linux-2.6.32.45/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
73283 +++ linux-2.6.32.45/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
73284 @@ -77,7 +77,7 @@
73285 * finds that the saved generation number is smaller than the global generation
73286 * number, it wakes up the process.
73287 */
73288 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
73289 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
73290
73291 /*
73292 * Congestion monitoring
73293 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
73294 rdsdebug("waking map %p for %pI4\n",
73295 map, &map->m_addr);
73296 rds_stats_inc(s_cong_update_received);
73297 - atomic_inc(&rds_cong_generation);
73298 + atomic_inc_unchecked(&rds_cong_generation);
73299 if (waitqueue_active(&map->m_waitq))
73300 wake_up(&map->m_waitq);
73301 if (waitqueue_active(&rds_poll_waitq))
73302 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
73303
73304 int rds_cong_updated_since(unsigned long *recent)
73305 {
73306 - unsigned long gen = atomic_read(&rds_cong_generation);
73307 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
73308
73309 if (likely(*recent == gen))
73310 return 0;
73311 diff -urNp linux-2.6.32.45/net/rds/iw_rdma.c linux-2.6.32.45/net/rds/iw_rdma.c
73312 --- linux-2.6.32.45/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
73313 +++ linux-2.6.32.45/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
73314 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
73315 struct rdma_cm_id *pcm_id;
73316 int rc;
73317
73318 + pax_track_stack();
73319 +
73320 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
73321 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
73322
73323 diff -urNp linux-2.6.32.45/net/rds/Kconfig linux-2.6.32.45/net/rds/Kconfig
73324 --- linux-2.6.32.45/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
73325 +++ linux-2.6.32.45/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
73326 @@ -1,7 +1,7 @@
73327
73328 config RDS
73329 tristate "The RDS Protocol (EXPERIMENTAL)"
73330 - depends on INET && EXPERIMENTAL
73331 + depends on INET && EXPERIMENTAL && BROKEN
73332 ---help---
73333 The RDS (Reliable Datagram Sockets) protocol provides reliable,
73334 sequenced delivery of datagrams over Infiniband, iWARP,
73335 diff -urNp linux-2.6.32.45/net/rxrpc/af_rxrpc.c linux-2.6.32.45/net/rxrpc/af_rxrpc.c
73336 --- linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
73337 +++ linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
73338 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
73339 __be32 rxrpc_epoch;
73340
73341 /* current debugging ID */
73342 -atomic_t rxrpc_debug_id;
73343 +atomic_unchecked_t rxrpc_debug_id;
73344
73345 /* count of skbs currently in use */
73346 atomic_t rxrpc_n_skbs;
73347 diff -urNp linux-2.6.32.45/net/rxrpc/ar-ack.c linux-2.6.32.45/net/rxrpc/ar-ack.c
73348 --- linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
73349 +++ linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
73350 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
73351
73352 _enter("{%d,%d,%d,%d},",
73353 call->acks_hard, call->acks_unacked,
73354 - atomic_read(&call->sequence),
73355 + atomic_read_unchecked(&call->sequence),
73356 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
73357
73358 stop = 0;
73359 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
73360
73361 /* each Tx packet has a new serial number */
73362 sp->hdr.serial =
73363 - htonl(atomic_inc_return(&call->conn->serial));
73364 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
73365
73366 hdr = (struct rxrpc_header *) txb->head;
73367 hdr->serial = sp->hdr.serial;
73368 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
73369 */
73370 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
73371 {
73372 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
73373 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
73374 }
73375
73376 /*
73377 @@ -627,7 +627,7 @@ process_further:
73378
73379 latest = ntohl(sp->hdr.serial);
73380 hard = ntohl(ack.firstPacket);
73381 - tx = atomic_read(&call->sequence);
73382 + tx = atomic_read_unchecked(&call->sequence);
73383
73384 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73385 latest,
73386 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
73387 u32 abort_code = RX_PROTOCOL_ERROR;
73388 u8 *acks = NULL;
73389
73390 + pax_track_stack();
73391 +
73392 //printk("\n--------------------\n");
73393 _enter("{%d,%s,%lx} [%lu]",
73394 call->debug_id, rxrpc_call_states[call->state], call->events,
73395 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
73396 goto maybe_reschedule;
73397
73398 send_ACK_with_skew:
73399 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
73400 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
73401 ntohl(ack.serial));
73402 send_ACK:
73403 mtu = call->conn->trans->peer->if_mtu;
73404 @@ -1171,7 +1173,7 @@ send_ACK:
73405 ackinfo.rxMTU = htonl(5692);
73406 ackinfo.jumbo_max = htonl(4);
73407
73408 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73409 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73410 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73411 ntohl(hdr.serial),
73412 ntohs(ack.maxSkew),
73413 @@ -1189,7 +1191,7 @@ send_ACK:
73414 send_message:
73415 _debug("send message");
73416
73417 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73418 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73419 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
73420 send_message_2:
73421
73422 diff -urNp linux-2.6.32.45/net/rxrpc/ar-call.c linux-2.6.32.45/net/rxrpc/ar-call.c
73423 --- linux-2.6.32.45/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
73424 +++ linux-2.6.32.45/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
73425 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
73426 spin_lock_init(&call->lock);
73427 rwlock_init(&call->state_lock);
73428 atomic_set(&call->usage, 1);
73429 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
73430 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73431 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
73432
73433 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
73434 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connection.c linux-2.6.32.45/net/rxrpc/ar-connection.c
73435 --- linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
73436 +++ linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
73437 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
73438 rwlock_init(&conn->lock);
73439 spin_lock_init(&conn->state_lock);
73440 atomic_set(&conn->usage, 1);
73441 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
73442 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73443 conn->avail_calls = RXRPC_MAXCALLS;
73444 conn->size_align = 4;
73445 conn->header_size = sizeof(struct rxrpc_header);
73446 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connevent.c linux-2.6.32.45/net/rxrpc/ar-connevent.c
73447 --- linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
73448 +++ linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
73449 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
73450
73451 len = iov[0].iov_len + iov[1].iov_len;
73452
73453 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73454 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73455 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
73456
73457 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73458 diff -urNp linux-2.6.32.45/net/rxrpc/ar-input.c linux-2.6.32.45/net/rxrpc/ar-input.c
73459 --- linux-2.6.32.45/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
73460 +++ linux-2.6.32.45/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
73461 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
73462 /* track the latest serial number on this connection for ACK packet
73463 * information */
73464 serial = ntohl(sp->hdr.serial);
73465 - hi_serial = atomic_read(&call->conn->hi_serial);
73466 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
73467 while (serial > hi_serial)
73468 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
73469 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
73470 serial);
73471
73472 /* request ACK generation for any ACK or DATA packet that requests
73473 diff -urNp linux-2.6.32.45/net/rxrpc/ar-internal.h linux-2.6.32.45/net/rxrpc/ar-internal.h
73474 --- linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
73475 +++ linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
73476 @@ -272,8 +272,8 @@ struct rxrpc_connection {
73477 int error; /* error code for local abort */
73478 int debug_id; /* debug ID for printks */
73479 unsigned call_counter; /* call ID counter */
73480 - atomic_t serial; /* packet serial number counter */
73481 - atomic_t hi_serial; /* highest serial number received */
73482 + atomic_unchecked_t serial; /* packet serial number counter */
73483 + atomic_unchecked_t hi_serial; /* highest serial number received */
73484 u8 avail_calls; /* number of calls available */
73485 u8 size_align; /* data size alignment (for security) */
73486 u8 header_size; /* rxrpc + security header size */
73487 @@ -346,7 +346,7 @@ struct rxrpc_call {
73488 spinlock_t lock;
73489 rwlock_t state_lock; /* lock for state transition */
73490 atomic_t usage;
73491 - atomic_t sequence; /* Tx data packet sequence counter */
73492 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
73493 u32 abort_code; /* local/remote abort code */
73494 enum { /* current state of call */
73495 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
73496 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
73497 */
73498 extern atomic_t rxrpc_n_skbs;
73499 extern __be32 rxrpc_epoch;
73500 -extern atomic_t rxrpc_debug_id;
73501 +extern atomic_unchecked_t rxrpc_debug_id;
73502 extern struct workqueue_struct *rxrpc_workqueue;
73503
73504 /*
73505 diff -urNp linux-2.6.32.45/net/rxrpc/ar-key.c linux-2.6.32.45/net/rxrpc/ar-key.c
73506 --- linux-2.6.32.45/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
73507 +++ linux-2.6.32.45/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
73508 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
73509 return ret;
73510
73511 plen -= sizeof(*token);
73512 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73513 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73514 if (!token)
73515 return -ENOMEM;
73516
73517 - token->kad = kmalloc(plen, GFP_KERNEL);
73518 + token->kad = kzalloc(plen, GFP_KERNEL);
73519 if (!token->kad) {
73520 kfree(token);
73521 return -ENOMEM;
73522 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
73523 goto error;
73524
73525 ret = -ENOMEM;
73526 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73527 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73528 if (!token)
73529 goto error;
73530 - token->kad = kmalloc(plen, GFP_KERNEL);
73531 + token->kad = kzalloc(plen, GFP_KERNEL);
73532 if (!token->kad)
73533 goto error_free;
73534
73535 diff -urNp linux-2.6.32.45/net/rxrpc/ar-local.c linux-2.6.32.45/net/rxrpc/ar-local.c
73536 --- linux-2.6.32.45/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
73537 +++ linux-2.6.32.45/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
73538 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
73539 spin_lock_init(&local->lock);
73540 rwlock_init(&local->services_lock);
73541 atomic_set(&local->usage, 1);
73542 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
73543 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73544 memcpy(&local->srx, srx, sizeof(*srx));
73545 }
73546
73547 diff -urNp linux-2.6.32.45/net/rxrpc/ar-output.c linux-2.6.32.45/net/rxrpc/ar-output.c
73548 --- linux-2.6.32.45/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
73549 +++ linux-2.6.32.45/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
73550 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
73551 sp->hdr.cid = call->cid;
73552 sp->hdr.callNumber = call->call_id;
73553 sp->hdr.seq =
73554 - htonl(atomic_inc_return(&call->sequence));
73555 + htonl(atomic_inc_return_unchecked(&call->sequence));
73556 sp->hdr.serial =
73557 - htonl(atomic_inc_return(&conn->serial));
73558 + htonl(atomic_inc_return_unchecked(&conn->serial));
73559 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
73560 sp->hdr.userStatus = 0;
73561 sp->hdr.securityIndex = conn->security_ix;
73562 diff -urNp linux-2.6.32.45/net/rxrpc/ar-peer.c linux-2.6.32.45/net/rxrpc/ar-peer.c
73563 --- linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
73564 +++ linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
73565 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
73566 INIT_LIST_HEAD(&peer->error_targets);
73567 spin_lock_init(&peer->lock);
73568 atomic_set(&peer->usage, 1);
73569 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
73570 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73571 memcpy(&peer->srx, srx, sizeof(*srx));
73572
73573 rxrpc_assess_MTU_size(peer);
73574 diff -urNp linux-2.6.32.45/net/rxrpc/ar-proc.c linux-2.6.32.45/net/rxrpc/ar-proc.c
73575 --- linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
73576 +++ linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
73577 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
73578 atomic_read(&conn->usage),
73579 rxrpc_conn_states[conn->state],
73580 key_serial(conn->key),
73581 - atomic_read(&conn->serial),
73582 - atomic_read(&conn->hi_serial));
73583 + atomic_read_unchecked(&conn->serial),
73584 + atomic_read_unchecked(&conn->hi_serial));
73585
73586 return 0;
73587 }
73588 diff -urNp linux-2.6.32.45/net/rxrpc/ar-transport.c linux-2.6.32.45/net/rxrpc/ar-transport.c
73589 --- linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
73590 +++ linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
73591 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
73592 spin_lock_init(&trans->client_lock);
73593 rwlock_init(&trans->conn_lock);
73594 atomic_set(&trans->usage, 1);
73595 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
73596 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73597
73598 if (peer->srx.transport.family == AF_INET) {
73599 switch (peer->srx.transport_type) {
73600 diff -urNp linux-2.6.32.45/net/rxrpc/rxkad.c linux-2.6.32.45/net/rxrpc/rxkad.c
73601 --- linux-2.6.32.45/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
73602 +++ linux-2.6.32.45/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
73603 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
73604 u16 check;
73605 int nsg;
73606
73607 + pax_track_stack();
73608 +
73609 sp = rxrpc_skb(skb);
73610
73611 _enter("");
73612 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
73613 u16 check;
73614 int nsg;
73615
73616 + pax_track_stack();
73617 +
73618 _enter("");
73619
73620 sp = rxrpc_skb(skb);
73621 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
73622
73623 len = iov[0].iov_len + iov[1].iov_len;
73624
73625 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73626 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73627 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
73628
73629 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73630 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
73631
73632 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
73633
73634 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
73635 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73636 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
73637
73638 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
73639 diff -urNp linux-2.6.32.45/net/sctp/proc.c linux-2.6.32.45/net/sctp/proc.c
73640 --- linux-2.6.32.45/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
73641 +++ linux-2.6.32.45/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
73642 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
73643 sctp_for_each_hentry(epb, node, &head->chain) {
73644 ep = sctp_ep(epb);
73645 sk = epb->sk;
73646 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
73647 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
73648 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73649 + NULL, NULL,
73650 +#else
73651 + ep, sk,
73652 +#endif
73653 sctp_sk(sk)->type, sk->sk_state, hash,
73654 epb->bind_addr.port,
73655 sock_i_uid(sk), sock_i_ino(sk));
73656 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
73657 seq_printf(seq,
73658 "%8p %8p %-3d %-3d %-2d %-4d "
73659 "%4d %8d %8d %7d %5lu %-5d %5d ",
73660 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
73661 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73662 + NULL, NULL,
73663 +#else
73664 + assoc, sk,
73665 +#endif
73666 + sctp_sk(sk)->type, sk->sk_state,
73667 assoc->state, hash,
73668 assoc->assoc_id,
73669 assoc->sndbuf_used,
73670 diff -urNp linux-2.6.32.45/net/sctp/socket.c linux-2.6.32.45/net/sctp/socket.c
73671 --- linux-2.6.32.45/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
73672 +++ linux-2.6.32.45/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
73673 @@ -5802,7 +5802,6 @@ pp_found:
73674 */
73675 int reuse = sk->sk_reuse;
73676 struct sock *sk2;
73677 - struct hlist_node *node;
73678
73679 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
73680 if (pp->fastreuse && sk->sk_reuse &&
73681 diff -urNp linux-2.6.32.45/net/socket.c linux-2.6.32.45/net/socket.c
73682 --- linux-2.6.32.45/net/socket.c 2011-03-27 14:31:47.000000000 -0400
73683 +++ linux-2.6.32.45/net/socket.c 2011-05-16 21:46:57.000000000 -0400
73684 @@ -87,6 +87,7 @@
73685 #include <linux/wireless.h>
73686 #include <linux/nsproxy.h>
73687 #include <linux/magic.h>
73688 +#include <linux/in.h>
73689
73690 #include <asm/uaccess.h>
73691 #include <asm/unistd.h>
73692 @@ -97,6 +98,21 @@
73693 #include <net/sock.h>
73694 #include <linux/netfilter.h>
73695
73696 +extern void gr_attach_curr_ip(const struct sock *sk);
73697 +extern int gr_handle_sock_all(const int family, const int type,
73698 + const int protocol);
73699 +extern int gr_handle_sock_server(const struct sockaddr *sck);
73700 +extern int gr_handle_sock_server_other(const struct sock *sck);
73701 +extern int gr_handle_sock_client(const struct sockaddr *sck);
73702 +extern int gr_search_connect(struct socket * sock,
73703 + struct sockaddr_in * addr);
73704 +extern int gr_search_bind(struct socket * sock,
73705 + struct sockaddr_in * addr);
73706 +extern int gr_search_listen(struct socket * sock);
73707 +extern int gr_search_accept(struct socket * sock);
73708 +extern int gr_search_socket(const int domain, const int type,
73709 + const int protocol);
73710 +
73711 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
73712 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
73713 unsigned long nr_segs, loff_t pos);
73714 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
73715 mnt);
73716 }
73717
73718 -static struct vfsmount *sock_mnt __read_mostly;
73719 +struct vfsmount *sock_mnt __read_mostly;
73720
73721 static struct file_system_type sock_fs_type = {
73722 .name = "sockfs",
73723 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
73724 return -EAFNOSUPPORT;
73725 if (type < 0 || type >= SOCK_MAX)
73726 return -EINVAL;
73727 + if (protocol < 0)
73728 + return -EINVAL;
73729
73730 /* Compatibility.
73731
73732 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
73733 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
73734 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
73735
73736 + if(!gr_search_socket(family, type, protocol)) {
73737 + retval = -EACCES;
73738 + goto out;
73739 + }
73740 +
73741 + if (gr_handle_sock_all(family, type, protocol)) {
73742 + retval = -EACCES;
73743 + goto out;
73744 + }
73745 +
73746 retval = sock_create(family, type, protocol, &sock);
73747 if (retval < 0)
73748 goto out;
73749 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73750 if (sock) {
73751 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
73752 if (err >= 0) {
73753 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
73754 + err = -EACCES;
73755 + goto error;
73756 + }
73757 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
73758 + if (err)
73759 + goto error;
73760 +
73761 err = security_socket_bind(sock,
73762 (struct sockaddr *)&address,
73763 addrlen);
73764 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73765 (struct sockaddr *)
73766 &address, addrlen);
73767 }
73768 +error:
73769 fput_light(sock->file, fput_needed);
73770 }
73771 return err;
73772 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
73773 if ((unsigned)backlog > somaxconn)
73774 backlog = somaxconn;
73775
73776 + if (gr_handle_sock_server_other(sock->sk)) {
73777 + err = -EPERM;
73778 + goto error;
73779 + }
73780 +
73781 + err = gr_search_listen(sock);
73782 + if (err)
73783 + goto error;
73784 +
73785 err = security_socket_listen(sock, backlog);
73786 if (!err)
73787 err = sock->ops->listen(sock, backlog);
73788
73789 +error:
73790 fput_light(sock->file, fput_needed);
73791 }
73792 return err;
73793 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73794 newsock->type = sock->type;
73795 newsock->ops = sock->ops;
73796
73797 + if (gr_handle_sock_server_other(sock->sk)) {
73798 + err = -EPERM;
73799 + sock_release(newsock);
73800 + goto out_put;
73801 + }
73802 +
73803 + err = gr_search_accept(sock);
73804 + if (err) {
73805 + sock_release(newsock);
73806 + goto out_put;
73807 + }
73808 +
73809 /*
73810 * We don't need try_module_get here, as the listening socket (sock)
73811 * has the protocol module (sock->ops->owner) held.
73812 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73813 fd_install(newfd, newfile);
73814 err = newfd;
73815
73816 + gr_attach_curr_ip(newsock->sk);
73817 +
73818 out_put:
73819 fput_light(sock->file, fput_needed);
73820 out:
73821 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73822 int, addrlen)
73823 {
73824 struct socket *sock;
73825 + struct sockaddr *sck;
73826 struct sockaddr_storage address;
73827 int err, fput_needed;
73828
73829 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73830 if (err < 0)
73831 goto out_put;
73832
73833 + sck = (struct sockaddr *)&address;
73834 +
73835 + if (gr_handle_sock_client(sck)) {
73836 + err = -EACCES;
73837 + goto out_put;
73838 + }
73839 +
73840 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
73841 + if (err)
73842 + goto out_put;
73843 +
73844 err =
73845 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
73846 if (err)
73847 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
73848 int err, ctl_len, iov_size, total_len;
73849 int fput_needed;
73850
73851 + pax_track_stack();
73852 +
73853 err = -EFAULT;
73854 if (MSG_CMSG_COMPAT & flags) {
73855 if (get_compat_msghdr(&msg_sys, msg_compat))
73856 diff -urNp linux-2.6.32.45/net/sunrpc/sched.c linux-2.6.32.45/net/sunrpc/sched.c
73857 --- linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:35:30.000000000 -0400
73858 +++ linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:34:01.000000000 -0400
73859 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
73860 #ifdef RPC_DEBUG
73861 static void rpc_task_set_debuginfo(struct rpc_task *task)
73862 {
73863 - static atomic_t rpc_pid;
73864 + static atomic_unchecked_t rpc_pid;
73865
73866 task->tk_magic = RPC_TASK_MAGIC_ID;
73867 - task->tk_pid = atomic_inc_return(&rpc_pid);
73868 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
73869 }
73870 #else
73871 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
73872 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c
73873 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
73874 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
73875 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
73876 static unsigned int min_max_inline = 4096;
73877 static unsigned int max_max_inline = 65536;
73878
73879 -atomic_t rdma_stat_recv;
73880 -atomic_t rdma_stat_read;
73881 -atomic_t rdma_stat_write;
73882 -atomic_t rdma_stat_sq_starve;
73883 -atomic_t rdma_stat_rq_starve;
73884 -atomic_t rdma_stat_rq_poll;
73885 -atomic_t rdma_stat_rq_prod;
73886 -atomic_t rdma_stat_sq_poll;
73887 -atomic_t rdma_stat_sq_prod;
73888 +atomic_unchecked_t rdma_stat_recv;
73889 +atomic_unchecked_t rdma_stat_read;
73890 +atomic_unchecked_t rdma_stat_write;
73891 +atomic_unchecked_t rdma_stat_sq_starve;
73892 +atomic_unchecked_t rdma_stat_rq_starve;
73893 +atomic_unchecked_t rdma_stat_rq_poll;
73894 +atomic_unchecked_t rdma_stat_rq_prod;
73895 +atomic_unchecked_t rdma_stat_sq_poll;
73896 +atomic_unchecked_t rdma_stat_sq_prod;
73897
73898 /* Temporary NFS request map and context caches */
73899 struct kmem_cache *svc_rdma_map_cachep;
73900 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
73901 len -= *ppos;
73902 if (len > *lenp)
73903 len = *lenp;
73904 - if (len && copy_to_user(buffer, str_buf, len))
73905 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
73906 return -EFAULT;
73907 *lenp = len;
73908 *ppos += len;
73909 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
73910 {
73911 .procname = "rdma_stat_read",
73912 .data = &rdma_stat_read,
73913 - .maxlen = sizeof(atomic_t),
73914 + .maxlen = sizeof(atomic_unchecked_t),
73915 .mode = 0644,
73916 .proc_handler = &read_reset_stat,
73917 },
73918 {
73919 .procname = "rdma_stat_recv",
73920 .data = &rdma_stat_recv,
73921 - .maxlen = sizeof(atomic_t),
73922 + .maxlen = sizeof(atomic_unchecked_t),
73923 .mode = 0644,
73924 .proc_handler = &read_reset_stat,
73925 },
73926 {
73927 .procname = "rdma_stat_write",
73928 .data = &rdma_stat_write,
73929 - .maxlen = sizeof(atomic_t),
73930 + .maxlen = sizeof(atomic_unchecked_t),
73931 .mode = 0644,
73932 .proc_handler = &read_reset_stat,
73933 },
73934 {
73935 .procname = "rdma_stat_sq_starve",
73936 .data = &rdma_stat_sq_starve,
73937 - .maxlen = sizeof(atomic_t),
73938 + .maxlen = sizeof(atomic_unchecked_t),
73939 .mode = 0644,
73940 .proc_handler = &read_reset_stat,
73941 },
73942 {
73943 .procname = "rdma_stat_rq_starve",
73944 .data = &rdma_stat_rq_starve,
73945 - .maxlen = sizeof(atomic_t),
73946 + .maxlen = sizeof(atomic_unchecked_t),
73947 .mode = 0644,
73948 .proc_handler = &read_reset_stat,
73949 },
73950 {
73951 .procname = "rdma_stat_rq_poll",
73952 .data = &rdma_stat_rq_poll,
73953 - .maxlen = sizeof(atomic_t),
73954 + .maxlen = sizeof(atomic_unchecked_t),
73955 .mode = 0644,
73956 .proc_handler = &read_reset_stat,
73957 },
73958 {
73959 .procname = "rdma_stat_rq_prod",
73960 .data = &rdma_stat_rq_prod,
73961 - .maxlen = sizeof(atomic_t),
73962 + .maxlen = sizeof(atomic_unchecked_t),
73963 .mode = 0644,
73964 .proc_handler = &read_reset_stat,
73965 },
73966 {
73967 .procname = "rdma_stat_sq_poll",
73968 .data = &rdma_stat_sq_poll,
73969 - .maxlen = sizeof(atomic_t),
73970 + .maxlen = sizeof(atomic_unchecked_t),
73971 .mode = 0644,
73972 .proc_handler = &read_reset_stat,
73973 },
73974 {
73975 .procname = "rdma_stat_sq_prod",
73976 .data = &rdma_stat_sq_prod,
73977 - .maxlen = sizeof(atomic_t),
73978 + .maxlen = sizeof(atomic_unchecked_t),
73979 .mode = 0644,
73980 .proc_handler = &read_reset_stat,
73981 },
73982 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
73983 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
73984 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
73985 @@ -495,7 +495,7 @@ next_sge:
73986 svc_rdma_put_context(ctxt, 0);
73987 goto out;
73988 }
73989 - atomic_inc(&rdma_stat_read);
73990 + atomic_inc_unchecked(&rdma_stat_read);
73991
73992 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
73993 chl_map->ch[ch_no].count -= read_wr.num_sge;
73994 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
73995 dto_q);
73996 list_del_init(&ctxt->dto_q);
73997 } else {
73998 - atomic_inc(&rdma_stat_rq_starve);
73999 + atomic_inc_unchecked(&rdma_stat_rq_starve);
74000 clear_bit(XPT_DATA, &xprt->xpt_flags);
74001 ctxt = NULL;
74002 }
74003 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74004 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
74005 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
74006 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
74007 - atomic_inc(&rdma_stat_recv);
74008 + atomic_inc_unchecked(&rdma_stat_recv);
74009
74010 /* Build up the XDR from the receive buffers. */
74011 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
74012 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c
74013 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
74014 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
74015 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
74016 write_wr.wr.rdma.remote_addr = to;
74017
74018 /* Post It */
74019 - atomic_inc(&rdma_stat_write);
74020 + atomic_inc_unchecked(&rdma_stat_write);
74021 if (svc_rdma_send(xprt, &write_wr))
74022 goto err;
74023 return 0;
74024 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c
74025 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
74026 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
74027 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
74028 return;
74029
74030 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
74031 - atomic_inc(&rdma_stat_rq_poll);
74032 + atomic_inc_unchecked(&rdma_stat_rq_poll);
74033
74034 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
74035 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
74036 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
74037 }
74038
74039 if (ctxt)
74040 - atomic_inc(&rdma_stat_rq_prod);
74041 + atomic_inc_unchecked(&rdma_stat_rq_prod);
74042
74043 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
74044 /*
74045 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
74046 return;
74047
74048 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
74049 - atomic_inc(&rdma_stat_sq_poll);
74050 + atomic_inc_unchecked(&rdma_stat_sq_poll);
74051 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
74052 if (wc.status != IB_WC_SUCCESS)
74053 /* Close the transport */
74054 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
74055 }
74056
74057 if (ctxt)
74058 - atomic_inc(&rdma_stat_sq_prod);
74059 + atomic_inc_unchecked(&rdma_stat_sq_prod);
74060 }
74061
74062 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
74063 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
74064 spin_lock_bh(&xprt->sc_lock);
74065 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
74066 spin_unlock_bh(&xprt->sc_lock);
74067 - atomic_inc(&rdma_stat_sq_starve);
74068 + atomic_inc_unchecked(&rdma_stat_sq_starve);
74069
74070 /* See if we can opportunistically reap SQ WR to make room */
74071 sq_cq_reap(xprt);
74072 diff -urNp linux-2.6.32.45/net/sysctl_net.c linux-2.6.32.45/net/sysctl_net.c
74073 --- linux-2.6.32.45/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
74074 +++ linux-2.6.32.45/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
74075 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
74076 struct ctl_table *table)
74077 {
74078 /* Allow network administrator to have same access as root. */
74079 - if (capable(CAP_NET_ADMIN)) {
74080 + if (capable_nolog(CAP_NET_ADMIN)) {
74081 int mode = (table->mode >> 6) & 7;
74082 return (mode << 6) | (mode << 3) | mode;
74083 }
74084 diff -urNp linux-2.6.32.45/net/unix/af_unix.c linux-2.6.32.45/net/unix/af_unix.c
74085 --- linux-2.6.32.45/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
74086 +++ linux-2.6.32.45/net/unix/af_unix.c 2011-07-18 18:17:33.000000000 -0400
74087 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
74088 err = -ECONNREFUSED;
74089 if (!S_ISSOCK(inode->i_mode))
74090 goto put_fail;
74091 +
74092 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
74093 + err = -EACCES;
74094 + goto put_fail;
74095 + }
74096 +
74097 u = unix_find_socket_byinode(net, inode);
74098 if (!u)
74099 goto put_fail;
74100 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
74101 if (u) {
74102 struct dentry *dentry;
74103 dentry = unix_sk(u)->dentry;
74104 +
74105 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
74106 + err = -EPERM;
74107 + sock_put(u);
74108 + goto fail;
74109 + }
74110 +
74111 if (dentry)
74112 touch_atime(unix_sk(u)->mnt, dentry);
74113 } else
74114 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
74115 err = security_path_mknod(&nd.path, dentry, mode, 0);
74116 if (err)
74117 goto out_mknod_drop_write;
74118 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
74119 + err = -EACCES;
74120 + goto out_mknod_drop_write;
74121 + }
74122 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
74123 out_mknod_drop_write:
74124 mnt_drop_write(nd.path.mnt);
74125 if (err)
74126 goto out_mknod_dput;
74127 +
74128 + gr_handle_create(dentry, nd.path.mnt);
74129 +
74130 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
74131 dput(nd.path.dentry);
74132 nd.path.dentry = dentry;
74133 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file
74134 unix_state_lock(s);
74135
74136 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
74137 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74138 + NULL,
74139 +#else
74140 s,
74141 +#endif
74142 atomic_read(&s->sk_refcnt),
74143 0,
74144 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
74145 diff -urNp linux-2.6.32.45/net/wireless/core.c linux-2.6.32.45/net/wireless/core.c
74146 --- linux-2.6.32.45/net/wireless/core.c 2011-03-27 14:31:47.000000000 -0400
74147 +++ linux-2.6.32.45/net/wireless/core.c 2011-08-05 20:33:55.000000000 -0400
74148 @@ -367,7 +367,7 @@ struct wiphy *wiphy_new(const struct cfg
74149
74150 wiphy_net_set(&rdev->wiphy, &init_net);
74151
74152 - rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74153 + *(void **)&rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74154 rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
74155 &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
74156 &rdev->rfkill_ops, rdev);
74157 @@ -505,7 +505,7 @@ void wiphy_rfkill_start_polling(struct w
74158
74159 if (!rdev->ops->rfkill_poll)
74160 return;
74161 - rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74162 + *(void **)&rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74163 rfkill_resume_polling(rdev->rfkill);
74164 }
74165 EXPORT_SYMBOL(wiphy_rfkill_start_polling);
74166 diff -urNp linux-2.6.32.45/net/wireless/wext.c linux-2.6.32.45/net/wireless/wext.c
74167 --- linux-2.6.32.45/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
74168 +++ linux-2.6.32.45/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
74169 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
74170 */
74171
74172 /* Support for very large requests */
74173 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
74174 - (user_length > descr->max_tokens)) {
74175 + if (user_length > descr->max_tokens) {
74176 /* Allow userspace to GET more than max so
74177 * we can support any size GET requests.
74178 * There is still a limit : -ENOMEM.
74179 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
74180 }
74181 }
74182
74183 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
74184 - /*
74185 - * If this is a GET, but not NOMAX, it means that the extra
74186 - * data is not bounded by userspace, but by max_tokens. Thus
74187 - * set the length to max_tokens. This matches the extra data
74188 - * allocation.
74189 - * The driver should fill it with the number of tokens it
74190 - * provided, and it may check iwp->length rather than having
74191 - * knowledge of max_tokens. If the driver doesn't change the
74192 - * iwp->length, this ioctl just copies back max_token tokens
74193 - * filled with zeroes. Hopefully the driver isn't claiming
74194 - * them to be valid data.
74195 - */
74196 - iwp->length = descr->max_tokens;
74197 - }
74198 -
74199 err = handler(dev, info, (union iwreq_data *) iwp, extra);
74200
74201 iwp->length += essid_compat;
74202 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_policy.c linux-2.6.32.45/net/xfrm/xfrm_policy.c
74203 --- linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
74204 +++ linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
74205 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
74206 hlist_add_head(&policy->bydst, chain);
74207 xfrm_pol_hold(policy);
74208 net->xfrm.policy_count[dir]++;
74209 - atomic_inc(&flow_cache_genid);
74210 + atomic_inc_unchecked(&flow_cache_genid);
74211 if (delpol)
74212 __xfrm_policy_unlink(delpol, dir);
74213 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74214 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
74215 write_unlock_bh(&xfrm_policy_lock);
74216
74217 if (ret && delete) {
74218 - atomic_inc(&flow_cache_genid);
74219 + atomic_inc_unchecked(&flow_cache_genid);
74220 xfrm_policy_kill(ret);
74221 }
74222 return ret;
74223 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
74224 write_unlock_bh(&xfrm_policy_lock);
74225
74226 if (ret && delete) {
74227 - atomic_inc(&flow_cache_genid);
74228 + atomic_inc_unchecked(&flow_cache_genid);
74229 xfrm_policy_kill(ret);
74230 }
74231 return ret;
74232 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
74233 }
74234
74235 }
74236 - atomic_inc(&flow_cache_genid);
74237 + atomic_inc_unchecked(&flow_cache_genid);
74238 out:
74239 write_unlock_bh(&xfrm_policy_lock);
74240 return err;
74241 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
74242 write_unlock_bh(&xfrm_policy_lock);
74243 if (pol) {
74244 if (dir < XFRM_POLICY_MAX)
74245 - atomic_inc(&flow_cache_genid);
74246 + atomic_inc_unchecked(&flow_cache_genid);
74247 xfrm_policy_kill(pol);
74248 return 0;
74249 }
74250 @@ -1477,7 +1477,7 @@ free_dst:
74251 goto out;
74252 }
74253
74254 -static int inline
74255 +static inline int
74256 xfrm_dst_alloc_copy(void **target, void *src, int size)
74257 {
74258 if (!*target) {
74259 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
74260 return 0;
74261 }
74262
74263 -static int inline
74264 +static inline int
74265 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
74266 {
74267 #ifdef CONFIG_XFRM_SUB_POLICY
74268 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
74269 #endif
74270 }
74271
74272 -static int inline
74273 +static inline int
74274 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
74275 {
74276 #ifdef CONFIG_XFRM_SUB_POLICY
74277 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
74278 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
74279
74280 restart:
74281 - genid = atomic_read(&flow_cache_genid);
74282 + genid = atomic_read_unchecked(&flow_cache_genid);
74283 policy = NULL;
74284 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
74285 pols[pi] = NULL;
74286 @@ -1680,7 +1680,7 @@ restart:
74287 goto error;
74288 }
74289 if (nx == -EAGAIN ||
74290 - genid != atomic_read(&flow_cache_genid)) {
74291 + genid != atomic_read_unchecked(&flow_cache_genid)) {
74292 xfrm_pols_put(pols, npols);
74293 goto restart;
74294 }
74295 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_user.c linux-2.6.32.45/net/xfrm/xfrm_user.c
74296 --- linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
74297 +++ linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
74298 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
74299 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
74300 int i;
74301
74302 + pax_track_stack();
74303 +
74304 if (xp->xfrm_nr == 0)
74305 return 0;
74306
74307 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
74308 int err;
74309 int n = 0;
74310
74311 + pax_track_stack();
74312 +
74313 if (attrs[XFRMA_MIGRATE] == NULL)
74314 return -EINVAL;
74315
74316 diff -urNp linux-2.6.32.45/samples/kobject/kset-example.c linux-2.6.32.45/samples/kobject/kset-example.c
74317 --- linux-2.6.32.45/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
74318 +++ linux-2.6.32.45/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
74319 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
74320 }
74321
74322 /* Our custom sysfs_ops that we will associate with our ktype later on */
74323 -static struct sysfs_ops foo_sysfs_ops = {
74324 +static const struct sysfs_ops foo_sysfs_ops = {
74325 .show = foo_attr_show,
74326 .store = foo_attr_store,
74327 };
74328 diff -urNp linux-2.6.32.45/scripts/basic/fixdep.c linux-2.6.32.45/scripts/basic/fixdep.c
74329 --- linux-2.6.32.45/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
74330 +++ linux-2.6.32.45/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
74331 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
74332
74333 static void parse_config_file(char *map, size_t len)
74334 {
74335 - int *end = (int *) (map + len);
74336 + unsigned int *end = (unsigned int *) (map + len);
74337 /* start at +1, so that p can never be < map */
74338 - int *m = (int *) map + 1;
74339 + unsigned int *m = (unsigned int *) map + 1;
74340 char *p, *q;
74341
74342 for (; m < end; m++) {
74343 @@ -371,7 +371,7 @@ static void print_deps(void)
74344 static void traps(void)
74345 {
74346 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
74347 - int *p = (int *)test;
74348 + unsigned int *p = (unsigned int *)test;
74349
74350 if (*p != INT_CONF) {
74351 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
74352 diff -urNp linux-2.6.32.45/scripts/gcc-plugin.sh linux-2.6.32.45/scripts/gcc-plugin.sh
74353 --- linux-2.6.32.45/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
74354 +++ linux-2.6.32.45/scripts/gcc-plugin.sh 2011-08-05 20:33:55.000000000 -0400
74355 @@ -0,0 +1,3 @@
74356 +#!/bin/sh
74357 +
74358 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
74359 diff -urNp linux-2.6.32.45/scripts/Makefile.build linux-2.6.32.45/scripts/Makefile.build
74360 --- linux-2.6.32.45/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
74361 +++ linux-2.6.32.45/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
74362 @@ -59,7 +59,7 @@ endif
74363 endif
74364
74365 # Do not include host rules unless needed
74366 -ifneq ($(hostprogs-y)$(hostprogs-m),)
74367 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
74368 include scripts/Makefile.host
74369 endif
74370
74371 diff -urNp linux-2.6.32.45/scripts/Makefile.clean linux-2.6.32.45/scripts/Makefile.clean
74372 --- linux-2.6.32.45/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
74373 +++ linux-2.6.32.45/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
74374 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
74375 __clean-files := $(extra-y) $(always) \
74376 $(targets) $(clean-files) \
74377 $(host-progs) \
74378 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
74379 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
74380 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
74381
74382 # as clean-files is given relative to the current directory, this adds
74383 # a $(obj) prefix, except for absolute paths
74384 diff -urNp linux-2.6.32.45/scripts/Makefile.host linux-2.6.32.45/scripts/Makefile.host
74385 --- linux-2.6.32.45/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
74386 +++ linux-2.6.32.45/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
74387 @@ -31,6 +31,7 @@
74388 # Note: Shared libraries consisting of C++ files are not supported
74389
74390 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
74391 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
74392
74393 # C code
74394 # Executables compiled from a single .c file
74395 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
74396 # Shared libaries (only .c supported)
74397 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
74398 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
74399 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
74400 # Remove .so files from "xxx-objs"
74401 host-cobjs := $(filter-out %.so,$(host-cobjs))
74402
74403 diff -urNp linux-2.6.32.45/scripts/mod/file2alias.c linux-2.6.32.45/scripts/mod/file2alias.c
74404 --- linux-2.6.32.45/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
74405 +++ linux-2.6.32.45/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
74406 @@ -72,7 +72,7 @@ static void device_id_check(const char *
74407 unsigned long size, unsigned long id_size,
74408 void *symval)
74409 {
74410 - int i;
74411 + unsigned int i;
74412
74413 if (size % id_size || size < id_size) {
74414 if (cross_build != 0)
74415 @@ -102,7 +102,7 @@ static void device_id_check(const char *
74416 /* USB is special because the bcdDevice can be matched against a numeric range */
74417 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
74418 static void do_usb_entry(struct usb_device_id *id,
74419 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
74420 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
74421 unsigned char range_lo, unsigned char range_hi,
74422 struct module *mod)
74423 {
74424 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
74425 for (i = 0; i < count; i++) {
74426 const char *id = (char *)devs[i].id;
74427 char acpi_id[sizeof(devs[0].id)];
74428 - int j;
74429 + unsigned int j;
74430
74431 buf_printf(&mod->dev_table_buf,
74432 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74433 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
74434
74435 for (j = 0; j < PNP_MAX_DEVICES; j++) {
74436 const char *id = (char *)card->devs[j].id;
74437 - int i2, j2;
74438 + unsigned int i2, j2;
74439 int dup = 0;
74440
74441 if (!id[0])
74442 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
74443 /* add an individual alias for every device entry */
74444 if (!dup) {
74445 char acpi_id[sizeof(card->devs[0].id)];
74446 - int k;
74447 + unsigned int k;
74448
74449 buf_printf(&mod->dev_table_buf,
74450 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74451 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
74452 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
74453 char *alias)
74454 {
74455 - int i, j;
74456 + unsigned int i, j;
74457
74458 sprintf(alias, "dmi*");
74459
74460 diff -urNp linux-2.6.32.45/scripts/mod/modpost.c linux-2.6.32.45/scripts/mod/modpost.c
74461 --- linux-2.6.32.45/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
74462 +++ linux-2.6.32.45/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
74463 @@ -835,6 +835,7 @@ enum mismatch {
74464 INIT_TO_EXIT,
74465 EXIT_TO_INIT,
74466 EXPORT_TO_INIT_EXIT,
74467 + DATA_TO_TEXT
74468 };
74469
74470 struct sectioncheck {
74471 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
74472 .fromsec = { "__ksymtab*", NULL },
74473 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
74474 .mismatch = EXPORT_TO_INIT_EXIT
74475 +},
74476 +/* Do not reference code from writable data */
74477 +{
74478 + .fromsec = { DATA_SECTIONS, NULL },
74479 + .tosec = { TEXT_SECTIONS, NULL },
74480 + .mismatch = DATA_TO_TEXT
74481 }
74482 };
74483
74484 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
74485 continue;
74486 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
74487 continue;
74488 - if (sym->st_value == addr)
74489 - return sym;
74490 /* Find a symbol nearby - addr are maybe negative */
74491 d = sym->st_value - addr;
74492 + if (d == 0)
74493 + return sym;
74494 if (d < 0)
74495 d = addr - sym->st_value;
74496 if (d < distance) {
74497 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
74498 "Fix this by removing the %sannotation of %s "
74499 "or drop the export.\n",
74500 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
74501 + case DATA_TO_TEXT:
74502 +/*
74503 + fprintf(stderr,
74504 + "The variable %s references\n"
74505 + "the %s %s%s%s\n",
74506 + fromsym, to, sec2annotation(tosec), tosym, to_p);
74507 +*/
74508 + break;
74509 case NO_MISMATCH:
74510 /* To get warnings on missing members */
74511 break;
74512 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
74513 static void check_sec_ref(struct module *mod, const char *modname,
74514 struct elf_info *elf)
74515 {
74516 - int i;
74517 + unsigned int i;
74518 Elf_Shdr *sechdrs = elf->sechdrs;
74519
74520 /* Walk through all sections */
74521 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
74522 va_end(ap);
74523 }
74524
74525 -void buf_write(struct buffer *buf, const char *s, int len)
74526 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
74527 {
74528 if (buf->size - buf->pos < len) {
74529 buf->size += len + SZ;
74530 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
74531 if (fstat(fileno(file), &st) < 0)
74532 goto close_write;
74533
74534 - if (st.st_size != b->pos)
74535 + if (st.st_size != (off_t)b->pos)
74536 goto close_write;
74537
74538 tmp = NOFAIL(malloc(b->pos));
74539 diff -urNp linux-2.6.32.45/scripts/mod/modpost.h linux-2.6.32.45/scripts/mod/modpost.h
74540 --- linux-2.6.32.45/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
74541 +++ linux-2.6.32.45/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
74542 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
74543
74544 struct buffer {
74545 char *p;
74546 - int pos;
74547 - int size;
74548 + unsigned int pos;
74549 + unsigned int size;
74550 };
74551
74552 void __attribute__((format(printf, 2, 3)))
74553 buf_printf(struct buffer *buf, const char *fmt, ...);
74554
74555 void
74556 -buf_write(struct buffer *buf, const char *s, int len);
74557 +buf_write(struct buffer *buf, const char *s, unsigned int len);
74558
74559 struct module {
74560 struct module *next;
74561 diff -urNp linux-2.6.32.45/scripts/mod/sumversion.c linux-2.6.32.45/scripts/mod/sumversion.c
74562 --- linux-2.6.32.45/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
74563 +++ linux-2.6.32.45/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
74564 @@ -455,7 +455,7 @@ static void write_version(const char *fi
74565 goto out;
74566 }
74567
74568 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
74569 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
74570 warn("writing sum in %s failed: %s\n",
74571 filename, strerror(errno));
74572 goto out;
74573 diff -urNp linux-2.6.32.45/scripts/package/mkspec linux-2.6.32.45/scripts/package/mkspec
74574 --- linux-2.6.32.45/scripts/package/mkspec 2011-03-27 14:31:47.000000000 -0400
74575 +++ linux-2.6.32.45/scripts/package/mkspec 2011-07-19 18:19:12.000000000 -0400
74576 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM
74577 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
74578 echo "%endif"
74579
74580 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
74581 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
74582 echo "%ifarch ia64"
74583 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
74584 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
74585 diff -urNp linux-2.6.32.45/scripts/pnmtologo.c linux-2.6.32.45/scripts/pnmtologo.c
74586 --- linux-2.6.32.45/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
74587 +++ linux-2.6.32.45/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
74588 @@ -237,14 +237,14 @@ static void write_header(void)
74589 fprintf(out, " * Linux logo %s\n", logoname);
74590 fputs(" */\n\n", out);
74591 fputs("#include <linux/linux_logo.h>\n\n", out);
74592 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
74593 + fprintf(out, "static unsigned char %s_data[] = {\n",
74594 logoname);
74595 }
74596
74597 static void write_footer(void)
74598 {
74599 fputs("\n};\n\n", out);
74600 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
74601 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
74602 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
74603 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
74604 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
74605 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
74606 fputs("\n};\n\n", out);
74607
74608 /* write logo clut */
74609 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
74610 + fprintf(out, "static unsigned char %s_clut[] = {\n",
74611 logoname);
74612 write_hex_cnt = 0;
74613 for (i = 0; i < logo_clutsize; i++) {
74614 diff -urNp linux-2.6.32.45/scripts/tags.sh linux-2.6.32.45/scripts/tags.sh
74615 --- linux-2.6.32.45/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
74616 +++ linux-2.6.32.45/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
74617 @@ -93,6 +93,11 @@ docscope()
74618 cscope -b -f cscope.out
74619 }
74620
74621 +dogtags()
74622 +{
74623 + all_sources | gtags -f -
74624 +}
74625 +
74626 exuberant()
74627 {
74628 all_sources | xargs $1 -a \
74629 @@ -164,6 +169,10 @@ case "$1" in
74630 docscope
74631 ;;
74632
74633 + "gtags")
74634 + dogtags
74635 + ;;
74636 +
74637 "tags")
74638 rm -f tags
74639 xtags ctags
74640 diff -urNp linux-2.6.32.45/security/capability.c linux-2.6.32.45/security/capability.c
74641 --- linux-2.6.32.45/security/capability.c 2011-03-27 14:31:47.000000000 -0400
74642 +++ linux-2.6.32.45/security/capability.c 2011-04-17 15:56:46.000000000 -0400
74643 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
74644 }
74645 #endif /* CONFIG_AUDIT */
74646
74647 -struct security_operations default_security_ops = {
74648 +struct security_operations default_security_ops __read_only = {
74649 .name = "default",
74650 };
74651
74652 diff -urNp linux-2.6.32.45/security/commoncap.c linux-2.6.32.45/security/commoncap.c
74653 --- linux-2.6.32.45/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
74654 +++ linux-2.6.32.45/security/commoncap.c 2011-08-17 19:22:13.000000000 -0400
74655 @@ -27,7 +27,7 @@
74656 #include <linux/sched.h>
74657 #include <linux/prctl.h>
74658 #include <linux/securebits.h>
74659 -
74660 +#include <net/sock.h>
74661 /*
74662 * If a non-root user executes a setuid-root binary in
74663 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
74664 @@ -50,9 +50,18 @@ static void warn_setuid_and_fcaps_mixed(
74665 }
74666 }
74667
74668 +#ifdef CONFIG_NET
74669 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
74670 +#endif
74671 +
74672 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
74673 {
74674 +#ifdef CONFIG_NET
74675 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
74676 +#else
74677 NETLINK_CB(skb).eff_cap = current_cap();
74678 +#endif
74679 +
74680 return 0;
74681 }
74682
74683 @@ -582,6 +591,9 @@ int cap_bprm_secureexec(struct linux_bin
74684 {
74685 const struct cred *cred = current_cred();
74686
74687 + if (gr_acl_enable_at_secure())
74688 + return 1;
74689 +
74690 if (cred->uid != 0) {
74691 if (bprm->cap_effective)
74692 return 1;
74693 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_api.c linux-2.6.32.45/security/integrity/ima/ima_api.c
74694 --- linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
74695 +++ linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
74696 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
74697 int result;
74698
74699 /* can overflow, only indicator */
74700 - atomic_long_inc(&ima_htable.violations);
74701 + atomic_long_inc_unchecked(&ima_htable.violations);
74702
74703 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
74704 if (!entry) {
74705 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_fs.c linux-2.6.32.45/security/integrity/ima/ima_fs.c
74706 --- linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
74707 +++ linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
74708 @@ -27,12 +27,12 @@
74709 static int valid_policy = 1;
74710 #define TMPBUFLEN 12
74711 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
74712 - loff_t *ppos, atomic_long_t *val)
74713 + loff_t *ppos, atomic_long_unchecked_t *val)
74714 {
74715 char tmpbuf[TMPBUFLEN];
74716 ssize_t len;
74717
74718 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
74719 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
74720 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
74721 }
74722
74723 diff -urNp linux-2.6.32.45/security/integrity/ima/ima.h linux-2.6.32.45/security/integrity/ima/ima.h
74724 --- linux-2.6.32.45/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
74725 +++ linux-2.6.32.45/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
74726 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
74727 extern spinlock_t ima_queue_lock;
74728
74729 struct ima_h_table {
74730 - atomic_long_t len; /* number of stored measurements in the list */
74731 - atomic_long_t violations;
74732 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
74733 + atomic_long_unchecked_t violations;
74734 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
74735 };
74736 extern struct ima_h_table ima_htable;
74737 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_queue.c linux-2.6.32.45/security/integrity/ima/ima_queue.c
74738 --- linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
74739 +++ linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
74740 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
74741 INIT_LIST_HEAD(&qe->later);
74742 list_add_tail_rcu(&qe->later, &ima_measurements);
74743
74744 - atomic_long_inc(&ima_htable.len);
74745 + atomic_long_inc_unchecked(&ima_htable.len);
74746 key = ima_hash_key(entry->digest);
74747 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
74748 return 0;
74749 diff -urNp linux-2.6.32.45/security/Kconfig linux-2.6.32.45/security/Kconfig
74750 --- linux-2.6.32.45/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
74751 +++ linux-2.6.32.45/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
74752 @@ -4,6 +4,555 @@
74753
74754 menu "Security options"
74755
74756 +source grsecurity/Kconfig
74757 +
74758 +menu "PaX"
74759 +
74760 + config ARCH_TRACK_EXEC_LIMIT
74761 + bool
74762 +
74763 + config PAX_PER_CPU_PGD
74764 + bool
74765 +
74766 + config TASK_SIZE_MAX_SHIFT
74767 + int
74768 + depends on X86_64
74769 + default 47 if !PAX_PER_CPU_PGD
74770 + default 42 if PAX_PER_CPU_PGD
74771 +
74772 + config PAX_ENABLE_PAE
74773 + bool
74774 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
74775 +
74776 +config PAX
74777 + bool "Enable various PaX features"
74778 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
74779 + help
74780 + This allows you to enable various PaX features. PaX adds
74781 + intrusion prevention mechanisms to the kernel that reduce
74782 + the risks posed by exploitable memory corruption bugs.
74783 +
74784 +menu "PaX Control"
74785 + depends on PAX
74786 +
74787 +config PAX_SOFTMODE
74788 + bool 'Support soft mode'
74789 + select PAX_PT_PAX_FLAGS
74790 + help
74791 + Enabling this option will allow you to run PaX in soft mode, that
74792 + is, PaX features will not be enforced by default, only on executables
74793 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
74794 + is the only way to mark executables for soft mode use.
74795 +
74796 + Soft mode can be activated by using the "pax_softmode=1" kernel command
74797 + line option on boot. Furthermore you can control various PaX features
74798 + at runtime via the entries in /proc/sys/kernel/pax.
74799 +
74800 +config PAX_EI_PAX
74801 + bool 'Use legacy ELF header marking'
74802 + help
74803 + Enabling this option will allow you to control PaX features on
74804 + a per executable basis via the 'chpax' utility available at
74805 + http://pax.grsecurity.net/. The control flags will be read from
74806 + an otherwise reserved part of the ELF header. This marking has
74807 + numerous drawbacks (no support for soft-mode, toolchain does not
74808 + know about the non-standard use of the ELF header); therefore it
74809 + has been deprecated in favour of PT_PAX_FLAGS support.
74810 +
74811 + Note that if you enable PT_PAX_FLAGS marking support as well,
74812 + the PT_PAX_FLAGS marks will override the legacy EI_PAX marks.
74813 +
74814 +config PAX_PT_PAX_FLAGS
74815 + bool 'Use ELF program header marking'
74816 + help
74817 + Enabling this option will allow you to control PaX features on
74818 + a per executable basis via the 'paxctl' utility available at
74819 + http://pax.grsecurity.net/. The control flags will be read from
74820 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
74821 + has the benefits of supporting both soft mode and being fully
74822 + integrated into the toolchain (the binutils patch is available
74823 + from http://pax.grsecurity.net).
74824 +
74825 + If your toolchain does not support PT_PAX_FLAGS markings,
74826 + you can create one in most cases with 'paxctl -C'.
74827 +
74828 + Note that if you enable the legacy EI_PAX marking support as well,
74829 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
74830 +
74831 +choice
74832 + prompt 'MAC system integration'
74833 + default PAX_HAVE_ACL_FLAGS
74834 + help
74835 + Mandatory Access Control systems have the option of controlling
74836 + PaX flags on a per executable basis, choose the method supported
74837 + by your particular system.
74838 +
74839 + - "none": if your MAC system does not interact with PaX,
74840 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
74841 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
74842 +
74843 + NOTE: this option is for developers/integrators only.
74844 +
74845 + config PAX_NO_ACL_FLAGS
74846 + bool 'none'
74847 +
74848 + config PAX_HAVE_ACL_FLAGS
74849 + bool 'direct'
74850 +
74851 + config PAX_HOOK_ACL_FLAGS
74852 + bool 'hook'
74853 +endchoice
74854 +
74855 +endmenu
74856 +
74857 +menu "Non-executable pages"
74858 + depends on PAX
74859 +
74860 +config PAX_NOEXEC
74861 + bool "Enforce non-executable pages"
74862 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
74863 + help
74864 + By design some architectures do not allow for protecting memory
74865 + pages against execution or, even if they do, Linux does not make
74866 + use of this feature. In practice this means that if a page is
74867 + readable (such as the stack or heap) it is also executable.
74868 +
74869 + There is a well known exploit technique that makes use of this
74870 + fact and a common programming mistake where an attacker can
74871 + introduce code of his choice somewhere in the attacked program's
74872 + memory (typically the stack or the heap) and then execute it.
74873 +
74874 + If the attacked program was running with different (typically
74875 + higher) privileges than that of the attacker, then he can elevate
74876 + his own privilege level (e.g. get a root shell, write to files for
74877 + which he does not have write access, etc).
74878 +
74879 + Enabling this option will let you choose from various features
74880 + that prevent the injection and execution of 'foreign' code in
74881 + a program.
74882 +
74883 + This will also break programs that rely on the old behaviour and
74884 + expect that dynamically allocated memory via the malloc() family
74885 + of functions is executable (which it is not). Notable examples
74886 + are the XFree86 4.x server, the java runtime and wine.
74887 +
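
Editorial note: a rough user-space illustration (not part of the patch, and intentionally simplistic) of the "inject and execute" pattern this option is meant to stop. The bytes copied below are just those of a local function; under PAGEEXEC/SEGMEXEC the indirect call faults instead of running the copied code.

#include <stdlib.h>
#include <string.h>

static int gadget(void)
{
        return 42;
}

int main(void)
{
        /* Heap pages are readable and writable but, with NOEXEC
         * enforcement, not executable. */
        unsigned char *buf = malloc(4096);
        if (!buf)
                return 1;

        memcpy(buf, (void *)gadget, 64);        /* "inject" some code   */
        int (*fn)(void) = (int (*)(void))buf;   /* ...and try to run it */
        return fn();                            /* SIGSEGV under PaX    */
}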
74888 +config PAX_PAGEEXEC
74889 + bool "Paging based non-executable pages"
74890 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
74891 + select S390_SWITCH_AMODE if S390
74892 + select S390_EXEC_PROTECT if S390
74893 + select ARCH_TRACK_EXEC_LIMIT if X86_32
74894 + help
74895 + This implementation is based on the paging feature of the CPU.
74896 + On i386 without hardware non-executable bit support there is a
74897 + variable but usually low performance impact; however, on Intel's
74898 + P4 core based CPUs it is very high, so you should not enable this
74899 + for kernels meant to be used on such CPUs.
74900 +
74901 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
74902 + with hardware non-executable bit support there is no performance
74903 + impact; on ppc the impact is negligible.
74904 +
74905 + Note that several architectures require various emulations due to
74906 + badly designed userland ABIs; this will cause a performance impact
74907 + that will disappear as soon as userland is fixed. For example, ppc
74908 + userland MUST have been built with secure-plt by a recent toolchain.
74909 +
74910 +config PAX_SEGMEXEC
74911 + bool "Segmentation based non-executable pages"
74912 + depends on PAX_NOEXEC && X86_32
74913 + help
74914 + This implementation is based on the segmentation feature of the
74915 + CPU and has a very small performance impact, however applications
74916 + will be limited to a 1.5 GB address space instead of the normal
74917 + 3 GB.
74918 +
74919 +config PAX_EMUTRAMP
74920 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
74921 + default y if PARISC
74922 + help
74923 + There are some programs and libraries that for one reason or
74924 + another attempt to execute special small code snippets from
74925 + non-executable memory pages. Most notable examples are the
74926 + signal handler return code generated by the kernel itself and
74927 + the GCC trampolines.
74928 +
74929 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
74930 + such programs will no longer work under your kernel.
74931 +
74932 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
74933 + utilities to enable trampoline emulation for the affected programs
74934 + yet still have the protection provided by the non-executable pages.
74935 +
74936 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
74937 + your system will not even boot.
74938 +
74939 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
74940 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
74941 + for the affected files.
74942 +
74943 + NOTE: enabling this feature *may* open up a loophole in the
74944 + protection provided by non-executable pages that an attacker
74945 + could abuse. Therefore the best solution is to not have any
74946 + files on your system that would require this option. This can
74947 + be achieved by not using libc5 (which relies on the kernel
74948 + signal handler return code) and not using or rewriting programs
74949 + that make use of the nested function implementation of GCC.
74950 + Skilled users can just fix GCC itself so that it implements
74951 + nested function calls in a way that does not interfere with PaX.
74952 +
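
Editorial note: for reference, the GCC nested-function case mentioned in the help text looks like the sketch below (illustration only, not from the patch). Taking the address of a nested function that uses its parent's locals makes GCC build a small trampoline on the stack, i.e. code in otherwise non-executable memory, which is what EMUTRAMP recognizes and emulates.

#include <stdio.h>

static void invoke(void (*cb)(int))
{
        cb(5);
}

int main(void)
{
        int base = 37;

        /* GCC extension: a nested function capturing 'base'. Passing
         * its address forces a trampoline on the stack. */
        void add_and_print(int x)
        {
                printf("%d\n", base + x);
        }

        invoke(add_and_print);
        return 0;
}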
74953 +config PAX_EMUSIGRT
74954 + bool "Automatically emulate sigreturn trampolines"
74955 + depends on PAX_EMUTRAMP && PARISC
74956 + default y
74957 + help
74958 + Enabling this option will have the kernel automatically detect
74959 + and emulate signal return trampolines executing on the stack
74960 + that would otherwise lead to task termination.
74961 +
74962 + This solution is intended as a temporary one for users with
74963 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
74964 + Modula-3 runtime, etc) or executables linked to such, basically
74965 + everything that does not specify its own SA_RESTORER function in
74966 + normal executable memory like glibc 2.1+ does.
74967 +
74968 + On parisc you MUST enable this option, otherwise your system will
74969 + not even boot.
74970 +
74971 + NOTE: this feature cannot be disabled on a per executable basis
74972 + and since it *does* open up a loophole in the protection provided
74973 + by non-executable pages, the best solution is to not have any
74974 + files on your system that would require this option.
74975 +
74976 +config PAX_MPROTECT
74977 + bool "Restrict mprotect()"
74978 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
74979 + help
74980 + Enabling this option will prevent programs from
74981 + - changing the executable status of memory pages that were
74982 + not originally created as executable,
74983 + - making read-only executable pages writable again,
74984 + - creating executable pages from anonymous memory,
74985 + - making read-only-after-relocations (RELRO) data pages writable again.
74986 +
74987 + You should say Y here to complete the protection provided by
74988 + the enforcement of non-executable pages.
74989 +
74990 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
74991 + this feature on a per file basis.
74992 +
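
Editorial note: a minimal user-space sketch (not part of the patch) of the first restriction listed above: memory created without PROT_EXEC cannot later be flipped to executable. On a kernel with PAX_MPROTECT the mprotect() call below is expected to fail (typically with EACCES) rather than succeed.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Created RW, now asking for RX: denied under PAX_MPROTECT. */
        if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
                perror("mprotect");

        munmap(p, 4096);
        return 0;
}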
74993 +config PAX_MPROTECT_COMPAT
74994 + bool "Use legacy/compat protection demoting (read help)"
74995 + depends on PAX_MPROTECT
74996 + default n
74997 + help
74998 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
74999 + by sending the proper error code to the application. For some broken
75000 + userland, this can cause problems with Python or other applications. The
75001 + current implementation however allows for applications like clamav to
75002 + detect if JIT compilation/execution is allowed and to fall back gracefully
75003 + to an interpreter-based mode if it does not. While we encourage everyone
75004 + to use the current implementation as-is and push upstream to fix broken
75005 + userland (note that the RWX logging option can assist with this), in some
75006 + environments this may not be possible. Having to disable MPROTECT
75007 + completely on certain binaries reduces the security benefit of PaX,
75008 + so this option is provided for those environments to revert to the old
75009 + behavior.
75010 +
75011 +config PAX_ELFRELOCS
75012 + bool "Allow ELF text relocations (read help)"
75013 + depends on PAX_MPROTECT
75014 + default n
75015 + help
75016 + Non-executable pages and mprotect() restrictions are effective
75017 + in preventing the introduction of new executable code into an
75018 + attacked task's address space. There remain only two avenues
75019 + for this kind of attack: if the attacker can execute already
75020 + existing code in the attacked task then he can either have it
75021 + create and mmap() a file containing his code or have it mmap()
75022 + an already existing ELF library that does not have position
75023 + independent code in it and use mprotect() on it to make it
75024 + writable and copy his code there. While protecting against
75025 + the former approach is beyond PaX, the latter can be prevented
75026 + by having only PIC ELF libraries on one's system (which do not
75027 + need to relocate their code). If you are sure this is your case,
75028 + as is the case with all modern Linux distributions, then leave
75029 + this option disabled. You should say 'n' here.
75030 +
75031 +config PAX_ETEXECRELOCS
75032 + bool "Allow ELF ET_EXEC text relocations"
75033 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
75034 + select PAX_ELFRELOCS
75035 + default y
75036 + help
75037 + On some architectures there are incorrectly created applications
75038 + that require text relocations and would not work without enabling
75039 + this option. If you are an alpha, ia64 or parisc user, you should
75040 + enable this option and disable it once you have made sure that
75041 + none of your applications need it.
75042 +
75043 +config PAX_EMUPLT
75044 + bool "Automatically emulate ELF PLT"
75045 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
75046 + default y
75047 + help
75048 + Enabling this option will have the kernel automatically detect
75049 + and emulate the Procedure Linkage Table entries in ELF files.
75050 + On some architectures such entries are in writable memory, and
75051 + thus become non-executable, leading to task termination. Therefore
75052 + it is mandatory that you enable this option on alpha, parisc,
75053 + sparc and sparc64, otherwise your system would not even boot.
75054 +
75055 + NOTE: this feature *does* open up a loophole in the protection
75056 + provided by the non-executable pages, therefore the proper
75057 + solution is to modify the toolchain to produce a PLT that does
75058 + not need to be writable.
75059 +
75060 +config PAX_DLRESOLVE
75061 + bool 'Emulate old glibc resolver stub'
75062 + depends on PAX_EMUPLT && SPARC
75063 + default n
75064 + help
75065 + This option is needed if userland has an old glibc (before 2.4)
75066 + that puts a 'save' instruction into the runtime generated resolver
75067 + stub that needs special emulation.
75068 +
75069 +config PAX_KERNEXEC
75070 + bool "Enforce non-executable kernel pages"
75071 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
75072 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
75073 + help
75074 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
75075 + that is, enabling this option will make it harder to inject
75076 + and execute 'foreign' code in kernel memory itself.
75077 +
75078 + Note that on x86_64 kernels there is a known regression when
75079 + this feature and KVM/VMX are both enabled in the host kernel.
75080 +
75081 +config PAX_KERNEXEC_MODULE_TEXT
75082 + int "Minimum amount of memory reserved for module code"
75083 + default "4"
75084 + depends on PAX_KERNEXEC && X86_32 && MODULES
75085 + help
75086 + Due to implementation details the kernel must reserve a fixed
75087 + amount of memory for module code at compile time that cannot be
75088 + changed at runtime. Here you can specify the minimum amount
75089 + in MB that will be reserved. Due to the same implementation
75090 + details this size will always be rounded up to the next 2/4 MB
75091 + boundary (depends on PAE) so the actually available memory for
75092 + module code will usually be more than this minimum.
75093 +
75094 + The default 4 MB should be enough for most users but if you have
75095 + an excessive number of modules (e.g., most distribution configs
75096 + compile many drivers as modules) or use huge modules such as
75097 + nvidia's kernel driver, you will need to adjust this amount.
75098 + A good rule of thumb is to look at your currently loaded kernel
75099 + modules and add up their sizes.
75100 +
75101 +endmenu
75102 +
75103 +menu "Address Space Layout Randomization"
75104 + depends on PAX
75105 +
75106 +config PAX_ASLR
75107 + bool "Address Space Layout Randomization"
75108 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
75109 + help
75110 + Many if not most exploit techniques rely on the knowledge of
75111 + certain addresses in the attacked program. The following options
75112 + will allow the kernel to apply a certain amount of randomization
75113 + to specific parts of the program thereby forcing an attacker to
75114 + guess them in most cases. Any failed guess will most likely crash
75115 + the attacked program which allows the kernel to detect such attempts
75116 + and react to them. PaX itself provides no reaction mechanisms;
75117 + instead it is strongly encouraged that you make use of Nergal's
75118 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
75119 + (http://www.grsecurity.net/) built-in crash detection features or
75120 + develop one yourself.
75121 +
75122 + By saying Y here you can choose to randomize the following areas:
75123 + - top of the task's kernel stack
75124 + - top of the task's userland stack
75125 + - base address for mmap() requests that do not specify one
75126 + (this includes all libraries)
75127 + - base address of the main executable
75128 +
75129 + It is strongly recommended to say Y here as address space layout
75130 + randomization has negligible impact on performance yet it provides
75131 + a very effective protection.
75132 +
75133 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75134 + this feature on a per file basis.
75135 +
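
Editorial note: a quick way to observe the userland side of this is to print a few addresses and compare them across runs; the stack and heap/mmap addresses change between runs, and the text address as well for ET_DYN/PIE builds. A stand-alone example (not from the patch):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int on_stack;
        void *on_heap = malloc(16);

        printf("stack : %p\n", (void *)&on_stack);
        printf("heap  : %p\n", on_heap);        /* follows the mmap/brk base      */
        printf("text  : %p\n", (void *)&main);  /* randomized only for ET_DYN/PIE */

        free(on_heap);
        return 0;
}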
75136 +config PAX_RANDKSTACK
75137 + bool "Randomize kernel stack base"
75138 + depends on PAX_ASLR && X86_TSC && X86
75139 + help
75140 + By saying Y here the kernel will randomize every task's kernel
75141 + stack on every system call. This will not only force an attacker
75142 + to guess it but also prevent him from making use of possible
75143 + leaked information about it.
75144 +
75145 + Since the kernel stack is a rather scarce resource, randomization
75146 + may cause unexpected stack overflows, therefore you should very
75147 + carefully test your system. Note that once enabled in the kernel
75148 + configuration, this feature cannot be disabled on a per file basis.
75149 +
75150 +config PAX_RANDUSTACK
75151 + bool "Randomize user stack base"
75152 + depends on PAX_ASLR
75153 + help
75154 + By saying Y here the kernel will randomize every task's userland
75155 + stack. The randomization is done in two steps where the second
75156 + one may apply a large shift to the top of the stack and
75157 + cause problems for programs that want to use lots of memory (more
75158 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
75159 + For this reason the second step can be controlled by 'chpax' or
75160 + 'paxctl' on a per file basis.
75161 +
75162 +config PAX_RANDMMAP
75163 + bool "Randomize mmap() base"
75164 + depends on PAX_ASLR
75165 + help
75166 + By saying Y here the kernel will use a randomized base address for
75167 + mmap() requests that do not specify one themselves. As a result
75168 + all dynamically loaded libraries will appear at random addresses
75169 + and therefore be harder to exploit by a technique where an attacker
75170 + attempts to execute library code for his purposes (e.g. spawn a
75171 + shell from an exploited program that is running at an elevated
75172 + privilege level).
75173 +
75174 + Furthermore, if a program is relinked as a dynamic ELF file, its
75175 + base address will be randomized as well, completing the full
75176 + randomization of the address space layout. Attacking such programs
75177 + becomes a guessing game. You can find an example of doing this at
75178 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
75179 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
75180 +
75181 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
75182 + feature on a per file basis.
75183 +
75184 +endmenu
75185 +
75186 +menu "Miscellaneous hardening features"
75187 +
75188 +config PAX_MEMORY_SANITIZE
75189 + bool "Sanitize all freed memory"
75190 + help
75191 + By saying Y here the kernel will erase memory pages as soon as they
75192 + are freed. This in turn reduces the lifetime of data stored in the
75193 + pages, making it less likely that sensitive information such as
75194 + passwords, cryptographic secrets, etc stay in memory for too long.
75195 +
75196 + This is especially useful for programs whose runtime is short; long-
75197 + lived processes and the kernel itself benefit from this as long as
75198 + they operate on whole memory pages and ensure timely freeing of pages
75199 + that may hold sensitive information.
75200 +
75201 + The tradeoff is a performance impact: on a single-CPU system, kernel
75202 + compilation sees a 3% slowdown; other systems and workloads may vary,
75203 + and you are advised to test this feature on your expected workload
75204 + before deploying it.
75205 +
75206 + Note that this feature does not protect data stored in live pages,
75207 + e.g., process memory swapped to disk may stay there for a long time.
75208 +
75209 +config PAX_MEMORY_STACKLEAK
75210 + bool "Sanitize kernel stack"
75211 + depends on X86
75212 + help
75213 + By saying Y here the kernel will erase the kernel stack before it
75214 + returns from a system call. This in turn reduces the information
75215 + that a kernel stack leak bug can reveal.
75216 +
75217 + Note that such a bug can still leak information that was put on
75218 + the stack by the current system call (the one eventually triggering
75219 + the bug) but traces of earlier system calls on the kernel stack
75220 + cannot leak anymore.
75221 +
75222 + The tradeoff is a performance impact: on a single-CPU system, kernel
75223 + compilation sees a 1% slowdown; other systems and workloads may vary,
75224 + and you are advised to test this feature on your expected workload
75225 + before deploying it.
75226 +
75227 + Note: full support for this feature requires gcc with plugin support
75228 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
75229 + is not supported). Using older gcc versions means that functions
75230 + with large enough stack frames may leave uninitialized memory behind
75231 + that may be exposed to a later syscall leaking the stack.
75232 +
75233 +config PAX_MEMORY_UDEREF
75234 + bool "Prevent invalid userland pointer dereference"
75235 + depends on X86 && !UML_X86 && !XEN
75236 + select PAX_PER_CPU_PGD if X86_64
75237 + help
75238 + By saying Y here the kernel will be prevented from dereferencing
75239 + userland pointers in contexts where the kernel expects only kernel
75240 + pointers. This is both a useful runtime debugging feature and a
75241 + security measure that prevents exploiting a class of kernel bugs.
75242 +
75243 + The tradeoff is that some virtualization solutions may experience
75244 + a huge slowdown and therefore you should not enable this feature
75245 + for kernels meant to run in such environments. Whether a given VM
75246 + solution is affected or not is best determined by simply trying it
75247 + out; the performance impact will be obvious right on boot as this
75248 + mechanism engages from very early on. A good rule of thumb is that
75249 + VMs running on CPUs without hardware virtualization support (i.e.,
75250 + the majority of IA-32 CPUs) will likely experience the slowdown.
75251 +
75252 +config PAX_REFCOUNT
75253 + bool "Prevent various kernel object reference counter overflows"
75254 + depends on GRKERNSEC && (X86 || SPARC64)
75255 + help
75256 + By saying Y here the kernel will detect and prevent overflowing
75257 + various (but not all) kinds of object reference counters. Such
75258 + overflows can normally occur due to bugs only and are often, if
75259 + not always, exploitable.
75260 +
75261 + The tradeoff is that data structures protected by an overflowed
75262 + refcount will never be freed and therefore will leak memory. Note
75263 + that this leak also happens even without this protection but in
75264 + that case the overflow can eventually trigger the freeing of the
75265 + data structure while it is still being used elsewhere, resulting
75266 + in the exploitable situation that this feature prevents.
75267 +
75268 + Since this has a negligible performance impact, you should enable
75269 + this feature.
75270 +
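
Editorial note: the bug class described above, sketched in kernel-style C (demo_obj and the helpers are invented, not from the patch). A plain atomic_t reference count that an attacker can increment 2^31 times wraps around, after which a put can free the object while it is still in use; with PAX_REFCOUNT the overflow is detected and the object is kept alive (leaked) instead, as the help text notes.

#include <linux/slab.h>
#include <asm/atomic.h>

struct demo_obj {
        atomic_t refs;
        char payload[64];
};

static void demo_get(struct demo_obj *o)
{
        /* 2^31 of these wraps a plain atomic_t past INT_MAX... */
        atomic_inc(&o->refs);
}

static void demo_put(struct demo_obj *o)
{
        /* ...after which this can reach zero while users remain and
         * free the object under them -- the exploitable situation
         * this option is meant to prevent. */
        if (atomic_dec_and_test(&o->refs))
                kfree(o);
}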
75271 +config PAX_USERCOPY
75272 + bool "Harden heap object copies between kernel and userland"
75273 + depends on X86 || PPC || SPARC || ARM
75274 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
75275 + help
75276 + By saying Y here the kernel will enforce the size of heap objects
75277 + when they are copied in either direction between the kernel and
75278 + userland, even if only a part of the heap object is copied.
75279 +
75280 + Specifically, this checking prevents information leaking from the
75281 + kernel heap during kernel to userland copies (if the kernel heap
75282 + object is otherwise fully initialized) and prevents kernel heap
75283 + overflows during userland to kernel copies.
75284 +
75285 + Note that the current implementation provides the strictest bounds
75286 + checks for the SLUB allocator.
75287 +
75288 + Enabling this option also enables per-slab cache protection against
75289 + data in a given cache being copied to or from userland via the
75290 + usercopy accessors. Though the whitelist of regions will be reduced over
75291 + time, it notably protects important data structures like task structs.
75292 +
75293 +
75294 + If frame pointers are enabled on x86, this option will also
75295 + restrict copies into and out of the kernel stack to local variables
75296 + within a single frame.
75297 +
75298 + Since this has a negligible performance impact, you should enable
75299 + this feature.
75300 +
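
Editorial note: a kernel-style sketch of the check described above (demo_copy, its caller and the 32-byte object are hypothetical, not from the patch). With PAX_USERCOPY the copy length coming from userland is validated against the size of the heap object, so an over-long request is refused instead of leaking adjacent slab memory.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long demo_copy(void __user *ubuf, size_t len)
{
        char *obj = kmalloc(32, GFP_KERNEL);
        long ret = 0;

        if (!obj)
                return -ENOMEM;
        memset(obj, 0, 32);

        /* Without USERCOPY, len > 32 walks off the end of the slab
         * object; with it, the oversized copy is detected and refused. */
        if (copy_to_user(ubuf, obj, len))
                ret = -EFAULT;

        kfree(obj);
        return ret;
}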
75301 +endmenu
75302 +
75303 +endmenu
75304 +
75305 config KEYS
75306 bool "Enable access key retention support"
75307 help
75308 @@ -146,7 +695,7 @@ config INTEL_TXT
75309 config LSM_MMAP_MIN_ADDR
75310 int "Low address space for LSM to protect from user allocation"
75311 depends on SECURITY && SECURITY_SELINUX
75312 - default 65536
75313 + default 32768
75314 help
75315 This is the portion of low virtual memory which should be protected
75316 from userspace allocation. Keeping a user from writing to low pages
75317 diff -urNp linux-2.6.32.45/security/keys/keyring.c linux-2.6.32.45/security/keys/keyring.c
75318 --- linux-2.6.32.45/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
75319 +++ linux-2.6.32.45/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
75320 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
75321 ret = -EFAULT;
75322
75323 for (loop = 0; loop < klist->nkeys; loop++) {
75324 + key_serial_t serial;
75325 key = klist->keys[loop];
75326 + serial = key->serial;
75327
75328 tmp = sizeof(key_serial_t);
75329 if (tmp > buflen)
75330 tmp = buflen;
75331
75332 - if (copy_to_user(buffer,
75333 - &key->serial,
75334 - tmp) != 0)
75335 + if (copy_to_user(buffer, &serial, tmp))
75336 goto error;
75337
75338 buflen -= tmp;
75339 diff -urNp linux-2.6.32.45/security/min_addr.c linux-2.6.32.45/security/min_addr.c
75340 --- linux-2.6.32.45/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
75341 +++ linux-2.6.32.45/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
75342 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
75343 */
75344 static void update_mmap_min_addr(void)
75345 {
75346 +#ifndef SPARC
75347 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
75348 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
75349 mmap_min_addr = dac_mmap_min_addr;
75350 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
75351 #else
75352 mmap_min_addr = dac_mmap_min_addr;
75353 #endif
75354 +#endif
75355 }
75356
75357 /*
75358 diff -urNp linux-2.6.32.45/security/root_plug.c linux-2.6.32.45/security/root_plug.c
75359 --- linux-2.6.32.45/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
75360 +++ linux-2.6.32.45/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
75361 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
75362 return 0;
75363 }
75364
75365 -static struct security_operations rootplug_security_ops = {
75366 +static struct security_operations rootplug_security_ops __read_only = {
75367 .bprm_check_security = rootplug_bprm_check_security,
75368 };
75369
75370 diff -urNp linux-2.6.32.45/security/security.c linux-2.6.32.45/security/security.c
75371 --- linux-2.6.32.45/security/security.c 2011-03-27 14:31:47.000000000 -0400
75372 +++ linux-2.6.32.45/security/security.c 2011-04-17 15:56:46.000000000 -0400
75373 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
75374 extern struct security_operations default_security_ops;
75375 extern void security_fixup_ops(struct security_operations *ops);
75376
75377 -struct security_operations *security_ops; /* Initialized to NULL */
75378 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
75379
75380 static inline int verify(struct security_operations *ops)
75381 {
75382 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
75383 * If there is already a security module registered with the kernel,
75384 * an error will be returned. Otherwise %0 is returned on success.
75385 */
75386 -int register_security(struct security_operations *ops)
75387 +int __init register_security(struct security_operations *ops)
75388 {
75389 if (verify(ops)) {
75390 printk(KERN_DEBUG "%s could not verify "
75391 diff -urNp linux-2.6.32.45/security/selinux/hooks.c linux-2.6.32.45/security/selinux/hooks.c
75392 --- linux-2.6.32.45/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
75393 +++ linux-2.6.32.45/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
75394 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
75395 * Minimal support for a secondary security module,
75396 * just to allow the use of the capability module.
75397 */
75398 -static struct security_operations *secondary_ops;
75399 +static struct security_operations *secondary_ops __read_only;
75400
75401 /* Lists of inode and superblock security structures initialized
75402 before the policy was loaded. */
75403 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
75404
75405 #endif
75406
75407 -static struct security_operations selinux_ops = {
75408 +static struct security_operations selinux_ops __read_only = {
75409 .name = "selinux",
75410
75411 .ptrace_access_check = selinux_ptrace_access_check,
75412 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
75413 avc_disable();
75414
75415 /* Reset security_ops to the secondary module, dummy or capability. */
75416 + pax_open_kernel();
75417 security_ops = secondary_ops;
75418 + pax_close_kernel();
75419
75420 /* Unregister netfilter hooks. */
75421 selinux_nf_ip_exit();
75422 diff -urNp linux-2.6.32.45/security/selinux/include/xfrm.h linux-2.6.32.45/security/selinux/include/xfrm.h
75423 --- linux-2.6.32.45/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
75424 +++ linux-2.6.32.45/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
75425 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
75426
75427 static inline void selinux_xfrm_notify_policyload(void)
75428 {
75429 - atomic_inc(&flow_cache_genid);
75430 + atomic_inc_unchecked(&flow_cache_genid);
75431 }
75432 #else
75433 static inline int selinux_xfrm_enabled(void)
75434 diff -urNp linux-2.6.32.45/security/selinux/ss/services.c linux-2.6.32.45/security/selinux/ss/services.c
75435 --- linux-2.6.32.45/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
75436 +++ linux-2.6.32.45/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
75437 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
75438 int rc = 0;
75439 struct policy_file file = { data, len }, *fp = &file;
75440
75441 + pax_track_stack();
75442 +
75443 if (!ss_initialized) {
75444 avtab_cache_init();
75445 if (policydb_read(&policydb, fp)) {
75446 diff -urNp linux-2.6.32.45/security/smack/smack_lsm.c linux-2.6.32.45/security/smack/smack_lsm.c
75447 --- linux-2.6.32.45/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
75448 +++ linux-2.6.32.45/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
75449 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
75450 return 0;
75451 }
75452
75453 -struct security_operations smack_ops = {
75454 +struct security_operations smack_ops __read_only = {
75455 .name = "smack",
75456
75457 .ptrace_access_check = smack_ptrace_access_check,
75458 diff -urNp linux-2.6.32.45/security/tomoyo/tomoyo.c linux-2.6.32.45/security/tomoyo/tomoyo.c
75459 --- linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
75460 +++ linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
75461 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
75462 * tomoyo_security_ops is a "struct security_operations" which is used for
75463 * registering TOMOYO.
75464 */
75465 -static struct security_operations tomoyo_security_ops = {
75466 +static struct security_operations tomoyo_security_ops __read_only = {
75467 .name = "tomoyo",
75468 .cred_alloc_blank = tomoyo_cred_alloc_blank,
75469 .cred_prepare = tomoyo_cred_prepare,
75470 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.c linux-2.6.32.45/sound/aoa/codecs/onyx.c
75471 --- linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
75472 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
75473 @@ -53,7 +53,7 @@ struct onyx {
75474 spdif_locked:1,
75475 analog_locked:1,
75476 original_mute:2;
75477 - int open_count;
75478 + local_t open_count;
75479 struct codec_info *codec_info;
75480
75481 /* mutex serializes concurrent access to the device
75482 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
75483 struct onyx *onyx = cii->codec_data;
75484
75485 mutex_lock(&onyx->mutex);
75486 - onyx->open_count++;
75487 + local_inc(&onyx->open_count);
75488 mutex_unlock(&onyx->mutex);
75489
75490 return 0;
75491 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
75492 struct onyx *onyx = cii->codec_data;
75493
75494 mutex_lock(&onyx->mutex);
75495 - onyx->open_count--;
75496 - if (!onyx->open_count)
75497 + if (local_dec_and_test(&onyx->open_count))
75498 onyx->spdif_locked = onyx->analog_locked = 0;
75499 mutex_unlock(&onyx->mutex);
75500
75501 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.h linux-2.6.32.45/sound/aoa/codecs/onyx.h
75502 --- linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
75503 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
75504 @@ -11,6 +11,7 @@
75505 #include <linux/i2c.h>
75506 #include <asm/pmac_low_i2c.h>
75507 #include <asm/prom.h>
75508 +#include <asm/local.h>
75509
75510 /* PCM3052 register definitions */
75511
75512 diff -urNp linux-2.6.32.45/sound/core/seq/seq_device.c linux-2.6.32.45/sound/core/seq/seq_device.c
75513 --- linux-2.6.32.45/sound/core/seq/seq_device.c 2011-03-27 14:31:47.000000000 -0400
75514 +++ linux-2.6.32.45/sound/core/seq/seq_device.c 2011-08-05 20:33:55.000000000 -0400
75515 @@ -63,7 +63,7 @@ struct ops_list {
75516 int argsize; /* argument size */
75517
75518 /* operators */
75519 - struct snd_seq_dev_ops ops;
75520 + struct snd_seq_dev_ops *ops;
75521
75522 /* registred devices */
75523 struct list_head dev_list; /* list of devices */
75524 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
75525
75526 mutex_lock(&ops->reg_mutex);
75527 /* copy driver operators */
75528 - ops->ops = *entry;
75529 + ops->ops = entry;
75530 ops->driver |= DRIVER_LOADED;
75531 ops->argsize = argsize;
75532
75533 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
75534 dev->name, ops->id, ops->argsize, dev->argsize);
75535 return -EINVAL;
75536 }
75537 - if (ops->ops.init_device(dev) >= 0) {
75538 + if (ops->ops->init_device(dev) >= 0) {
75539 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
75540 ops->num_init_devices++;
75541 } else {
75542 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
75543 dev->name, ops->id, ops->argsize, dev->argsize);
75544 return -EINVAL;
75545 }
75546 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
75547 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
75548 dev->status = SNDRV_SEQ_DEVICE_FREE;
75549 dev->driver_data = NULL;
75550 ops->num_init_devices--;
75551 diff -urNp linux-2.6.32.45/sound/drivers/mts64.c linux-2.6.32.45/sound/drivers/mts64.c
75552 --- linux-2.6.32.45/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
75553 +++ linux-2.6.32.45/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
75554 @@ -27,6 +27,7 @@
75555 #include <sound/initval.h>
75556 #include <sound/rawmidi.h>
75557 #include <sound/control.h>
75558 +#include <asm/local.h>
75559
75560 #define CARD_NAME "Miditerminal 4140"
75561 #define DRIVER_NAME "MTS64"
75562 @@ -65,7 +66,7 @@ struct mts64 {
75563 struct pardevice *pardev;
75564 int pardev_claimed;
75565
75566 - int open_count;
75567 + local_t open_count;
75568 int current_midi_output_port;
75569 int current_midi_input_port;
75570 u8 mode[MTS64_NUM_INPUT_PORTS];
75571 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
75572 {
75573 struct mts64 *mts = substream->rmidi->private_data;
75574
75575 - if (mts->open_count == 0) {
75576 + if (local_read(&mts->open_count) == 0) {
75577 /* We don't need a spinlock here, because this is just called
75578 if the device has not been opened before.
75579 So there aren't any IRQs from the device */
75580 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
75581
75582 msleep(50);
75583 }
75584 - ++(mts->open_count);
75585 + local_inc(&mts->open_count);
75586
75587 return 0;
75588 }
75589 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
75590 struct mts64 *mts = substream->rmidi->private_data;
75591 unsigned long flags;
75592
75593 - --(mts->open_count);
75594 - if (mts->open_count == 0) {
75595 + if (local_dec_return(&mts->open_count) == 0) {
75596 /* We need the spinlock_irqsave here because we can still
75597 have IRQs at this point */
75598 spin_lock_irqsave(&mts->lock, flags);
75599 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
75600
75601 msleep(500);
75602
75603 - } else if (mts->open_count < 0)
75604 - mts->open_count = 0;
75605 + } else if (local_read(&mts->open_count) < 0)
75606 + local_set(&mts->open_count, 0);
75607
75608 return 0;
75609 }
75610 diff -urNp linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c
75611 --- linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-03-27 14:31:47.000000000 -0400
75612 +++ linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:33:55.000000000 -0400
75613 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
75614 MODULE_DESCRIPTION("OPL4 driver");
75615 MODULE_LICENSE("GPL");
75616
75617 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
75618 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
75619 {
75620 int timeout = 10;
75621 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
75622 diff -urNp linux-2.6.32.45/sound/drivers/portman2x4.c linux-2.6.32.45/sound/drivers/portman2x4.c
75623 --- linux-2.6.32.45/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
75624 +++ linux-2.6.32.45/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
75625 @@ -46,6 +46,7 @@
75626 #include <sound/initval.h>
75627 #include <sound/rawmidi.h>
75628 #include <sound/control.h>
75629 +#include <asm/local.h>
75630
75631 #define CARD_NAME "Portman 2x4"
75632 #define DRIVER_NAME "portman"
75633 @@ -83,7 +84,7 @@ struct portman {
75634 struct pardevice *pardev;
75635 int pardev_claimed;
75636
75637 - int open_count;
75638 + local_t open_count;
75639 int mode[PORTMAN_NUM_INPUT_PORTS];
75640 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
75641 };
75642 diff -urNp linux-2.6.32.45/sound/isa/cmi8330.c linux-2.6.32.45/sound/isa/cmi8330.c
75643 --- linux-2.6.32.45/sound/isa/cmi8330.c 2011-03-27 14:31:47.000000000 -0400
75644 +++ linux-2.6.32.45/sound/isa/cmi8330.c 2011-08-05 20:33:55.000000000 -0400
75645 @@ -455,16 +455,16 @@ static int __devinit snd_cmi8330_pcm(str
75646
75647 /* SB16 */
75648 ops = snd_sb16dsp_get_pcm_ops(CMI_SB_STREAM);
75649 - chip->streams[CMI_SB_STREAM].ops = *ops;
75650 + memcpy((void *)&chip->streams[CMI_SB_STREAM].ops, ops, sizeof(*ops));
75651 chip->streams[CMI_SB_STREAM].open = ops->open;
75652 - chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
75653 + *(void **)&chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
75654 chip->streams[CMI_SB_STREAM].private_data = chip->sb;
75655
75656 /* AD1848 */
75657 ops = snd_wss_get_pcm_ops(CMI_AD_STREAM);
75658 - chip->streams[CMI_AD_STREAM].ops = *ops;
75659 + memcpy((void *)&chip->streams[CMI_AD_STREAM].ops, ops, sizeof(*ops));
75660 chip->streams[CMI_AD_STREAM].open = ops->open;
75661 - chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
75662 + *(void **)&chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
75663 chip->streams[CMI_AD_STREAM].private_data = chip->wss;
75664
75665 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK].ops);
75666 diff -urNp linux-2.6.32.45/sound/oss/sb_audio.c linux-2.6.32.45/sound/oss/sb_audio.c
75667 --- linux-2.6.32.45/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
75668 +++ linux-2.6.32.45/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
75669 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
75670 buf16 = (signed short *)(localbuf + localoffs);
75671 while (c)
75672 {
75673 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75674 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75675 if (copy_from_user(lbuf8,
75676 userbuf+useroffs + p,
75677 locallen))
75678 diff -urNp linux-2.6.32.45/sound/oss/swarm_cs4297a.c linux-2.6.32.45/sound/oss/swarm_cs4297a.c
75679 --- linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
75680 +++ linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
75681 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
75682 {
75683 struct cs4297a_state *s;
75684 u32 pwr, id;
75685 - mm_segment_t fs;
75686 int rval;
75687 #ifndef CONFIG_BCM_CS4297A_CSWARM
75688 u64 cfg;
75689 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
75690 if (!rval) {
75691 char *sb1250_duart_present;
75692
75693 +#if 0
75694 + mm_segment_t fs;
75695 fs = get_fs();
75696 set_fs(KERNEL_DS);
75697 -#if 0
75698 val = SOUND_MASK_LINE;
75699 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
75700 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
75701 val = initvol[i].vol;
75702 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
75703 }
75704 + set_fs(fs);
75705 // cs4297a_write_ac97(s, 0x18, 0x0808);
75706 #else
75707 // cs4297a_write_ac97(s, 0x5e, 0x180);
75708 cs4297a_write_ac97(s, 0x02, 0x0808);
75709 cs4297a_write_ac97(s, 0x18, 0x0808);
75710 #endif
75711 - set_fs(fs);
75712
75713 list_add(&s->list, &cs4297a_devs);
75714
75715 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_codec.c linux-2.6.32.45/sound/pci/ac97/ac97_codec.c
75716 --- linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
75717 +++ linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
75718 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
75719 }
75720
75721 /* build_ops to do nothing */
75722 -static struct snd_ac97_build_ops null_build_ops;
75723 +static const struct snd_ac97_build_ops null_build_ops;
75724
75725 #ifdef CONFIG_SND_AC97_POWER_SAVE
75726 static void do_update_power(struct work_struct *work)
75727 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_patch.c linux-2.6.32.45/sound/pci/ac97/ac97_patch.c
75728 --- linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
75729 +++ linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
75730 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
75731 return 0;
75732 }
75733
75734 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75735 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75736 .build_spdif = patch_yamaha_ymf743_build_spdif,
75737 .build_3d = patch_yamaha_ymf7x3_3d,
75738 };
75739 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
75740 return 0;
75741 }
75742
75743 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75744 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75745 .build_3d = patch_yamaha_ymf7x3_3d,
75746 .build_post_spdif = patch_yamaha_ymf753_post_spdif
75747 };
75748 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
75749 return 0;
75750 }
75751
75752 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75753 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75754 .build_specific = patch_wolfson_wm9703_specific,
75755 };
75756
75757 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
75758 return 0;
75759 }
75760
75761 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75762 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75763 .build_specific = patch_wolfson_wm9704_specific,
75764 };
75765
75766 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
75767 return 0;
75768 }
75769
75770 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75771 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75772 .build_specific = patch_wolfson_wm9705_specific,
75773 };
75774
75775 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
75776 return 0;
75777 }
75778
75779 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75780 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75781 .build_specific = patch_wolfson_wm9711_specific,
75782 };
75783
75784 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
75785 }
75786 #endif
75787
75788 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75789 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75790 .build_specific = patch_wolfson_wm9713_specific,
75791 .build_3d = patch_wolfson_wm9713_3d,
75792 #ifdef CONFIG_PM
75793 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
75794 return 0;
75795 }
75796
75797 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75798 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75799 .build_3d = patch_sigmatel_stac9700_3d,
75800 .build_specific = patch_sigmatel_stac97xx_specific
75801 };
75802 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
75803 return patch_sigmatel_stac97xx_specific(ac97);
75804 }
75805
75806 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75807 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75808 .build_3d = patch_sigmatel_stac9708_3d,
75809 .build_specific = patch_sigmatel_stac9708_specific
75810 };
75811 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
75812 return 0;
75813 }
75814
75815 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75816 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75817 .build_3d = patch_sigmatel_stac9700_3d,
75818 .build_specific = patch_sigmatel_stac9758_specific
75819 };
75820 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
75821 return 0;
75822 }
75823
75824 -static struct snd_ac97_build_ops patch_cirrus_ops = {
75825 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
75826 .build_spdif = patch_cirrus_build_spdif
75827 };
75828
75829 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
75830 return 0;
75831 }
75832
75833 -static struct snd_ac97_build_ops patch_conexant_ops = {
75834 +static const struct snd_ac97_build_ops patch_conexant_ops = {
75835 .build_spdif = patch_conexant_build_spdif
75836 };
75837
75838 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
75839 }
75840 }
75841
75842 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
75843 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
75844 #ifdef CONFIG_PM
75845 .resume = ad18xx_resume
75846 #endif
75847 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
75848 return 0;
75849 }
75850
75851 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
75852 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
75853 .build_specific = &patch_ad1885_specific,
75854 #ifdef CONFIG_PM
75855 .resume = ad18xx_resume
75856 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
75857 return 0;
75858 }
75859
75860 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
75861 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
75862 .build_specific = &patch_ad1886_specific,
75863 #ifdef CONFIG_PM
75864 .resume = ad18xx_resume
75865 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
75866 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75867 }
75868
75869 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75870 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75871 .build_post_spdif = patch_ad198x_post_spdif,
75872 .build_specific = patch_ad1981a_specific,
75873 #ifdef CONFIG_PM
75874 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
75875 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75876 }
75877
75878 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75879 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75880 .build_post_spdif = patch_ad198x_post_spdif,
75881 .build_specific = patch_ad1981b_specific,
75882 #ifdef CONFIG_PM
75883 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
75884 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
75885 }
75886
75887 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
75888 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
75889 .build_post_spdif = patch_ad198x_post_spdif,
75890 .build_specific = patch_ad1888_specific,
75891 #ifdef CONFIG_PM
75892 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
75893 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
75894 }
75895
75896 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
75897 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
75898 .build_post_spdif = patch_ad198x_post_spdif,
75899 .build_specific = patch_ad1980_specific,
75900 #ifdef CONFIG_PM
75901 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
75902 ARRAY_SIZE(snd_ac97_ad1985_controls));
75903 }
75904
75905 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
75906 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
75907 .build_post_spdif = patch_ad198x_post_spdif,
75908 .build_specific = patch_ad1985_specific,
75909 #ifdef CONFIG_PM
75910 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
75911 ARRAY_SIZE(snd_ac97_ad1985_controls));
75912 }
75913
75914 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
75915 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
75916 .build_post_spdif = patch_ad198x_post_spdif,
75917 .build_specific = patch_ad1986_specific,
75918 #ifdef CONFIG_PM
75919 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
75920 return 0;
75921 }
75922
75923 -static struct snd_ac97_build_ops patch_alc650_ops = {
75924 +static const struct snd_ac97_build_ops patch_alc650_ops = {
75925 .build_specific = patch_alc650_specific,
75926 .update_jacks = alc650_update_jacks
75927 };
75928 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
75929 return 0;
75930 }
75931
75932 -static struct snd_ac97_build_ops patch_alc655_ops = {
75933 +static const struct snd_ac97_build_ops patch_alc655_ops = {
75934 .build_specific = patch_alc655_specific,
75935 .update_jacks = alc655_update_jacks
75936 };
75937 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
75938 return 0;
75939 }
75940
75941 -static struct snd_ac97_build_ops patch_alc850_ops = {
75942 +static const struct snd_ac97_build_ops patch_alc850_ops = {
75943 .build_specific = patch_alc850_specific,
75944 .update_jacks = alc850_update_jacks
75945 };
75946 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
75947 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
75948 }
75949
75950 -static struct snd_ac97_build_ops patch_cm9738_ops = {
75951 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
75952 .build_specific = patch_cm9738_specific,
75953 .update_jacks = cm9738_update_jacks
75954 };
75955 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
75956 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
75957 }
75958
75959 -static struct snd_ac97_build_ops patch_cm9739_ops = {
75960 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
75961 .build_specific = patch_cm9739_specific,
75962 .build_post_spdif = patch_cm9739_post_spdif,
75963 .update_jacks = cm9739_update_jacks
75964 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
75965 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
75966 }
75967
75968 -static struct snd_ac97_build_ops patch_cm9761_ops = {
75969 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
75970 .build_specific = patch_cm9761_specific,
75971 .build_post_spdif = patch_cm9761_post_spdif,
75972 .update_jacks = cm9761_update_jacks
75973 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
75974 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
75975 }
75976
75977 -static struct snd_ac97_build_ops patch_cm9780_ops = {
75978 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
75979 .build_specific = patch_cm9780_specific,
75980 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
75981 };
75982 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
75983 return 0;
75984 }
75985
75986 -static struct snd_ac97_build_ops patch_vt1616_ops = {
75987 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
75988 .build_specific = patch_vt1616_specific
75989 };
75990
75991 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
75992 return 0;
75993 }
75994
75995 -static struct snd_ac97_build_ops patch_it2646_ops = {
75996 +static const struct snd_ac97_build_ops patch_it2646_ops = {
75997 .build_specific = patch_it2646_specific,
75998 .update_jacks = it2646_update_jacks
75999 };
76000 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
76001 return 0;
76002 }
76003
76004 -static struct snd_ac97_build_ops patch_si3036_ops = {
76005 +static const struct snd_ac97_build_ops patch_si3036_ops = {
76006 .build_specific = patch_si3036_specific,
76007 };
76008
76009 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
76010 return 0;
76011 }
76012
76013 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
76014 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
76015 .build_specific = patch_ucb1400_specific,
76016 };
76017
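Note on the hunks above: every change in sound/pci/ac97/ac97_patch.c is the same one, adding a const qualifier to a snd_ac97_build_ops table that is only read after initialisation, so the compiler can place the table in read-only data and its function pointers cannot be overwritten at runtime. A minimal user-space sketch of that idea follows (illustrative only; the struct and function names below are made up, not taken from ALSA):

    /* An ops table that is never written after link time can be declared
     * const; it then lands in .rodata, where stray or malicious writes to
     * its function pointers fault instead of redirecting control flow. */
    #include <stdio.h>

    struct build_ops {
            int (*build_specific)(void);
    };

    static int build_specific_impl(void)
    {
            return 0;
    }

    /* const: read-only after relocation, placed in .rodata */
    static const struct build_ops example_ops = {
            .build_specific = build_specific_impl,
    };

    int main(void)
    {
            printf("%d\n", example_ops.build_specific());
            return 0;
    }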
76018 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_codec.h linux-2.6.32.45/sound/pci/hda/hda_codec.h
76019 --- linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-03-27 14:31:47.000000000 -0400
76020 +++ linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-08-05 20:33:55.000000000 -0400
76021 @@ -580,7 +580,7 @@ struct hda_bus_ops {
76022 /* notify power-up/down from codec to controller */
76023 void (*pm_notify)(struct hda_bus *bus);
76024 #endif
76025 -};
76026 +} __no_const;
76027
76028 /* template to pass to the bus constructor */
76029 struct hda_bus_template {
76030 @@ -705,7 +705,7 @@ struct hda_pcm_ops {
76031 struct snd_pcm_substream *substream);
76032 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
76033 struct snd_pcm_substream *substream);
76034 -};
76035 +} __no_const;
76036
76037 /* PCM information for each substream */
76038 struct hda_pcm_stream {
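The two hda_codec.h hunks above tag ops structures with __no_const rather than making them const. Read in the context of this patch series: the constify gcc plugin added near the end of this patch automatically const-qualifies structure types that contain only function pointers, and these particular callback tables are legitimately written at runtime, so they must opt out. A sketch of what the annotation presumably amounts to (the CONSTIFY_PLUGIN macro and the expansion are assumptions for illustration, not shown in this section):

    #include <stdio.h>

    #ifdef CONSTIFY_PLUGIN                         /* assumption for illustration */
    #define __no_const __attribute__((no_const))   /* attribute registered by the plugin */
    #else
    #define __no_const
    #endif

    struct runtime_ops {
            void (*notify)(int event);
    } __no_const;   /* members are assigned at runtime, so the plugin must
                       leave this type writable instead of constifying it */

    static void notify_impl(int event)
    {
            printf("event %d\n", event);
    }

    int main(void)
    {
            struct runtime_ops ops = { .notify = NULL };

            ops.notify = notify_impl;   /* legal only because the type stays non-const */
            ops.notify(1);
            return 0;
    }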
76039 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_generic.c linux-2.6.32.45/sound/pci/hda/hda_generic.c
76040 --- linux-2.6.32.45/sound/pci/hda/hda_generic.c 2011-03-27 14:31:47.000000000 -0400
76041 +++ linux-2.6.32.45/sound/pci/hda/hda_generic.c 2011-08-05 20:33:55.000000000 -0400
76042 @@ -1097,7 +1097,7 @@ int snd_hda_parse_generic_codec(struct h
76043 (err = parse_output(codec)) < 0)
76044 goto error;
76045
76046 - codec->patch_ops = generic_patch_ops;
76047 + memcpy((void *)&codec->patch_ops, &generic_patch_ops, sizeof(generic_patch_ops));
76048
76049 return 0;
76050
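The hda_generic.c hunk above replaces a plain structure assignment with memcpy() through a (void *) cast. Interpreting it in context: once the type of codec->patch_ops is constified, the member becomes a const-qualified lvalue and direct assignment no longer compiles, so the one-time initialisation is kept by copying the bytes instead. A small stand-alone sketch of the pattern (names are invented; the const member stands in for what the plugin does to the type):

    #include <stdio.h>
    #include <string.h>

    struct patch_ops {
            int (*init)(void);
    };

    static int generic_init(void)
    {
            return 0;
    }

    static const struct patch_ops generic_patch_ops = {
            .init = generic_init,
    };

    struct codec {
            const struct patch_ops patch_ops;  /* stands in for the constified type */
    };

    int main(void)
    {
            struct codec codec = { { NULL } };

            /* codec.patch_ops = generic_patch_ops;   <- rejected: assignment to const */
            memcpy((void *)&codec.patch_ops, &generic_patch_ops,
                   sizeof(generic_patch_ops));        /* cast drops const for the copy;
                                                         the object itself sits in writable
                                                         memory, which is what the kernel
                                                         change relies on as well */

            return codec.patch_ops.init();
    }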
76051 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_analog.c linux-2.6.32.45/sound/pci/hda/patch_analog.c
76052 --- linux-2.6.32.45/sound/pci/hda/patch_analog.c 2011-03-27 14:31:47.000000000 -0400
76053 +++ linux-2.6.32.45/sound/pci/hda/patch_analog.c 2011-08-05 20:33:55.000000000 -0400
76054 @@ -1069,7 +1069,7 @@ static int patch_ad1986a(struct hda_code
76055 #endif
76056 spec->vmaster_nid = 0x1b;
76057
76058 - codec->patch_ops = ad198x_patch_ops;
76059 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76060
76061 /* override some parameters */
76062 board_config = snd_hda_check_board_config(codec, AD1986A_MODELS,
76063 @@ -1120,8 +1120,8 @@ static int patch_ad1986a(struct hda_code
76064 if (!is_jack_available(codec, 0x25))
76065 spec->multiout.dig_out_nid = 0;
76066 spec->input_mux = &ad1986a_automic_capture_source;
76067 - codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76068 - codec->patch_ops.init = ad1986a_automic_init;
76069 + *(void **)&codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76070 + *(void **)&codec->patch_ops.init = ad1986a_automic_init;
76071 break;
76072 case AD1986A_SAMSUNG_P50:
76073 spec->num_mixers = 2;
76074 @@ -1137,8 +1137,8 @@ static int patch_ad1986a(struct hda_code
76075 if (!is_jack_available(codec, 0x25))
76076 spec->multiout.dig_out_nid = 0;
76077 spec->input_mux = &ad1986a_automic_capture_source;
76078 - codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76079 - codec->patch_ops.init = ad1986a_samsung_p50_init;
76080 + *(void **)&codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76081 + *(void **)&codec->patch_ops.init = ad1986a_samsung_p50_init;
76082 break;
76083 case AD1986A_LAPTOP_AUTOMUTE:
76084 spec->num_mixers = 3;
76085 @@ -1154,8 +1154,8 @@ static int patch_ad1986a(struct hda_code
76086 if (!is_jack_available(codec, 0x25))
76087 spec->multiout.dig_out_nid = 0;
76088 spec->input_mux = &ad1986a_laptop_eapd_capture_source;
76089 - codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76090 - codec->patch_ops.init = ad1986a_hp_init;
76091 + *(void **)&codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76092 + *(void **)&codec->patch_ops.init = ad1986a_hp_init;
76093 /* Lenovo N100 seems to report the reversed bit
76094 * for HP jack-sensing
76095 */
76096 @@ -1363,7 +1363,7 @@ static int patch_ad1983(struct hda_codec
76097 #endif
76098 spec->vmaster_nid = 0x05;
76099
76100 - codec->patch_ops = ad198x_patch_ops;
76101 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76102
76103 return 0;
76104 }
76105 @@ -1769,7 +1769,7 @@ static int patch_ad1981(struct hda_codec
76106 #endif
76107 spec->vmaster_nid = 0x05;
76108
76109 - codec->patch_ops = ad198x_patch_ops;
76110 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76111
76112 /* override some parameters */
76113 board_config = snd_hda_check_board_config(codec, AD1981_MODELS,
76114 @@ -1783,8 +1783,8 @@ static int patch_ad1981(struct hda_codec
76115 spec->multiout.dig_out_nid = 0;
76116 spec->input_mux = &ad1981_hp_capture_source;
76117
76118 - codec->patch_ops.init = ad1981_hp_init;
76119 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76120 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76121 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76122 break;
76123 case AD1981_THINKPAD:
76124 spec->mixers[0] = ad1981_thinkpad_mixers;
76125 @@ -1805,8 +1805,8 @@ static int patch_ad1981(struct hda_codec
76126 spec->init_verbs[1] = ad1981_toshiba_init_verbs;
76127 spec->multiout.dig_out_nid = 0;
76128 spec->input_mux = &ad1981_hp_capture_source;
76129 - codec->patch_ops.init = ad1981_hp_init;
76130 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76131 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76132 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76133 break;
76134 }
76135 return 0;
76136 @@ -3096,14 +3096,14 @@ static int patch_ad1988(struct hda_codec
76137 if (spec->dig_in_nid && codec->vendor_id < 0x11d4989a)
76138 spec->mixers[spec->num_mixers++] = ad1988_spdif_in_mixers;
76139
76140 - codec->patch_ops = ad198x_patch_ops;
76141 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76142 switch (board_config) {
76143 case AD1988_AUTO:
76144 - codec->patch_ops.init = ad1988_auto_init;
76145 + *(void **)&codec->patch_ops.init = ad1988_auto_init;
76146 break;
76147 case AD1988_LAPTOP:
76148 case AD1988_LAPTOP_DIG:
76149 - codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76150 + *(void **)&codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76151 break;
76152 }
76153 #ifdef CONFIG_SND_HDA_POWER_SAVE
76154 @@ -3321,7 +3321,7 @@ static int patch_ad1884(struct hda_codec
76155 /* we need to cover all playback volumes */
76156 spec->slave_vols = ad1884_slave_vols;
76157
76158 - codec->patch_ops = ad198x_patch_ops;
76159 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76160
76161 return 0;
76162 }
76163 @@ -3529,7 +3529,7 @@ static int patch_ad1984(struct hda_codec
76164 case AD1984_BASIC:
76165 /* additional digital mics */
76166 spec->mixers[spec->num_mixers++] = ad1984_dmic_mixers;
76167 - codec->patch_ops.build_pcms = ad1984_build_pcms;
76168 + *(void **)&codec->patch_ops.build_pcms = ad1984_build_pcms;
76169 break;
76170 case AD1984_THINKPAD:
76171 spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
76172 @@ -4229,7 +4229,7 @@ static int patch_ad1884a(struct hda_code
76173 #ifdef CONFIG_SND_HDA_POWER_SAVE
76174 spec->loopback.amplist = ad1884a_loopbacks;
76175 #endif
76176 - codec->patch_ops = ad198x_patch_ops;
76177 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76178
76179 /* override some parameters */
76180 board_config = snd_hda_check_board_config(codec, AD1884A_MODELS,
76181 @@ -4240,8 +4240,8 @@ static int patch_ad1884a(struct hda_code
76182 spec->mixers[0] = ad1884a_laptop_mixers;
76183 spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
76184 spec->multiout.dig_out_nid = 0;
76185 - codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76186 - codec->patch_ops.init = ad1884a_laptop_init;
76187 + *(void **)&codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76188 + *(void **)&codec->patch_ops.init = ad1884a_laptop_init;
76189 /* set the upper-limit for mixer amp to 0dB for avoiding the
76190 * possible damage by overloading
76191 */
76192 @@ -4255,8 +4255,8 @@ static int patch_ad1884a(struct hda_code
76193 spec->mixers[0] = ad1884a_mobile_mixers;
76194 spec->init_verbs[0] = ad1884a_mobile_verbs;
76195 spec->multiout.dig_out_nid = 0;
76196 - codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76197 - codec->patch_ops.init = ad1884a_hp_init;
76198 + *(void **)&codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76199 + *(void **)&codec->patch_ops.init = ad1884a_hp_init;
76200 /* set the upper-limit for mixer amp to 0dB for avoiding the
76201 * possible damage by overloading
76202 */
76203 @@ -4272,15 +4272,15 @@ static int patch_ad1884a(struct hda_code
76204 ad1984a_thinkpad_verbs;
76205 spec->multiout.dig_out_nid = 0;
76206 spec->input_mux = &ad1984a_thinkpad_capture_source;
76207 - codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76208 - codec->patch_ops.init = ad1984a_thinkpad_init;
76209 + *(void **)&codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76210 + *(void **)&codec->patch_ops.init = ad1984a_thinkpad_init;
76211 break;
76212 case AD1984A_TOUCHSMART:
76213 spec->mixers[0] = ad1984a_touchsmart_mixers;
76214 spec->init_verbs[0] = ad1984a_touchsmart_verbs;
76215 spec->multiout.dig_out_nid = 0;
76216 - codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76217 - codec->patch_ops.init = ad1984a_touchsmart_init;
76218 + *(void **)&codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76219 + *(void **)&codec->patch_ops.init = ad1984a_touchsmart_init;
76220 /* set the upper-limit for mixer amp to 0dB for avoiding the
76221 * possible damage by overloading
76222 */
76223 @@ -4607,7 +4607,7 @@ static int patch_ad1882(struct hda_codec
76224 #endif
76225 spec->vmaster_nid = 0x04;
76226
76227 - codec->patch_ops = ad198x_patch_ops;
76228 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76229
76230 /* override some parameters */
76231 board_config = snd_hda_check_board_config(codec, AD1882_MODELS,
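The patch_analog.c hunks above show the second half of the same convention: where only a single callback needs to be overridden after the table has been copied in, the patch writes that one pointer through a void ** cast instead of assigning to the now const-qualified member. A brief sketch with invented names (gcc accepts the function-pointer store through void **, which is exactly what the kernel change depends on):

    #include <stdio.h>

    struct patch_ops {
            int  (*init)(void);
            void (*unsol_event)(int res);
    };

    static int default_init(void)
    {
            return 0;
    }

    static void hp_unsol_event(int res)
    {
            printf("unsol event %d\n", res);
    }

    struct codec {
            const struct patch_ops patch_ops;  /* stands in for the constified type */
    };

    int main(void)
    {
            struct codec codec = { { default_init, NULL } };

            /* codec.patch_ops.unsol_event = hp_unsol_event;  <- rejected once the
             * type is const; the single pointer is written through a cast instead,
             * leaving the rest of the table untouched: */
            *(void **)&codec.patch_ops.unsol_event = hp_unsol_event;

            codec.patch_ops.unsol_event(42);
            return codec.patch_ops.init();
    }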
76232 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c
76233 --- linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-03-27 14:31:47.000000000 -0400
76234 +++ linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-08-05 20:33:55.000000000 -0400
76235 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_code
76236 */
76237 spec->multiout.dig_out_nid = CVT_NID;
76238
76239 - codec->patch_ops = atihdmi_patch_ops;
76240 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
76241
76242 return 0;
76243 }
76244 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_ca0110.c linux-2.6.32.45/sound/pci/hda/patch_ca0110.c
76245 --- linux-2.6.32.45/sound/pci/hda/patch_ca0110.c 2011-03-27 14:31:47.000000000 -0400
76246 +++ linux-2.6.32.45/sound/pci/hda/patch_ca0110.c 2011-08-05 20:33:55.000000000 -0400
76247 @@ -525,7 +525,7 @@ static int patch_ca0110(struct hda_codec
76248 if (err < 0)
76249 goto error;
76250
76251 - codec->patch_ops = ca0110_patch_ops;
76252 + memcpy((void *)&codec->patch_ops, &ca0110_patch_ops, sizeof(ca0110_patch_ops));
76253
76254 return 0;
76255
76256 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_cirrus.c linux-2.6.32.45/sound/pci/hda/patch_cirrus.c
76257 --- linux-2.6.32.45/sound/pci/hda/patch_cirrus.c 2011-05-10 22:12:02.000000000 -0400
76258 +++ linux-2.6.32.45/sound/pci/hda/patch_cirrus.c 2011-08-05 20:33:55.000000000 -0400
76259 @@ -1191,7 +1191,7 @@ static int patch_cs420x(struct hda_codec
76260 if (err < 0)
76261 goto error;
76262
76263 - codec->patch_ops = cs_patch_ops;
76264 + memcpy((void *)&codec->patch_ops, &cs_patch_ops, sizeof(cs_patch_ops));
76265
76266 return 0;
76267
76268 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_cmedia.c linux-2.6.32.45/sound/pci/hda/patch_cmedia.c
76269 --- linux-2.6.32.45/sound/pci/hda/patch_cmedia.c 2011-03-27 14:31:47.000000000 -0400
76270 +++ linux-2.6.32.45/sound/pci/hda/patch_cmedia.c 2011-08-05 20:33:55.000000000 -0400
76271 @@ -728,7 +728,7 @@ static int patch_cmi9880(struct hda_code
76272
76273 spec->adc_nids = cmi9880_adc_nids;
76274
76275 - codec->patch_ops = cmi9880_patch_ops;
76276 + memcpy((void *)&codec->patch_ops, &cmi9880_patch_ops, sizeof(cmi9880_patch_ops));
76277
76278 return 0;
76279 }
76280 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_conexant.c linux-2.6.32.45/sound/pci/hda/patch_conexant.c
76281 --- linux-2.6.32.45/sound/pci/hda/patch_conexant.c 2011-03-27 14:31:47.000000000 -0400
76282 +++ linux-2.6.32.45/sound/pci/hda/patch_conexant.c 2011-08-05 20:33:55.000000000 -0400
76283 @@ -1119,55 +1119,55 @@ static int patch_cxt5045(struct hda_code
76284 spec->channel_mode = cxt5045_modes,
76285
76286
76287 - codec->patch_ops = conexant_patch_ops;
76288 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76289
76290 board_config = snd_hda_check_board_config(codec, CXT5045_MODELS,
76291 cxt5045_models,
76292 cxt5045_cfg_tbl);
76293 switch (board_config) {
76294 case CXT5045_LAPTOP_HPSENSE:
76295 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76296 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76297 spec->input_mux = &cxt5045_capture_source;
76298 spec->num_init_verbs = 2;
76299 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76300 spec->mixers[0] = cxt5045_mixers;
76301 - codec->patch_ops.init = cxt5045_init;
76302 + *(void **)&codec->patch_ops.init = cxt5045_init;
76303 break;
76304 case CXT5045_LAPTOP_MICSENSE:
76305 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76306 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76307 spec->input_mux = &cxt5045_capture_source;
76308 spec->num_init_verbs = 2;
76309 spec->init_verbs[1] = cxt5045_mic_sense_init_verbs;
76310 spec->mixers[0] = cxt5045_mixers;
76311 - codec->patch_ops.init = cxt5045_init;
76312 + *(void **)&codec->patch_ops.init = cxt5045_init;
76313 break;
76314 default:
76315 case CXT5045_LAPTOP_HPMICSENSE:
76316 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76317 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76318 spec->input_mux = &cxt5045_capture_source;
76319 spec->num_init_verbs = 3;
76320 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76321 spec->init_verbs[2] = cxt5045_mic_sense_init_verbs;
76322 spec->mixers[0] = cxt5045_mixers;
76323 - codec->patch_ops.init = cxt5045_init;
76324 + *(void **)&codec->patch_ops.init = cxt5045_init;
76325 break;
76326 case CXT5045_BENQ:
76327 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76328 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76329 spec->input_mux = &cxt5045_capture_source_benq;
76330 spec->num_init_verbs = 1;
76331 spec->init_verbs[0] = cxt5045_benq_init_verbs;
76332 spec->mixers[0] = cxt5045_mixers;
76333 spec->mixers[1] = cxt5045_benq_mixers;
76334 spec->num_mixers = 2;
76335 - codec->patch_ops.init = cxt5045_init;
76336 + *(void **)&codec->patch_ops.init = cxt5045_init;
76337 break;
76338 case CXT5045_LAPTOP_HP530:
76339 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76340 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76341 spec->input_mux = &cxt5045_capture_source_hp530;
76342 spec->num_init_verbs = 2;
76343 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76344 spec->mixers[0] = cxt5045_mixers_hp530;
76345 - codec->patch_ops.init = cxt5045_init;
76346 + *(void **)&codec->patch_ops.init = cxt5045_init;
76347 break;
76348 #ifdef CONFIG_SND_DEBUG
76349 case CXT5045_TEST:
76350 @@ -1556,7 +1556,7 @@ static int patch_cxt5047(struct hda_code
76351 spec->num_channel_mode = ARRAY_SIZE(cxt5047_modes),
76352 spec->channel_mode = cxt5047_modes,
76353
76354 - codec->patch_ops = conexant_patch_ops;
76355 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76356
76357 board_config = snd_hda_check_board_config(codec, CXT5047_MODELS,
76358 cxt5047_models,
76359 @@ -1565,13 +1565,13 @@ static int patch_cxt5047(struct hda_code
76360 case CXT5047_LAPTOP:
76361 spec->num_mixers = 2;
76362 spec->mixers[1] = cxt5047_hp_spk_mixers;
76363 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76364 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76365 break;
76366 case CXT5047_LAPTOP_HP:
76367 spec->num_mixers = 2;
76368 spec->mixers[1] = cxt5047_hp_only_mixers;
76369 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76370 - codec->patch_ops.init = cxt5047_hp_init;
76371 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76372 + *(void **)&codec->patch_ops.init = cxt5047_hp_init;
76373 break;
76374 case CXT5047_LAPTOP_EAPD:
76375 spec->input_mux = &cxt5047_toshiba_capture_source;
76376 @@ -1579,14 +1579,14 @@ static int patch_cxt5047(struct hda_code
76377 spec->mixers[1] = cxt5047_hp_spk_mixers;
76378 spec->num_init_verbs = 2;
76379 spec->init_verbs[1] = cxt5047_toshiba_init_verbs;
76380 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76381 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76382 break;
76383 #ifdef CONFIG_SND_DEBUG
76384 case CXT5047_TEST:
76385 spec->input_mux = &cxt5047_test_capture_source;
76386 spec->mixers[0] = cxt5047_test_mixer;
76387 spec->init_verbs[0] = cxt5047_test_init_verbs;
76388 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76389 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76390 #endif
76391 }
76392 spec->vmaster_nid = 0x13;
76393 @@ -1904,8 +1904,8 @@ static int patch_cxt5051(struct hda_code
76394 codec->spec = spec;
76395 codec->pin_amp_workaround = 1;
76396
76397 - codec->patch_ops = conexant_patch_ops;
76398 - codec->patch_ops.init = cxt5051_init;
76399 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76400 + *(void **)&codec->patch_ops.init = cxt5051_init;
76401
76402 spec->multiout.max_channels = 2;
76403 spec->multiout.num_dacs = ARRAY_SIZE(cxt5051_dac_nids);
76404 @@ -1923,7 +1923,7 @@ static int patch_cxt5051(struct hda_code
76405 spec->cur_adc = 0;
76406 spec->cur_adc_idx = 0;
76407
76408 - codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
76409 + *(void **)&codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
76410
76411 board_config = snd_hda_check_board_config(codec, CXT5051_MODELS,
76412 cxt5051_models,
76413 @@ -2372,8 +2372,8 @@ static int patch_cxt5066(struct hda_code
76414 return -ENOMEM;
76415 codec->spec = spec;
76416
76417 - codec->patch_ops = conexant_patch_ops;
76418 - codec->patch_ops.init = cxt5066_init;
76419 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76420 + *(void **)&codec->patch_ops.init = cxt5066_init;
76421
76422 spec->dell_automute = 0;
76423 spec->multiout.max_channels = 2;
76424 @@ -2413,7 +2413,7 @@ static int patch_cxt5066(struct hda_code
76425 spec->dell_automute = 1;
76426 break;
76427 case CXT5066_OLPC_XO_1_5:
76428 - codec->patch_ops.unsol_event = cxt5066_unsol_event;
76429 + *(void **)&codec->patch_ops.unsol_event = cxt5066_unsol_event;
76430 spec->init_verbs[0] = cxt5066_init_verbs_olpc;
76431 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
76432 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
76433 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c
76434 --- linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
76435 +++ linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-08-05 20:33:55.000000000 -0400
76436 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
76437 cp_ready);
76438
76439 /* TODO */
76440 - if (cp_state)
76441 - ;
76442 - if (cp_ready)
76443 - ;
76444 + if (cp_state) {
76445 + }
76446 + if (cp_ready) {
76447 + }
76448 }
76449
76450
76451 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hd
76452 spec->multiout.dig_out_nid = cvt_nid;
76453
76454 codec->spec = spec;
76455 - codec->patch_ops = intel_hdmi_patch_ops;
76456 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
76457
76458 snd_hda_eld_proc_new(codec, &spec->sink_eld);
76459
76460 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c
76461 --- linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-03-27 14:31:47.000000000 -0400
76462 +++ linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-08-05 20:33:55.000000000 -0400
76463 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_c
76464 spec->multiout.max_channels = 8;
76465 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
76466
76467 - codec->patch_ops = nvhdmi_patch_ops_8ch;
76468 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
76469
76470 return 0;
76471 }
76472 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_c
76473 spec->multiout.max_channels = 2;
76474 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
76475
76476 - codec->patch_ops = nvhdmi_patch_ops_2ch;
76477 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
76478
76479 return 0;
76480 }
76481 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_realtek.c linux-2.6.32.45/sound/pci/hda/patch_realtek.c
76482 --- linux-2.6.32.45/sound/pci/hda/patch_realtek.c 2011-06-25 12:55:35.000000000 -0400
76483 +++ linux-2.6.32.45/sound/pci/hda/patch_realtek.c 2011-08-05 20:33:55.000000000 -0400
76484 @@ -4856,7 +4856,7 @@ static int patch_alc880(struct hda_codec
76485
76486 spec->vmaster_nid = 0x0c;
76487
76488 - codec->patch_ops = alc_patch_ops;
76489 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76490 if (board_config == ALC880_AUTO)
76491 spec->init_hook = alc880_auto_init;
76492 #ifdef CONFIG_SND_HDA_POWER_SAVE
76493 @@ -6479,7 +6479,7 @@ static int patch_alc260(struct hda_codec
76494
76495 spec->vmaster_nid = 0x08;
76496
76497 - codec->patch_ops = alc_patch_ops;
76498 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76499 if (board_config == ALC260_AUTO)
76500 spec->init_hook = alc260_auto_init;
76501 #ifdef CONFIG_SND_HDA_POWER_SAVE
76502 @@ -9997,7 +9997,7 @@ static int patch_alc882(struct hda_codec
76503
76504 spec->vmaster_nid = 0x0c;
76505
76506 - codec->patch_ops = alc_patch_ops;
76507 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76508 if (board_config == ALC882_AUTO)
76509 spec->init_hook = alc882_auto_init;
76510 #ifdef CONFIG_SND_HDA_POWER_SAVE
76511 @@ -11871,7 +11871,7 @@ static int patch_alc262(struct hda_codec
76512
76513 spec->vmaster_nid = 0x0c;
76514
76515 - codec->patch_ops = alc_patch_ops;
76516 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76517 if (board_config == ALC262_AUTO)
76518 spec->init_hook = alc262_auto_init;
76519 #ifdef CONFIG_SND_HDA_POWER_SAVE
76520 @@ -12950,7 +12950,7 @@ static int patch_alc268(struct hda_codec
76521
76522 spec->vmaster_nid = 0x02;
76523
76524 - codec->patch_ops = alc_patch_ops;
76525 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76526 if (board_config == ALC268_AUTO)
76527 spec->init_hook = alc268_auto_init;
76528
76529 @@ -13636,7 +13636,7 @@ static int patch_alc269(struct hda_codec
76530
76531 spec->vmaster_nid = 0x02;
76532
76533 - codec->patch_ops = alc_patch_ops;
76534 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76535 if (board_config == ALC269_AUTO)
76536 spec->init_hook = alc269_auto_init;
76537 #ifdef CONFIG_SND_HDA_POWER_SAVE
76538 @@ -14741,7 +14741,7 @@ static int patch_alc861(struct hda_codec
76539
76540 spec->vmaster_nid = 0x03;
76541
76542 - codec->patch_ops = alc_patch_ops;
76543 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76544 if (board_config == ALC861_AUTO)
76545 spec->init_hook = alc861_auto_init;
76546 #ifdef CONFIG_SND_HDA_POWER_SAVE
76547 @@ -15727,7 +15727,7 @@ static int patch_alc861vd(struct hda_cod
76548
76549 spec->vmaster_nid = 0x02;
76550
76551 - codec->patch_ops = alc_patch_ops;
76552 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76553
76554 if (board_config == ALC861VD_AUTO)
76555 spec->init_hook = alc861vd_auto_init;
76556 @@ -17652,7 +17652,7 @@ static int patch_alc662(struct hda_codec
76557
76558 spec->vmaster_nid = 0x02;
76559
76560 - codec->patch_ops = alc_patch_ops;
76561 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76562 if (board_config == ALC662_AUTO)
76563 spec->init_hook = alc662_auto_init;
76564 #ifdef CONFIG_SND_HDA_POWER_SAVE
76565 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_si3054.c linux-2.6.32.45/sound/pci/hda/patch_si3054.c
76566 --- linux-2.6.32.45/sound/pci/hda/patch_si3054.c 2011-03-27 14:31:47.000000000 -0400
76567 +++ linux-2.6.32.45/sound/pci/hda/patch_si3054.c 2011-08-05 20:33:55.000000000 -0400
76568 @@ -275,7 +275,7 @@ static int patch_si3054(struct hda_codec
76569 if (spec == NULL)
76570 return -ENOMEM;
76571 codec->spec = spec;
76572 - codec->patch_ops = si3054_patch_ops;
76573 + memcpy((void *)&codec->patch_ops, &si3054_patch_ops, sizeof(si3054_patch_ops));
76574 return 0;
76575 }
76576
76577 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c
76578 --- linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-06-25 12:55:35.000000000 -0400
76579 +++ linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-08-05 20:33:55.000000000 -0400
76580 @@ -4899,7 +4899,7 @@ static int patch_stac9200(struct hda_cod
76581 if (spec->board_config == STAC_9200_PANASONIC)
76582 spec->hp_detect = 0;
76583
76584 - codec->patch_ops = stac92xx_patch_ops;
76585 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76586
76587 return 0;
76588 }
76589 @@ -4981,7 +4981,7 @@ static int patch_stac925x(struct hda_cod
76590 return err;
76591 }
76592
76593 - codec->patch_ops = stac92xx_patch_ops;
76594 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76595
76596 return 0;
76597 }
76598 @@ -5125,7 +5125,7 @@ again:
76599 if (spec->board_config == STAC_92HD73XX_NO_JD)
76600 spec->hp_detect = 0;
76601
76602 - codec->patch_ops = stac92xx_patch_ops;
76603 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76604
76605 codec->proc_widget_hook = stac92hd7x_proc_hook;
76606
76607 @@ -5220,7 +5220,7 @@ again:
76608 snd_hda_codec_write_cache(codec, nid, 0,
76609 AC_VERB_SET_CONNECT_SEL, num_dacs);
76610
76611 - codec->patch_ops = stac92xx_patch_ops;
76612 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76613
76614 codec->proc_widget_hook = stac92hd_proc_hook;
76615
76616 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hd
76617 return -ENOMEM;
76618
76619 codec->spec = spec;
76620 - codec->patch_ops = stac92xx_patch_ops;
76621 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76622 spec->num_pins = STAC92HD71BXX_NUM_PINS;
76623 switch (codec->vendor_id) {
76624 case 0x111d76b6:
76625 @@ -5515,7 +5515,7 @@ again:
76626 spec->gpio_dir |= spec->gpio_led;
76627 spec->gpio_data |= spec->gpio_led;
76628 /* register check_power_status callback. */
76629 - codec->patch_ops.check_power_status =
76630 + *(void **)&codec->patch_ops.check_power_status =
76631 stac92xx_hp_check_power_status;
76632 }
76633 #endif
76634 @@ -5634,7 +5634,7 @@ static int patch_stac922x(struct hda_cod
76635 return err;
76636 }
76637
76638 - codec->patch_ops = stac92xx_patch_ops;
76639 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76640
76641 /* Fix Mux capture level; max to 2 */
76642 snd_hda_override_amp_caps(codec, 0x12, HDA_OUTPUT,
76643 @@ -5757,7 +5757,7 @@ static int patch_stac927x(struct hda_cod
76644 return err;
76645 }
76646
76647 - codec->patch_ops = stac92xx_patch_ops;
76648 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76649
76650 codec->proc_widget_hook = stac927x_proc_hook;
76651
76652 @@ -5880,7 +5880,7 @@ static int patch_stac9205(struct hda_cod
76653 return err;
76654 }
76655
76656 - codec->patch_ops = stac92xx_patch_ops;
76657 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76658
76659 codec->proc_widget_hook = stac9205_proc_hook;
76660
76661 @@ -5974,7 +5974,7 @@ static int patch_stac9872(struct hda_cod
76662 return -EINVAL;
76663 }
76664 spec->input_mux = &spec->private_imux;
76665 - codec->patch_ops = stac92xx_patch_ops;
76666 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76667 return 0;
76668 }
76669
76670 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_via.c linux-2.6.32.45/sound/pci/hda/patch_via.c
76671 --- linux-2.6.32.45/sound/pci/hda/patch_via.c 2011-03-27 14:31:47.000000000 -0400
76672 +++ linux-2.6.32.45/sound/pci/hda/patch_via.c 2011-08-05 20:33:55.000000000 -0400
76673 @@ -1399,9 +1399,9 @@ static int patch_vt1708(struct hda_codec
76674 spec->num_mixers++;
76675 }
76676
76677 - codec->patch_ops = via_patch_ops;
76678 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76679
76680 - codec->patch_ops.init = via_auto_init;
76681 + *(void **)&codec->patch_ops.init = via_auto_init;
76682 #ifdef CONFIG_SND_HDA_POWER_SAVE
76683 spec->loopback.amplist = vt1708_loopbacks;
76684 #endif
76685 @@ -1870,10 +1870,10 @@ static int patch_vt1709_10ch(struct hda_
76686 spec->num_mixers++;
76687 }
76688
76689 - codec->patch_ops = via_patch_ops;
76690 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76691
76692 - codec->patch_ops.init = via_auto_init;
76693 - codec->patch_ops.unsol_event = via_unsol_event;
76694 + *(void **)&codec->patch_ops.init = via_auto_init;
76695 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76696 #ifdef CONFIG_SND_HDA_POWER_SAVE
76697 spec->loopback.amplist = vt1709_loopbacks;
76698 #endif
76699 @@ -1964,10 +1964,10 @@ static int patch_vt1709_6ch(struct hda_c
76700 spec->num_mixers++;
76701 }
76702
76703 - codec->patch_ops = via_patch_ops;
76704 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76705
76706 - codec->patch_ops.init = via_auto_init;
76707 - codec->patch_ops.unsol_event = via_unsol_event;
76708 + *(void **)&codec->patch_ops.init = via_auto_init;
76709 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76710 #ifdef CONFIG_SND_HDA_POWER_SAVE
76711 spec->loopback.amplist = vt1709_loopbacks;
76712 #endif
76713 @@ -2418,10 +2418,10 @@ static int patch_vt1708B_8ch(struct hda_
76714 spec->num_mixers++;
76715 }
76716
76717 - codec->patch_ops = via_patch_ops;
76718 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76719
76720 - codec->patch_ops.init = via_auto_init;
76721 - codec->patch_ops.unsol_event = via_unsol_event;
76722 + *(void **)&codec->patch_ops.init = via_auto_init;
76723 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76724 #ifdef CONFIG_SND_HDA_POWER_SAVE
76725 spec->loopback.amplist = vt1708B_loopbacks;
76726 #endif
76727 @@ -2470,10 +2470,10 @@ static int patch_vt1708B_4ch(struct hda_
76728 spec->num_mixers++;
76729 }
76730
76731 - codec->patch_ops = via_patch_ops;
76732 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76733
76734 - codec->patch_ops.init = via_auto_init;
76735 - codec->patch_ops.unsol_event = via_unsol_event;
76736 + *(void **)&codec->patch_ops.init = via_auto_init;
76737 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76738 #ifdef CONFIG_SND_HDA_POWER_SAVE
76739 spec->loopback.amplist = vt1708B_loopbacks;
76740 #endif
76741 @@ -2905,10 +2905,10 @@ static int patch_vt1708S(struct hda_code
76742 spec->num_mixers++;
76743 }
76744
76745 - codec->patch_ops = via_patch_ops;
76746 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76747
76748 - codec->patch_ops.init = via_auto_init;
76749 - codec->patch_ops.unsol_event = via_unsol_event;
76750 + *(void **)&codec->patch_ops.init = via_auto_init;
76751 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76752 #ifdef CONFIG_SND_HDA_POWER_SAVE
76753 spec->loopback.amplist = vt1708S_loopbacks;
76754 #endif
76755 @@ -3223,10 +3223,10 @@ static int patch_vt1702(struct hda_codec
76756 spec->num_mixers++;
76757 }
76758
76759 - codec->patch_ops = via_patch_ops;
76760 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76761
76762 - codec->patch_ops.init = via_auto_init;
76763 - codec->patch_ops.unsol_event = via_unsol_event;
76764 + *(void **)&codec->patch_ops.init = via_auto_init;
76765 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76766 #ifdef CONFIG_SND_HDA_POWER_SAVE
76767 spec->loopback.amplist = vt1702_loopbacks;
76768 #endif
76769 diff -urNp linux-2.6.32.45/sound/pci/ice1712/ice1712.h linux-2.6.32.45/sound/pci/ice1712/ice1712.h
76770 --- linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-03-27 14:31:47.000000000 -0400
76771 +++ linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-08-05 20:33:55.000000000 -0400
76772 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
76773 unsigned int mask_flags; /* total mask bits */
76774 struct snd_akm4xxx_ops {
76775 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
76776 - } ops;
76777 + } __no_const ops;
76778 };
76779
76780 struct snd_ice1712_spdif {
76781 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
76782 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76783 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76784 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76785 - } ops;
76786 + } __no_const ops;
76787 };
76788
76789
76790 diff -urNp linux-2.6.32.45/sound/pci/intel8x0m.c linux-2.6.32.45/sound/pci/intel8x0m.c
76791 --- linux-2.6.32.45/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
76792 +++ linux-2.6.32.45/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
76793 @@ -1264,7 +1264,7 @@ static struct shortname_table {
76794 { 0x5455, "ALi M5455" },
76795 { 0x746d, "AMD AMD8111" },
76796 #endif
76797 - { 0 },
76798 + { 0, },
76799 };
76800
76801 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
76802 diff -urNp linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c
76803 --- linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
76804 +++ linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
76805 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
76806 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
76807 break;
76808 }
76809 - if (atomic_read(&chip->interrupt_sleep_count)) {
76810 - atomic_set(&chip->interrupt_sleep_count, 0);
76811 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76812 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76813 wake_up(&chip->interrupt_sleep);
76814 }
76815 __end:
76816 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
76817 continue;
76818 init_waitqueue_entry(&wait, current);
76819 add_wait_queue(&chip->interrupt_sleep, &wait);
76820 - atomic_inc(&chip->interrupt_sleep_count);
76821 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
76822 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
76823 remove_wait_queue(&chip->interrupt_sleep, &wait);
76824 }
76825 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
76826 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
76827 spin_unlock(&chip->reg_lock);
76828
76829 - if (atomic_read(&chip->interrupt_sleep_count)) {
76830 - atomic_set(&chip->interrupt_sleep_count, 0);
76831 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76832 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76833 wake_up(&chip->interrupt_sleep);
76834 }
76835 }
76836 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
76837 spin_lock_init(&chip->reg_lock);
76838 spin_lock_init(&chip->voice_lock);
76839 init_waitqueue_head(&chip->interrupt_sleep);
76840 - atomic_set(&chip->interrupt_sleep_count, 0);
76841 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76842 chip->card = card;
76843 chip->pci = pci;
76844 chip->irq = -1;
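The ymfpci_main.c hunk above switches interrupt_sleep_count from the regular atomic helpers to the *_unchecked ones. Reading it against the rest of this patch: PaX's REFCOUNT hardening makes the ordinary atomic_t operations trap on signed overflow to stop reference-count overflows, and counters that are pure bookkeeping and may wrap are therefore converted to an unchecked variant that keeps the old non-trapping behaviour. The real atomic_unchecked_t and its helpers are defined elsewhere in the patch; the sketch below only illustrates the split (user-space, simplified, no real atomicity):

    #include <stdio.h>

    /* simplified stand-in; the kernel type is arch-specific and truly atomic */
    typedef struct { volatile int counter; } atomic_unchecked_t;

    static void atomic_set_unchecked(atomic_unchecked_t *v, int i)  { v->counter = i; }
    static void atomic_inc_unchecked(atomic_unchecked_t *v)         { v->counter++; }
    static int  atomic_read_unchecked(const atomic_unchecked_t *v)  { return v->counter; }

    int main(void)
    {
            atomic_unchecked_t interrupt_sleep_count;

            atomic_set_unchecked(&interrupt_sleep_count, 0);
            atomic_inc_unchecked(&interrupt_sleep_count);   /* may wrap; never traps */

            if (atomic_read_unchecked(&interrupt_sleep_count))
                    atomic_set_unchecked(&interrupt_sleep_count, 0);

            printf("%d\n", atomic_read_unchecked(&interrupt_sleep_count));
            return 0;
    }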
76845 diff -urNp linux-2.6.32.45/sound/soc/soc-core.c linux-2.6.32.45/sound/soc/soc-core.c
76846 --- linux-2.6.32.45/sound/soc/soc-core.c 2011-03-27 14:31:47.000000000 -0400
76847 +++ linux-2.6.32.45/sound/soc/soc-core.c 2011-08-05 20:33:55.000000000 -0400
76848 @@ -1107,13 +1107,13 @@ static int soc_new_pcm(struct snd_soc_de
76849
76850 dai_link->pcm = pcm;
76851 pcm->private_data = rtd;
76852 - soc_pcm_ops.mmap = platform->pcm_ops->mmap;
76853 - soc_pcm_ops.pointer = platform->pcm_ops->pointer;
76854 - soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
76855 - soc_pcm_ops.copy = platform->pcm_ops->copy;
76856 - soc_pcm_ops.silence = platform->pcm_ops->silence;
76857 - soc_pcm_ops.ack = platform->pcm_ops->ack;
76858 - soc_pcm_ops.page = platform->pcm_ops->page;
76859 + *(void **)&soc_pcm_ops.mmap = platform->pcm_ops->mmap;
76860 + *(void **)&soc_pcm_ops.pointer = platform->pcm_ops->pointer;
76861 + *(void **)&soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
76862 + *(void **)&soc_pcm_ops.copy = platform->pcm_ops->copy;
76863 + *(void **)&soc_pcm_ops.silence = platform->pcm_ops->silence;
76864 + *(void **)&soc_pcm_ops.ack = platform->pcm_ops->ack;
76865 + *(void **)&soc_pcm_ops.page = platform->pcm_ops->page;
76866
76867 if (playback)
76868 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);
76869 diff -urNp linux-2.6.32.45/sound/usb/usbaudio.c linux-2.6.32.45/sound/usb/usbaudio.c
76870 --- linux-2.6.32.45/sound/usb/usbaudio.c 2011-03-27 14:31:47.000000000 -0400
76871 +++ linux-2.6.32.45/sound/usb/usbaudio.c 2011-08-05 20:33:55.000000000 -0400
76872 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(
76873 switch (cmd) {
76874 case SNDRV_PCM_TRIGGER_START:
76875 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76876 - subs->ops.prepare = prepare_playback_urb;
76877 + *(void **)&subs->ops.prepare = prepare_playback_urb;
76878 return 0;
76879 case SNDRV_PCM_TRIGGER_STOP:
76880 return deactivate_urbs(subs, 0, 0);
76881 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76882 - subs->ops.prepare = prepare_nodata_playback_urb;
76883 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76884 return 0;
76885 default:
76886 return -EINVAL;
76887 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(s
76888
76889 switch (cmd) {
76890 case SNDRV_PCM_TRIGGER_START:
76891 - subs->ops.retire = retire_capture_urb;
76892 + *(void **)&subs->ops.retire = retire_capture_urb;
76893 return start_urbs(subs, substream->runtime);
76894 case SNDRV_PCM_TRIGGER_STOP:
76895 return deactivate_urbs(subs, 0, 0);
76896 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76897 - subs->ops.retire = retire_paused_capture_urb;
76898 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
76899 return 0;
76900 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76901 - subs->ops.retire = retire_capture_urb;
76902 + *(void **)&subs->ops.retire = retire_capture_urb;
76903 return 0;
76904 default:
76905 return -EINVAL;
76906 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct sn
76907 /* for playback, submit the URBs now; otherwise, the first hwptr_done
76908 * updates for all URBs would happen at the same time when starting */
76909 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
76910 - subs->ops.prepare = prepare_nodata_playback_urb;
76911 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76912 return start_urbs(subs, runtime);
76913 } else
76914 return 0;
76915 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_us
76916 subs->direction = stream;
76917 subs->dev = as->chip->dev;
76918 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
76919 - subs->ops = audio_urb_ops[stream];
76920 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
76921 } else {
76922 - subs->ops = audio_urb_ops_high_speed[stream];
76923 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
76924 switch (as->chip->usb_id) {
76925 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
76926 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
76927 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
76928 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76929 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76930 break;
76931 }
76932 }
76933 diff -urNp linux-2.6.32.45/tools/gcc/constify_plugin.c linux-2.6.32.45/tools/gcc/constify_plugin.c
76934 --- linux-2.6.32.45/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
76935 +++ linux-2.6.32.45/tools/gcc/constify_plugin.c 2011-08-11 19:12:51.000000000 -0400
76936 @@ -0,0 +1,189 @@
76937 +/*
76938 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
76939 + * Licensed under the GPL v2, or (at your option) v3
76940 + *
76941 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
76942 + *
76943 + * Usage:
76944 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
76945 + * $ gcc -fplugin=constify_plugin.so test.c -O2
76946 + */
76947 +
76948 +#include "gcc-plugin.h"
76949 +#include "config.h"
76950 +#include "system.h"
76951 +#include "coretypes.h"
76952 +#include "tree.h"
76953 +#include "tree-pass.h"
76954 +#include "intl.h"
76955 +#include "plugin-version.h"
76956 +#include "tm.h"
76957 +#include "toplev.h"
76958 +#include "function.h"
76959 +#include "tree-flow.h"
76960 +#include "plugin.h"
76961 +
76962 +int plugin_is_GPL_compatible;
76963 +
76964 +static struct plugin_info const_plugin_info = {
76965 + .version = "20110721",
76966 + .help = "no-constify\tturn off constification\n",
76967 +};
76968 +
76969 +static bool walk_struct(tree node);
76970 +
76971 +static void deconstify_node(tree node)
76972 +{
76973 + tree field;
76974 +
76975 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
76976 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
76977 + if (code == RECORD_TYPE || code == UNION_TYPE)
76978 + deconstify_node(TREE_TYPE(field));
76979 + TREE_READONLY(field) = 0;
76980 + TREE_READONLY(TREE_TYPE(field)) = 0;
76981 + }
76982 +}
76983 +
76984 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
76985 +{
76986 + if (TREE_CODE(*node) == FUNCTION_DECL) {
76987 + error("%qE attribute does not apply to functions", name);
76988 + *no_add_attrs = true;
76989 + return NULL_TREE;
76990 + }
76991 +
76992 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
76993 + error("%qE attribute is already applied to the type" , name);
76994 + *no_add_attrs = true;
76995 + return NULL_TREE;
76996 + }
76997 +
76998 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
76999 + error("%qE attribute used on type that is not constified" , name);
77000 + *no_add_attrs = true;
77001 + return NULL_TREE;
77002 + }
77003 +
77004 + if (TREE_CODE(*node) == TYPE_DECL) {
77005 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
77006 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
77007 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
77008 + TREE_READONLY(TREE_TYPE(*node)) = 0;
77009 + deconstify_node(TREE_TYPE(*node));
77010 + return NULL_TREE;
77011 + }
77012 +
77013 + return NULL_TREE;
77014 +}
77015 +
77016 +static struct attribute_spec no_const_attr = {
77017 + .name = "no_const",
77018 + .min_length = 0,
77019 + .max_length = 0,
77020 + .decl_required = false,
77021 + .type_required = false,
77022 + .function_type_required = false,
77023 + .handler = handle_no_const_attribute
77024 +};
77025 +
77026 +static void register_attributes(void *event_data, void *data)
77027 +{
77028 + register_attribute(&no_const_attr);
77029 +}
77030 +
77031 +/*
77032 +static void printnode(char *prefix, tree node)
77033 +{
77034 + enum tree_code code;
77035 + enum tree_code_class tclass;
77036 +
77037 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
77038 +
77039 + code = TREE_CODE(node);
77040 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
77041 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
77042 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
77043 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
77044 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
77045 +}
77046 +*/
77047 +
77048 +static void constify_node(tree node)
77049 +{
77050 + TREE_READONLY(node) = 1;
77051 +}
77052 +
77053 +static bool is_fptr(tree field)
77054 +{
77055 + tree ptr = TREE_TYPE(field);
77056 +
77057 + if (TREE_CODE(ptr) != POINTER_TYPE)
77058 + return false;
77059 +
77060 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77061 +}
77062 +
77063 +static bool walk_struct(tree node)
77064 +{
77065 + tree field;
77066 +
77067 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77068 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
77069 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77070 + if (!(walk_struct(TREE_TYPE(field))))
77071 + return false;
77072 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
77073 + return false;
77074 + }
77075 + return true;
77076 +}
77077 +
77078 +static void finish_type(void *event_data, void *data)
77079 +{
77080 + tree node = (tree)event_data;
77081 +
77082 + if (node == NULL_TREE)
77083 + return;
77084 +
77085 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77086 + return;
77087 +
77088 + if (TREE_READONLY(node))
77089 + return;
77090 +
77091 + if (TYPE_FIELDS(node) == NULL_TREE)
77092 + return;
77093 +
77094 + if (walk_struct(node))
77095 + constify_node(node);
77096 +}
77097 +
77098 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77099 +{
77100 + const char * const plugin_name = plugin_info->base_name;
77101 + const int argc = plugin_info->argc;
77102 + const struct plugin_argument * const argv = plugin_info->argv;
77103 + int i;
77104 + bool constify = true;
77105 +
77106 + if (!plugin_default_version_check(version, &gcc_version)) {
77107 + error(G_("incompatible gcc/plugin versions"));
77108 + return 1;
77109 + }
77110 +
77111 + for (i = 0; i < argc; ++i) {
77112 + if (!(strcmp(argv[i].key, "no-constify"))) {
77113 + constify = false;
77114 + continue;
77115 + }
77116 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77117 + }
77118 +
77119 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77120 + if (constify)
77121 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77122 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77123 +
77124 + return 0;
77125 +}
77126 Binary files linux-2.6.32.45/tools/gcc/constify_plugin.so and linux-2.6.32.45/tools/gcc/constify_plugin.so differ
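To make the new plugin above concrete, here is a hypothetical test.c matching the usage commands in its header comment (illustrative only, not part of the patch). Built with -fplugin=constify_plugin.so, the first type is const-qualified automatically, the second is left alone because it has a writable data member, and the third opts out via the attribute the plugin registers:

    /* test.c, built as in the plugin's header comment:
     *   gcc -fplugin=constify_plugin.so -O2 -c test.c
     */

    struct fops {                    /* only function pointers: walk_struct()
                                        succeeds and the type is constified  */
            int  (*open)(void);
            void (*close)(void);
    };

    struct mixed {                   /* plain int member: walk_struct() fails,
                                        so the type stays writable            */
            int state;
            int (*open)(void);
    };

    struct runtime {                 /* explicitly opted out, even though it
                                        would otherwise qualify               */
            void (*cb)(void);
    } __attribute__((no_const));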
77127 diff -urNp linux-2.6.32.45/tools/gcc/Makefile linux-2.6.32.45/tools/gcc/Makefile
77128 --- linux-2.6.32.45/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
77129 +++ linux-2.6.32.45/tools/gcc/Makefile 2011-08-05 20:33:55.000000000 -0400
77130 @@ -0,0 +1,12 @@
77131 +#CC := gcc
77132 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77133 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77134 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
77135 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77136 +
77137 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77138 +
77139 +hostlibs-y := stackleak_plugin.so constify_plugin.so
77140 +always := $(hostlibs-y)
77141 +stackleak_plugin-objs := stackleak_plugin.o
77142 +constify_plugin-objs := constify_plugin.o
77143 Binary files linux-2.6.32.45/tools/gcc/pax_plugin.so and linux-2.6.32.45/tools/gcc/pax_plugin.so differ
77144 diff -urNp linux-2.6.32.45/tools/gcc/stackleak_plugin.c linux-2.6.32.45/tools/gcc/stackleak_plugin.c
77145 --- linux-2.6.32.45/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
77146 +++ linux-2.6.32.45/tools/gcc/stackleak_plugin.c 2011-08-05 20:33:55.000000000 -0400
77147 @@ -0,0 +1,243 @@
77148 +/*
77149 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77150 + * Licensed under the GPL v2
77151 + *
77152 + * Note: the choice of the license means that the compilation process is
77153 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77154 + * but for the kernel it doesn't matter since it doesn't link against
77155 + * any of the gcc libraries
77156 + *
77157 + * gcc plugin to help implement various PaX features
77158 + *
77159 + * - track lowest stack pointer
77160 + *
77161 + * TODO:
77162 + * - initialize all local variables
77163 + *
77164 + * BUGS:
77165 + * - cloned functions are instrumented twice
77166 + */
77167 +#include "gcc-plugin.h"
77168 +#include "plugin-version.h"
77169 +#include "config.h"
77170 +#include "system.h"
77171 +#include "coretypes.h"
77172 +#include "tm.h"
77173 +#include "toplev.h"
77174 +#include "basic-block.h"
77175 +#include "gimple.h"
77176 +//#include "expr.h" where are you...
77177 +#include "diagnostic.h"
77178 +#include "rtl.h"
77179 +#include "emit-rtl.h"
77180 +#include "function.h"
77181 +#include "tree.h"
77182 +#include "tree-pass.h"
77183 +#include "intl.h"
77184 +
77185 +int plugin_is_GPL_compatible;
77186 +
77187 +static int track_frame_size = -1;
77188 +static const char track_function[] = "pax_track_stack";
77189 +static bool init_locals;
77190 +
77191 +static struct plugin_info stackleak_plugin_info = {
77192 + .version = "201106030000",
77193 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
77194 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
77195 +};
77196 +
77197 +static bool gate_stackleak_track_stack(void);
77198 +static unsigned int execute_stackleak_tree_instrument(void);
77199 +static unsigned int execute_stackleak_final(void);
77200 +
77201 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
77202 + .pass = {
77203 + .type = GIMPLE_PASS,
77204 + .name = "stackleak_tree_instrument",
77205 + .gate = gate_stackleak_track_stack,
77206 + .execute = execute_stackleak_tree_instrument,
77207 + .sub = NULL,
77208 + .next = NULL,
77209 + .static_pass_number = 0,
77210 + .tv_id = TV_NONE,
77211 + .properties_required = PROP_gimple_leh | PROP_cfg,
77212 + .properties_provided = 0,
77213 + .properties_destroyed = 0,
77214 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
77215 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
77216 + }
77217 +};
77218 +
77219 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
77220 + .pass = {
77221 + .type = RTL_PASS,
77222 + .name = "stackleak_final",
77223 + .gate = gate_stackleak_track_stack,
77224 + .execute = execute_stackleak_final,
77225 + .sub = NULL,
77226 + .next = NULL,
77227 + .static_pass_number = 0,
77228 + .tv_id = TV_NONE,
77229 + .properties_required = 0,
77230 + .properties_provided = 0,
77231 + .properties_destroyed = 0,
77232 + .todo_flags_start = 0,
77233 + .todo_flags_finish = 0
77234 + }
77235 +};
77236 +
77237 +static bool gate_stackleak_track_stack(void)
77238 +{
77239 + return track_frame_size >= 0;
77240 +}
77241 +
77242 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
77243 +{
77244 + gimple call;
77245 + tree decl, type;
77246 +
77247 + // insert call to void pax_track_stack(void)
77248 + type = build_function_type_list(void_type_node, NULL_TREE);
77249 + decl = build_fn_decl(track_function, type);
77250 + DECL_ASSEMBLER_NAME(decl); // for LTO
77251 + call = gimple_build_call(decl, 0);
77252 + if (before)
77253 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
77254 + else
77255 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
77256 +}
77257 +
77258 +static unsigned int execute_stackleak_tree_instrument(void)
77259 +{
77260 + basic_block bb;
77261 + gimple_stmt_iterator gsi;
77262 +
77263 + // 1. loop through BBs and GIMPLE statements
77264 + FOR_EACH_BB(bb) {
77265 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77266 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
77267 + tree decl;
77268 + gimple stmt = gsi_stmt(gsi);
77269 +
77270 + if (!is_gimple_call(stmt))
77271 + continue;
77272 + decl = gimple_call_fndecl(stmt);
77273 + if (!decl)
77274 + continue;
77275 + if (TREE_CODE(decl) != FUNCTION_DECL)
77276 + continue;
77277 + if (!DECL_BUILT_IN(decl))
77278 + continue;
77279 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
77280 + continue;
77281 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
77282 + continue;
77283 +
77284 + // 2. insert track call after each __builtin_alloca call
77285 + stackleak_add_instrumentation(&gsi, false);
77286 +// print_node(stderr, "pax", decl, 4);
77287 + }
77288 + }
77289 +
77290 + // 3. insert track call at the beginning
77291 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
77292 + gsi = gsi_start_bb(bb);
77293 + stackleak_add_instrumentation(&gsi, true);
77294 +
77295 + return 0;
77296 +}
77297 +
77298 +static unsigned int execute_stackleak_final(void)
77299 +{
77300 + rtx insn;
77301 +
77302 + if (cfun->calls_alloca)
77303 + return 0;
77304 +
77305 + // 1. find pax_track_stack calls
77306 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
77307 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
77308 + rtx body;
77309 +
77310 + if (!CALL_P(insn))
77311 + continue;
77312 + body = PATTERN(insn);
77313 + if (GET_CODE(body) != CALL)
77314 + continue;
77315 + body = XEXP(body, 0);
77316 + if (GET_CODE(body) != MEM)
77317 + continue;
77318 + body = XEXP(body, 0);
77319 + if (GET_CODE(body) != SYMBOL_REF)
77320 + continue;
77321 + if (strcmp(XSTR(body, 0), track_function))
77322 + continue;
77323 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77324 + // 2. delete call if function frame is not big enough
77325 + if (get_frame_size() >= track_frame_size)
77326 + continue;
77327 + delete_insn_and_edges(insn);
77328 + }
77329 +
77330 +// print_simple_rtl(stderr, get_insns());
77331 +// print_rtl(stderr, get_insns());
77332 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77333 +
77334 + return 0;
77335 +}
77336 +
77337 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77338 +{
77339 + const char * const plugin_name = plugin_info->base_name;
77340 + const int argc = plugin_info->argc;
77341 + const struct plugin_argument * const argv = plugin_info->argv;
77342 + int i;
77343 + struct register_pass_info stackleak_tree_instrument_pass_info = {
77344 + .pass = &stackleak_tree_instrument_pass.pass,
77345 +// .reference_pass_name = "tree_profile",
77346 + .reference_pass_name = "optimized",
77347 + .ref_pass_instance_number = 0,
77348 + .pos_op = PASS_POS_INSERT_AFTER
77349 + };
77350 + struct register_pass_info stackleak_final_pass_info = {
77351 + .pass = &stackleak_final_rtl_opt_pass.pass,
77352 + .reference_pass_name = "final",
77353 + .ref_pass_instance_number = 0,
77354 + .pos_op = PASS_POS_INSERT_BEFORE
77355 + };
77356 +
77357 + if (!plugin_default_version_check(version, &gcc_version)) {
77358 + error(G_("incompatible gcc/plugin versions"));
77359 + return 1;
77360 + }
77361 +
77362 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
77363 +
77364 + for (i = 0; i < argc; ++i) {
77365 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
77366 + if (!argv[i].value) {
77367 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77368 + continue;
77369 + }
77370 + track_frame_size = atoi(argv[i].value);
77371 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
77372 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77373 + continue;
77374 + }
77375 + if (!strcmp(argv[i].key, "initialize-locals")) {
77376 + if (argv[i].value) {
77377 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77378 + continue;
77379 + }
77380 + init_locals = true;
77381 + continue;
77382 + }
77383 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77384 + }
77385 +
77386 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
77387 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
77388 +
77389 + return 0;
77390 +}
77391 Binary files linux-2.6.32.45/tools/gcc/stackleak_plugin.so and linux-2.6.32.45/tools/gcc/stackleak_plugin.so differ
77392 diff -urNp linux-2.6.32.45/usr/gen_init_cpio.c linux-2.6.32.45/usr/gen_init_cpio.c
77393 --- linux-2.6.32.45/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
77394 +++ linux-2.6.32.45/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
77395 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
77396 int retval;
77397 int rc = -1;
77398 int namesize;
77399 - int i;
77400 + unsigned int i;
77401
77402 mode |= S_IFREG;
77403
77404 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
77405 *env_var = *expanded = '\0';
77406 strncat(env_var, start + 2, end - start - 2);
77407 strncat(expanded, new_location, start - new_location);
77408 - strncat(expanded, getenv(env_var), PATH_MAX);
77409 - strncat(expanded, end + 1, PATH_MAX);
77410 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
77411 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
77412 strncpy(new_location, expanded, PATH_MAX);
77413 + new_location[PATH_MAX] = 0;
77414 } else
77415 break;
77416 }
77417 diff -urNp linux-2.6.32.45/virt/kvm/kvm_main.c linux-2.6.32.45/virt/kvm/kvm_main.c
77418 --- linux-2.6.32.45/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
77419 +++ linux-2.6.32.45/virt/kvm/kvm_main.c 2011-08-05 20:33:55.000000000 -0400
77420 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_rebo
77421 if (kvm_rebooting)
77422 /* spin while reset goes on */
77423 while (true)
77424 - ;
77425 + cpu_relax();
77426 /* Fault while not rebooting. We want the trace. */
77427 BUG();
77428 }
77429 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt
77430 kvm_arch_vcpu_put(vcpu);
77431 }
77432
77433 -int kvm_init(void *opaque, unsigned int vcpu_size,
77434 +int kvm_init(const void *opaque, unsigned int vcpu_size,
77435 struct module *module)
77436 {
77437 int r;
77438 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int
77439 /* A kmem cache lets us meet the alignment requirements of fx_save. */
77440 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
77441 __alignof__(struct kvm_vcpu),
77442 - 0, NULL);
77443 + SLAB_USERCOPY, NULL);
77444 if (!kvm_vcpu_cache) {
77445 r = -ENOMEM;
77446 goto out_free_5;
77447 }
77448
77449 - kvm_chardev_ops.owner = module;
77450 - kvm_vm_fops.owner = module;
77451 - kvm_vcpu_fops.owner = module;
77452 + pax_open_kernel();
77453 + *(void **)&kvm_chardev_ops.owner = module;
77454 + *(void **)&kvm_vm_fops.owner = module;
77455 + *(void **)&kvm_vcpu_fops.owner = module;
77456 + pax_close_kernel();
77457
77458 r = misc_register(&kvm_dev);
77459 if (r) {