1 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40 --- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53 +++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86 --- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277 @@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
282 +
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286 @@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294 +
295 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296 +{
297 + if (!__builtin_constant_p(n))
298 + check_object_size(to, n, false);
299 + return ___copy_from_user(to, from, n);
300 +}
301 +
302 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303 +{
304 + if (!__builtin_constant_p(n))
305 + check_object_size(from, n, true);
306 + return ___copy_to_user(to, from, n);
307 +}
308 +
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316 + if ((long)n < 0)
317 + return n;
318 +
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326 + if ((long)n < 0)
327 + return n;
328 +
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332 diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333 --- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334 +++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339 -EXPORT_SYMBOL(__copy_from_user);
340 -EXPORT_SYMBOL(__copy_to_user);
341 +EXPORT_SYMBOL(___copy_from_user);
342 +EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346 diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347 --- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348 +++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353 -struct kgdb_arch arch_kgdb_ops = {
354 +const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358 diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359 --- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360 +++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365 +extern void gr_handle_kernel_exploit(void);
366 +
367 /*
368 * This function is protected against re-entrancy.
369 */
370 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374 + gr_handle_kernel_exploit();
375 +
376 do_exit(SIGSEGV);
377 }
378
379 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380 --- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381 +++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382 @@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386 - * size_t __copy_from_user(void *to, const void *from, size_t n)
387 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391 @@ -84,11 +84,11 @@
392
393 .text
394
395 -ENTRY(__copy_from_user)
396 +ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400 -ENDPROC(__copy_from_user)
401 +ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406 --- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408 @@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412 - * size_t __copy_to_user(void *to, const void *from, size_t n)
413 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417 @@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421 -WEAK(__copy_to_user)
422 +WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426 -ENDPROC(__copy_to_user)
427 +ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432 --- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434 @@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447 -ENTRY(__copy_to_user)
448 +ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456 -ENDPROC(__copy_to_user)
457 +ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473 -ENTRY(__copy_from_user)
474 +ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482 -ENDPROC(__copy_from_user)
483 +ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488 --- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489 +++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490 @@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494 -__copy_to_user(void __user *to, const void *from, unsigned long n)
495 +___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499 diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500 --- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501 +++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506 -static struct platform_suspend_ops at91_pm_ops ={
507 +static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511 diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512 --- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513 +++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518 -static struct platform_suspend_ops omap_pm_ops ={
519 +static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524 --- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530 -static struct platform_suspend_ops omap_pm_ops = {
531 +static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536 --- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542 -static struct platform_suspend_ops omap_pm_ops = {
543 +static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547 diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548 --- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549 +++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554 -static struct platform_suspend_ops pnx4008_pm_ops = {
555 +static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560 --- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561 +++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566 -static struct platform_suspend_ops pxa_pm_ops = {
567 +static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572 --- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573 +++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578 -static struct platform_suspend_ops sharpsl_pm_ops = {
579 +static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583 diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584 --- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585 +++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590 -static struct platform_suspend_ops sa11x0_pm_ops = {
591 +static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595 diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596 --- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597 +++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602 +#ifdef CONFIG_PAX_PAGEEXEC
603 + if (fsr & FSR_LNX_PF) {
604 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605 + do_group_exit(SIGKILL);
606 + }
607 +#endif
608 +
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616 +#ifdef CONFIG_PAX_PAGEEXEC
617 +void pax_report_insns(void *pc, void *sp)
618 +{
619 + long i;
620 +
621 + printk(KERN_ERR "PAX: bytes at PC: ");
622 + for (i = 0; i < 20; i++) {
623 + unsigned char c;
624 + if (get_user(c, (__force unsigned char __user *)pc+i))
625 + printk(KERN_CONT "?? ");
626 + else
627 + printk(KERN_CONT "%02x ", c);
628 + }
629 + printk("\n");
630 +
631 + printk(KERN_ERR "PAX: bytes at SP-4: ");
632 + for (i = -1; i < 20; i++) {
633 + unsigned long c;
634 + if (get_user(c, (__force unsigned long __user *)sp+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08lx ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642 +
643 /*
644 * First Level Translation Fault Handler
645 *
646 diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647 --- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648 +++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653 +#ifdef CONFIG_PAX_RANDMMAP
654 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655 +#endif
656 +
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664 - if (TASK_SIZE - len >= addr &&
665 - (!vma || addr + len <= vma->vm_start))
666 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670 - start_addr = addr = mm->free_area_cache;
671 + start_addr = addr = mm->free_area_cache;
672 } else {
673 - start_addr = addr = TASK_UNMAPPED_BASE;
674 - mm->cached_hole_size = 0;
675 + start_addr = addr = mm->mmap_base;
676 + mm->cached_hole_size = 0;
677 }
678
679 full_search:
680 @@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684 - if (start_addr != TASK_UNMAPPED_BASE) {
685 - start_addr = addr = TASK_UNMAPPED_BASE;
686 + if (start_addr != mm->mmap_base) {
687 + start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693 - if (!vma || addr + len <= vma->vm_start) {
694 + if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698 diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699 --- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705 -static struct platform_suspend_ops s3c_pm_ops = {
706 +static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711 --- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712 +++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN 15
724 +#define PAX_DELTA_STACK_LEN 15
725 +#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730 --- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731 +++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736 -D(14) KM_TYPE_NR
737 +D(14) KM_CLEARPAGE,
738 +D(15) KM_TYPE_NR
739 };
740
741 #undef D
742 diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743 --- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744 +++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745 @@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749 -static struct platform_suspend_ops avr32_pm_ops = {
750 +static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754 diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755 --- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756 +++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761 +#ifdef CONFIG_PAX_PAGEEXEC
762 +void pax_report_insns(void *pc, void *sp)
763 +{
764 + unsigned long i;
765 +
766 + printk(KERN_ERR "PAX: bytes at PC: ");
767 + for (i = 0; i < 20; i++) {
768 + unsigned char c;
769 + if (get_user(c, (unsigned char *)pc+i))
770 + printk(KERN_CONT "???????? ");
771 + else
772 + printk(KERN_CONT "%02x ", c);
773 + }
774 + printk("\n");
775 +}
776 +#endif
777 +
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781 @@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785 +
786 +#ifdef CONFIG_PAX_PAGEEXEC
787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790 + do_group_exit(SIGKILL);
791 + }
792 + }
793 +#endif
794 +
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798 diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799 --- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805 -struct kgdb_arch arch_kgdb_ops = {
806 +const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810 diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811 --- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812 +++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817 -struct platform_suspend_ops bfin_pm_ops = {
818 +const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822 diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823 --- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824 +++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825 @@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829 + KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833 diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834 --- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835 +++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840 - if (TASK_SIZE - len >= addr &&
841 - (!vma || addr + len <= vma->vm_start))
842 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850 - if (addr + len <= vma->vm_start)
851 + if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859 - if (addr + len <= vma->vm_start)
860 + if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865 --- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866 +++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867 @@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886 --- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892 -extern struct dma_map_ops swiotlb_dma_ops;
893 +extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901 -struct dma_map_ops sba_dma_ops = {
902 +const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906 diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907 --- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908 +++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913 +#ifdef CONFIG_PAX_ASLR
914 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915 +
916 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918 +#endif
919 +
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923 diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924 --- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
931 +#ifdef CONFIG_PAX_RANDUSTACK
932 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
933 +#else
934 +#define __IA32_DELTA_STACK 0UL
935 +#endif
936 +
937 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938 +
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943 --- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944 +++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945 @@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949 -extern struct dma_map_ops *dma_ops;
950 +extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
959 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
968 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
977 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
984 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989 --- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990 +++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991 @@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995 +#ifdef CONFIG_PAX_ASLR
996 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997 +
998 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000 +#endif
1001 +
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006 --- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007 +++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021 -extern struct dma_map_ops *dma_get_ops(struct device *);
1022 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027 --- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033 -
1034 +#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038 @@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042 +
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047 +#else
1048 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050 +# define PAGE_COPY_NOEXEC PAGE_COPY
1051 +#endif
1052 +
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057 --- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058 +++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069 --- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070 +++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089 diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090 --- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091 +++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092 @@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096 -struct dma_map_ops *dma_ops;
1097 +const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105 -struct dma_map_ops *dma_get_ops(struct device *dev)
1106 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110 diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111 --- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112 +++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117 - if (mod && mod->arch.init_unw_table &&
1118 - module_region == mod->module_init) {
1119 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127 +in_init_rx (const struct module *mod, uint64_t addr)
1128 +{
1129 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130 +}
1131 +
1132 +static inline int
1133 +in_init_rw (const struct module *mod, uint64_t addr)
1134 +{
1135 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136 +}
1137 +
1138 +static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141 - return addr - (uint64_t) mod->module_init < mod->init_size;
1142 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143 +}
1144 +
1145 +static inline int
1146 +in_core_rx (const struct module *mod, uint64_t addr)
1147 +{
1148 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149 +}
1150 +
1151 +static inline int
1152 +in_core_rw (const struct module *mod, uint64_t addr)
1153 +{
1154 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160 - return addr - (uint64_t) mod->module_core < mod->core_size;
1161 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170 + if (in_init_rx(mod, val))
1171 + val -= (uint64_t) mod->module_init_rx;
1172 + else if (in_init_rw(mod, val))
1173 + val -= (uint64_t) mod->module_init_rw;
1174 + else if (in_core_rx(mod, val))
1175 + val -= (uint64_t) mod->module_core_rx;
1176 + else if (in_core_rw(mod, val))
1177 + val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185 - if (mod->core_size > MAX_LTOFF)
1186 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191 - gp = mod->core_size - MAX_LTOFF / 2;
1192 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194 - gp = mod->core_size / 2;
1195 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202 --- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203 +++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208 -extern struct dma_map_ops intel_dma_ops;
1209 +extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224 +
1225 +static const struct dma_map_ops intel_iommu_dma_ops = {
1226 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227 + .alloc_coherent = intel_alloc_coherent,
1228 + .free_coherent = intel_free_coherent,
1229 + .map_sg = intel_map_sg,
1230 + .unmap_sg = intel_unmap_sg,
1231 + .map_page = intel_map_page,
1232 + .unmap_page = intel_unmap_page,
1233 + .mapping_error = intel_mapping_error,
1234 +
1235 + .sync_single_for_cpu = machvec_dma_sync_single,
1236 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1237 + .sync_single_for_device = machvec_dma_sync_single,
1238 + .sync_sg_for_device = machvec_dma_sync_sg,
1239 + .dma_supported = iommu_dma_supported,
1240 +};
1241 +
1242 void __init pci_iommu_alloc(void)
1243 {
1244 - dma_ops = &intel_dma_ops;
1245 -
1246 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250 - dma_ops->dma_supported = iommu_dma_supported;
1251 + dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256 --- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257 +++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262 -struct dma_map_ops swiotlb_dma_ops = {
1263 +const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267 diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268 --- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269 +++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274 +
1275 +#ifdef CONFIG_PAX_RANDMMAP
1276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1277 + addr = mm->free_area_cache;
1278 + else
1279 +#endif
1280 +
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288 - if (start_addr != TASK_UNMAPPED_BASE) {
1289 + if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291 - addr = TASK_UNMAPPED_BASE;
1292 + addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297 - if (!vma || addr + len <= vma->vm_start) {
1298 + if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302 diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303 --- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304 +++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309 -static struct sysfs_ops cache_sysfs_ops = {
1310 +static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314 diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315 --- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316 +++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317 @@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321 - __phys_per_cpu_start = __per_cpu_load;
1322 + __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326 diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327 --- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328 +++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333 +#ifdef CONFIG_PAX_PAGEEXEC
1334 +void pax_report_insns(void *pc, void *sp)
1335 +{
1336 + unsigned long i;
1337 +
1338 + printk(KERN_ERR "PAX: bytes at PC: ");
1339 + for (i = 0; i < 8; i++) {
1340 + unsigned int c;
1341 + if (get_user(c, (unsigned int *)pc+i))
1342 + printk(KERN_CONT "???????? ");
1343 + else
1344 + printk(KERN_CONT "%08x ", c);
1345 + }
1346 + printk("\n");
1347 +}
1348 +#endif
1349 +
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357 - if ((vma->vm_flags & mask) != mask)
1358 + if ((vma->vm_flags & mask) != mask) {
1359 +
1360 +#ifdef CONFIG_PAX_PAGEEXEC
1361 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363 + goto bad_area;
1364 +
1365 + up_read(&mm->mmap_sem);
1366 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367 + do_group_exit(SIGKILL);
1368 + }
1369 +#endif
1370 +
1371 goto bad_area;
1372
1373 + }
1374 +
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378 diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379 --- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385 - if (!vmm || (addr + len) <= vmm->vm_start)
1386 + if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390 diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391 --- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392 +++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400 + vma->vm_flags &= ~VM_EXEC;
1401 +
1402 +#ifdef CONFIG_PAX_MPROTECT
1403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404 + vma->vm_flags &= ~VM_MAYEXEC;
1405 +#endif
1406 +
1407 + }
1408 +#endif
1409 +
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
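
Under PAGEEXEC the hunk above drops VM_EXEC from the register backing store VMA, and under MPROTECT it also drops VM_MAYEXEC so execute permission cannot be added back later. The userspace sketch below shows the effect the second part targets: it maps an RW region and then asks for PROT_EXEC via mprotect(), which a stock kernel allows and an MPROTECT-style restriction is expected to refuse. The program only reports the outcome; it never executes the region.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        if (mprotect(p, len, PROT_READ | PROT_EXEC) == 0)
            printf("mprotect(PROT_EXEC) allowed (no MPROTECT-style restriction)\n");
        else
            printf("mprotect(PROT_EXEC) refused: %s\n", strerror(errno));
        munmap(p, len);
        return 0;
    }
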
1413 diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414 --- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415 +++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420 -static struct dma_map_ops sn_dma_ops = {
1421 +static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
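
Marking sn_dma_ops const is the general constification pattern this patch applies to many ops tables: a const function-pointer table can be placed in read-only memory and cannot be silently rewritten at run time. A small self-contained sketch of the pattern; struct demo_ops and its functions are invented for illustration, not a kernel interface.

    #include <stdio.h>

    struct demo_ops {
        int  (*probe)(int id);
        void (*remove)(int id);
    };

    static int  demo_probe(int id)  { printf("probe %d\n", id); return 0; }
    static void demo_remove(int id) { printf("remove %d\n", id); }

    static const struct demo_ops demo_ops = {
        .probe  = demo_probe,
        .remove = demo_remove,
    };

    static void dispatch(const struct demo_ops *ops, int id)
    {
        if (ops->probe(id) == 0)    /* calls go through the read-only table */
            ops->remove(id);
        /* ops->probe = NULL; would now be rejected at compile time */
    }

    int main(void)
    {
        dispatch(&demo_ops, 42);
        return 0;
    }
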
1425 diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426 --- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427 +++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428 @@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432 + if ((long)n < 0)
1433 + return n;
1434 +
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442 + if ((long)n < 0)
1443 + return n;
1444 +
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
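
The "(long)n < 0" screen added to both m32r copy routines rejects lengths with the top bit set before any copying is attempted; such a value is usually a negative error code or an underflowed subtraction that would otherwise be treated as a near-SIZE_MAX request. A tiny sketch with an invented guarded_copy() helper:

    #include <stdio.h>

    static unsigned long guarded_copy(unsigned long n)
    {
        if ((long)n < 0)          /* top bit set: implausible request */
            return n;             /* report everything as "not copied" */
        /* ... the real copy would happen here ... */
        return 0;                 /* 0 bytes left uncopied */
    }

    int main(void)
    {
        long bad = -4;            /* e.g. an error code misused as a length */
        unsigned long n = (unsigned long)bad;

        printf("requested %lu bytes\n", n);               /* huge value */
        printf("left uncopied: %lu\n", guarded_copy(n));  /* all of it  */
        return 0;
    }
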
1448 diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449 --- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450 +++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455 -static struct platform_suspend_ops db1x_pm_ops = {
1456 +static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460 diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461 --- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462 +++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467 +#ifdef CONFIG_PAX_ASLR
1468 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469 +
1470 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472 +#endif
1473 +
1474 #endif /* _ASM_ELF_H */
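
The PAX_DELTA_*_LEN values express randomisation as a number of bits applied at page granularity, so "27-PAGE_SHIFT" and "36-PAGE_SHIFT" translate into shift ranges rather than addresses. The sketch below only redoes that arithmetic for 4 KiB pages; it does not reproduce how PaX actually draws and applies the random offset, and the MiB figures are approximate.

    #include <stdio.h>

    int main(void)
    {
        const unsigned int page_shift = 12;               /* 4 KiB pages assumed */
        const unsigned int lens[] = { 27 - 12, 36 - 12 }; /* the MIPS values above */
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
            unsigned long long span = ((1ULL << lens[i]) - 1) << page_shift;

            printf("%2u random bits -> up to %llu MiB of mmap shift\n",
                   lens[i], span >> 20);
        }
        return 0;
    }
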
1475 diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476 --- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477 +++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487 diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1488 --- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1489 +++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1490 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1491 */
1492 #define __ARCH_WANT_UNLOCKED_CTXSW
1493
1494 -extern unsigned long arch_align_stack(unsigned long sp);
1495 +#define arch_align_stack(x) ((x) & ~0xfUL)
1496
1497 #endif /* _ASM_SYSTEM_H */
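
arch_align_stack() is reduced here to a plain 16-byte alignment mask, with the removed per-call randomisation handled elsewhere by PaX. The sketch just shows what that mask does to a few sample stack-pointer values.

    #include <stdio.h>

    #define ALIGN_STACK_16(x) ((x) & ~0xfUL)

    int main(void)
    {
        unsigned long samples[] = { 0xbffff3d9UL, 0xbffff3d0UL, 0x1000UL };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%#lx -> %#lx\n", samples[i], ALIGN_STACK_16(samples[i]));
        return 0;
    }
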
1498 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1499 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1500 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1501 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1502 #undef ELF_ET_DYN_BASE
1503 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1504
1505 +#ifdef CONFIG_PAX_ASLR
1506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507 +
1508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510 +#endif
1511 +
1512 #include <asm/processor.h>
1513 #include <linux/module.h>
1514 #include <linux/elfcore.h>
1515 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1516 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1517 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1518 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1519 #undef ELF_ET_DYN_BASE
1520 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1521
1522 +#ifdef CONFIG_PAX_ASLR
1523 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1524 +
1525 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1526 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1527 +#endif
1528 +
1529 #include <asm/processor.h>
1530
1531 /*
1532 diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1533 --- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1534 +++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1535 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1536 return -1;
1537 }
1538
1539 +/* cannot be const */
1540 struct kgdb_arch arch_kgdb_ops;
1541
1542 /*
1543 diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1544 --- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1545 +++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1546 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1547 out:
1548 return pc;
1549 }
1550 -
1551 -/*
1552 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1553 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1554 - */
1555 -unsigned long arch_align_stack(unsigned long sp)
1556 -{
1557 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1558 - sp -= get_random_int() & ~PAGE_MASK;
1559 -
1560 - return sp & ALMASK;
1561 -}
1562 diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1563 --- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1564 +++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1565 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1566 do_color_align = 0;
1567 if (filp || (flags & MAP_SHARED))
1568 do_color_align = 1;
1569 +
1570 +#ifdef CONFIG_PAX_RANDMMAP
1571 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1572 +#endif
1573 +
1574 if (addr) {
1575 if (do_color_align)
1576 addr = COLOUR_ALIGN(addr, pgoff);
1577 else
1578 addr = PAGE_ALIGN(addr);
1579 vmm = find_vma(current->mm, addr);
1580 - if (task_size - len >= addr &&
1581 - (!vmm || addr + len <= vmm->vm_start))
1582 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1583 return addr;
1584 }
1585 - addr = TASK_UNMAPPED_BASE;
1586 + addr = current->mm->mmap_base;
1587 if (do_color_align)
1588 addr = COLOUR_ALIGN(addr, pgoff);
1589 else
1590 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1591 /* At this point: (!vmm || addr < vmm->vm_end). */
1592 if (task_size - len < addr)
1593 return -ENOMEM;
1594 - if (!vmm || addr + len <= vmm->vm_start)
1595 + if (check_heap_stack_gap(vmm, addr, len))
1596 return addr;
1597 addr = vmm->vm_end;
1598 if (do_color_align)
1599 diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1600 --- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1601 +++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1602 @@ -26,6 +26,23 @@
1603 #include <asm/ptrace.h>
1604 #include <asm/highmem.h> /* For VMALLOC_END */
1605
1606 +#ifdef CONFIG_PAX_PAGEEXEC
1607 +void pax_report_insns(void *pc, void *sp)
1608 +{
1609 + unsigned long i;
1610 +
1611 + printk(KERN_ERR "PAX: bytes at PC: ");
1612 + for (i = 0; i < 5; i++) {
1613 + unsigned int c;
1614 + if (get_user(c, (unsigned int *)pc+i))
1615 + printk(KERN_CONT "???????? ");
1616 + else
1617 + printk(KERN_CONT "%08x ", c);
1618 + }
1619 + printk("\n");
1620 +}
1621 +#endif
1622 +
1623 /*
1624 * This routine handles page faults. It determines the address,
1625 * and the problem, and then passes it off to one of the appropriate
1626 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1627 --- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1628 +++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1629 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1630
1631 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1632
1633 +#ifdef CONFIG_PAX_ASLR
1634 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1635 +
1636 +#define PAX_DELTA_MMAP_LEN 16
1637 +#define PAX_DELTA_STACK_LEN 16
1638 +#endif
1639 +
1640 /* This yields a mask that user programs can use to figure out what
1641 instruction set this CPU supports. This could be done in user space,
1642 but it's not easy, and we've already done it here. */
1643 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1644 --- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1645 +++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1646 @@ -207,6 +207,17 @@
1647 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1648 #define PAGE_COPY PAGE_EXECREAD
1649 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1650 +
1651 +#ifdef CONFIG_PAX_PAGEEXEC
1652 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1653 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1654 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1655 +#else
1656 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1657 +# define PAGE_COPY_NOEXEC PAGE_COPY
1658 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1659 +#endif
1660 +
1661 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1662 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1663 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1664 diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1665 --- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1666 +++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1667 @@ -95,16 +95,38 @@
1668
1669 /* three functions to determine where in the module core
1670 * or init pieces the location is */
1671 +static inline int in_init_rx(struct module *me, void *loc)
1672 +{
1673 + return (loc >= me->module_init_rx &&
1674 + loc < (me->module_init_rx + me->init_size_rx));
1675 +}
1676 +
1677 +static inline int in_init_rw(struct module *me, void *loc)
1678 +{
1679 + return (loc >= me->module_init_rw &&
1680 + loc < (me->module_init_rw + me->init_size_rw));
1681 +}
1682 +
1683 static inline int in_init(struct module *me, void *loc)
1684 {
1685 - return (loc >= me->module_init &&
1686 - loc <= (me->module_init + me->init_size));
1687 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1688 +}
1689 +
1690 +static inline int in_core_rx(struct module *me, void *loc)
1691 +{
1692 + return (loc >= me->module_core_rx &&
1693 + loc < (me->module_core_rx + me->core_size_rx));
1694 +}
1695 +
1696 +static inline int in_core_rw(struct module *me, void *loc)
1697 +{
1698 + return (loc >= me->module_core_rw &&
1699 + loc < (me->module_core_rw + me->core_size_rw));
1700 }
1701
1702 static inline int in_core(struct module *me, void *loc)
1703 {
1704 - return (loc >= me->module_core &&
1705 - loc <= (me->module_core + me->core_size));
1706 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1707 }
1708
1709 static inline int in_local(struct module *me, void *loc)
1710 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1711 }
1712
1713 /* align things a bit */
1714 - me->core_size = ALIGN(me->core_size, 16);
1715 - me->arch.got_offset = me->core_size;
1716 - me->core_size += gots * sizeof(struct got_entry);
1717 -
1718 - me->core_size = ALIGN(me->core_size, 16);
1719 - me->arch.fdesc_offset = me->core_size;
1720 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1721 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1722 + me->arch.got_offset = me->core_size_rw;
1723 + me->core_size_rw += gots * sizeof(struct got_entry);
1724 +
1725 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1726 + me->arch.fdesc_offset = me->core_size_rw;
1727 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1728
1729 me->arch.got_max = gots;
1730 me->arch.fdesc_max = fdescs;
1731 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1732
1733 BUG_ON(value == 0);
1734
1735 - got = me->module_core + me->arch.got_offset;
1736 + got = me->module_core_rw + me->arch.got_offset;
1737 for (i = 0; got[i].addr; i++)
1738 if (got[i].addr == value)
1739 goto out;
1740 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1741 #ifdef CONFIG_64BIT
1742 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1743 {
1744 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1745 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1746
1747 if (!value) {
1748 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1749 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1750
1751 /* Create new one */
1752 fdesc->addr = value;
1753 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1754 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1755 return (Elf_Addr)fdesc;
1756 }
1757 #endif /* CONFIG_64BIT */
1758 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1759
1760 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1761 end = table + sechdrs[me->arch.unwind_section].sh_size;
1762 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1763 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1764
1765 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1766 me->arch.unwind_section, table, end, gp);
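
The new in_{init,core}_{rx,rw}() helpers are half-open range checks ("start <= p < start + size"); note that the code they replace used "<=" on the upper bound and therefore also accepted the address one byte past the region. A generic sketch of the same membership test, with an invented in_region() helper:

    #include <stdbool.h>
    #include <stdio.h>

    static bool in_region(const void *base, unsigned long size, const void *p)
    {
        const char *b = base;
        const char *c = p;

        return c >= b && c < b + size;   /* half-open: the end is excluded */
    }

    int main(void)
    {
        static char region[64];

        printf("%d %d %d\n",
               in_region(region, sizeof(region), region),        /* 1: first byte */
               in_region(region, sizeof(region), region + 63),   /* 1: last byte  */
               in_region(region, sizeof(region), region + 64));  /* 0: one past   */
        return 0;
    }
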
1767 diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1768 --- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1769 +++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1770 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1771 /* At this point: (!vma || addr < vma->vm_end). */
1772 if (TASK_SIZE - len < addr)
1773 return -ENOMEM;
1774 - if (!vma || addr + len <= vma->vm_start)
1775 + if (check_heap_stack_gap(vma, addr, len))
1776 return addr;
1777 addr = vma->vm_end;
1778 }
1779 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1780 /* At this point: (!vma || addr < vma->vm_end). */
1781 if (TASK_SIZE - len < addr)
1782 return -ENOMEM;
1783 - if (!vma || addr + len <= vma->vm_start)
1784 + if (check_heap_stack_gap(vma, addr, len))
1785 return addr;
1786 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1787 if (addr < vma->vm_end) /* handle wraparound */
1788 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1789 if (flags & MAP_FIXED)
1790 return addr;
1791 if (!addr)
1792 - addr = TASK_UNMAPPED_BASE;
1793 + addr = current->mm->mmap_base;
1794
1795 if (filp) {
1796 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1797 diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1798 --- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1799 +++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1800 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1801
1802 down_read(&current->mm->mmap_sem);
1803 vma = find_vma(current->mm,regs->iaoq[0]);
1804 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1805 - && (vma->vm_flags & VM_EXEC)) {
1806 -
1807 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1808 fault_address = regs->iaoq[0];
1809 fault_space = regs->iasq[0];
1810
1811 diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1812 --- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1813 +++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1814 @@ -15,6 +15,7 @@
1815 #include <linux/sched.h>
1816 #include <linux/interrupt.h>
1817 #include <linux/module.h>
1818 +#include <linux/unistd.h>
1819
1820 #include <asm/uaccess.h>
1821 #include <asm/traps.h>
1822 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1823 static unsigned long
1824 parisc_acctyp(unsigned long code, unsigned int inst)
1825 {
1826 - if (code == 6 || code == 16)
1827 + if (code == 6 || code == 7 || code == 16)
1828 return VM_EXEC;
1829
1830 switch (inst & 0xf0000000) {
1831 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1832 }
1833 #endif
1834
1835 +#ifdef CONFIG_PAX_PAGEEXEC
1836 +/*
1837 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1838 + *
1839 + * returns 1 when task should be killed
1840 + * 2 when rt_sigreturn trampoline was detected
1841 + * 3 when unpatched PLT trampoline was detected
1842 + */
1843 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1844 +{
1845 +
1846 +#ifdef CONFIG_PAX_EMUPLT
1847 + int err;
1848 +
1849 + do { /* PaX: unpatched PLT emulation */
1850 + unsigned int bl, depwi;
1851 +
1852 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1853 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1854 +
1855 + if (err)
1856 + break;
1857 +
1858 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1859 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1860 +
1861 + err = get_user(ldw, (unsigned int *)addr);
1862 + err |= get_user(bv, (unsigned int *)(addr+4));
1863 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1864 +
1865 + if (err)
1866 + break;
1867 +
1868 + if (ldw == 0x0E801096U &&
1869 + bv == 0xEAC0C000U &&
1870 + ldw2 == 0x0E881095U)
1871 + {
1872 + unsigned int resolver, map;
1873 +
1874 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1875 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1876 + if (err)
1877 + break;
1878 +
1879 + regs->gr[20] = instruction_pointer(regs)+8;
1880 + regs->gr[21] = map;
1881 + regs->gr[22] = resolver;
1882 + regs->iaoq[0] = resolver | 3UL;
1883 + regs->iaoq[1] = regs->iaoq[0] + 4;
1884 + return 3;
1885 + }
1886 + }
1887 + } while (0);
1888 +#endif
1889 +
1890 +#ifdef CONFIG_PAX_EMUTRAMP
1891 +
1892 +#ifndef CONFIG_PAX_EMUSIGRT
1893 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1894 + return 1;
1895 +#endif
1896 +
1897 + do { /* PaX: rt_sigreturn emulation */
1898 + unsigned int ldi1, ldi2, bel, nop;
1899 +
1900 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1901 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1902 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1903 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1904 +
1905 + if (err)
1906 + break;
1907 +
1908 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1909 + ldi2 == 0x3414015AU &&
1910 + bel == 0xE4008200U &&
1911 + nop == 0x08000240U)
1912 + {
1913 + regs->gr[25] = (ldi1 & 2) >> 1;
1914 + regs->gr[20] = __NR_rt_sigreturn;
1915 + regs->gr[31] = regs->iaoq[1] + 16;
1916 + regs->sr[0] = regs->iasq[1];
1917 + regs->iaoq[0] = 0x100UL;
1918 + regs->iaoq[1] = regs->iaoq[0] + 4;
1919 + regs->iasq[0] = regs->sr[2];
1920 + regs->iasq[1] = regs->sr[2];
1921 + return 2;
1922 + }
1923 + } while (0);
1924 +#endif
1925 +
1926 + return 1;
1927 +}
1928 +
1929 +void pax_report_insns(void *pc, void *sp)
1930 +{
1931 + unsigned long i;
1932 +
1933 + printk(KERN_ERR "PAX: bytes at PC: ");
1934 + for (i = 0; i < 5; i++) {
1935 + unsigned int c;
1936 + if (get_user(c, (unsigned int *)pc+i))
1937 + printk(KERN_CONT "???????? ");
1938 + else
1939 + printk(KERN_CONT "%08x ", c);
1940 + }
1941 + printk("\n");
1942 +}
1943 +#endif
1944 +
1945 int fixup_exception(struct pt_regs *regs)
1946 {
1947 const struct exception_table_entry *fix;
1948 @@ -192,8 +303,33 @@ good_area:
1949
1950 acc_type = parisc_acctyp(code,regs->iir);
1951
1952 - if ((vma->vm_flags & acc_type) != acc_type)
1953 + if ((vma->vm_flags & acc_type) != acc_type) {
1954 +
1955 +#ifdef CONFIG_PAX_PAGEEXEC
1956 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1957 + (address & ~3UL) == instruction_pointer(regs))
1958 + {
1959 + up_read(&mm->mmap_sem);
1960 + switch (pax_handle_fetch_fault(regs)) {
1961 +
1962 +#ifdef CONFIG_PAX_EMUPLT
1963 + case 3:
1964 + return;
1965 +#endif
1966 +
1967 +#ifdef CONFIG_PAX_EMUTRAMP
1968 + case 2:
1969 + return;
1970 +#endif
1971 +
1972 + }
1973 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1974 + do_group_exit(SIGKILL);
1975 + }
1976 +#endif
1977 +
1978 goto bad_area;
1979 + }
1980
1981 /*
1982 * If for any reason at all we couldn't handle the fault, make
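
pax_handle_fetch_fault() above recognises trampolines by fetching a few instruction words at the faulting PC and comparing them against fixed encodings, with one bit of the first ldi allowed to vary. The sketch below generalises that into a value/mask matcher over an in-memory word buffer; struct insn_pattern and match_sequence() are invented, while the word values are taken from the rt_sigreturn test in the hunk, with bit 1 of the first word treated as the wildcard that mirrors the "ldi1 == 0x34190000U || ldi1 == 0x34190002U" check.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct insn_pattern {
        uint32_t value;
        uint32_t mask;   /* bits that must match; cleared bits are wildcards */
    };

    static bool match_sequence(const uint32_t *pc,
                               const struct insn_pattern *pat, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            if ((pc[i] & pat[i].mask) != (pat[i].value & pat[i].mask))
                return false;
        return true;
    }

    int main(void)
    {
        static const struct insn_pattern rt_sigreturn[] = {
            { 0x34190000u, 0xfffffffdu },   /* ldi1 (bit 1 may differ) */
            { 0x3414015Au, 0xffffffffu },   /* ldi2 */
            { 0xE4008200u, 0xffffffffu },   /* bel  */
            { 0x08000240u, 0xffffffffu },   /* nop  */
        };
        uint32_t fetched[] = { 0x34190002u, 0x3414015Au, 0xE4008200u, 0x08000240u };

        printf("trampoline matched: %d\n", match_sequence(fetched, rt_sigreturn, 4));
        return 0;
    }
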
1983 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
1984 --- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1985 +++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1986 @@ -14,7 +14,7 @@ struct dev_archdata {
1987 struct device_node *of_node;
1988
1989 /* DMA operations on that device */
1990 - struct dma_map_ops *dma_ops;
1991 + const struct dma_map_ops *dma_ops;
1992
1993 /*
1994 * When an iommu is in use, dma_data is used as a ptr to the base of the
1995 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
1996 --- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1997 +++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1998 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1999 #ifdef CONFIG_PPC64
2000 extern struct dma_map_ops dma_iommu_ops;
2001 #endif
2002 -extern struct dma_map_ops dma_direct_ops;
2003 +extern const struct dma_map_ops dma_direct_ops;
2004
2005 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2006 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2007 {
2008 /* We don't handle the NULL dev case for ISA for now. We could
2009 * do it via an out of line call but it is not needed for now. The
2010 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2011 return dev->archdata.dma_ops;
2012 }
2013
2014 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2015 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2016 {
2017 dev->archdata.dma_ops = ops;
2018 }
2019 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2020
2021 static inline int dma_supported(struct device *dev, u64 mask)
2022 {
2023 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2024 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2025
2026 if (unlikely(dma_ops == NULL))
2027 return 0;
2028 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2029
2030 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2031 {
2032 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2033 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2034
2035 if (unlikely(dma_ops == NULL))
2036 return -EIO;
2037 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2038 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2039 dma_addr_t *dma_handle, gfp_t flag)
2040 {
2041 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2042 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2043 void *cpu_addr;
2044
2045 BUG_ON(!dma_ops);
2046 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2047 static inline void dma_free_coherent(struct device *dev, size_t size,
2048 void *cpu_addr, dma_addr_t dma_handle)
2049 {
2050 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2051 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2052
2053 BUG_ON(!dma_ops);
2054
2055 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2056
2057 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2058 {
2059 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2060 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2061
2062 if (dma_ops->mapping_error)
2063 return dma_ops->mapping_error(dev, dma_addr);
2064 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2065 --- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2066 +++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2067 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2068 the loader. We need to make sure that it is out of the way of the program
2069 that it will "exec", and that there is sufficient room for the brk. */
2070
2071 -extern unsigned long randomize_et_dyn(unsigned long base);
2072 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2073 +#define ELF_ET_DYN_BASE (0x20000000)
2074 +
2075 +#ifdef CONFIG_PAX_ASLR
2076 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2077 +
2078 +#ifdef __powerpc64__
2079 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2080 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2081 +#else
2082 +#define PAX_DELTA_MMAP_LEN 15
2083 +#define PAX_DELTA_STACK_LEN 15
2084 +#endif
2085 +#endif
2086
2087 /*
2088 * Our registers are always unsigned longs, whether we're a 32 bit
2089 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2090 (0x7ff >> (PAGE_SHIFT - 12)) : \
2091 (0x3ffff >> (PAGE_SHIFT - 12)))
2092
2093 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2094 -#define arch_randomize_brk arch_randomize_brk
2095 -
2096 #endif /* __KERNEL__ */
2097
2098 /*
2099 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2100 --- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2101 +++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2102 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2103 extern void iommu_init_early_dart(void);
2104 extern void iommu_init_early_pasemi(void);
2105
2106 +/* dma-iommu.c */
2107 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2108 +
2109 #ifdef CONFIG_PCI
2110 extern void pci_iommu_init(void);
2111 extern void pci_direct_iommu_init(void);
2112 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2113 --- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2114 +++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2115 @@ -26,6 +26,7 @@ enum km_type {
2116 KM_SOFTIRQ1,
2117 KM_PPC_SYNC_PAGE,
2118 KM_PPC_SYNC_ICACHE,
2119 + KM_CLEARPAGE,
2120 KM_TYPE_NR
2121 };
2122
2123 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2124 --- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2125 +++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2126 @@ -180,15 +180,18 @@ do { \
2127 * stack by default, so in the absense of a PT_GNU_STACK program header
2128 * we turn execute permission off.
2129 */
2130 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2131 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2132 +#define VM_STACK_DEFAULT_FLAGS32 \
2133 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2134 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2135
2136 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2137 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138
2139 +#ifndef CONFIG_PAX_PAGEEXEC
2140 #define VM_STACK_DEFAULT_FLAGS \
2141 (test_thread_flag(TIF_32BIT) ? \
2142 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2143 +#endif
2144
2145 #include <asm-generic/getorder.h>
2146
2147 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2148 --- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2149 +++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2150 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 #ifndef __ASSEMBLY__
2170
2171 #undef STRICT_MM_TYPECHECKS
2172 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2173 --- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2174 +++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2175 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2176 }
2177
2178 #ifdef CONFIG_PCI
2179 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2180 -extern struct dma_map_ops *get_pci_dma_ops(void);
2181 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2182 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2183 #else /* CONFIG_PCI */
2184 #define set_pci_dma_ops(d)
2185 #define get_pci_dma_ops() NULL
2186 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2187 --- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2188 +++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2189 @@ -2,6 +2,7 @@
2190 #define _ASM_POWERPC_PGTABLE_H
2191 #ifdef __KERNEL__
2192
2193 +#include <linux/const.h>
2194 #ifndef __ASSEMBLY__
2195 #include <asm/processor.h> /* For TASK_SIZE */
2196 #include <asm/mmu.h>
2197 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2198 --- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2199 +++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2200 @@ -21,6 +21,7 @@
2201 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2202 #define _PAGE_USER 0x004 /* usermode access allowed */
2203 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2204 +#define _PAGE_EXEC _PAGE_GUARDED
2205 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2206 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2207 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2208 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2209 --- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2210 +++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2211 @@ -191,6 +191,7 @@
2212 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2213 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2214 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2215 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2216 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2217 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2218 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2219 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2220 --- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2221 +++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2222 @@ -13,7 +13,7 @@
2223
2224 #include <linux/swiotlb.h>
2225
2226 -extern struct dma_map_ops swiotlb_dma_ops;
2227 +extern const struct dma_map_ops swiotlb_dma_ops;
2228
2229 static inline void dma_mark_clean(void *addr, size_t size) {}
2230
2231 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2232 --- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2233 +++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2234 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238 -extern unsigned long arch_align_stack(unsigned long sp);
2239 +#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2244 --- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2245 +++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2246 @@ -13,6 +13,8 @@
2247 #define VERIFY_READ 0
2248 #define VERIFY_WRITE 1
2249
2250 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2251 +
2252 /*
2253 * The fs value determines whether argument validity checking should be
2254 * performed or not. If get_fs() == USER_DS, checking is performed, with
2255 @@ -327,52 +329,6 @@ do { \
2256 extern unsigned long __copy_tofrom_user(void __user *to,
2257 const void __user *from, unsigned long size);
2258
2259 -#ifndef __powerpc64__
2260 -
2261 -static inline unsigned long copy_from_user(void *to,
2262 - const void __user *from, unsigned long n)
2263 -{
2264 - unsigned long over;
2265 -
2266 - if (access_ok(VERIFY_READ, from, n))
2267 - return __copy_tofrom_user((__force void __user *)to, from, n);
2268 - if ((unsigned long)from < TASK_SIZE) {
2269 - over = (unsigned long)from + n - TASK_SIZE;
2270 - return __copy_tofrom_user((__force void __user *)to, from,
2271 - n - over) + over;
2272 - }
2273 - return n;
2274 -}
2275 -
2276 -static inline unsigned long copy_to_user(void __user *to,
2277 - const void *from, unsigned long n)
2278 -{
2279 - unsigned long over;
2280 -
2281 - if (access_ok(VERIFY_WRITE, to, n))
2282 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2283 - if ((unsigned long)to < TASK_SIZE) {
2284 - over = (unsigned long)to + n - TASK_SIZE;
2285 - return __copy_tofrom_user(to, (__force void __user *)from,
2286 - n - over) + over;
2287 - }
2288 - return n;
2289 -}
2290 -
2291 -#else /* __powerpc64__ */
2292 -
2293 -#define __copy_in_user(to, from, size) \
2294 - __copy_tofrom_user((to), (from), (size))
2295 -
2296 -extern unsigned long copy_from_user(void *to, const void __user *from,
2297 - unsigned long n);
2298 -extern unsigned long copy_to_user(void __user *to, const void *from,
2299 - unsigned long n);
2300 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2301 - unsigned long n);
2302 -
2303 -#endif /* __powerpc64__ */
2304 -
2305 static inline unsigned long __copy_from_user_inatomic(void *to,
2306 const void __user *from, unsigned long n)
2307 {
2308 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2309 if (ret == 0)
2310 return 0;
2311 }
2312 +
2313 + if (!__builtin_constant_p(n))
2314 + check_object_size(to, n, false);
2315 +
2316 return __copy_tofrom_user((__force void __user *)to, from, n);
2317 }
2318
2319 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2320 if (ret == 0)
2321 return 0;
2322 }
2323 +
2324 + if (!__builtin_constant_p(n))
2325 + check_object_size(from, n, true);
2326 +
2327 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2328 }
2329
2330 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2331 return __copy_to_user_inatomic(to, from, size);
2332 }
2333
2334 +#ifndef __powerpc64__
2335 +
2336 +static inline unsigned long __must_check copy_from_user(void *to,
2337 + const void __user *from, unsigned long n)
2338 +{
2339 + unsigned long over;
2340 +
2341 + if ((long)n < 0)
2342 + return n;
2343 +
2344 + if (access_ok(VERIFY_READ, from, n)) {
2345 + if (!__builtin_constant_p(n))
2346 + check_object_size(to, n, false);
2347 + return __copy_tofrom_user((__force void __user *)to, from, n);
2348 + }
2349 + if ((unsigned long)from < TASK_SIZE) {
2350 + over = (unsigned long)from + n - TASK_SIZE;
2351 + if (!__builtin_constant_p(n - over))
2352 + check_object_size(to, n - over, false);
2353 + return __copy_tofrom_user((__force void __user *)to, from,
2354 + n - over) + over;
2355 + }
2356 + return n;
2357 +}
2358 +
2359 +static inline unsigned long __must_check copy_to_user(void __user *to,
2360 + const void *from, unsigned long n)
2361 +{
2362 + unsigned long over;
2363 +
2364 + if ((long)n < 0)
2365 + return n;
2366 +
2367 + if (access_ok(VERIFY_WRITE, to, n)) {
2368 + if (!__builtin_constant_p(n))
2369 + check_object_size(from, n, true);
2370 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2371 + }
2372 + if ((unsigned long)to < TASK_SIZE) {
2373 + over = (unsigned long)to + n - TASK_SIZE;
2374 + if (!__builtin_constant_p(n))
2375 + check_object_size(from, n - over, true);
2376 + return __copy_tofrom_user(to, (__force void __user *)from,
2377 + n - over) + over;
2378 + }
2379 + return n;
2380 +}
2381 +
2382 +#else /* __powerpc64__ */
2383 +
2384 +#define __copy_in_user(to, from, size) \
2385 + __copy_tofrom_user((to), (from), (size))
2386 +
2387 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2388 +{
2389 + if ((long)n < 0 || n > INT_MAX)
2390 + return n;
2391 +
2392 + if (!__builtin_constant_p(n))
2393 + check_object_size(to, n, false);
2394 +
2395 + if (likely(access_ok(VERIFY_READ, from, n)))
2396 + n = __copy_from_user(to, from, n);
2397 + else
2398 + memset(to, 0, n);
2399 + return n;
2400 +}
2401 +
2402 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2403 +{
2404 + if ((long)n < 0 || n > INT_MAX)
2405 + return n;
2406 +
2407 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2408 + if (!__builtin_constant_p(n))
2409 + check_object_size(from, n, true);
2410 + n = __copy_to_user(to, from, n);
2411 + }
2412 + return n;
2413 +}
2414 +
2415 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2416 + unsigned long n);
2417 +
2418 +#endif /* __powerpc64__ */
2419 +
2420 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2421
2422 static inline unsigned long clear_user(void __user *addr, unsigned long size)
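
The reworked copy_from_user() above does three things in sequence: reject implausible lengths ("(long)n < 0 || n > INT_MAX"), bound-check the kernel-side object via check_object_size(), and zero the destination when access_ok() fails so stale data never reaches the caller. The userspace sketch mirrors that flow; copy_bounded(), dst_capacity and src_ok are invented stand-ins for the real primitives.

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned long copy_bounded(void *dst, unsigned long dst_capacity,
                                      const void *src, unsigned long n,
                                      int src_ok)
    {
        if ((long)n < 0 || n > INT_MAX)  /* implausible length */
            return n;
        if (n > dst_capacity)            /* would overflow the destination object */
            return n;
        if (!src_ok) {                   /* "access_ok" failed */
            memset(dst, 0, n);           /* never expose stale bytes */
            return n;
        }
        memcpy(dst, src, n);
        return 0;                        /* 0 bytes left uncopied */
    }

    int main(void)
    {
        char dst[16] = "sensitive!";
        char src[16] = "hello";

        printf("good copy, left: %lu, dst=%s\n",
               copy_bounded(dst, sizeof(dst), src, 6, 1), dst);
        printf("bad source, left: %lu, dst[0]=%d\n",
               copy_bounded(dst, sizeof(dst), src, 6, 0), dst[0]);
        return 0;
    }
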
2423 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2424 --- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2425 +++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2426 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2427 &cache_assoc_attr,
2428 };
2429
2430 -static struct sysfs_ops cache_index_ops = {
2431 +static const struct sysfs_ops cache_index_ops = {
2432 .show = cache_index_show,
2433 };
2434
2435 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2436 --- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2437 +++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2438 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2439 }
2440 #endif
2441
2442 -struct dma_map_ops dma_direct_ops = {
2443 +const struct dma_map_ops dma_direct_ops = {
2444 .alloc_coherent = dma_direct_alloc_coherent,
2445 .free_coherent = dma_direct_free_coherent,
2446 .map_sg = dma_direct_map_sg,
2447 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2448 --- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2449 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2450 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2451 }
2452
2453 /* We support DMA to/from any memory page via the iommu */
2454 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2455 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2456 {
2457 struct iommu_table *tbl = get_iommu_table_base(dev);
2458
2459 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2460 --- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2461 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2462 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2463 * map_page, and unmap_page on highmem, use normal dma_ops
2464 * for everything else.
2465 */
2466 -struct dma_map_ops swiotlb_dma_ops = {
2467 +const struct dma_map_ops swiotlb_dma_ops = {
2468 .alloc_coherent = dma_direct_alloc_coherent,
2469 .free_coherent = dma_direct_free_coherent,
2470 .map_sg = swiotlb_map_sg_attrs,
2471 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2472 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2473 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2474 @@ -455,6 +455,7 @@ storage_fault_common:
2475 std r14,_DAR(r1)
2476 std r15,_DSISR(r1)
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478 + bl .save_nvgprs
2479 mr r4,r14
2480 mr r5,r15
2481 ld r14,PACA_EXGEN+EX_R14(r13)
2482 @@ -464,8 +465,7 @@ storage_fault_common:
2483 cmpdi r3,0
2484 bne- 1f
2485 b .ret_from_except_lite
2486 -1: bl .save_nvgprs
2487 - mr r5,r3
2488 +1: mr r5,r3
2489 addi r3,r1,STACK_FRAME_OVERHEAD
2490 ld r4,_DAR(r1)
2491 bl .bad_page_fault
2492 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2493 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2494 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2495 @@ -818,10 +818,10 @@ handle_page_fault:
2496 11: ld r4,_DAR(r1)
2497 ld r5,_DSISR(r1)
2498 addi r3,r1,STACK_FRAME_OVERHEAD
2499 + bl .save_nvgprs
2500 bl .do_page_fault
2501 cmpdi r3,0
2502 beq+ 13f
2503 - bl .save_nvgprs
2504 mr r5,r3
2505 addi r3,r1,STACK_FRAME_OVERHEAD
2506 lwz r4,_DAR(r1)
2507 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2508 --- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2509 +++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2510 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2511 return 1;
2512 }
2513
2514 -static struct dma_map_ops ibmebus_dma_ops = {
2515 +static const struct dma_map_ops ibmebus_dma_ops = {
2516 .alloc_coherent = ibmebus_alloc_coherent,
2517 .free_coherent = ibmebus_free_coherent,
2518 .map_sg = ibmebus_map_sg,
2519 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2520 --- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2521 +++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2522 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2523 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2524 return 0;
2525
2526 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2527 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2528 regs->nip += 4;
2529
2530 return 1;
2531 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2532 /*
2533 * Global data
2534 */
2535 -struct kgdb_arch arch_kgdb_ops = {
2536 +const struct kgdb_arch arch_kgdb_ops = {
2537 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2538 };
2539
2540 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2541 --- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2542 +++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2543 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2544 me->arch.core_plt_section = i;
2545 }
2546 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2547 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2548 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2549 return -ENOEXEC;
2550 }
2551
2552 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2553
2554 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2555 /* Init, or core PLT? */
2556 - if (location >= mod->module_core
2557 - && location < mod->module_core + mod->core_size)
2558 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2559 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2560 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2561 - else
2562 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2563 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2564 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2565 + else {
2566 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2567 + return ~0UL;
2568 + }
2569
2570 /* Find this entry, or if that fails, the next avail. entry */
2571 while (entry->jump[0]) {
2572 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2573 --- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2574 +++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2575 @@ -31,11 +31,24 @@
2576
2577 LIST_HEAD(module_bug_list);
2578
2579 +#ifdef CONFIG_PAX_KERNEXEC
2580 void *module_alloc(unsigned long size)
2581 {
2582 if (size == 0)
2583 return NULL;
2584
2585 + return vmalloc(size);
2586 +}
2587 +
2588 +void *module_alloc_exec(unsigned long size)
2589 +#else
2590 +void *module_alloc(unsigned long size)
2591 +#endif
2592 +
2593 +{
2594 + if (size == 0)
2595 + return NULL;
2596 +
2597 return vmalloc_exec(size);
2598 }
2599
2600 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2601 vfree(module_region);
2602 }
2603
2604 +#ifdef CONFIG_PAX_KERNEXEC
2605 +void module_free_exec(struct module *mod, void *module_region)
2606 +{
2607 + module_free(mod, module_region);
2608 +}
2609 +#endif
2610 +
2611 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2612 const Elf_Shdr *sechdrs,
2613 const char *name)
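
Under KERNEXEC the hunk above gives modules two allocators: module_alloc() now returns plain, non-executable vmalloc memory for the RW part, while module_alloc_exec() returns executable memory for the RX part. The userspace sketch below shows the same separation with mmap(); alloc_rw()/alloc_rx() are invented names and mmap only stands in for vmalloc/vmalloc_exec.

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static void *alloc_rw(size_t size)
    {
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }

    static void *alloc_rx(size_t size)
    {
        /* real code would be filled through a separate RW view before
         * being used; kept RX-only here for brevity */
        void *p = mmap(NULL, size, PROT_READ | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }

    int main(void)
    {
        void *rw = alloc_rw(4096);
        void *rx = alloc_rx(4096);

        printf("rw pool at %p, rx pool at %p\n", rw, rx);
        if (rw) munmap(rw, 4096);
        if (rx) munmap(rx, 4096);
        return 0;
    }
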
2614 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2615 --- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2616 +++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2617 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2618 unsigned int ppc_pci_flags = 0;
2619
2620
2621 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2622 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2623
2624 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2625 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2626 {
2627 pci_dma_ops = dma_ops;
2628 }
2629
2630 -struct dma_map_ops *get_pci_dma_ops(void)
2631 +const struct dma_map_ops *get_pci_dma_ops(void)
2632 {
2633 return pci_dma_ops;
2634 }
2635 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2636 --- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2637 +++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2638 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2639 * Lookup NIP late so we have the best change of getting the
2640 * above info out without failing
2641 */
2642 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2643 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2644 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2645 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2646 #endif
2647 show_stack(current, (unsigned long *) regs->gpr[1]);
2648 if (!user_mode(regs))
2649 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2650 newsp = stack[0];
2651 ip = stack[STACK_FRAME_LR_SAVE];
2652 if (!firstframe || ip != lr) {
2653 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2654 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2656 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2657 - printk(" (%pS)",
2658 + printk(" (%pA)",
2659 (void *)current->ret_stack[curr_frame].ret);
2660 curr_frame--;
2661 }
2662 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2663 struct pt_regs *regs = (struct pt_regs *)
2664 (sp + STACK_FRAME_OVERHEAD);
2665 lr = regs->link;
2666 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2667 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2668 regs->trap, (void *)regs->nip, (void *)lr);
2669 firstframe = 1;
2670 }
2671 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2672 }
2673
2674 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2675 -
2676 -unsigned long arch_align_stack(unsigned long sp)
2677 -{
2678 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2679 - sp -= get_random_int() & ~PAGE_MASK;
2680 - return sp & ~0xf;
2681 -}
2682 -
2683 -static inline unsigned long brk_rnd(void)
2684 -{
2685 - unsigned long rnd = 0;
2686 -
2687 - /* 8MB for 32bit, 1GB for 64bit */
2688 - if (is_32bit_task())
2689 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2690 - else
2691 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2692 -
2693 - return rnd << PAGE_SHIFT;
2694 -}
2695 -
2696 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2697 -{
2698 - unsigned long base = mm->brk;
2699 - unsigned long ret;
2700 -
2701 -#ifdef CONFIG_PPC_STD_MMU_64
2702 - /*
2703 - * If we are using 1TB segments and we are allowed to randomise
2704 - * the heap, we can put it above 1TB so it is backed by a 1TB
2705 - * segment. Otherwise the heap will be in the bottom 1TB
2706 - * which always uses 256MB segments and this may result in a
2707 - * performance penalty.
2708 - */
2709 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2710 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2711 -#endif
2712 -
2713 - ret = PAGE_ALIGN(base + brk_rnd());
2714 -
2715 - if (ret < mm->brk)
2716 - return mm->brk;
2717 -
2718 - return ret;
2719 -}
2720 -
2721 -unsigned long randomize_et_dyn(unsigned long base)
2722 -{
2723 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2724 -
2725 - if (ret < base)
2726 - return base;
2727 -
2728 - return ret;
2729 -}
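
The deleted brk_rnd() drew a random page count below 2^(23-PAGE_SHIFT) on 32-bit and 2^(30-PAGE_SHIFT) on 64-bit and shifted it back up by PAGE_SHIFT, i.e. up to 8 MiB or 1 GiB of brk randomisation as its own comment says; the patch removes it in favour of PaX-managed randomisation. The sketch only redoes that arithmetic, assuming 4 KiB pages.

    #include <stdio.h>

    int main(void)
    {
        const unsigned int page_shift = 12;        /* 4 KiB pages assumed */
        const unsigned int shifts[] = { 23, 30 };  /* from the removed code */
        unsigned int i;

        for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
            unsigned long long pages = 1ULL << (shifts[i] - page_shift);

            printf("up to %llu pages -> %llu MiB of brk shift\n",
                   pages, (pages << page_shift) >> 20);
        }
        return 0;
    }
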
2730 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2731 --- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2732 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2733 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2734 /* Save user registers on the stack */
2735 frame = &rt_sf->uc.uc_mcontext;
2736 addr = frame;
2737 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2738 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2739 if (save_user_regs(regs, frame, 0, 1))
2740 goto badframe;
2741 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2742 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2743 --- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2744 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2745 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2746 current->thread.fpscr.val = 0;
2747
2748 /* Set up to return from userspace. */
2749 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2750 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2751 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2752 } else {
2753 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2754 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2755 --- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2756 +++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2757 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2758 if (oldlenp) {
2759 if (!error) {
2760 if (get_user(oldlen, oldlenp) ||
2761 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2762 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2763 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2764 error = -EFAULT;
2765 }
2766 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2767 }
2768 return error;
2769 }
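
The compat_sys_sysctl hunk folds a previously unchecked copy_to_user() into the error test, so a partial or failed copy is now reported as -EFAULT instead of being silently ignored. A sketch of that return-value pattern; fake_copy_to_user() is an invented stand-in that, like the real call, returns the number of bytes it failed to copy.

    #include <stdio.h>
    #include <string.h>

    #define EFAULT 14

    static unsigned long fake_copy_to_user(void *dst, const void *src,
                                           unsigned long n, unsigned long fail_after)
    {
        unsigned long done = n < fail_after ? n : fail_after;

        memcpy(dst, src, done);
        return n - done;     /* bytes NOT copied */
    }

    int main(void)
    {
        char dst[8], src[8] = "abcdefg";
        int error = 0;

        if (fake_copy_to_user(dst, src, sizeof(src), 4))  /* partial copy -> error */
            error = -EFAULT;
        printf("error = %d\n", error);
        return 0;
    }
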
2770 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2771 --- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2772 +++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2773 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2774 static inline void pmac_backlight_unblank(void) { }
2775 #endif
2776
2777 +extern void gr_handle_kernel_exploit(void);
2778 +
2779 int die(const char *str, struct pt_regs *regs, long err)
2780 {
2781 static struct {
2782 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2783 if (panic_on_oops)
2784 panic("Fatal exception");
2785
2786 + gr_handle_kernel_exploit();
2787 +
2788 oops_exit();
2789 do_exit(err);
2790
2791 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2792 --- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2793 +++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2794 @@ -36,6 +36,7 @@
2795 #include <asm/firmware.h>
2796 #include <asm/vdso.h>
2797 #include <asm/vdso_datapage.h>
2798 +#include <asm/mman.h>
2799
2800 #include "setup.h"
2801
2802 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2803 vdso_base = VDSO32_MBASE;
2804 #endif
2805
2806 - current->mm->context.vdso_base = 0;
2807 + current->mm->context.vdso_base = ~0UL;
2808
2809 /* vDSO has a problem and was disabled, just don't "enable" it for the
2810 * process
2811 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2812 vdso_base = get_unmapped_area(NULL, vdso_base,
2813 (vdso_pages << PAGE_SHIFT) +
2814 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2815 - 0, 0);
2816 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2817 if (IS_ERR_VALUE(vdso_base)) {
2818 rc = vdso_base;
2819 goto fail_mmapsem;
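
Read together, the signal_32.c, signal_64.c and vdso.c hunks change the "no vDSO" sentinel: arch_setup_additional_pages() now initialises context.vdso_base to ~0UL instead of 0, and the signal-trampoline code tests vdso_base != ~0UL rather than treating any non-zero value as "mapped". Presumably this matters once the vDSO placement can be randomized (note the added MAP_PRIVATE | MAP_EXECUTABLE flags to get_unmapped_area()), since 0 then stops being a reliable "not mapped" marker. A tiny sketch of the sentinel pattern:

#include <stdio.h>

#define VDSO_UNMAPPED	(~0UL)		/* sentinel meaning "no vDSO" */

static unsigned long vdso_base = VDSO_UNMAPPED;

/* hypothetical helper mirroring the signal-frame test: only use the
 * vDSO trampoline when a real mapping exists */
static int use_vdso_trampoline(void)
{
	return vdso_base != VDSO_UNMAPPED;	/* even base 0 counts as mapped */
}

int main(void)
{
	printf("before mapping: %d\n", use_vdso_trampoline());
	vdso_base = 0;				/* a low mapping is still a mapping */
	printf("after mapping at 0: %d\n", use_vdso_trampoline());
	return 0;
}
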
2820 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2821 --- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2822 +++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2823 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2824 vio_cmo_dealloc(viodev, alloc_size);
2825 }
2826
2827 -struct dma_map_ops vio_dma_mapping_ops = {
2828 +static const struct dma_map_ops vio_dma_mapping_ops = {
2829 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2830 .free_coherent = vio_dma_iommu_free_coherent,
2831 .map_sg = vio_dma_iommu_map_sg,
2832 .unmap_sg = vio_dma_iommu_unmap_sg,
2833 + .dma_supported = dma_iommu_dma_supported,
2834 .map_page = vio_dma_iommu_map_page,
2835 .unmap_page = vio_dma_iommu_unmap_page,
2836
2837 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2838
2839 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2840 {
2841 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2842 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2843 }
2844
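
The vio.c hunk is one instance of a pattern repeated throughout this patch: function-pointer tables such as struct dma_map_ops are made static const so they can live in read-only memory and cannot be retargeted at runtime. Doing that here requires dropping the late vio_dma_mapping_ops.dma_supported assignment and filling in .dma_supported = dma_iommu_dma_supported in the static initializer instead. A small, self-contained illustration of why the assignment has to move:

#include <stdio.h>

/* cut-down stand-in for struct dma_map_ops */
struct ops {
	int (*supported)(int mask);
};

static int always_supported(int mask) { (void)mask; return 1; }

/* const means every member must be set at build time; a later
 * "vio_like_ops.supported = ..." would no longer compile, which is the
 * point: the table can be mapped read-only and is harder to hijack. */
static const struct ops vio_like_ops = {
	.supported = always_supported,
};

int main(void)
{
	printf("supported: %d\n", vio_like_ops.supported(32));
	return 0;
}
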
2845 diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2846 --- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2847 +++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2848 @@ -9,22 +9,6 @@
2849 #include <linux/module.h>
2850 #include <asm/uaccess.h>
2851
2852 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2853 -{
2854 - if (likely(access_ok(VERIFY_READ, from, n)))
2855 - n = __copy_from_user(to, from, n);
2856 - else
2857 - memset(to, 0, n);
2858 - return n;
2859 -}
2860 -
2861 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2862 -{
2863 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2864 - n = __copy_to_user(to, from, n);
2865 - return n;
2866 -}
2867 -
2868 unsigned long copy_in_user(void __user *to, const void __user *from,
2869 unsigned long n)
2870 {
2871 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2872 return n;
2873 }
2874
2875 -EXPORT_SYMBOL(copy_from_user);
2876 -EXPORT_SYMBOL(copy_to_user);
2877 EXPORT_SYMBOL(copy_in_user);
2878
2879 diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2880 --- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2881 +++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2882 @@ -30,6 +30,10 @@
2883 #include <linux/kprobes.h>
2884 #include <linux/kdebug.h>
2885 #include <linux/perf_event.h>
2886 +#include <linux/slab.h>
2887 +#include <linux/pagemap.h>
2888 +#include <linux/compiler.h>
2889 +#include <linux/unistd.h>
2890
2891 #include <asm/firmware.h>
2892 #include <asm/page.h>
2893 @@ -40,6 +44,7 @@
2894 #include <asm/uaccess.h>
2895 #include <asm/tlbflush.h>
2896 #include <asm/siginfo.h>
2897 +#include <asm/ptrace.h>
2898
2899
2900 #ifdef CONFIG_KPROBES
2901 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2902 }
2903 #endif
2904
2905 +#ifdef CONFIG_PAX_PAGEEXEC
2906 +/*
2907 + * PaX: decide what to do with offenders (regs->nip = fault address)
2908 + *
2909 + * returns 1 when task should be killed
2910 + */
2911 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2912 +{
2913 + return 1;
2914 +}
2915 +
2916 +void pax_report_insns(void *pc, void *sp)
2917 +{
2918 + unsigned long i;
2919 +
2920 + printk(KERN_ERR "PAX: bytes at PC: ");
2921 + for (i = 0; i < 5; i++) {
2922 + unsigned int c;
2923 + if (get_user(c, (unsigned int __user *)pc+i))
2924 + printk(KERN_CONT "???????? ");
2925 + else
2926 + printk(KERN_CONT "%08x ", c);
2927 + }
2928 + printk("\n");
2929 +}
2930 +#endif
2931 +
2932 /*
2933 * Check whether the instruction at regs->nip is a store using
2934 * an update addressing form which will update r1.
2935 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2936 * indicate errors in DSISR but can validly be set in SRR1.
2937 */
2938 if (trap == 0x400)
2939 - error_code &= 0x48200000;
2940 + error_code &= 0x58200000;
2941 else
2942 is_write = error_code & DSISR_ISSTORE;
2943 #else
2944 @@ -250,7 +282,7 @@ good_area:
2945 * "undefined". Of those that can be set, this is the only
2946 * one which seems bad.
2947 */
2948 - if (error_code & 0x10000000)
2949 + if (error_code & DSISR_GUARDED)
2950 /* Guarded storage error. */
2951 goto bad_area;
2952 #endif /* CONFIG_8xx */
2953 @@ -265,7 +297,7 @@ good_area:
2954 * processors use the same I/D cache coherency mechanism
2955 * as embedded.
2956 */
2957 - if (error_code & DSISR_PROTFAULT)
2958 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2959 goto bad_area;
2960 #endif /* CONFIG_PPC_STD_MMU */
2961
2962 @@ -335,6 +367,23 @@ bad_area:
2963 bad_area_nosemaphore:
2964 /* User mode accesses cause a SIGSEGV */
2965 if (user_mode(regs)) {
2966 +
2967 +#ifdef CONFIG_PAX_PAGEEXEC
2968 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2969 +#ifdef CONFIG_PPC_STD_MMU
2970 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2971 +#else
2972 + if (is_exec && regs->nip == address) {
2973 +#endif
2974 + switch (pax_handle_fetch_fault(regs)) {
2975 + }
2976 +
2977 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2978 + do_group_exit(SIGKILL);
2979 + }
2980 + }
2981 +#endif
2982 +
2983 _exception(SIGSEGV, regs, code, address);
2984 return 0;
2985 }
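
The fault.c hunk adds the powerpc side of PAX_PAGEEXEC: pax_handle_fetch_fault() (which here always returns 1, i.e. kill), pax_report_insns() (which dumps the five instruction words at the faulting PC via get_user(), printing ???????? for unreadable words), and a branch in the user-mode SIGSEGV path that, when MF_PAX_PAGEEXEC is set and the fault looks like an instruction fetch, reports it and calls do_group_exit(SIGKILL). The error_code masks are also widened so guarded-storage faults (DSISR_GUARDED) reach that path. A userspace analogue of the instruction dump, with read_word standing in for get_user():

#include <stdio.h>

/* 0 = success, non-zero = would have faulted, like get_user() */
static int read_word(const unsigned int *p, unsigned int *out)
{
	*out = *p;			/* assume readable in this demo */
	return 0;
}

static void report_insns(const void *pc)
{
	unsigned int i;

	printf("PAX: bytes at PC: ");
	for (i = 0; i < 5; i++) {
		unsigned int c;

		if (read_word((const unsigned int *)pc + i, &c))
			printf("???????? ");
		else
			printf("%08x ", c);
	}
	printf("\n");
}

int main(void)
{
	/* arbitrary sample words standing in for the code at the PC */
	static const unsigned int code[5] = {
		0x7c0802a6, 0x38210010, 0x4e800020, 0, 0
	};

	report_insns(code);
	return 0;
}
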
2986 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
2987 --- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2988 +++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2989 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2990 */
2991 if (mmap_is_legacy()) {
2992 mm->mmap_base = TASK_UNMAPPED_BASE;
2993 +
2994 +#ifdef CONFIG_PAX_RANDMMAP
2995 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2996 + mm->mmap_base += mm->delta_mmap;
2997 +#endif
2998 +
2999 mm->get_unmapped_area = arch_get_unmapped_area;
3000 mm->unmap_area = arch_unmap_area;
3001 } else {
3002 mm->mmap_base = mmap_base();
3003 +
3004 +#ifdef CONFIG_PAX_RANDMMAP
3005 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3006 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3007 +#endif
3008 +
3009 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3010 mm->unmap_area = arch_unmap_area_topdown;
3011 }
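
The mmap_64.c hunk applies PAX_RANDMMAP to the powerpc mmap layout: in the legacy bottom-up layout the base is pushed up by mm->delta_mmap, and in the top-down layout it is pulled down by delta_mmap + delta_stack, keeping the randomized region clear of the (also randomized) stack area. The deltas themselves are page-aligned random offsets of PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN bits, set up in generic code elsewhere in the patch. A rough userspace model of the arithmetic (rand() is only a stand-in for the kernel's entropy source, and the bit widths and bases are illustrative, not powerpc's actual values):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		12
#define TASK_UNMAPPED_BASE	0x20000000ULL		/* illustrative */

static unsigned long long rand_delta(unsigned int bits)
{
	/* "bits" pages worth of randomization, page aligned */
	return ((unsigned long long)rand() & ((1ULL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long long delta_mmap = rand_delta(28);		/* illustrative widths */
	unsigned long long delta_stack = rand_delta(21);
	unsigned long long legacy_base, topdown_base = 0x7fff00000000ULL;

	legacy_base = TASK_UNMAPPED_BASE + delta_mmap;	/* bottom-up: slide up */
	topdown_base -= delta_mmap + delta_stack;	/* top-down: slide down */

	printf("legacy base  %#llx\ntopdown base %#llx\n",
	       legacy_base, topdown_base);
	return 0;
}
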
3012 diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3013 --- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3014 +++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3015 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3016 if ((mm->task_size - len) < addr)
3017 return 0;
3018 vma = find_vma(mm, addr);
3019 - return (!vma || (addr + len) <= vma->vm_start);
3020 + return check_heap_stack_gap(vma, addr, len);
3021 }
3022
3023 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3024 @@ -256,7 +256,7 @@ full_search:
3025 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3026 continue;
3027 }
3028 - if (!vma || addr + len <= vma->vm_start) {
3029 + if (check_heap_stack_gap(vma, addr, len)) {
3030 /*
3031 * Remember the place where we stopped the search:
3032 */
3033 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3034 }
3035 }
3036
3037 - addr = mm->mmap_base;
3038 - while (addr > len) {
3039 + if (mm->mmap_base < len)
3040 + addr = -ENOMEM;
3041 + else
3042 + addr = mm->mmap_base - len;
3043 +
3044 + while (!IS_ERR_VALUE(addr)) {
3045 /* Go down by chunk size */
3046 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3047 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3048
3049 /* Check for hit with different page size */
3050 mask = slice_range_to_mask(addr, len);
3051 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3052 * return with success:
3053 */
3054 vma = find_vma(mm, addr);
3055 - if (!vma || (addr + len) <= vma->vm_start) {
3056 + if (check_heap_stack_gap(vma, addr, len)) {
3057 /* remember the address as a hint for next time */
3058 if (use_cache)
3059 mm->free_area_cache = addr;
3060 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3061 mm->cached_hole_size = vma->vm_start - addr;
3062
3063 /* try just below the current vma->vm_start */
3064 - addr = vma->vm_start;
3065 + addr = skip_heap_stack_gap(vma, len);
3066 }
3067
3068 /*
3069 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3070 if (fixed && addr > (mm->task_size - len))
3071 return -EINVAL;
3072
3073 +#ifdef CONFIG_PAX_RANDMMAP
3074 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3075 + addr = 0;
3076 +#endif
3077 +
3078 /* If hint, make sure it matches our alignment restrictions */
3079 if (!fixed && addr) {
3080 addr = _ALIGN_UP(addr, 1ul << pshift);
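
The slice.c hunk (like several other arch_get_unmapped_area()-style hunks in this patch) swaps the open-coded test !vma || addr + len <= vma->vm_start for check_heap_stack_gap(), and under PAX_RANDMMAP zeroes the caller-supplied hint so it cannot defeat the randomization. The helper itself is defined elsewhere in the patch; a simplified userspace sketch of the idea, with a hypothetical fixed gap in place of the real tunable, is:

#include <stdio.h>

#define VM_GROWSDOWN	0x0100UL
#define PAGE_SIZE	4096UL
#define HEAP_STACK_GAP	(64UL * PAGE_SIZE)	/* hypothetical: 64-page guard */

/* cut-down stand-in for struct vm_area_struct */
struct vma {
	unsigned long vm_start;
	unsigned long vm_flags;
};

/* Accept [addr, addr+len) only if it ends below the next vma and, when
 * that vma is a grows-down stack, also leaves a guard gap so new
 * mappings cannot sit flush against the stack. Simplified; not the
 * patch's exact definition. */
static int check_heap_stack_gap(const struct vma *vma,
				unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)
		return 1;
	if (vma->vm_flags & VM_GROWSDOWN)
		gap = HEAP_STACK_GAP;
	return addr + len + gap <= vma->vm_start;
}

int main(void)
{
	struct vma stack = { .vm_start = 0xbffff000UL,
			     .vm_flags = VM_GROWSDOWN };

	printf("flush against stack: %d\n",
	       check_heap_stack_gap(&stack, stack.vm_start - PAGE_SIZE, PAGE_SIZE));
	printf("one gap lower:       %d\n",
	       check_heap_stack_gap(&stack,
				    stack.vm_start - HEAP_STACK_GAP - PAGE_SIZE,
				    PAGE_SIZE));
	return 0;
}
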
3081 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3082 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3083 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3084 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3085 lite5200_pm_target_state = PM_SUSPEND_ON;
3086 }
3087
3088 -static struct platform_suspend_ops lite5200_pm_ops = {
3089 +static const struct platform_suspend_ops lite5200_pm_ops = {
3090 .valid = lite5200_pm_valid,
3091 .begin = lite5200_pm_begin,
3092 .prepare = lite5200_pm_prepare,
3093 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3094 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3095 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3096 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3097 iounmap(mbar);
3098 }
3099
3100 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3101 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3102 .valid = mpc52xx_pm_valid,
3103 .prepare = mpc52xx_pm_prepare,
3104 .enter = mpc52xx_pm_enter,
3105 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3106 --- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3107 +++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3108 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3109 return ret;
3110 }
3111
3112 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3113 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3114 .valid = mpc83xx_suspend_valid,
3115 .begin = mpc83xx_suspend_begin,
3116 .enter = mpc83xx_suspend_enter,
3117 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3118 --- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3119 +++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3120 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3121
3122 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3123
3124 -struct dma_map_ops dma_iommu_fixed_ops = {
3125 +const struct dma_map_ops dma_iommu_fixed_ops = {
3126 .alloc_coherent = dma_fixed_alloc_coherent,
3127 .free_coherent = dma_fixed_free_coherent,
3128 .map_sg = dma_fixed_map_sg,
3129 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3130 --- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3131 +++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3132 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3133 return mask >= DMA_BIT_MASK(32);
3134 }
3135
3136 -static struct dma_map_ops ps3_sb_dma_ops = {
3137 +static const struct dma_map_ops ps3_sb_dma_ops = {
3138 .alloc_coherent = ps3_alloc_coherent,
3139 .free_coherent = ps3_free_coherent,
3140 .map_sg = ps3_sb_map_sg,
3141 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3142 .unmap_page = ps3_unmap_page,
3143 };
3144
3145 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3146 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3147 .alloc_coherent = ps3_alloc_coherent,
3148 .free_coherent = ps3_free_coherent,
3149 .map_sg = ps3_ioc0_map_sg,
3150 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3151 --- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3152 +++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3153 @@ -2,6 +2,8 @@ config PPC_PSERIES
3154 depends on PPC64 && PPC_BOOK3S
3155 bool "IBM pSeries & new (POWER5-based) iSeries"
3156 select MPIC
3157 + select PCI_MSI
3158 + select XICS
3159 select PPC_I8259
3160 select PPC_RTAS
3161 select RTAS_ERROR_LOGGING
3162 diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3163 --- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3164 +++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3165 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3166 that it will "exec", and that there is sufficient room for the brk. */
3167 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3168
3169 +#ifdef CONFIG_PAX_ASLR
3170 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3171 +
3172 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3173 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3174 +#endif
3175 +
3176 /* This yields a mask that user programs can use to figure out what
3177 instruction set this CPU supports. */
3178
3179 diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3180 --- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3181 +++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3182 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3183 void detect_memory_layout(struct mem_chunk chunk[]);
3184
3185 #ifdef CONFIG_S390_SWITCH_AMODE
3186 -extern unsigned int switch_amode;
3187 +#define switch_amode (1)
3188 #else
3189 #define switch_amode (0)
3190 #endif
3191
3192 #ifdef CONFIG_S390_EXEC_PROTECT
3193 -extern unsigned int s390_noexec;
3194 +#define s390_noexec (1)
3195 #else
3196 #define s390_noexec (0)
3197 #endif
3198 diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3199 --- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3200 +++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3201 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3202 copy_to_user(void __user *to, const void *from, unsigned long n)
3203 {
3204 might_fault();
3205 +
3206 + if ((long)n < 0)
3207 + return n;
3208 +
3209 if (access_ok(VERIFY_WRITE, to, n))
3210 n = __copy_to_user(to, from, n);
3211 return n;
3212 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3213 static inline unsigned long __must_check
3214 __copy_from_user(void *to, const void __user *from, unsigned long n)
3215 {
3216 + if ((long)n < 0)
3217 + return n;
3218 +
3219 if (__builtin_constant_p(n) && (n <= 256))
3220 return uaccess.copy_from_user_small(n, from, to);
3221 else
3222 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3223 copy_from_user(void *to, const void __user *from, unsigned long n)
3224 {
3225 might_fault();
3226 +
3227 + if ((long)n < 0)
3228 + return n;
3229 +
3230 if (access_ok(VERIFY_READ, from, n))
3231 n = __copy_from_user(to, from, n);
3232 else
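
The s390 uaccess.h hunk adds an early (long)n < 0 check to copy_to_user(), copy_from_user() and __copy_from_user(). The idea is to catch lengths that went negative through a signed underflow before they are reinterpreted as an enormous unsigned count; the helper then just reports the whole length as uncopied instead of copying far too much. A userspace illustration of the failure mode the check is aimed at:

#include <stdio.h>

/* Mimics the hardened helpers: a length whose sign bit is set is
 * refused outright and returned as "bytes not copied". */
static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
	(void)to; (void)from;

	if ((long)n < 0)
		return n;	/* caller's arithmetic underflowed: do nothing */
	/* ... the real copy would happen here ... */
	return 0;		/* 0 bytes left uncopied = success */
}

int main(void)
{
	int payload_len = 8, header_len = 16;
	/* classic bug: subtracting a header from a too-short length */
	unsigned long n = (unsigned long)(payload_len - header_len);

	printf("requested %lu bytes, left uncopied: %lu\n",
	       n, guarded_copy(NULL, NULL, n));
	return 0;
}
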
3233 diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3234 --- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3235 +++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3236 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3237
3238 config S390_SWITCH_AMODE
3239 bool "Switch kernel/user addressing modes"
3240 + default y
3241 help
3242 This option allows to switch the addressing modes of kernel and user
3243 - space. The kernel parameter switch_amode=on will enable this feature,
3244 - default is disabled. Enabling this (via kernel parameter) on machines
3245 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3246 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3247 + will reduce system performance.
3248
3249 Note that this option will also be selected by selecting the execute
3250 - protection option below. Enabling the execute protection via the
3251 - noexec kernel parameter will also switch the addressing modes,
3252 - independent of the switch_amode kernel parameter.
3253 + protection option below. Enabling the execute protection will also
3254 + switch the addressing modes, independent of this option.
3255
3256
3257 config S390_EXEC_PROTECT
3258 bool "Data execute protection"
3259 + default y
3260 select S390_SWITCH_AMODE
3261 help
3262 This option allows to enable a buffer overflow protection for user
3263 space programs and it also selects the addressing mode option above.
3264 - The kernel parameter noexec=on will enable this feature and also
3265 - switch the addressing modes, default is disabled. Enabling this (via
3266 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3267 - will reduce system performance.
3268 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3269 + reduce system performance.
3270
3271 comment "Code generation options"
3272
3273 diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3274 --- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3275 +++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3276 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3277
3278 /* Increase core size by size of got & plt and set start
3279 offsets for got and plt. */
3280 - me->core_size = ALIGN(me->core_size, 4);
3281 - me->arch.got_offset = me->core_size;
3282 - me->core_size += me->arch.got_size;
3283 - me->arch.plt_offset = me->core_size;
3284 - me->core_size += me->arch.plt_size;
3285 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3286 + me->arch.got_offset = me->core_size_rw;
3287 + me->core_size_rw += me->arch.got_size;
3288 + me->arch.plt_offset = me->core_size_rx;
3289 + me->core_size_rx += me->arch.plt_size;
3290 return 0;
3291 }
3292
3293 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3294 if (info->got_initialized == 0) {
3295 Elf_Addr *gotent;
3296
3297 - gotent = me->module_core + me->arch.got_offset +
3298 + gotent = me->module_core_rw + me->arch.got_offset +
3299 info->got_offset;
3300 *gotent = val;
3301 info->got_initialized = 1;
3302 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3303 else if (r_type == R_390_GOTENT ||
3304 r_type == R_390_GOTPLTENT)
3305 *(unsigned int *) loc =
3306 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3307 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3308 else if (r_type == R_390_GOT64 ||
3309 r_type == R_390_GOTPLT64)
3310 *(unsigned long *) loc = val;
3311 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3312 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3313 if (info->plt_initialized == 0) {
3314 unsigned int *ip;
3315 - ip = me->module_core + me->arch.plt_offset +
3316 + ip = me->module_core_rx + me->arch.plt_offset +
3317 info->plt_offset;
3318 #ifndef CONFIG_64BIT
3319 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3320 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3321 val - loc + 0xffffUL < 0x1ffffeUL) ||
3322 (r_type == R_390_PLT32DBL &&
3323 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3324 - val = (Elf_Addr) me->module_core +
3325 + val = (Elf_Addr) me->module_core_rx +
3326 me->arch.plt_offset +
3327 info->plt_offset;
3328 val += rela->r_addend - loc;
3329 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3330 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3331 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3332 val = val + rela->r_addend -
3333 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3334 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3335 if (r_type == R_390_GOTOFF16)
3336 *(unsigned short *) loc = val;
3337 else if (r_type == R_390_GOTOFF32)
3338 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3339 break;
3340 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3341 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3342 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3343 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3344 rela->r_addend - loc;
3345 if (r_type == R_390_GOTPC)
3346 *(unsigned int *) loc = val;
3347 diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3348 --- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3349 +++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3350 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3351 early_param("mem", early_parse_mem);
3352
3353 #ifdef CONFIG_S390_SWITCH_AMODE
3354 -unsigned int switch_amode = 0;
3355 -EXPORT_SYMBOL_GPL(switch_amode);
3356 -
3357 static int set_amode_and_uaccess(unsigned long user_amode,
3358 unsigned long user32_amode)
3359 {
3360 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3361 return 0;
3362 }
3363 }
3364 -
3365 -/*
3366 - * Switch kernel/user addressing modes?
3367 - */
3368 -static int __init early_parse_switch_amode(char *p)
3369 -{
3370 - switch_amode = 1;
3371 - return 0;
3372 -}
3373 -early_param("switch_amode", early_parse_switch_amode);
3374 -
3375 #else /* CONFIG_S390_SWITCH_AMODE */
3376 static inline int set_amode_and_uaccess(unsigned long user_amode,
3377 unsigned long user32_amode)
3378 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3379 }
3380 #endif /* CONFIG_S390_SWITCH_AMODE */
3381
3382 -#ifdef CONFIG_S390_EXEC_PROTECT
3383 -unsigned int s390_noexec = 0;
3384 -EXPORT_SYMBOL_GPL(s390_noexec);
3385 -
3386 -/*
3387 - * Enable execute protection?
3388 - */
3389 -static int __init early_parse_noexec(char *p)
3390 -{
3391 - if (!strncmp(p, "off", 3))
3392 - return 0;
3393 - switch_amode = 1;
3394 - s390_noexec = 1;
3395 - return 0;
3396 -}
3397 -early_param("noexec", early_parse_noexec);
3398 -#endif /* CONFIG_S390_EXEC_PROTECT */
3399 -
3400 static void setup_addressing_mode(void)
3401 {
3402 if (s390_noexec) {
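
Taken together, the s390 setup.h, Kconfig and setup.c hunks turn switch_amode and s390_noexec from runtime opt-ins into build-time constants: the options default to y, the extern variables become #define switch_amode (1) / #define s390_noexec (1), and the switch_amode= / noexec= early_param() parsers are removed (with the help text reworded to match). Besides making the protections always-on when configured, a literal 1 lets the compiler fold every if (s390_noexec) branch at build time. A tiny illustration of that folding (CONFIG_S390_EXEC_PROTECT_DEMO is a hypothetical stand-in for the real Kconfig symbol):

#include <stdio.h>

#ifdef CONFIG_S390_EXEC_PROTECT_DEMO
#define s390_noexec (1)		/* feature configured in: constant true */
#else
#define s390_noexec (0)		/* configured out: constant false */
#endif

int main(void)
{
	/* with a constant condition the compiler keeps exactly one arm;
	 * there is no runtime switch left to forget or to tamper with */
	if (s390_noexec)
		printf("exec protection: compiled in, always on\n");
	else
		printf("exec protection: compiled out\n");
	return 0;
}
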
3403 diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3404 --- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3405 +++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3406 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3407 */
3408 if (mmap_is_legacy()) {
3409 mm->mmap_base = TASK_UNMAPPED_BASE;
3410 +
3411 +#ifdef CONFIG_PAX_RANDMMAP
3412 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3413 + mm->mmap_base += mm->delta_mmap;
3414 +#endif
3415 +
3416 mm->get_unmapped_area = arch_get_unmapped_area;
3417 mm->unmap_area = arch_unmap_area;
3418 } else {
3419 mm->mmap_base = mmap_base();
3420 +
3421 +#ifdef CONFIG_PAX_RANDMMAP
3422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3423 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3424 +#endif
3425 +
3426 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3427 mm->unmap_area = arch_unmap_area_topdown;
3428 }
3429 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3430 */
3431 if (mmap_is_legacy()) {
3432 mm->mmap_base = TASK_UNMAPPED_BASE;
3433 +
3434 +#ifdef CONFIG_PAX_RANDMMAP
3435 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3436 + mm->mmap_base += mm->delta_mmap;
3437 +#endif
3438 +
3439 mm->get_unmapped_area = s390_get_unmapped_area;
3440 mm->unmap_area = arch_unmap_area;
3441 } else {
3442 mm->mmap_base = mmap_base();
3443 +
3444 +#ifdef CONFIG_PAX_RANDMMAP
3445 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3446 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3447 +#endif
3448 +
3449 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3450 mm->unmap_area = arch_unmap_area_topdown;
3451 }
3452 diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3453 --- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3454 +++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3455 @@ -17,7 +17,7 @@ do { \
3456 #define finish_arch_switch(prev) do {} while (0)
3457
3458 typedef void (*vi_handler_t)(void);
3459 -extern unsigned long arch_align_stack(unsigned long sp);
3460 +#define arch_align_stack(x) (x)
3461
3462 #define mb() barrier()
3463 #define rmb() barrier()
3464 diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3465 --- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3466 +++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3467 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3468
3469 return task_pt_regs(task)->cp0_epc;
3470 }
3471 -
3472 -unsigned long arch_align_stack(unsigned long sp)
3473 -{
3474 - return sp;
3475 -}
3476 diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3477 --- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3478 +++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3479 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3480 return 0;
3481 }
3482
3483 -static struct platform_suspend_ops hp6x0_pm_ops = {
3484 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3485 .enter = hp6x0_pm_enter,
3486 .valid = suspend_valid_only_mem,
3487 };
3488 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3489 --- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3490 +++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3491 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3492 NULL,
3493 };
3494
3495 -static struct sysfs_ops sq_sysfs_ops = {
3496 +static const struct sysfs_ops sq_sysfs_ops = {
3497 .show = sq_sysfs_show,
3498 .store = sq_sysfs_store,
3499 };
3500 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3501 --- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3502 +++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3503 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3504 return 0;
3505 }
3506
3507 -static struct platform_suspend_ops sh_pm_ops = {
3508 +static const struct platform_suspend_ops sh_pm_ops = {
3509 .enter = sh_pm_enter,
3510 .valid = suspend_valid_only_mem,
3511 };
3512 diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3513 --- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3514 +++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3515 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3516 {
3517 }
3518
3519 -struct kgdb_arch arch_kgdb_ops = {
3520 +const struct kgdb_arch arch_kgdb_ops = {
3521 /* Breakpoint instruction: trapa #0x3c */
3522 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3523 .gdb_bpt_instr = { 0x3c, 0xc3 },
3524 diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3525 --- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3526 +++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3527 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3528 addr = PAGE_ALIGN(addr);
3529
3530 vma = find_vma(mm, addr);
3531 - if (TASK_SIZE - len >= addr &&
3532 - (!vma || addr + len <= vma->vm_start))
3533 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3534 return addr;
3535 }
3536
3537 @@ -106,7 +105,7 @@ full_search:
3538 }
3539 return -ENOMEM;
3540 }
3541 - if (likely(!vma || addr + len <= vma->vm_start)) {
3542 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3543 /*
3544 * Remember the place where we stopped the search:
3545 */
3546 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3547 addr = PAGE_ALIGN(addr);
3548
3549 vma = find_vma(mm, addr);
3550 - if (TASK_SIZE - len >= addr &&
3551 - (!vma || addr + len <= vma->vm_start))
3552 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3553 return addr;
3554 }
3555
3556 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3557 /* make sure it can fit in the remaining address space */
3558 if (likely(addr > len)) {
3559 vma = find_vma(mm, addr-len);
3560 - if (!vma || addr <= vma->vm_start) {
3561 + if (check_heap_stack_gap(vma, addr - len, len)) {
3562 /* remember the address as a hint for next time */
3563 return (mm->free_area_cache = addr-len);
3564 }
3565 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3566 if (unlikely(mm->mmap_base < len))
3567 goto bottomup;
3568
3569 - addr = mm->mmap_base-len;
3570 - if (do_colour_align)
3571 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3572 + addr = mm->mmap_base - len;
3573
3574 do {
3575 + if (do_colour_align)
3576 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3577 /*
3578 * Lookup failure means no vma is above this address,
3579 * else if new region fits below vma->vm_start,
3580 * return with success:
3581 */
3582 vma = find_vma(mm, addr);
3583 - if (likely(!vma || addr+len <= vma->vm_start)) {
3584 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3585 /* remember the address as a hint for next time */
3586 return (mm->free_area_cache = addr);
3587 }
3588 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3589 mm->cached_hole_size = vma->vm_start - addr;
3590
3591 /* try just below the current vma->vm_start */
3592 - addr = vma->vm_start-len;
3593 - if (do_colour_align)
3594 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3595 - } while (likely(len < vma->vm_start));
3596 + addr = skip_heap_stack_gap(vma, len);
3597 + } while (!IS_ERR_VALUE(addr));
3598
3599 bottomup:
3600 /*
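
The sh/mm/mmap.c hunk (and the slice.c hunk earlier) restructures the top-down search: the first candidate is computed as mmap_base - len up front (or -ENOMEM when that would underflow), the loop condition becomes !IS_ERR_VALUE(addr), and the step to the next candidate goes through skip_heap_stack_gap() instead of the bare vma->vm_start - len. Encoding "no room left" as an errno-valued address removes the underflow-prone while (addr > len) bound. A compact userspace model of that control flow (next_candidate() is a hypothetical stand-in for skip_heap_stack_gap(), and the blocker list replaces find_vma()):

#include <stdio.h>

#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-4095)
#define ENOMEM		12

/* next candidate start below a blocking mapping, or -ENOMEM encoded
 * as an "address" once nothing can fit underneath it */
static unsigned long next_candidate(unsigned long blocker_start, unsigned long len)
{
	return blocker_start < len ? (unsigned long)-ENOMEM
				   : blocker_start - len;
}

int main(void)
{
	unsigned long len = 0x2000;
	unsigned long blockers[] = { 0x6000, 0x3000, 0x1000 };	/* searched top-down */
	unsigned long addr = 0x8000 - len;			/* mmap_base - len */
	unsigned int i = 0;

	while (!IS_ERR_VALUE(addr)) {
		/* fits if no blocker is left, or it ends below the next one */
		if (i >= 3 || addr + len <= blockers[i]) {
			printf("placed at %#lx\n", addr);
			return 0;
		}
		addr = next_candidate(blockers[i++], len);
	}
	printf("out of address space (%ld)\n", (long)addr);
	return 0;
}
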
3601 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3602 --- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3603 +++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3604 @@ -14,18 +14,40 @@
3605 #define ATOMIC64_INIT(i) { (i) }
3606
3607 #define atomic_read(v) ((v)->counter)
3608 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3609 +{
3610 + return v->counter;
3611 +}
3612 #define atomic64_read(v) ((v)->counter)
3613 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3614 +{
3615 + return v->counter;
3616 +}
3617
3618 #define atomic_set(v, i) (((v)->counter) = i)
3619 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3620 +{
3621 + v->counter = i;
3622 +}
3623 #define atomic64_set(v, i) (((v)->counter) = i)
3624 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3625 +{
3626 + v->counter = i;
3627 +}
3628
3629 extern void atomic_add(int, atomic_t *);
3630 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3631 extern void atomic64_add(long, atomic64_t *);
3632 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3633 extern void atomic_sub(int, atomic_t *);
3634 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3635 extern void atomic64_sub(long, atomic64_t *);
3636 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3637
3638 extern int atomic_add_ret(int, atomic_t *);
3639 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3640 extern long atomic64_add_ret(long, atomic64_t *);
3641 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3642 extern int atomic_sub_ret(int, atomic_t *);
3643 extern long atomic64_sub_ret(long, atomic64_t *);
3644
3645 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3646 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3647
3648 #define atomic_inc_return(v) atomic_add_ret(1, v)
3649 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3650 +{
3651 + return atomic_add_ret_unchecked(1, v);
3652 +}
3653 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3654 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3655 +{
3656 + return atomic64_add_ret_unchecked(1, v);
3657 +}
3658
3659 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3660 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3661
3662 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3663 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3664 +{
3665 + return atomic_add_ret_unchecked(i, v);
3666 +}
3667 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3668 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3669 +{
3670 + return atomic64_add_ret_unchecked(i, v);
3671 +}
3672
3673 /*
3674 * atomic_inc_and_test - increment and test
3675 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3676 * other cases.
3677 */
3678 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3679 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3680 +{
3681 + return atomic_inc_return_unchecked(v) == 0;
3682 +}
3683 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3684
3685 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3686 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3687 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3688
3689 #define atomic_inc(v) atomic_add(1, v)
3690 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3691 +{
3692 + atomic_add_unchecked(1, v);
3693 +}
3694 #define atomic64_inc(v) atomic64_add(1, v)
3695 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3696 +{
3697 + atomic64_add_unchecked(1, v);
3698 +}
3699
3700 #define atomic_dec(v) atomic_sub(1, v)
3701 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3702 +{
3703 + atomic_sub_unchecked(1, v);
3704 +}
3705 #define atomic64_dec(v) atomic64_sub(1, v)
3706 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3707 +{
3708 + atomic64_sub_unchecked(1, v);
3709 +}
3710
3711 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3712 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3713
3714 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3715 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3716 +{
3717 + return cmpxchg(&v->counter, old, new);
3718 +}
3719 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3720 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3721 +{
3722 + return xchg(&v->counter, new);
3723 +}
3724
3725 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3726 {
3727 - int c, old;
3728 + int c, old, new;
3729 c = atomic_read(v);
3730 for (;;) {
3731 - if (unlikely(c == (u)))
3732 + if (unlikely(c == u))
3733 break;
3734 - old = atomic_cmpxchg((v), c, c + (a));
3735 +
3736 + asm volatile("addcc %2, %0, %0\n"
3737 +
3738 +#ifdef CONFIG_PAX_REFCOUNT
3739 + "tvs %%icc, 6\n"
3740 +#endif
3741 +
3742 + : "=r" (new)
3743 + : "0" (c), "ir" (a)
3744 + : "cc");
3745 +
3746 + old = atomic_cmpxchg(v, c, new);
3747 if (likely(old == c))
3748 break;
3749 c = old;
3750 }
3751 - return c != (u);
3752 + return c != u;
3753 }
3754
3755 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3756 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3757 #define atomic64_cmpxchg(v, o, n) \
3758 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3759 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3760 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3761 +{
3762 + return xchg(&v->counter, new);
3763 +}
3764
3765 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3766 {
3767 - long c, old;
3768 + long c, old, new;
3769 c = atomic64_read(v);
3770 for (;;) {
3771 - if (unlikely(c == (u)))
3772 + if (unlikely(c == u))
3773 break;
3774 - old = atomic64_cmpxchg((v), c, c + (a));
3775 +
3776 + asm volatile("addcc %2, %0, %0\n"
3777 +
3778 +#ifdef CONFIG_PAX_REFCOUNT
3779 + "tvs %%xcc, 6\n"
3780 +#endif
3781 +
3782 + : "=r" (new)
3783 + : "0" (c), "ir" (a)
3784 + : "cc");
3785 +
3786 + old = atomic64_cmpxchg(v, c, new);
3787 if (likely(old == c))
3788 break;
3789 c = old;
3790 }
3791 - return c != (u);
3792 + return c != u;
3793 }
3794
3795 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
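
The atomic_64.h hunk is the sparc64 half of PAX_REFCOUNT: under CONFIG_PAX_REFCOUNT the regular atomic ops detect signed overflow in hardware (addcc/subcc followed by a tvs %icc, 6 or tvs %xcc, 6 trap), while new *_unchecked variants on atomic_unchecked_t keep plain wrap-around semantics for counters where wrapping is harmless by design. The sketch below only shows the checked/unchecked split; it is not atomic and uses a GCC builtin where the patch uses the sparc trap instruction:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked flavour: refuse to wrap; abort() stands in for the overflow
 * trap (assumes a compiler providing __builtin_add_overflow) */
static void atomic_inc(atomic_t *v)
{
	int newval;

	if (__builtin_add_overflow(v->counter, 1, &newval))
		abort();		/* refcount overflow treated as an attack */
	v->counter = newval;
}

/* unchecked flavour: ordinary wrap-around, for counters that are
 * explicitly allowed to overflow */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;
}

int main(void)
{
	atomic_t refs = { 41 };
	atomic_unchecked_t stats = { 41 };

	atomic_inc(&refs);		/* would abort() at INT_MAX instead of wrapping */
	atomic_inc_unchecked(&stats);	/* would silently wrap at INT_MAX */
	printf("refs=%d stats=%d\n", refs.counter, stats.counter);
	return 0;
}
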
3796 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3797 --- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3798 +++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3799 @@ -8,7 +8,7 @@
3800 #define _SPARC_CACHE_H
3801
3802 #define L1_CACHE_SHIFT 5
3803 -#define L1_CACHE_BYTES 32
3804 +#define L1_CACHE_BYTES 32UL
3805 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3806
3807 #ifdef CONFIG_SPARC32
3808 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3809 --- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3810 +++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3811 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3812 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3813 #define dma_is_consistent(d, h) (1)
3814
3815 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3816 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3817 extern struct bus_type pci_bus_type;
3818
3819 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3820 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3821 {
3822 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3823 if (dev->bus == &pci_bus_type)
3824 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3825 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3826 dma_addr_t *dma_handle, gfp_t flag)
3827 {
3828 - struct dma_map_ops *ops = get_dma_ops(dev);
3829 + const struct dma_map_ops *ops = get_dma_ops(dev);
3830 void *cpu_addr;
3831
3832 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3833 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3834 static inline void dma_free_coherent(struct device *dev, size_t size,
3835 void *cpu_addr, dma_addr_t dma_handle)
3836 {
3837 - struct dma_map_ops *ops = get_dma_ops(dev);
3838 + const struct dma_map_ops *ops = get_dma_ops(dev);
3839
3840 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3841 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3842 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3843 --- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3844 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3845 @@ -116,6 +116,13 @@ typedef struct {
3846
3847 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3848
3849 +#ifdef CONFIG_PAX_ASLR
3850 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3851 +
3852 +#define PAX_DELTA_MMAP_LEN 16
3853 +#define PAX_DELTA_STACK_LEN 16
3854 +#endif
3855 +
3856 /* This yields a mask that user programs can use to figure out what
3857 instruction set this cpu supports. This can NOT be done in userspace
3858 on Sparc. */
3859 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3860 --- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3861 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3862 @@ -163,6 +163,12 @@ typedef struct {
3863 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3864 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3865
3866 +#ifdef CONFIG_PAX_ASLR
3867 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3868 +
3869 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3870 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3871 +#endif
3872
3873 /* This yields a mask that user programs can use to figure out what
3874 instruction set this cpu supports. */
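
The PAX_ASLR blocks added to the ELF headers in this patch (s390, sparc32 and sparc64 above) each define three knobs: PAX_ELF_ET_DYN_BASE, the load base used for randomized ET_DYN binaries, plus PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN, the number of random bits mixed into the mmap and stack bases, chosen per word size via TIF_32BIT and similar flags. Since the deltas are applied in page units, the bit counts translate directly into a randomization range; the arithmetic below uses the sparc64 values from the hunk above and assumes 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed */

static void show(const char *what, unsigned int bits)
{
	unsigned long long range = (1ULL << bits) << PAGE_SHIFT;

	printf("%-20s %2u bits -> %llu MiB of slide\n", what, bits, range >> 20);
}

int main(void)
{
	show("mmap, 32-bit task", 14);
	show("mmap, 64-bit task", 28);
	show("stack, 32-bit task", 15);
	show("stack, 64-bit task", 29);
	return 0;
}
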
3875 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3876 --- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3877 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3878 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3879 BTFIXUPDEF_INT(page_none)
3880 BTFIXUPDEF_INT(page_copy)
3881 BTFIXUPDEF_INT(page_readonly)
3882 +
3883 +#ifdef CONFIG_PAX_PAGEEXEC
3884 +BTFIXUPDEF_INT(page_shared_noexec)
3885 +BTFIXUPDEF_INT(page_copy_noexec)
3886 +BTFIXUPDEF_INT(page_readonly_noexec)
3887 +#endif
3888 +
3889 BTFIXUPDEF_INT(page_kernel)
3890
3891 #define PMD_SHIFT SUN4C_PMD_SHIFT
3892 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3893 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3894 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3895
3896 +#ifdef CONFIG_PAX_PAGEEXEC
3897 +extern pgprot_t PAGE_SHARED_NOEXEC;
3898 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3899 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3900 +#else
3901 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3902 +# define PAGE_COPY_NOEXEC PAGE_COPY
3903 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3904 +#endif
3905 +
3906 extern unsigned long page_kernel;
3907
3908 #ifdef MODULE
3909 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
3910 --- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3911 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3912 @@ -115,6 +115,13 @@
3913 SRMMU_EXEC | SRMMU_REF)
3914 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3915 SRMMU_EXEC | SRMMU_REF)
3916 +
3917 +#ifdef CONFIG_PAX_PAGEEXEC
3918 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3919 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3920 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3921 +#endif
3922 +
3923 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3924 SRMMU_DIRTY | SRMMU_REF)
3925
3926 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
3927 --- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3928 +++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
3929 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3930
3931 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3932
3933 -static void inline arch_read_lock(raw_rwlock_t *lock)
3934 +static inline void arch_read_lock(raw_rwlock_t *lock)
3935 {
3936 unsigned long tmp1, tmp2;
3937
3938 __asm__ __volatile__ (
3939 "1: ldsw [%2], %0\n"
3940 " brlz,pn %0, 2f\n"
3941 -"4: add %0, 1, %1\n"
3942 +"4: addcc %0, 1, %1\n"
3943 +
3944 +#ifdef CONFIG_PAX_REFCOUNT
3945 +" tvs %%icc, 6\n"
3946 +#endif
3947 +
3948 " cas [%2], %0, %1\n"
3949 " cmp %0, %1\n"
3950 " bne,pn %%icc, 1b\n"
3951 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
3952 " .previous"
3953 : "=&r" (tmp1), "=&r" (tmp2)
3954 : "r" (lock)
3955 - : "memory");
3956 + : "memory", "cc");
3957 }
3958
3959 -static int inline arch_read_trylock(raw_rwlock_t *lock)
3960 +static inline int arch_read_trylock(raw_rwlock_t *lock)
3961 {
3962 int tmp1, tmp2;
3963
3964 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3965 "1: ldsw [%2], %0\n"
3966 " brlz,a,pn %0, 2f\n"
3967 " mov 0, %0\n"
3968 -" add %0, 1, %1\n"
3969 +" addcc %0, 1, %1\n"
3970 +
3971 +#ifdef CONFIG_PAX_REFCOUNT
3972 +" tvs %%icc, 6\n"
3973 +#endif
3974 +
3975 " cas [%2], %0, %1\n"
3976 " cmp %0, %1\n"
3977 " bne,pn %%icc, 1b\n"
3978 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3979 return tmp1;
3980 }
3981
3982 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3983 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3984 {
3985 unsigned long tmp1, tmp2;
3986
3987 __asm__ __volatile__(
3988 "1: lduw [%2], %0\n"
3989 -" sub %0, 1, %1\n"
3990 +" subcc %0, 1, %1\n"
3991 +
3992 +#ifdef CONFIG_PAX_REFCOUNT
3993 +" tvs %%icc, 6\n"
3994 +#endif
3995 +
3996 " cas [%2], %0, %1\n"
3997 " cmp %0, %1\n"
3998 " bne,pn %%xcc, 1b\n"
3999 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4000 : "memory");
4001 }
4002
4003 -static void inline arch_write_lock(raw_rwlock_t *lock)
4004 +static inline void arch_write_lock(raw_rwlock_t *lock)
4005 {
4006 unsigned long mask, tmp1, tmp2;
4007
4008 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4009 : "memory");
4010 }
4011
4012 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4013 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4014 {
4015 __asm__ __volatile__(
4016 " stw %%g0, [%0]"
4017 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4018 : "memory");
4019 }
4020
4021 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4022 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4023 {
4024 unsigned long mask, tmp1, tmp2, result;
4025
4026 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4027 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4028 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4029 @@ -50,6 +50,8 @@ struct thread_info {
4030 unsigned long w_saved;
4031
4032 struct restart_block restart_block;
4033 +
4034 + unsigned long lowest_stack;
4035 };
4036
4037 /*
4038 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4039 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4040 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4041 @@ -68,6 +68,8 @@ struct thread_info {
4042 struct pt_regs *kern_una_regs;
4043 unsigned int kern_una_insn;
4044
4045 + unsigned long lowest_stack;
4046 +
4047 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4048 };
4049
4050 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4051 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4052 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4053 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4054
4055 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4056 {
4057 - if (n && __access_ok((unsigned long) to, n))
4058 + if ((long)n < 0)
4059 + return n;
4060 +
4061 + if (n && __access_ok((unsigned long) to, n)) {
4062 + if (!__builtin_constant_p(n))
4063 + check_object_size(from, n, true);
4064 return __copy_user(to, (__force void __user *) from, n);
4065 - else
4066 + } else
4067 return n;
4068 }
4069
4070 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 + if ((long)n < 0)
4073 + return n;
4074 +
4075 + if (!__builtin_constant_p(n))
4076 + check_object_size(from, n, true);
4077 +
4078 return __copy_user(to, (__force void __user *) from, n);
4079 }
4080
4081 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4082 {
4083 - if (n && __access_ok((unsigned long) from, n))
4084 + if ((long)n < 0)
4085 + return n;
4086 +
4087 + if (n && __access_ok((unsigned long) from, n)) {
4088 + if (!__builtin_constant_p(n))
4089 + check_object_size(to, n, false);
4090 return __copy_user((__force void __user *) to, from, n);
4091 - else
4092 + } else
4093 return n;
4094 }
4095
4096 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4097 {
4098 + if ((long)n < 0)
4099 + return n;
4100 +
4101 return __copy_user((__force void __user *) to, from, n);
4102 }
4103
4104 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4105 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4106 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4107 @@ -9,6 +9,7 @@
4108 #include <linux/compiler.h>
4109 #include <linux/string.h>
4110 #include <linux/thread_info.h>
4111 +#include <linux/kernel.h>
4112 #include <asm/asi.h>
4113 #include <asm/system.h>
4114 #include <asm/spitfire.h>
4115 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4116 static inline unsigned long __must_check
4117 copy_from_user(void *to, const void __user *from, unsigned long size)
4118 {
4119 - unsigned long ret = ___copy_from_user(to, from, size);
4120 + unsigned long ret;
4121
4122 + if ((long)size < 0 || size > INT_MAX)
4123 + return size;
4124 +
4125 + if (!__builtin_constant_p(size))
4126 + check_object_size(to, size, false);
4127 +
4128 + ret = ___copy_from_user(to, from, size);
4129 if (unlikely(ret))
4130 ret = copy_from_user_fixup(to, from, size);
4131 return ret;
4132 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4133 static inline unsigned long __must_check
4134 copy_to_user(void __user *to, const void *from, unsigned long size)
4135 {
4136 - unsigned long ret = ___copy_to_user(to, from, size);
4137 + unsigned long ret;
4138 +
4139 + if ((long)size < 0 || size > INT_MAX)
4140 + return size;
4141 +
4142 + if (!__builtin_constant_p(size))
4143 + check_object_size(from, size, true);
4144
4145 + ret = ___copy_to_user(to, from, size);
4146 if (unlikely(ret))
4147 ret = copy_to_user_fixup(to, from, size);
4148 return ret;
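
The sparc uaccess hunks layer two checks onto every user copy: a length sanity test ((long)n < 0 on sparc32, (long)size < 0 || size > INT_MAX on sparc64) that refuses underflowed sizes, and, for sizes the compiler cannot prove constant, a call to check_object_size(), the PAX_USERCOPY hook declared in the uaccess.h hunk just below, which is meant to verify that the kernel buffer really is a single heap or stack object large enough for the copy. A schematic of the wrapper shape with stand-in names (the stub check_object_size() here only marks where the real verification sits):

#include <stdio.h>
#include <limits.h>
#include <string.h>

/* stub for the PAX_USERCOPY object-bounds check */
static void check_object_size(const void *ptr, unsigned long n, int to_user)
{
	(void)ptr; (void)n; (void)to_user;
}

static unsigned long hardened_copy_to_user(void *to, const void *from,
					   unsigned long size)
{
	if ((long)size < 0 || size > INT_MAX)
		return size;			/* bogus length: copy nothing */

	if (!__builtin_constant_p(size))	/* only runtime-sized copies */
		check_object_size(from, size, 1);

	memcpy(to, from, size);		/* stands in for ___copy_to_user() */
	return 0;			/* bytes left uncopied */
}

int main(void)
{
	char src[8] = "payload", dst[8];

	printf("good copy left %lu, bogus copy left %lu bytes\n",
	       hardened_copy_to_user(dst, src, sizeof(src)),
	       hardened_copy_to_user(dst, src, (unsigned long)-4));
	return 0;
}
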
4149 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4150 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4151 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4152 @@ -1,5 +1,13 @@
4153 #ifndef ___ASM_SPARC_UACCESS_H
4154 #define ___ASM_SPARC_UACCESS_H
4155 +
4156 +#ifdef __KERNEL__
4157 +#ifndef __ASSEMBLY__
4158 +#include <linux/types.h>
4159 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4160 +#endif
4161 +#endif
4162 +
4163 #if defined(__sparc__) && defined(__arch64__)
4164 #include <asm/uaccess_64.h>
4165 #else
4166 diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4167 --- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4168 +++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4169 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4170 spin_unlock_irqrestore(&iommu->lock, flags);
4171 }
4172
4173 -static struct dma_map_ops sun4u_dma_ops = {
4174 +static const struct dma_map_ops sun4u_dma_ops = {
4175 .alloc_coherent = dma_4u_alloc_coherent,
4176 .free_coherent = dma_4u_free_coherent,
4177 .map_page = dma_4u_map_page,
4178 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4179 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4180 };
4181
4182 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4183 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4184 EXPORT_SYMBOL(dma_ops);
4185
4186 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4187 diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4188 --- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4189 +++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4190 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4191 BUG();
4192 }
4193
4194 -struct dma_map_ops sbus_dma_ops = {
4195 +const struct dma_map_ops sbus_dma_ops = {
4196 .alloc_coherent = sbus_alloc_coherent,
4197 .free_coherent = sbus_free_coherent,
4198 .map_page = sbus_map_page,
4199 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4200 .sync_sg_for_device = sbus_sync_sg_for_device,
4201 };
4202
4203 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4204 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4205 EXPORT_SYMBOL(dma_ops);
4206
4207 static int __init sparc_register_ioport(void)
4208 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4209 }
4210 }
4211
4212 -struct dma_map_ops pci32_dma_ops = {
4213 +const struct dma_map_ops pci32_dma_ops = {
4214 .alloc_coherent = pci32_alloc_coherent,
4215 .free_coherent = pci32_free_coherent,
4216 .map_page = pci32_map_page,
4217 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4218 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4219 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4220 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4221 {
4222 }
4223
4224 -struct kgdb_arch arch_kgdb_ops = {
4225 +const struct kgdb_arch arch_kgdb_ops = {
4226 /* Breakpoint instruction: ta 0x7d */
4227 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4228 };
4229 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4230 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4231 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4232 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4233 {
4234 }
4235
4236 -struct kgdb_arch arch_kgdb_ops = {
4237 +const struct kgdb_arch arch_kgdb_ops = {
4238 /* Breakpoint instruction: ta 0x72 */
4239 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4240 };
4241 diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4242 --- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4243 +++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4244 @@ -3,7 +3,7 @@
4245 #
4246
4247 asflags-y := -ansi
4248 -ccflags-y := -Werror
4249 +#ccflags-y := -Werror
4250
4251 extra-y := head_$(BITS).o
4252 extra-y += init_task.o
4253 diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4254 --- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4255 +++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4256 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4257 spin_unlock_irqrestore(&iommu->lock, flags);
4258 }
4259
4260 -static struct dma_map_ops sun4v_dma_ops = {
4261 +static const struct dma_map_ops sun4v_dma_ops = {
4262 .alloc_coherent = dma_4v_alloc_coherent,
4263 .free_coherent = dma_4v_free_coherent,
4264 .map_page = dma_4v_map_page,
4265 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4266 --- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4267 +++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4268 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4269 rw->ins[4], rw->ins[5],
4270 rw->ins[6],
4271 rw->ins[7]);
4272 - printk("%pS\n", (void *) rw->ins[7]);
4273 + printk("%pA\n", (void *) rw->ins[7]);
4274 rw = (struct reg_window32 *) rw->ins[6];
4275 }
4276 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4277 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4278
4279 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4280 r->psr, r->pc, r->npc, r->y, print_tainted());
4281 - printk("PC: <%pS>\n", (void *) r->pc);
4282 + printk("PC: <%pA>\n", (void *) r->pc);
4283 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4284 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4285 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4286 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4287 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4288 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4289 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4290 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4291
4292 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4293 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4294 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4295 rw = (struct reg_window32 *) fp;
4296 pc = rw->ins[7];
4297 printk("[%08lx : ", pc);
4298 - printk("%pS ] ", (void *) pc);
4299 + printk("%pA ] ", (void *) pc);
4300 fp = rw->ins[6];
4301 } while (++count < 16);
4302 printk("\n");
4303 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4304 --- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4305 +++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4306 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4307 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4308 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4309 if (regs->tstate & TSTATE_PRIV)
4310 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4311 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4312 }
4313
4314 void show_regs(struct pt_regs *regs)
4315 {
4316 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4317 regs->tpc, regs->tnpc, regs->y, print_tainted());
4318 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4319 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4320 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4321 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4322 regs->u_regs[3]);
4323 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4324 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4325 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4326 regs->u_regs[15]);
4327 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4328 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4329 show_regwindow(regs);
4330 }
4331
4332 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4333 ((tp && tp->task) ? tp->task->pid : -1));
4334
4335 if (gp->tstate & TSTATE_PRIV) {
4336 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4337 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4338 (void *) gp->tpc,
4339 (void *) gp->o7,
4340 (void *) gp->i7,
4341 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4342 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4343 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4344 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4345 if (ARCH_SUN4C && len > 0x20000000)
4346 return -ENOMEM;
4347 if (!addr)
4348 - addr = TASK_UNMAPPED_BASE;
4349 + addr = current->mm->mmap_base;
4350
4351 if (flags & MAP_SHARED)
4352 addr = COLOUR_ALIGN(addr);
4353 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4354 }
4355 if (TASK_SIZE - PAGE_SIZE - len < addr)
4356 return -ENOMEM;
4357 - if (!vmm || addr + len <= vmm->vm_start)
4358 + if (check_heap_stack_gap(vmm, addr, len))
4359 return addr;
4360 addr = vmm->vm_end;
4361 if (flags & MAP_SHARED)
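
In the arch_get_unmapped_area() hunk above, and in the sparc64 and hugetlb variants that follow, the open-coded "!vma || addr + len <= vma->vm_start" test is replaced by check_heap_stack_gap(). A minimal user-space sketch of the idea behind such a helper: besides the plain no-overlap test, keep an extra gap below a grows-down stack vma. The gap size, flag value and addresses below are assumptions for the example only, not the values the patch uses.

    /* Illustrative sketch of a heap/stack gap check, not the kernel helper. */
    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SIZE    4096UL
    #define GAP_PAGES    64UL          /* assumed guard-gap size */
    #define VM_GROWSDOWN 0x1UL         /* stands in for the real flag */

    struct vma { unsigned long vm_start, vm_end, vm_flags; };

    /* Accept [addr, addr+len) only if it stays clear of the next vma,
     * with an extra gap when that vma is a grows-down stack. */
    static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
    {
        if (!next)
            return true;
        if (next->vm_flags & VM_GROWSDOWN)
            return addr + len + GAP_PAGES * PAGE_SIZE <= next->vm_start;
        return addr + len <= next->vm_start;
    }

    int main(void)
    {
        struct vma stack = { 0x7f0000000000UL, 0x7f0000021000UL, VM_GROWSDOWN };
        printf("%d\n", gap_ok(&stack, 0x7efffff00000UL, 0x10000UL)); /* 1: fits  */
        printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, 0x10000UL)); /* 0: too close */
        return 0;
    }
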
4362 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4363 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4364 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4365 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4366 /* We do not accept a shared mapping if it would violate
4367 * cache aliasing constraints.
4368 */
4369 - if ((flags & MAP_SHARED) &&
4370 + if ((filp || (flags & MAP_SHARED)) &&
4371 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4372 return -EINVAL;
4373 return addr;
4374 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4375 if (filp || (flags & MAP_SHARED))
4376 do_color_align = 1;
4377
4378 +#ifdef CONFIG_PAX_RANDMMAP
4379 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4380 +#endif
4381 +
4382 if (addr) {
4383 if (do_color_align)
4384 addr = COLOUR_ALIGN(addr, pgoff);
4385 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4386 addr = PAGE_ALIGN(addr);
4387
4388 vma = find_vma(mm, addr);
4389 - if (task_size - len >= addr &&
4390 - (!vma || addr + len <= vma->vm_start))
4391 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4392 return addr;
4393 }
4394
4395 if (len > mm->cached_hole_size) {
4396 - start_addr = addr = mm->free_area_cache;
4397 + start_addr = addr = mm->free_area_cache;
4398 } else {
4399 - start_addr = addr = TASK_UNMAPPED_BASE;
4400 + start_addr = addr = mm->mmap_base;
4401 mm->cached_hole_size = 0;
4402 }
4403
4404 @@ -175,14 +178,14 @@ full_search:
4405 vma = find_vma(mm, VA_EXCLUDE_END);
4406 }
4407 if (unlikely(task_size < addr)) {
4408 - if (start_addr != TASK_UNMAPPED_BASE) {
4409 - start_addr = addr = TASK_UNMAPPED_BASE;
4410 + if (start_addr != mm->mmap_base) {
4411 + start_addr = addr = mm->mmap_base;
4412 mm->cached_hole_size = 0;
4413 goto full_search;
4414 }
4415 return -ENOMEM;
4416 }
4417 - if (likely(!vma || addr + len <= vma->vm_start)) {
4418 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4419 /*
4420 * Remember the place where we stopped the search:
4421 */
4422 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4423 /* We do not accept a shared mapping if it would violate
4424 * cache aliasing constraints.
4425 */
4426 - if ((flags & MAP_SHARED) &&
4427 + if ((filp || (flags & MAP_SHARED)) &&
4428 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4429 return -EINVAL;
4430 return addr;
4431 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4432 addr = PAGE_ALIGN(addr);
4433
4434 vma = find_vma(mm, addr);
4435 - if (task_size - len >= addr &&
4436 - (!vma || addr + len <= vma->vm_start))
4437 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4438 return addr;
4439 }
4440
4441 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4442 /* make sure it can fit in the remaining address space */
4443 if (likely(addr > len)) {
4444 vma = find_vma(mm, addr-len);
4445 - if (!vma || addr <= vma->vm_start) {
4446 + if (check_heap_stack_gap(vma, addr - len, len)) {
4447 /* remember the address as a hint for next time */
4448 return (mm->free_area_cache = addr-len);
4449 }
4450 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4451 if (unlikely(mm->mmap_base < len))
4452 goto bottomup;
4453
4454 - addr = mm->mmap_base-len;
4455 - if (do_color_align)
4456 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4457 + addr = mm->mmap_base - len;
4458
4459 do {
4460 + if (do_color_align)
4461 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4462 /*
4463 * Lookup failure means no vma is above this address,
4464 * else if new region fits below vma->vm_start,
4465 * return with success:
4466 */
4467 vma = find_vma(mm, addr);
4468 - if (likely(!vma || addr+len <= vma->vm_start)) {
4469 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4470 /* remember the address as a hint for next time */
4471 return (mm->free_area_cache = addr);
4472 }
4473 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4474 mm->cached_hole_size = vma->vm_start - addr;
4475
4476 /* try just below the current vma->vm_start */
4477 - addr = vma->vm_start-len;
4478 - if (do_color_align)
4479 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4480 - } while (likely(len < vma->vm_start));
4481 + addr = skip_heap_stack_gap(vma, len);
4482 + } while (!IS_ERR_VALUE(addr));
4483
4484 bottomup:
4485 /*
4486 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4487 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4488 sysctl_legacy_va_layout) {
4489 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4490 +
4491 +#ifdef CONFIG_PAX_RANDMMAP
4492 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4493 + mm->mmap_base += mm->delta_mmap;
4494 +#endif
4495 +
4496 mm->get_unmapped_area = arch_get_unmapped_area;
4497 mm->unmap_area = arch_unmap_area;
4498 } else {
4499 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4500 gap = (task_size / 6 * 5);
4501
4502 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4503 +
4504 +#ifdef CONFIG_PAX_RANDMMAP
4505 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4506 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4507 +#endif
4508 +
4509 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4510 mm->unmap_area = arch_unmap_area_topdown;
4511 }
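
The arch_pick_mmap_layout() hunks above shift mmap_base by the per-process PaX deltas when MF_PAX_RANDMMAP is set: up by delta_mmap in the legacy bottom-up layout, down by delta_mmap + delta_stack in the top-down layout. A standalone sketch of just that arithmetic; every constant below is invented for the example.

    /* Minimal sketch of the mmap_base adjustment shown above; only the
     * arithmetic mirrors the patch, the numbers are made up. */
    #include <stdio.h>

    #define PAGE_SIZE     8192UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long task_size     = 0x0000080000000000UL; /* assumed */
        unsigned long unmapped_base = task_size / 3;        /* assumed */
        unsigned long random_factor = 0x12000UL;            /* assumed */
        unsigned long delta_mmap    = 0x345000UL;           /* assumed delta_mmap  */
        unsigned long delta_stack   = 0x67000UL;            /* assumed delta_stack */
        unsigned long gap           = task_size / 6 * 5;

        /* legacy (bottom-up) layout: base is pushed up by delta_mmap */
        unsigned long legacy_base  = unmapped_base + random_factor + delta_mmap;

        /* top-down layout: base is pulled down by delta_mmap + delta_stack */
        unsigned long topdown_base = PAGE_ALIGN(task_size - gap - random_factor)
                                     - (delta_mmap + delta_stack);

        printf("legacy  mmap_base: %#lx\n", legacy_base);
        printf("topdown mmap_base: %#lx\n", topdown_base);
        return 0;
    }
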
4512 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4513 --- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4514 +++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4515 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4516 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4517 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4518
4519 +extern void gr_handle_kernel_exploit(void);
4520 +
4521 void die_if_kernel(char *str, struct pt_regs *regs)
4522 {
4523 static int die_counter;
4524 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4525 count++ < 30 &&
4526 (((unsigned long) rw) >= PAGE_OFFSET) &&
4527 !(((unsigned long) rw) & 0x7)) {
4528 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4529 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4530 (void *) rw->ins[7]);
4531 rw = (struct reg_window32 *)rw->ins[6];
4532 }
4533 }
4534 printk("Instruction DUMP:");
4535 instruction_dump ((unsigned long *) regs->pc);
4536 - if(regs->psr & PSR_PS)
4537 + if(regs->psr & PSR_PS) {
4538 + gr_handle_kernel_exploit();
4539 do_exit(SIGKILL);
4540 + }
4541 do_exit(SIGSEGV);
4542 }
4543
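
Both die_if_kernel() hunks (here and in the 64-bit version below) route a privileged oops through gr_handle_kernel_exploit() before killing the task. The stub below only mirrors that control-flow change; it is a hedged illustration, and the actual policy applied by gr_handle_kernel_exploit() is defined elsewhere in this patch, not here.

    /* Control-flow illustration only; gr_handle_kernel_exploit() is a stub. */
    #include <stdio.h>
    #include <stdlib.h>

    static void gr_handle_kernel_exploit(void)   /* stand-in stub */
    {
        fprintf(stderr, "privileged oops: applying exploit-response policy\n");
    }

    static void die_if_kernel(const char *str, int privileged)
    {
        fprintf(stderr, "%s\n", str);
        if (privileged) {
            gr_handle_kernel_exploit();
            exit(9);   /* stands in for do_exit(SIGKILL)  */
        }
        exit(11);      /* stands in for do_exit(SIGSEGV) */
    }

    int main(void)
    {
        die_if_kernel("Oops: example", 1);
        return 0;
    }
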
4544 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4545 --- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4546 +++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4547 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4548 i + 1,
4549 p->trapstack[i].tstate, p->trapstack[i].tpc,
4550 p->trapstack[i].tnpc, p->trapstack[i].tt);
4551 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4552 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4553 }
4554 }
4555
4556 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4557
4558 lvl -= 0x100;
4559 if (regs->tstate & TSTATE_PRIV) {
4560 +
4561 +#ifdef CONFIG_PAX_REFCOUNT
4562 + if (lvl == 6)
4563 + pax_report_refcount_overflow(regs);
4564 +#endif
4565 +
4566 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4567 die_if_kernel(buffer, regs);
4568 }
4569 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4570 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4571 {
4572 char buffer[32];
4573 -
4574 +
4575 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4576 0, lvl, SIGTRAP) == NOTIFY_STOP)
4577 return;
4578
4579 +#ifdef CONFIG_PAX_REFCOUNT
4580 + if (lvl == 6)
4581 + pax_report_refcount_overflow(regs);
4582 +#endif
4583 +
4584 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4585
4586 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4587 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4588 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4589 printk("%s" "ERROR(%d): ",
4590 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4591 - printk("TPC<%pS>\n", (void *) regs->tpc);
4592 + printk("TPC<%pA>\n", (void *) regs->tpc);
4593 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4594 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4595 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4596 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4597 smp_processor_id(),
4598 (type & 0x1) ? 'I' : 'D',
4599 regs->tpc);
4600 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4601 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4602 panic("Irrecoverable Cheetah+ parity error.");
4603 }
4604
4605 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4606 smp_processor_id(),
4607 (type & 0x1) ? 'I' : 'D',
4608 regs->tpc);
4609 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4610 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4611 }
4612
4613 struct sun4v_error_entry {
4614 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4615
4616 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4617 regs->tpc, tl);
4618 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4619 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4620 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4621 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4622 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4623 (void *) regs->u_regs[UREG_I7]);
4624 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4625 "pte[%lx] error[%lx]\n",
4626 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4627
4628 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4629 regs->tpc, tl);
4630 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4631 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4632 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4633 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4634 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4635 (void *) regs->u_regs[UREG_I7]);
4636 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4637 "pte[%lx] error[%lx]\n",
4638 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4639 fp = (unsigned long)sf->fp + STACK_BIAS;
4640 }
4641
4642 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4643 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4644 } while (++count < 16);
4645 }
4646
4647 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4648 return (struct reg_window *) (fp + STACK_BIAS);
4649 }
4650
4651 +extern void gr_handle_kernel_exploit(void);
4652 +
4653 void die_if_kernel(char *str, struct pt_regs *regs)
4654 {
4655 static int die_counter;
4656 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4657 while (rw &&
4658 count++ < 30&&
4659 is_kernel_stack(current, rw)) {
4660 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4661 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4662 (void *) rw->ins[7]);
4663
4664 rw = kernel_stack_up(rw);
4665 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4666 }
4667 user_instruction_dump ((unsigned int __user *) regs->tpc);
4668 }
4669 - if (regs->tstate & TSTATE_PRIV)
4670 + if (regs->tstate & TSTATE_PRIV) {
4671 + gr_handle_kernel_exploit();
4672 do_exit(SIGKILL);
4673 + }
4674 +
4675 do_exit(SIGSEGV);
4676 }
4677 EXPORT_SYMBOL(die_if_kernel);
4678 diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4679 --- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4680 +++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4681 @@ -127,7 +127,7 @@ do_int_load:
4682 wr %o5, 0x0, %asi
4683 retl
4684 mov 0, %o0
4685 - .size __do_int_load, .-__do_int_load
4686 + .size do_int_load, .-do_int_load
4687
4688 .section __ex_table,"a"
4689 .word 4b, __retl_efault
4690 diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4691 --- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4692 +++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4693 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4694 if (count < 5) {
4695 last_time = jiffies;
4696 count++;
4697 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4698 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4699 regs->tpc, (void *) regs->tpc);
4700 }
4701 }
4702 diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4703 --- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4704 +++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4705 @@ -18,7 +18,12 @@
4706 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4707 BACKOFF_SETUP(%o2)
4708 1: lduw [%o1], %g1
4709 - add %g1, %o0, %g7
4710 + addcc %g1, %o0, %g7
4711 +
4712 +#ifdef CONFIG_PAX_REFCOUNT
4713 + tvs %icc, 6
4714 +#endif
4715 +
4716 cas [%o1], %g1, %g7
4717 cmp %g1, %g7
4718 bne,pn %icc, 2f
4719 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4720 2: BACKOFF_SPIN(%o2, %o3, 1b)
4721 .size atomic_add, .-atomic_add
4722
4723 + .globl atomic_add_unchecked
4724 + .type atomic_add_unchecked,#function
4725 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4726 + BACKOFF_SETUP(%o2)
4727 +1: lduw [%o1], %g1
4728 + add %g1, %o0, %g7
4729 + cas [%o1], %g1, %g7
4730 + cmp %g1, %g7
4731 + bne,pn %icc, 2f
4732 + nop
4733 + retl
4734 + nop
4735 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4736 + .size atomic_add_unchecked, .-atomic_add_unchecked
4737 +
4738 .globl atomic_sub
4739 .type atomic_sub,#function
4740 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4741 BACKOFF_SETUP(%o2)
4742 1: lduw [%o1], %g1
4743 - sub %g1, %o0, %g7
4744 + subcc %g1, %o0, %g7
4745 +
4746 +#ifdef CONFIG_PAX_REFCOUNT
4747 + tvs %icc, 6
4748 +#endif
4749 +
4750 cas [%o1], %g1, %g7
4751 cmp %g1, %g7
4752 bne,pn %icc, 2f
4753 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4754 2: BACKOFF_SPIN(%o2, %o3, 1b)
4755 .size atomic_sub, .-atomic_sub
4756
4757 + .globl atomic_sub_unchecked
4758 + .type atomic_sub_unchecked,#function
4759 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4760 + BACKOFF_SETUP(%o2)
4761 +1: lduw [%o1], %g1
4762 + sub %g1, %o0, %g7
4763 + cas [%o1], %g1, %g7
4764 + cmp %g1, %g7
4765 + bne,pn %icc, 2f
4766 + nop
4767 + retl
4768 + nop
4769 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4770 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4771 +
4772 .globl atomic_add_ret
4773 .type atomic_add_ret,#function
4774 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4775 BACKOFF_SETUP(%o2)
4776 1: lduw [%o1], %g1
4777 - add %g1, %o0, %g7
4778 + addcc %g1, %o0, %g7
4779 +
4780 +#ifdef CONFIG_PAX_REFCOUNT
4781 + tvs %icc, 6
4782 +#endif
4783 +
4784 cas [%o1], %g1, %g7
4785 cmp %g1, %g7
4786 bne,pn %icc, 2f
4787 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4788 2: BACKOFF_SPIN(%o2, %o3, 1b)
4789 .size atomic_add_ret, .-atomic_add_ret
4790
4791 + .globl atomic_add_ret_unchecked
4792 + .type atomic_add_ret_unchecked,#function
4793 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4794 + BACKOFF_SETUP(%o2)
4795 +1: lduw [%o1], %g1
4796 + addcc %g1, %o0, %g7
4797 + cas [%o1], %g1, %g7
4798 + cmp %g1, %g7
4799 + bne,pn %icc, 2f
4800 + add %g7, %o0, %g7
4801 + sra %g7, 0, %o0
4802 + retl
4803 + nop
4804 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4805 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4806 +
4807 .globl atomic_sub_ret
4808 .type atomic_sub_ret,#function
4809 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4810 BACKOFF_SETUP(%o2)
4811 1: lduw [%o1], %g1
4812 - sub %g1, %o0, %g7
4813 + subcc %g1, %o0, %g7
4814 +
4815 +#ifdef CONFIG_PAX_REFCOUNT
4816 + tvs %icc, 6
4817 +#endif
4818 +
4819 cas [%o1], %g1, %g7
4820 cmp %g1, %g7
4821 bne,pn %icc, 2f
4822 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4823 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4824 BACKOFF_SETUP(%o2)
4825 1: ldx [%o1], %g1
4826 - add %g1, %o0, %g7
4827 + addcc %g1, %o0, %g7
4828 +
4829 +#ifdef CONFIG_PAX_REFCOUNT
4830 + tvs %xcc, 6
4831 +#endif
4832 +
4833 casx [%o1], %g1, %g7
4834 cmp %g1, %g7
4835 bne,pn %xcc, 2f
4836 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4837 2: BACKOFF_SPIN(%o2, %o3, 1b)
4838 .size atomic64_add, .-atomic64_add
4839
4840 + .globl atomic64_add_unchecked
4841 + .type atomic64_add_unchecked,#function
4842 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4843 + BACKOFF_SETUP(%o2)
4844 +1: ldx [%o1], %g1
4845 + addcc %g1, %o0, %g7
4846 + casx [%o1], %g1, %g7
4847 + cmp %g1, %g7
4848 + bne,pn %xcc, 2f
4849 + nop
4850 + retl
4851 + nop
4852 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4853 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4854 +
4855 .globl atomic64_sub
4856 .type atomic64_sub,#function
4857 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4858 BACKOFF_SETUP(%o2)
4859 1: ldx [%o1], %g1
4860 - sub %g1, %o0, %g7
4861 + subcc %g1, %o0, %g7
4862 +
4863 +#ifdef CONFIG_PAX_REFCOUNT
4864 + tvs %xcc, 6
4865 +#endif
4866 +
4867 casx [%o1], %g1, %g7
4868 cmp %g1, %g7
4869 bne,pn %xcc, 2f
4870 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4871 2: BACKOFF_SPIN(%o2, %o3, 1b)
4872 .size atomic64_sub, .-atomic64_sub
4873
4874 + .globl atomic64_sub_unchecked
4875 + .type atomic64_sub_unchecked,#function
4876 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4877 + BACKOFF_SETUP(%o2)
4878 +1: ldx [%o1], %g1
4879 + subcc %g1, %o0, %g7
4880 + casx [%o1], %g1, %g7
4881 + cmp %g1, %g7
4882 + bne,pn %xcc, 2f
4883 + nop
4884 + retl
4885 + nop
4886 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4887 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4888 +
4889 .globl atomic64_add_ret
4890 .type atomic64_add_ret,#function
4891 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4892 BACKOFF_SETUP(%o2)
4893 1: ldx [%o1], %g1
4894 - add %g1, %o0, %g7
4895 + addcc %g1, %o0, %g7
4896 +
4897 +#ifdef CONFIG_PAX_REFCOUNT
4898 + tvs %xcc, 6
4899 +#endif
4900 +
4901 casx [%o1], %g1, %g7
4902 cmp %g1, %g7
4903 bne,pn %xcc, 2f
4904 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4905 2: BACKOFF_SPIN(%o2, %o3, 1b)
4906 .size atomic64_add_ret, .-atomic64_add_ret
4907
4908 + .globl atomic64_add_ret_unchecked
4909 + .type atomic64_add_ret_unchecked,#function
4910 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4911 + BACKOFF_SETUP(%o2)
4912 +1: ldx [%o1], %g1
4913 + addcc %g1, %o0, %g7
4914 + casx [%o1], %g1, %g7
4915 + cmp %g1, %g7
4916 + bne,pn %xcc, 2f
4917 + add %g7, %o0, %g7
4918 + mov %g7, %o0
4919 + retl
4920 + nop
4921 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4922 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4923 +
4924 .globl atomic64_sub_ret
4925 .type atomic64_sub_ret,#function
4926 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4927 BACKOFF_SETUP(%o2)
4928 1: ldx [%o1], %g1
4929 - sub %g1, %o0, %g7
4930 + subcc %g1, %o0, %g7
4931 +
4932 +#ifdef CONFIG_PAX_REFCOUNT
4933 + tvs %xcc, 6
4934 +#endif
4935 +
4936 casx [%o1], %g1, %g7
4937 cmp %g1, %g7
4938 bne,pn %xcc, 2f
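
The atomic_64.S changes above turn each add/sub into its condition-code-setting form and insert "tvs %icc, 6" (or %xcc for the 64-bit ops), so a signed overflow of a reference counter traps into software trap 6, which the traps_64.c hunks earlier hand to pax_report_refcount_overflow(); the *_unchecked variants keep the old wrapping behaviour for counters that are allowed to overflow, and the rwsem_64.S hunks below apply the same pattern. A plain-C illustration of the same detect-instead-of-wrap idea, using the GCC/clang __builtin_add_overflow helper in place of a trap:

    /* Not the kernel code: a user-space analogue of the overflow check. */
    #include <stdio.h>
    #include <limits.h>

    static int checked_inc(int *counter)
    {
        int next;
        if (__builtin_add_overflow(*counter, 1, &next)) {
            /* kernel side: the tvs instruction fires and the trap handler
             * reports the overflow; here we just log and refuse to wrap */
            fprintf(stderr, "refcount overflow detected, not wrapping\n");
            return -1;
        }
        *counter = next;
        return 0;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;
        checked_inc(&refs);            /* fine: reaches INT_MAX */
        checked_inc(&refs);            /* detected: would wrap to INT_MIN */
        printf("refs = %d\n", refs);   /* still INT_MAX */
        return 0;
    }
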
4939 diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
4940 --- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4941 +++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4942 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4943
4944 /* Atomic counter implementation. */
4945 EXPORT_SYMBOL(atomic_add);
4946 +EXPORT_SYMBOL(atomic_add_unchecked);
4947 EXPORT_SYMBOL(atomic_add_ret);
4948 EXPORT_SYMBOL(atomic_sub);
4949 +EXPORT_SYMBOL(atomic_sub_unchecked);
4950 EXPORT_SYMBOL(atomic_sub_ret);
4951 EXPORT_SYMBOL(atomic64_add);
4952 +EXPORT_SYMBOL(atomic64_add_unchecked);
4953 EXPORT_SYMBOL(atomic64_add_ret);
4954 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4955 EXPORT_SYMBOL(atomic64_sub);
4956 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4957 EXPORT_SYMBOL(atomic64_sub_ret);
4958
4959 /* Atomic bit operations. */
4960 diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
4961 --- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4962 +++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4963 @@ -2,7 +2,7 @@
4964 #
4965
4966 asflags-y := -ansi -DST_DIV0=0x02
4967 -ccflags-y := -Werror
4968 +#ccflags-y := -Werror
4969
4970 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4971 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4972 diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
4973 --- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4974 +++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4975 @@ -11,7 +11,12 @@
4976 .globl __down_read
4977 __down_read:
4978 1: lduw [%o0], %g1
4979 - add %g1, 1, %g7
4980 + addcc %g1, 1, %g7
4981 +
4982 +#ifdef CONFIG_PAX_REFCOUNT
4983 + tvs %icc, 6
4984 +#endif
4985 +
4986 cas [%o0], %g1, %g7
4987 cmp %g1, %g7
4988 bne,pn %icc, 1b
4989 @@ -33,7 +38,12 @@ __down_read:
4990 .globl __down_read_trylock
4991 __down_read_trylock:
4992 1: lduw [%o0], %g1
4993 - add %g1, 1, %g7
4994 + addcc %g1, 1, %g7
4995 +
4996 +#ifdef CONFIG_PAX_REFCOUNT
4997 + tvs %icc, 6
4998 +#endif
4999 +
5000 cmp %g7, 0
5001 bl,pn %icc, 2f
5002 mov 0, %o1
5003 @@ -51,7 +61,12 @@ __down_write:
5004 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5005 1:
5006 lduw [%o0], %g3
5007 - add %g3, %g1, %g7
5008 + addcc %g3, %g1, %g7
5009 +
5010 +#ifdef CONFIG_PAX_REFCOUNT
5011 + tvs %icc, 6
5012 +#endif
5013 +
5014 cas [%o0], %g3, %g7
5015 cmp %g3, %g7
5016 bne,pn %icc, 1b
5017 @@ -77,7 +92,12 @@ __down_write_trylock:
5018 cmp %g3, 0
5019 bne,pn %icc, 2f
5020 mov 0, %o1
5021 - add %g3, %g1, %g7
5022 + addcc %g3, %g1, %g7
5023 +
5024 +#ifdef CONFIG_PAX_REFCOUNT
5025 + tvs %icc, 6
5026 +#endif
5027 +
5028 cas [%o0], %g3, %g7
5029 cmp %g3, %g7
5030 bne,pn %icc, 1b
5031 @@ -90,7 +110,12 @@ __down_write_trylock:
5032 __up_read:
5033 1:
5034 lduw [%o0], %g1
5035 - sub %g1, 1, %g7
5036 + subcc %g1, 1, %g7
5037 +
5038 +#ifdef CONFIG_PAX_REFCOUNT
5039 + tvs %icc, 6
5040 +#endif
5041 +
5042 cas [%o0], %g1, %g7
5043 cmp %g1, %g7
5044 bne,pn %icc, 1b
5045 @@ -118,7 +143,12 @@ __up_write:
5046 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5047 1:
5048 lduw [%o0], %g3
5049 - sub %g3, %g1, %g7
5050 + subcc %g3, %g1, %g7
5051 +
5052 +#ifdef CONFIG_PAX_REFCOUNT
5053 + tvs %icc, 6
5054 +#endif
5055 +
5056 cas [%o0], %g3, %g7
5057 cmp %g3, %g7
5058 bne,pn %icc, 1b
5059 @@ -143,7 +173,12 @@ __downgrade_write:
5060 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5061 1:
5062 lduw [%o0], %g3
5063 - sub %g3, %g1, %g7
5064 + subcc %g3, %g1, %g7
5065 +
5066 +#ifdef CONFIG_PAX_REFCOUNT
5067 + tvs %icc, 6
5068 +#endif
5069 +
5070 cas [%o0], %g3, %g7
5071 cmp %g3, %g7
5072 bne,pn %icc, 1b
5073 diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5074 --- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5075 +++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5076 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5077 # Export what is needed by arch/sparc/boot/Makefile
5078 export VMLINUX_INIT VMLINUX_MAIN
5079 VMLINUX_INIT := $(head-y) $(init-y)
5080 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5081 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5082 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5083 VMLINUX_MAIN += $(drivers-y) $(net-y)
5084
5085 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5086 --- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5087 +++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5088 @@ -21,6 +21,9 @@
5089 #include <linux/interrupt.h>
5090 #include <linux/module.h>
5091 #include <linux/kdebug.h>
5092 +#include <linux/slab.h>
5093 +#include <linux/pagemap.h>
5094 +#include <linux/compiler.h>
5095
5096 #include <asm/system.h>
5097 #include <asm/page.h>
5098 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5099 return safe_compute_effective_address(regs, insn);
5100 }
5101
5102 +#ifdef CONFIG_PAX_PAGEEXEC
5103 +#ifdef CONFIG_PAX_DLRESOLVE
5104 +static void pax_emuplt_close(struct vm_area_struct *vma)
5105 +{
5106 + vma->vm_mm->call_dl_resolve = 0UL;
5107 +}
5108 +
5109 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5110 +{
5111 + unsigned int *kaddr;
5112 +
5113 + vmf->page = alloc_page(GFP_HIGHUSER);
5114 + if (!vmf->page)
5115 + return VM_FAULT_OOM;
5116 +
5117 + kaddr = kmap(vmf->page);
5118 + memset(kaddr, 0, PAGE_SIZE);
5119 + kaddr[0] = 0x9DE3BFA8U; /* save */
5120 + flush_dcache_page(vmf->page);
5121 + kunmap(vmf->page);
5122 + return VM_FAULT_MAJOR;
5123 +}
5124 +
5125 +static const struct vm_operations_struct pax_vm_ops = {
5126 + .close = pax_emuplt_close,
5127 + .fault = pax_emuplt_fault
5128 +};
5129 +
5130 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5131 +{
5132 + int ret;
5133 +
5134 + vma->vm_mm = current->mm;
5135 + vma->vm_start = addr;
5136 + vma->vm_end = addr + PAGE_SIZE;
5137 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5138 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5139 + vma->vm_ops = &pax_vm_ops;
5140 +
5141 + ret = insert_vm_struct(current->mm, vma);
5142 + if (ret)
5143 + return ret;
5144 +
5145 + ++current->mm->total_vm;
5146 + return 0;
5147 +}
5148 +#endif
5149 +
5150 +/*
5151 + * PaX: decide what to do with offenders (regs->pc = fault address)
5152 + *
5153 + * returns 1 when task should be killed
5154 + * 2 when patched PLT trampoline was detected
5155 + * 3 when unpatched PLT trampoline was detected
5156 + */
5157 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5158 +{
5159 +
5160 +#ifdef CONFIG_PAX_EMUPLT
5161 + int err;
5162 +
5163 + do { /* PaX: patched PLT emulation #1 */
5164 + unsigned int sethi1, sethi2, jmpl;
5165 +
5166 + err = get_user(sethi1, (unsigned int *)regs->pc);
5167 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5168 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5169 +
5170 + if (err)
5171 + break;
5172 +
5173 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5174 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5175 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5176 + {
5177 + unsigned int addr;
5178 +
5179 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5180 + addr = regs->u_regs[UREG_G1];
5181 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5182 + regs->pc = addr;
5183 + regs->npc = addr+4;
5184 + return 2;
5185 + }
5186 + } while (0);
5187 +
5188 + { /* PaX: patched PLT emulation #2 */
5189 + unsigned int ba;
5190 +
5191 + err = get_user(ba, (unsigned int *)regs->pc);
5192 +
5193 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5194 + unsigned int addr;
5195 +
5196 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5197 + regs->pc = addr;
5198 + regs->npc = addr+4;
5199 + return 2;
5200 + }
5201 + }
5202 +
5203 + do { /* PaX: patched PLT emulation #3 */
5204 + unsigned int sethi, jmpl, nop;
5205 +
5206 + err = get_user(sethi, (unsigned int *)regs->pc);
5207 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5208 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5209 +
5210 + if (err)
5211 + break;
5212 +
5213 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5214 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5215 + nop == 0x01000000U)
5216 + {
5217 + unsigned int addr;
5218 +
5219 + addr = (sethi & 0x003FFFFFU) << 10;
5220 + regs->u_regs[UREG_G1] = addr;
5221 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5222 + regs->pc = addr;
5223 + regs->npc = addr+4;
5224 + return 2;
5225 + }
5226 + } while (0);
5227 +
5228 + do { /* PaX: unpatched PLT emulation step 1 */
5229 + unsigned int sethi, ba, nop;
5230 +
5231 + err = get_user(sethi, (unsigned int *)regs->pc);
5232 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5233 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5234 +
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5239 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5240 + nop == 0x01000000U)
5241 + {
5242 + unsigned int addr, save, call;
5243 +
5244 + if ((ba & 0xFFC00000U) == 0x30800000U)
5245 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5246 + else
5247 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5248 +
5249 + err = get_user(save, (unsigned int *)addr);
5250 + err |= get_user(call, (unsigned int *)(addr+4));
5251 + err |= get_user(nop, (unsigned int *)(addr+8));
5252 + if (err)
5253 + break;
5254 +
5255 +#ifdef CONFIG_PAX_DLRESOLVE
5256 + if (save == 0x9DE3BFA8U &&
5257 + (call & 0xC0000000U) == 0x40000000U &&
5258 + nop == 0x01000000U)
5259 + {
5260 + struct vm_area_struct *vma;
5261 + unsigned long call_dl_resolve;
5262 +
5263 + down_read(&current->mm->mmap_sem);
5264 + call_dl_resolve = current->mm->call_dl_resolve;
5265 + up_read(&current->mm->mmap_sem);
5266 + if (likely(call_dl_resolve))
5267 + goto emulate;
5268 +
5269 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5270 +
5271 + down_write(&current->mm->mmap_sem);
5272 + if (current->mm->call_dl_resolve) {
5273 + call_dl_resolve = current->mm->call_dl_resolve;
5274 + up_write(&current->mm->mmap_sem);
5275 + if (vma)
5276 + kmem_cache_free(vm_area_cachep, vma);
5277 + goto emulate;
5278 + }
5279 +
5280 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5281 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5282 + up_write(&current->mm->mmap_sem);
5283 + if (vma)
5284 + kmem_cache_free(vm_area_cachep, vma);
5285 + return 1;
5286 + }
5287 +
5288 + if (pax_insert_vma(vma, call_dl_resolve)) {
5289 + up_write(&current->mm->mmap_sem);
5290 + kmem_cache_free(vm_area_cachep, vma);
5291 + return 1;
5292 + }
5293 +
5294 + current->mm->call_dl_resolve = call_dl_resolve;
5295 + up_write(&current->mm->mmap_sem);
5296 +
5297 +emulate:
5298 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5299 + regs->pc = call_dl_resolve;
5300 + regs->npc = addr+4;
5301 + return 3;
5302 + }
5303 +#endif
5304 +
5305 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5306 + if ((save & 0xFFC00000U) == 0x05000000U &&
5307 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5308 + nop == 0x01000000U)
5309 + {
5310 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5311 + regs->u_regs[UREG_G2] = addr + 4;
5312 + addr = (save & 0x003FFFFFU) << 10;
5313 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5314 + regs->pc = addr;
5315 + regs->npc = addr+4;
5316 + return 3;
5317 + }
5318 + }
5319 + } while (0);
5320 +
5321 + do { /* PaX: unpatched PLT emulation step 2 */
5322 + unsigned int save, call, nop;
5323 +
5324 + err = get_user(save, (unsigned int *)(regs->pc-4));
5325 + err |= get_user(call, (unsigned int *)regs->pc);
5326 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5327 + if (err)
5328 + break;
5329 +
5330 + if (save == 0x9DE3BFA8U &&
5331 + (call & 0xC0000000U) == 0x40000000U &&
5332 + nop == 0x01000000U)
5333 + {
5334 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5335 +
5336 + regs->u_regs[UREG_RETPC] = regs->pc;
5337 + regs->pc = dl_resolve;
5338 + regs->npc = dl_resolve+4;
5339 + return 3;
5340 + }
5341 + } while (0);
5342 +#endif
5343 +
5344 + return 1;
5345 +}
5346 +
5347 +void pax_report_insns(void *pc, void *sp)
5348 +{
5349 + unsigned long i;
5350 +
5351 + printk(KERN_ERR "PAX: bytes at PC: ");
5352 + for (i = 0; i < 8; i++) {
5353 + unsigned int c;
5354 + if (get_user(c, (unsigned int *)pc+i))
5355 + printk(KERN_CONT "???????? ");
5356 + else
5357 + printk(KERN_CONT "%08x ", c);
5358 + }
5359 + printk("\n");
5360 +}
5361 +#endif
5362 +
5363 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5364 unsigned long address)
5365 {
5366 @@ -231,6 +495,24 @@ good_area:
5367 if(!(vma->vm_flags & VM_WRITE))
5368 goto bad_area;
5369 } else {
5370 +
5371 +#ifdef CONFIG_PAX_PAGEEXEC
5372 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5373 + up_read(&mm->mmap_sem);
5374 + switch (pax_handle_fetch_fault(regs)) {
5375 +
5376 +#ifdef CONFIG_PAX_EMUPLT
5377 + case 2:
5378 + case 3:
5379 + return;
5380 +#endif
5381 +
5382 + }
5383 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5384 + do_group_exit(SIGKILL);
5385 + }
5386 +#endif
5387 +
5388 /* Allow reads even for write-only mappings */
5389 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5390 goto bad_area;
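
pax_handle_fetch_fault() above recognises known PLT code sequences by decoding them and re-pointing the PC at the real target, so non-executable mappings can cover the PLT while lazy binding keeps working. The worked example below reproduces the decode used by "patched PLT emulation #3" (sethi %hi(target),%g1 ; jmpl %g1+%lo(target),%g0); the target value is made up, and the bit manipulation is the same 22-bit/13-bit reassembly and sign extension as in the hunk.

    /* Worked example of the patched-PLT decode, not kernel code. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t target = 0x50014;                         /* assumed PLT target   */
        uint32_t sethi  = 0x03000000u | (target >> 10);    /* sethi %hi(target),%g1 */
        uint32_t jmpl   = 0x81C06000u | (target & 0x3FFu); /* jmpl %g1+%lo(target),%g0 */

        /* same arithmetic as the emulation: %g1 gets the high 22 bits ... */
        uint32_t g1   = (sethi & 0x003FFFFFu) << 10;
        /* ... and the 13-bit jmpl immediate is sign-extended and added */
        uint32_t addr = g1 + (((jmpl | 0xFFFFE000u) ^ 0x00001000u) + 0x00001000u);

        printf("decoded target = %#x (expected %#x)\n", addr, target);
        return 0;
    }
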
5391 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5392 --- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5393 +++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5394 @@ -20,6 +20,9 @@
5395 #include <linux/kprobes.h>
5396 #include <linux/kdebug.h>
5397 #include <linux/percpu.h>
5398 +#include <linux/slab.h>
5399 +#include <linux/pagemap.h>
5400 +#include <linux/compiler.h>
5401
5402 #include <asm/page.h>
5403 #include <asm/pgtable.h>
5404 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5405 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5406 regs->tpc);
5407 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5408 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5409 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5410 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5411 dump_stack();
5412 unhandled_fault(regs->tpc, current, regs);
5413 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5414 show_regs(regs);
5415 }
5416
5417 +#ifdef CONFIG_PAX_PAGEEXEC
5418 +#ifdef CONFIG_PAX_DLRESOLVE
5419 +static void pax_emuplt_close(struct vm_area_struct *vma)
5420 +{
5421 + vma->vm_mm->call_dl_resolve = 0UL;
5422 +}
5423 +
5424 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5425 +{
5426 + unsigned int *kaddr;
5427 +
5428 + vmf->page = alloc_page(GFP_HIGHUSER);
5429 + if (!vmf->page)
5430 + return VM_FAULT_OOM;
5431 +
5432 + kaddr = kmap(vmf->page);
5433 + memset(kaddr, 0, PAGE_SIZE);
5434 + kaddr[0] = 0x9DE3BFA8U; /* save */
5435 + flush_dcache_page(vmf->page);
5436 + kunmap(vmf->page);
5437 + return VM_FAULT_MAJOR;
5438 +}
5439 +
5440 +static const struct vm_operations_struct pax_vm_ops = {
5441 + .close = pax_emuplt_close,
5442 + .fault = pax_emuplt_fault
5443 +};
5444 +
5445 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5446 +{
5447 + int ret;
5448 +
5449 + vma->vm_mm = current->mm;
5450 + vma->vm_start = addr;
5451 + vma->vm_end = addr + PAGE_SIZE;
5452 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5453 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5454 + vma->vm_ops = &pax_vm_ops;
5455 +
5456 + ret = insert_vm_struct(current->mm, vma);
5457 + if (ret)
5458 + return ret;
5459 +
5460 + ++current->mm->total_vm;
5461 + return 0;
5462 +}
5463 +#endif
5464 +
5465 +/*
5466 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5467 + *
5468 + * returns 1 when task should be killed
5469 + * 2 when patched PLT trampoline was detected
5470 + * 3 when unpatched PLT trampoline was detected
5471 + */
5472 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5473 +{
5474 +
5475 +#ifdef CONFIG_PAX_EMUPLT
5476 + int err;
5477 +
5478 + do { /* PaX: patched PLT emulation #1 */
5479 + unsigned int sethi1, sethi2, jmpl;
5480 +
5481 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5482 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5483 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5484 +
5485 + if (err)
5486 + break;
5487 +
5488 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5489 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5490 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5491 + {
5492 + unsigned long addr;
5493 +
5494 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5495 + addr = regs->u_regs[UREG_G1];
5496 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5497 +
5498 + if (test_thread_flag(TIF_32BIT))
5499 + addr &= 0xFFFFFFFFUL;
5500 +
5501 + regs->tpc = addr;
5502 + regs->tnpc = addr+4;
5503 + return 2;
5504 + }
5505 + } while (0);
5506 +
5507 + { /* PaX: patched PLT emulation #2 */
5508 + unsigned int ba;
5509 +
5510 + err = get_user(ba, (unsigned int *)regs->tpc);
5511 +
5512 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5513 + unsigned long addr;
5514 +
5515 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5516 +
5517 + if (test_thread_flag(TIF_32BIT))
5518 + addr &= 0xFFFFFFFFUL;
5519 +
5520 + regs->tpc = addr;
5521 + regs->tnpc = addr+4;
5522 + return 2;
5523 + }
5524 + }
5525 +
5526 + do { /* PaX: patched PLT emulation #3 */
5527 + unsigned int sethi, jmpl, nop;
5528 +
5529 + err = get_user(sethi, (unsigned int *)regs->tpc);
5530 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5531 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5532 +
5533 + if (err)
5534 + break;
5535 +
5536 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5537 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5538 + nop == 0x01000000U)
5539 + {
5540 + unsigned long addr;
5541 +
5542 + addr = (sethi & 0x003FFFFFU) << 10;
5543 + regs->u_regs[UREG_G1] = addr;
5544 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5545 +
5546 + if (test_thread_flag(TIF_32BIT))
5547 + addr &= 0xFFFFFFFFUL;
5548 +
5549 + regs->tpc = addr;
5550 + regs->tnpc = addr+4;
5551 + return 2;
5552 + }
5553 + } while (0);
5554 +
5555 + do { /* PaX: patched PLT emulation #4 */
5556 + unsigned int sethi, mov1, call, mov2;
5557 +
5558 + err = get_user(sethi, (unsigned int *)regs->tpc);
5559 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5560 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5561 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5562 +
5563 + if (err)
5564 + break;
5565 +
5566 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5567 + mov1 == 0x8210000FU &&
5568 + (call & 0xC0000000U) == 0x40000000U &&
5569 + mov2 == 0x9E100001U)
5570 + {
5571 + unsigned long addr;
5572 +
5573 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5574 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5575 +
5576 + if (test_thread_flag(TIF_32BIT))
5577 + addr &= 0xFFFFFFFFUL;
5578 +
5579 + regs->tpc = addr;
5580 + regs->tnpc = addr+4;
5581 + return 2;
5582 + }
5583 + } while (0);
5584 +
5585 + do { /* PaX: patched PLT emulation #5 */
5586 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5587 +
5588 + err = get_user(sethi, (unsigned int *)regs->tpc);
5589 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5590 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5591 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5592 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5593 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5594 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5595 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5596 +
5597 + if (err)
5598 + break;
5599 +
5600 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5601 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5602 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5603 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5604 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5605 + sllx == 0x83287020U &&
5606 + jmpl == 0x81C04005U &&
5607 + nop == 0x01000000U)
5608 + {
5609 + unsigned long addr;
5610 +
5611 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5612 + regs->u_regs[UREG_G1] <<= 32;
5613 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5614 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5615 + regs->tpc = addr;
5616 + regs->tnpc = addr+4;
5617 + return 2;
5618 + }
5619 + } while (0);
5620 +
5621 + do { /* PaX: patched PLT emulation #6 */
5622 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5623 +
5624 + err = get_user(sethi, (unsigned int *)regs->tpc);
5625 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5626 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5627 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5628 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5629 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5630 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5631 +
5632 + if (err)
5633 + break;
5634 +
5635 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5636 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5637 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5638 + sllx == 0x83287020U &&
5639 + (or & 0xFFFFE000U) == 0x8A116000U &&
5640 + jmpl == 0x81C04005U &&
5641 + nop == 0x01000000U)
5642 + {
5643 + unsigned long addr;
5644 +
5645 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5646 + regs->u_regs[UREG_G1] <<= 32;
5647 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5648 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5649 + regs->tpc = addr;
5650 + regs->tnpc = addr+4;
5651 + return 2;
5652 + }
5653 + } while (0);
5654 +
5655 + do { /* PaX: unpatched PLT emulation step 1 */
5656 + unsigned int sethi, ba, nop;
5657 +
5658 + err = get_user(sethi, (unsigned int *)regs->tpc);
5659 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5660 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5661 +
5662 + if (err)
5663 + break;
5664 +
5665 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5666 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5667 + nop == 0x01000000U)
5668 + {
5669 + unsigned long addr;
5670 + unsigned int save, call;
5671 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5672 +
5673 + if ((ba & 0xFFC00000U) == 0x30800000U)
5674 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5675 + else
5676 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5677 +
5678 + if (test_thread_flag(TIF_32BIT))
5679 + addr &= 0xFFFFFFFFUL;
5680 +
5681 + err = get_user(save, (unsigned int *)addr);
5682 + err |= get_user(call, (unsigned int *)(addr+4));
5683 + err |= get_user(nop, (unsigned int *)(addr+8));
5684 + if (err)
5685 + break;
5686 +
5687 +#ifdef CONFIG_PAX_DLRESOLVE
5688 + if (save == 0x9DE3BFA8U &&
5689 + (call & 0xC0000000U) == 0x40000000U &&
5690 + nop == 0x01000000U)
5691 + {
5692 + struct vm_area_struct *vma;
5693 + unsigned long call_dl_resolve;
5694 +
5695 + down_read(&current->mm->mmap_sem);
5696 + call_dl_resolve = current->mm->call_dl_resolve;
5697 + up_read(&current->mm->mmap_sem);
5698 + if (likely(call_dl_resolve))
5699 + goto emulate;
5700 +
5701 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5702 +
5703 + down_write(&current->mm->mmap_sem);
5704 + if (current->mm->call_dl_resolve) {
5705 + call_dl_resolve = current->mm->call_dl_resolve;
5706 + up_write(&current->mm->mmap_sem);
5707 + if (vma)
5708 + kmem_cache_free(vm_area_cachep, vma);
5709 + goto emulate;
5710 + }
5711 +
5712 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5713 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5714 + up_write(&current->mm->mmap_sem);
5715 + if (vma)
5716 + kmem_cache_free(vm_area_cachep, vma);
5717 + return 1;
5718 + }
5719 +
5720 + if (pax_insert_vma(vma, call_dl_resolve)) {
5721 + up_write(&current->mm->mmap_sem);
5722 + kmem_cache_free(vm_area_cachep, vma);
5723 + return 1;
5724 + }
5725 +
5726 + current->mm->call_dl_resolve = call_dl_resolve;
5727 + up_write(&current->mm->mmap_sem);
5728 +
5729 +emulate:
5730 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5731 + regs->tpc = call_dl_resolve;
5732 + regs->tnpc = addr+4;
5733 + return 3;
5734 + }
5735 +#endif
5736 +
5737 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5738 + if ((save & 0xFFC00000U) == 0x05000000U &&
5739 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5740 + nop == 0x01000000U)
5741 + {
5742 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5743 + regs->u_regs[UREG_G2] = addr + 4;
5744 + addr = (save & 0x003FFFFFU) << 10;
5745 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5746 +
5747 + if (test_thread_flag(TIF_32BIT))
5748 + addr &= 0xFFFFFFFFUL;
5749 +
5750 + regs->tpc = addr;
5751 + regs->tnpc = addr+4;
5752 + return 3;
5753 + }
5754 +
5755 + /* PaX: 64-bit PLT stub */
5756 + err = get_user(sethi1, (unsigned int *)addr);
5757 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5758 + err |= get_user(or1, (unsigned int *)(addr+8));
5759 + err |= get_user(or2, (unsigned int *)(addr+12));
5760 + err |= get_user(sllx, (unsigned int *)(addr+16));
5761 + err |= get_user(add, (unsigned int *)(addr+20));
5762 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5763 + err |= get_user(nop, (unsigned int *)(addr+28));
5764 + if (err)
5765 + break;
5766 +
5767 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5768 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5769 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5770 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5771 + sllx == 0x89293020U &&
5772 + add == 0x8A010005U &&
5773 + jmpl == 0x89C14000U &&
5774 + nop == 0x01000000U)
5775 + {
5776 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5777 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5778 + regs->u_regs[UREG_G4] <<= 32;
5779 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5780 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5781 + regs->u_regs[UREG_G4] = addr + 24;
5782 + addr = regs->u_regs[UREG_G5];
5783 + regs->tpc = addr;
5784 + regs->tnpc = addr+4;
5785 + return 3;
5786 + }
5787 + }
5788 + } while (0);
5789 +
5790 +#ifdef CONFIG_PAX_DLRESOLVE
5791 + do { /* PaX: unpatched PLT emulation step 2 */
5792 + unsigned int save, call, nop;
5793 +
5794 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5795 + err |= get_user(call, (unsigned int *)regs->tpc);
5796 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5797 + if (err)
5798 + break;
5799 +
5800 + if (save == 0x9DE3BFA8U &&
5801 + (call & 0xC0000000U) == 0x40000000U &&
5802 + nop == 0x01000000U)
5803 + {
5804 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5805 +
5806 + if (test_thread_flag(TIF_32BIT))
5807 + dl_resolve &= 0xFFFFFFFFUL;
5808 +
5809 + regs->u_regs[UREG_RETPC] = regs->tpc;
5810 + regs->tpc = dl_resolve;
5811 + regs->tnpc = dl_resolve+4;
5812 + return 3;
5813 + }
5814 + } while (0);
5815 +#endif
5816 +
5817 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5818 + unsigned int sethi, ba, nop;
5819 +
5820 + err = get_user(sethi, (unsigned int *)regs->tpc);
5821 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5822 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5823 +
5824 + if (err)
5825 + break;
5826 +
5827 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5828 + (ba & 0xFFF00000U) == 0x30600000U &&
5829 + nop == 0x01000000U)
5830 + {
5831 + unsigned long addr;
5832 +
5833 + addr = (sethi & 0x003FFFFFU) << 10;
5834 + regs->u_regs[UREG_G1] = addr;
5835 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5836 +
5837 + if (test_thread_flag(TIF_32BIT))
5838 + addr &= 0xFFFFFFFFUL;
5839 +
5840 + regs->tpc = addr;
5841 + regs->tnpc = addr+4;
5842 + return 2;
5843 + }
5844 + } while (0);
5845 +
5846 +#endif
5847 +
5848 + return 1;
5849 +}
5850 +
5851 +void pax_report_insns(void *pc, void *sp)
5852 +{
5853 + unsigned long i;
5854 +
5855 + printk(KERN_ERR "PAX: bytes at PC: ");
5856 + for (i = 0; i < 8; i++) {
5857 + unsigned int c;
5858 + if (get_user(c, (unsigned int *)pc+i))
5859 + printk(KERN_CONT "???????? ");
5860 + else
5861 + printk(KERN_CONT "%08x ", c);
5862 + }
5863 + printk("\n");
5864 +}
5865 +#endif
5866 +
5867 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5868 {
5869 struct mm_struct *mm = current->mm;
5870 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5871 if (!vma)
5872 goto bad_area;
5873
5874 +#ifdef CONFIG_PAX_PAGEEXEC
5875 + /* PaX: detect ITLB misses on non-exec pages */
5876 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5877 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5878 + {
5879 + if (address != regs->tpc)
5880 + goto good_area;
5881 +
5882 + up_read(&mm->mmap_sem);
5883 + switch (pax_handle_fetch_fault(regs)) {
5884 +
5885 +#ifdef CONFIG_PAX_EMUPLT
5886 + case 2:
5887 + case 3:
5888 + return;
5889 +#endif
5890 +
5891 + }
5892 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5893 + do_group_exit(SIGKILL);
5894 + }
5895 +#endif
5896 +
5897 /* Pure DTLB misses do not tell us whether the fault causing
5898 * load/store/atomic was a write or not, it only says that there
5899 * was no match. So in such a case we (carefully) read the
5900 diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
5901 --- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5902 +++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5903 @@ -69,7 +69,7 @@ full_search:
5904 }
5905 return -ENOMEM;
5906 }
5907 - if (likely(!vma || addr + len <= vma->vm_start)) {
5908 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5909 /*
5910 * Remember the place where we stopped the search:
5911 */
5912 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5913 /* make sure it can fit in the remaining address space */
5914 if (likely(addr > len)) {
5915 vma = find_vma(mm, addr-len);
5916 - if (!vma || addr <= vma->vm_start) {
5917 + if (check_heap_stack_gap(vma, addr - len, len)) {
5918 /* remember the address as a hint for next time */
5919 return (mm->free_area_cache = addr-len);
5920 }
5921 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5922 if (unlikely(mm->mmap_base < len))
5923 goto bottomup;
5924
5925 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5926 + addr = mm->mmap_base - len;
5927
5928 do {
5929 + addr &= HPAGE_MASK;
5930 /*
5931 * Lookup failure means no vma is above this address,
5932 * else if new region fits below vma->vm_start,
5933 * return with success:
5934 */
5935 vma = find_vma(mm, addr);
5936 - if (likely(!vma || addr+len <= vma->vm_start)) {
5937 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5938 /* remember the address as a hint for next time */
5939 return (mm->free_area_cache = addr);
5940 }
5941 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5942 mm->cached_hole_size = vma->vm_start - addr;
5943
5944 /* try just below the current vma->vm_start */
5945 - addr = (vma->vm_start-len) & HPAGE_MASK;
5946 - } while (likely(len < vma->vm_start));
5947 + addr = skip_heap_stack_gap(vma, len);
5948 + } while (!IS_ERR_VALUE(addr));
5949
5950 bottomup:
5951 /*
5952 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5953 if (addr) {
5954 addr = ALIGN(addr, HPAGE_SIZE);
5955 vma = find_vma(mm, addr);
5956 - if (task_size - len >= addr &&
5957 - (!vma || addr + len <= vma->vm_start))
5958 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5959 return addr;
5960 }
5961 if (mm->get_unmapped_area == arch_get_unmapped_area)
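
The top-down searches above (both the regular and the huge-page variant) are restructured to align the candidate inside the loop and to step downward via skip_heap_stack_gap() until it returns an error value. A toy model of that loop shape, with invented addresses, a made-up alignment, and simplified stand-ins for find_vma(), the gap check and the skip helper:

    /* Toy model of a top-down free-area search, not the kernel code. */
    #include <stdio.h>
    #include <stdbool.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1UL))

    struct vma { unsigned long start, end; };

    /* vmas sorted by address; first vma ending above addr, or NULL */
    static const struct vma *find_vma(const struct vma *v, int n, unsigned long addr)
    {
        for (int i = 0; i < n; i++)
            if (addr < v[i].end)
                return &v[i];
        return NULL;
    }

    static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
    {
        return !vma || addr + len <= vma->start;
    }

    /* next candidate below this vma; ~0UL plays the role of an error value */
    static unsigned long skip_gap(const struct vma *vma, unsigned long len)
    {
        return vma->start >= len ? vma->start - len : ~0UL;
    }

    int main(void)
    {
        const struct vma vmas[] = { { 0x60000, 0x70000 }, { 0x9c000, 0xa4000 } };
        unsigned long len = 0x8000, align = 0x4000;
        unsigned long addr = 0xa8000 - len;          /* mmap_base - len */

        do {
            addr = ALIGN_DOWN(addr, align);          /* align inside the loop */
            const struct vma *vma = find_vma(vmas, 2, addr);
            if (gap_ok(vma, addr, len)) {
                printf("found hole at %#lx\n", addr);
                return 0;
            }
            addr = skip_gap(vma, len);               /* try below that vma */
        } while (addr != ~0UL);

        printf("no hole\n");
        return 0;
    }
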
5962 diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
5963 --- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5964 +++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5965 @@ -317,6 +317,9 @@ extern void device_scan(void);
5966 pgprot_t PAGE_SHARED __read_mostly;
5967 EXPORT_SYMBOL(PAGE_SHARED);
5968
5969 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5970 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5971 +
5972 void __init paging_init(void)
5973 {
5974 switch(sparc_cpu_model) {
5975 @@ -345,17 +348,17 @@ void __init paging_init(void)
5976
5977 /* Initialize the protection map with non-constant, MMU dependent values. */
5978 protection_map[0] = PAGE_NONE;
5979 - protection_map[1] = PAGE_READONLY;
5980 - protection_map[2] = PAGE_COPY;
5981 - protection_map[3] = PAGE_COPY;
5982 + protection_map[1] = PAGE_READONLY_NOEXEC;
5983 + protection_map[2] = PAGE_COPY_NOEXEC;
5984 + protection_map[3] = PAGE_COPY_NOEXEC;
5985 protection_map[4] = PAGE_READONLY;
5986 protection_map[5] = PAGE_READONLY;
5987 protection_map[6] = PAGE_COPY;
5988 protection_map[7] = PAGE_COPY;
5989 protection_map[8] = PAGE_NONE;
5990 - protection_map[9] = PAGE_READONLY;
5991 - protection_map[10] = PAGE_SHARED;
5992 - protection_map[11] = PAGE_SHARED;
5993 + protection_map[9] = PAGE_READONLY_NOEXEC;
5994 + protection_map[10] = PAGE_SHARED_NOEXEC;
5995 + protection_map[11] = PAGE_SHARED_NOEXEC;
5996 protection_map[12] = PAGE_READONLY;
5997 protection_map[13] = PAGE_READONLY;
5998 protection_map[14] = PAGE_SHARED;
5999 diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6000 --- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6001 +++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6002 @@ -2,7 +2,7 @@
6003 #
6004
6005 asflags-y := -ansi
6006 -ccflags-y := -Werror
6007 +#ccflags-y := -Werror
6008
6009 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6010 obj-y += fault_$(BITS).o
6011 diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6012 --- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6013 +++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6014 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6015 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6016 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6017 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6018 +
6019 +#ifdef CONFIG_PAX_PAGEEXEC
6020 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6021 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6022 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6023 +#endif
6024 +
6025 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6026 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6027
6028 diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6029 --- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6030 +++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6031 @@ -23,6 +23,7 @@ enum km_type {
6032 KM_IRQ1,
6033 KM_SOFTIRQ0,
6034 KM_SOFTIRQ1,
6035 + KM_CLEARPAGE,
6036 KM_TYPE_NR
6037 };
6038
6039 diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6040 --- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6041 +++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6042 @@ -14,6 +14,9 @@
6043 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6044 #define PAGE_MASK (~(PAGE_SIZE-1))
6045
6046 +#define ktla_ktva(addr) (addr)
6047 +#define ktva_ktla(addr) (addr)
6048 +
6049 #ifndef __ASSEMBLY__
6050
6051 struct page;
6052 diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6053 --- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6054 +++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6055 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6056 return 2;
6057 }
6058
6059 -/*
6060 - * Only x86 and x86_64 have an arch_align_stack().
6061 - * All other arches have "#define arch_align_stack(x) (x)"
6062 - * in their asm/system.h
6063 - * As this is included in UML from asm-um/system-generic.h,
6064 - * we can use it to behave as the subarch does.
6065 - */
6066 -#ifndef arch_align_stack
6067 -unsigned long arch_align_stack(unsigned long sp)
6068 -{
6069 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6070 - sp -= get_random_int() % 8192;
6071 - return sp & ~0xf;
6072 -}
6073 -#endif
6074 -
6075 unsigned long get_wchan(struct task_struct *p)
6076 {
6077 unsigned long stack_page, sp, ip;
6078 diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6079 --- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6080 +++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6081 @@ -11,6 +11,21 @@
6082 #include "asm/uaccess.h"
6083 #include "asm/unistd.h"
6084
6085 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6086 +{
6087 + unsigned long pax_task_size = TASK_SIZE;
6088 +
6089 +#ifdef CONFIG_PAX_SEGMEXEC
6090 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6091 + pax_task_size = SEGMEXEC_TASK_SIZE;
6092 +#endif
6093 +
6094 + if (len > pax_task_size || addr > pax_task_size - len)
6095 + return -EINVAL;
6096 +
6097 + return 0;
6098 +}
6099 +
6100 /*
6101 * Perform the select(nd, in, out, ex, tv) and mmap() system
6102 * calls. Linux/i386 didn't use to be able to handle more than
6103 diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6104 --- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6105 +++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6106 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6107 u8 v;
6108 const u32 *p = (const u32 *)addr;
6109
6110 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6111 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6112 return v;
6113 }
6114
6115 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6116
6117 static inline void set_bit(int nr, void *addr)
6118 {
6119 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6120 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6121 }
6122
6123 #endif /* BOOT_BITOPS_H */
6124 diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6125 --- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6126 +++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6127 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6128 static inline u16 ds(void)
6129 {
6130 u16 seg;
6131 - asm("movw %%ds,%0" : "=rm" (seg));
6132 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6133 return seg;
6134 }
6135
6136 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6137 static inline int memcmp(const void *s1, const void *s2, size_t len)
6138 {
6139 u8 diff;
6140 - asm("repe; cmpsb; setnz %0"
6141 + asm volatile("repe; cmpsb; setnz %0"
6142 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6143 return diff;
6144 }
6145 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6146 --- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6147 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6148 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6149 notl %eax
6150 andl %eax, %ebx
6151 #else
6152 - movl $LOAD_PHYSICAL_ADDR, %ebx
6153 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6154 #endif
6155
6156 /* Target address to relocate to for decompression */
6157 @@ -149,7 +149,7 @@ relocated:
6158 * and where it was actually loaded.
6159 */
6160 movl %ebp, %ebx
6161 - subl $LOAD_PHYSICAL_ADDR, %ebx
6162 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6163 jz 2f /* Nothing to be done if loaded at compiled addr. */
6164 /*
6165 * Process relocations.
6166 @@ -157,8 +157,7 @@ relocated:
6167
6168 1: subl $4, %edi
6169 movl (%edi), %ecx
6170 - testl %ecx, %ecx
6171 - jz 2f
6172 + jecxz 2f
6173 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6174 jmp 1b
6175 2:
6176 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6177 --- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6178 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6179 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6180 notl %eax
6181 andl %eax, %ebx
6182 #else
6183 - movl $LOAD_PHYSICAL_ADDR, %ebx
6184 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6185 #endif
6186
6187 /* Target address to relocate to for decompression */
6188 @@ -183,7 +183,7 @@ no_longmode:
6189 hlt
6190 jmp 1b
6191
6192 -#include "../../kernel/verify_cpu_64.S"
6193 +#include "../../kernel/verify_cpu.S"
6194
6195 /*
6196 * Be careful here startup_64 needs to be at a predictable
6197 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6198 notq %rax
6199 andq %rax, %rbp
6200 #else
6201 - movq $LOAD_PHYSICAL_ADDR, %rbp
6202 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6203 #endif
6204
6205 /* Target address to relocate to for decompression */
6206 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6207 --- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6208 +++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6209 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6210 KBUILD_CFLAGS += $(cflags-y)
6211 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6212 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6213 +ifdef CONSTIFY_PLUGIN
6214 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6215 +endif
6216
6217 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6218 GCOV_PROFILE := n
6219 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6220 --- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6221 +++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6222 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6223 case PT_LOAD:
6224 #ifdef CONFIG_RELOCATABLE
6225 dest = output;
6226 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6227 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6228 #else
6229 dest = (void *)(phdr->p_paddr);
6230 #endif
6231 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6232 error("Destination address too large");
6233 #endif
6234 #ifndef CONFIG_RELOCATABLE
6235 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6236 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6237 error("Wrong destination address");
6238 #endif
6239
6240 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6241 --- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6242 +++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6243 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6244
6245 offs = (olen > ilen) ? olen - ilen : 0;
6246 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6247 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6248 + offs += 64*1024; /* Add 64K bytes slack */
6249 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6250
6251 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6252 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6253 --- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6254 +++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6255 @@ -10,8 +10,11 @@
6256 #define USE_BSD
6257 #include <endian.h>
6258
6259 +#include "../../../../include/linux/autoconf.h"
6260 +
6261 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6262 static Elf32_Ehdr ehdr;
6263 +static Elf32_Phdr *phdr;
6264 static unsigned long reloc_count, reloc_idx;
6265 static unsigned long *relocs;
6266
6267 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6268
6269 static int is_safe_abs_reloc(const char* sym_name)
6270 {
6271 - int i;
6272 + unsigned int i;
6273
6274 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6275 if (!strcmp(sym_name, safe_abs_relocs[i]))
6276 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6277 }
6278 }
6279
6280 +static void read_phdrs(FILE *fp)
6281 +{
6282 + unsigned int i;
6283 +
6284 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6285 + if (!phdr) {
6286 + die("Unable to allocate %d program headers\n",
6287 + ehdr.e_phnum);
6288 + }
6289 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6290 + die("Seek to %d failed: %s\n",
6291 + ehdr.e_phoff, strerror(errno));
6292 + }
6293 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6294 + die("Cannot read ELF program headers: %s\n",
6295 + strerror(errno));
6296 + }
6297 + for(i = 0; i < ehdr.e_phnum; i++) {
6298 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6299 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6300 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6301 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6302 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6303 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6304 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6305 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6306 + }
6307 +
6308 +}
6309 +
6310 static void read_shdrs(FILE *fp)
6311 {
6312 - int i;
6313 + unsigned int i;
6314 Elf32_Shdr shdr;
6315
6316 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6317 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6318
6319 static void read_strtabs(FILE *fp)
6320 {
6321 - int i;
6322 + unsigned int i;
6323 for (i = 0; i < ehdr.e_shnum; i++) {
6324 struct section *sec = &secs[i];
6325 if (sec->shdr.sh_type != SHT_STRTAB) {
6326 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6327
6328 static void read_symtabs(FILE *fp)
6329 {
6330 - int i,j;
6331 + unsigned int i,j;
6332 for (i = 0; i < ehdr.e_shnum; i++) {
6333 struct section *sec = &secs[i];
6334 if (sec->shdr.sh_type != SHT_SYMTAB) {
6335 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6336
6337 static void read_relocs(FILE *fp)
6338 {
6339 - int i,j;
6340 + unsigned int i,j;
6341 + uint32_t base;
6342 +
6343 for (i = 0; i < ehdr.e_shnum; i++) {
6344 struct section *sec = &secs[i];
6345 if (sec->shdr.sh_type != SHT_REL) {
6346 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6347 die("Cannot read symbol table: %s\n",
6348 strerror(errno));
6349 }
6350 + base = 0;
6351 + for (j = 0; j < ehdr.e_phnum; j++) {
6352 + if (phdr[j].p_type != PT_LOAD )
6353 + continue;
6354 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6355 + continue;
6356 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6357 + break;
6358 + }
6359 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6360 Elf32_Rel *rel = &sec->reltab[j];
6361 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6362 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6363 rel->r_info = elf32_to_cpu(rel->r_info);
6364 }
6365 }
6366 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6367
6368 static void print_absolute_symbols(void)
6369 {
6370 - int i;
6371 + unsigned int i;
6372 printf("Absolute symbols\n");
6373 printf(" Num: Value Size Type Bind Visibility Name\n");
6374 for (i = 0; i < ehdr.e_shnum; i++) {
6375 struct section *sec = &secs[i];
6376 char *sym_strtab;
6377 Elf32_Sym *sh_symtab;
6378 - int j;
6379 + unsigned int j;
6380
6381 if (sec->shdr.sh_type != SHT_SYMTAB) {
6382 continue;
6383 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6384
6385 static void print_absolute_relocs(void)
6386 {
6387 - int i, printed = 0;
6388 + unsigned int i, printed = 0;
6389
6390 for (i = 0; i < ehdr.e_shnum; i++) {
6391 struct section *sec = &secs[i];
6392 struct section *sec_applies, *sec_symtab;
6393 char *sym_strtab;
6394 Elf32_Sym *sh_symtab;
6395 - int j;
6396 + unsigned int j;
6397 if (sec->shdr.sh_type != SHT_REL) {
6398 continue;
6399 }
6400 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6401
6402 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6403 {
6404 - int i;
6405 + unsigned int i;
6406 /* Walk through the relocations */
6407 for (i = 0; i < ehdr.e_shnum; i++) {
6408 char *sym_strtab;
6409 Elf32_Sym *sh_symtab;
6410 struct section *sec_applies, *sec_symtab;
6411 - int j;
6412 + unsigned int j;
6413 struct section *sec = &secs[i];
6414
6415 if (sec->shdr.sh_type != SHT_REL) {
6416 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6417 if (sym->st_shndx == SHN_ABS) {
6418 continue;
6419 }
6420 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6421 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6422 + continue;
6423 +
6424 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6425 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6426 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6427 + continue;
6428 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6429 + continue;
6430 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6431 + continue;
6432 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6433 + continue;
6434 +#endif
6435 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6436 /*
6437 * NONE can be ignored and and PC relative
6438 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6439
6440 static void emit_relocs(int as_text)
6441 {
6442 - int i;
6443 + unsigned int i;
6444 /* Count how many relocations I have and allocate space for them. */
6445 reloc_count = 0;
6446 walk_relocs(count_reloc);
6447 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6448 fname, strerror(errno));
6449 }
6450 read_ehdr(fp);
6451 + read_phdrs(fp);
6452 read_shdrs(fp);
6453 read_strtabs(fp);
6454 read_symtabs(fp);
6455 diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6456 --- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6457 +++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6458 @@ -74,7 +74,7 @@ static int has_fpu(void)
6459 u16 fcw = -1, fsw = -1;
6460 u32 cr0;
6461
6462 - asm("movl %%cr0,%0" : "=r" (cr0));
6463 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6464 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6465 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6466 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6467 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6468 {
6469 u32 f0, f1;
6470
6471 - asm("pushfl ; "
6472 + asm volatile("pushfl ; "
6473 "pushfl ; "
6474 "popl %0 ; "
6475 "movl %0,%1 ; "
6476 @@ -115,7 +115,7 @@ static void get_flags(void)
6477 set_bit(X86_FEATURE_FPU, cpu.flags);
6478
6479 if (has_eflag(X86_EFLAGS_ID)) {
6480 - asm("cpuid"
6481 + asm volatile("cpuid"
6482 : "=a" (max_intel_level),
6483 "=b" (cpu_vendor[0]),
6484 "=d" (cpu_vendor[1]),
6485 @@ -124,7 +124,7 @@ static void get_flags(void)
6486
6487 if (max_intel_level >= 0x00000001 &&
6488 max_intel_level <= 0x0000ffff) {
6489 - asm("cpuid"
6490 + asm volatile("cpuid"
6491 : "=a" (tfms),
6492 "=c" (cpu.flags[4]),
6493 "=d" (cpu.flags[0])
6494 @@ -136,7 +136,7 @@ static void get_flags(void)
6495 cpu.model += ((tfms >> 16) & 0xf) << 4;
6496 }
6497
6498 - asm("cpuid"
6499 + asm volatile("cpuid"
6500 : "=a" (max_amd_level)
6501 : "a" (0x80000000)
6502 : "ebx", "ecx", "edx");
6503 @@ -144,7 +144,7 @@ static void get_flags(void)
6504 if (max_amd_level >= 0x80000001 &&
6505 max_amd_level <= 0x8000ffff) {
6506 u32 eax = 0x80000001;
6507 - asm("cpuid"
6508 + asm volatile("cpuid"
6509 : "+a" (eax),
6510 "=c" (cpu.flags[6]),
6511 "=d" (cpu.flags[1])
6512 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6513 u32 ecx = MSR_K7_HWCR;
6514 u32 eax, edx;
6515
6516 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6517 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6518 eax &= ~(1 << 15);
6519 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6520 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6521
6522 get_flags(); /* Make sure it really did something */
6523 err = check_flags();
6524 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6525 u32 ecx = MSR_VIA_FCR;
6526 u32 eax, edx;
6527
6528 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6529 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6530 eax |= (1<<1)|(1<<7);
6531 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6532 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6533
6534 set_bit(X86_FEATURE_CX8, cpu.flags);
6535 err = check_flags();
6536 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6537 u32 eax, edx;
6538 u32 level = 1;
6539
6540 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6541 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6542 - asm("cpuid"
6543 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6544 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6545 + asm volatile("cpuid"
6546 : "+a" (level), "=d" (cpu.flags[0])
6547 : : "ecx", "ebx");
6548 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6549 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6550
6551 err = check_flags();
6552 }
6553 diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6554 --- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6555 +++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6556 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6557 # single linked list of
6558 # struct setup_data
6559
6560 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6561 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6562
6563 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6564 #define VO_INIT_SIZE (VO__end - VO__text)
6565 diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6566 --- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6567 +++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6568 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6569 $(call cc-option, -fno-stack-protector) \
6570 $(call cc-option, -mpreferred-stack-boundary=2)
6571 KBUILD_CFLAGS += $(call cc-option, -m32)
6572 +ifdef CONSTIFY_PLUGIN
6573 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6574 +endif
6575 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6576 GCOV_PROFILE := n
6577
6578 diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6579 --- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6580 +++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6581 @@ -19,7 +19,7 @@
6582
6583 static int detect_memory_e820(void)
6584 {
6585 - int count = 0;
6586 + unsigned int count = 0;
6587 struct biosregs ireg, oreg;
6588 struct e820entry *desc = boot_params.e820_map;
6589 static struct e820entry buf; /* static so it is zeroed */
6590 diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6591 --- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6592 +++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6593 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6594 static unsigned int get_entry(void)
6595 {
6596 char entry_buf[4];
6597 - int i, len = 0;
6598 + unsigned int i, len = 0;
6599 int key;
6600 unsigned int v;
6601
6602 diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6603 --- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6604 +++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6605 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6606
6607 boot_params.screen_info.vesapm_seg = oreg.es;
6608 boot_params.screen_info.vesapm_off = oreg.di;
6609 + boot_params.screen_info.vesapm_size = oreg.cx;
6610 }
6611
6612 /*
6613 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6614 --- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6615 +++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6616 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6617 unsigned long dump_start, dump_size;
6618 struct user32 dump;
6619
6620 + memset(&dump, 0, sizeof(dump));
6621 +
6622 fs = get_fs();
6623 set_fs(KERNEL_DS);
6624 has_dumped = 1;
6625 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6626 dump_size = dump.u_ssize << PAGE_SHIFT;
6627 DUMP_WRITE(dump_start, dump_size);
6628 }
6629 - /*
6630 - * Finally dump the task struct. Not be used by gdb, but
6631 - * could be useful
6632 - */
6633 - set_fs(KERNEL_DS);
6634 - DUMP_WRITE(current, sizeof(*current));
6635 end_coredump:
6636 set_fs(fs);
6637 return has_dumped;
6638 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6639 --- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6640 +++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6641 @@ -13,6 +13,7 @@
6642 #include <asm/thread_info.h>
6643 #include <asm/segment.h>
6644 #include <asm/irqflags.h>
6645 +#include <asm/pgtable.h>
6646 #include <linux/linkage.h>
6647
6648 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6649 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6650 ENDPROC(native_irq_enable_sysexit)
6651 #endif
6652
6653 + .macro pax_enter_kernel_user
6654 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6655 + call pax_enter_kernel_user
6656 +#endif
6657 + .endm
6658 +
6659 + .macro pax_exit_kernel_user
6660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6661 + call pax_exit_kernel_user
6662 +#endif
6663 +#ifdef CONFIG_PAX_RANDKSTACK
6664 + pushq %rax
6665 + call pax_randomize_kstack
6666 + popq %rax
6667 +#endif
6668 + pax_erase_kstack
6669 + .endm
6670 +
6671 +.macro pax_erase_kstack
6672 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6673 + call pax_erase_kstack
6674 +#endif
6675 +.endm
6676 +
6677 /*
6678 * 32bit SYSENTER instruction entry.
6679 *
6680 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6681 CFI_REGISTER rsp,rbp
6682 SWAPGS_UNSAFE_STACK
6683 movq PER_CPU_VAR(kernel_stack), %rsp
6684 - addq $(KERNEL_STACK_OFFSET),%rsp
6685 + pax_enter_kernel_user
6686 /*
6687 * No need to follow this irqs on/off section: the syscall
6688 * disabled irqs, here we enable it straight after entry:
6689 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6690 pushfq
6691 CFI_ADJUST_CFA_OFFSET 8
6692 /*CFI_REL_OFFSET rflags,0*/
6693 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6694 + GET_THREAD_INFO(%r10)
6695 + movl TI_sysenter_return(%r10), %r10d
6696 CFI_REGISTER rip,r10
6697 pushq $__USER32_CS
6698 CFI_ADJUST_CFA_OFFSET 8
6699 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6700 SAVE_ARGS 0,0,1
6701 /* no need to do an access_ok check here because rbp has been
6702 32bit zero extended */
6703 +
6704 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6705 + mov $PAX_USER_SHADOW_BASE,%r10
6706 + add %r10,%rbp
6707 +#endif
6708 +
6709 1: movl (%rbp),%ebp
6710 .section __ex_table,"a"
6711 .quad 1b,ia32_badarg
6712 @@ -172,6 +204,7 @@ sysenter_dispatch:
6713 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6714 jnz sysexit_audit
6715 sysexit_from_sys_call:
6716 + pax_exit_kernel_user
6717 andl $~TS_COMPAT,TI_status(%r10)
6718 /* clear IF, that popfq doesn't enable interrupts early */
6719 andl $~0x200,EFLAGS-R11(%rsp)
6720 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6721 movl %eax,%esi /* 2nd arg: syscall number */
6722 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6723 call audit_syscall_entry
6724 +
6725 + pax_erase_kstack
6726 +
6727 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6728 cmpq $(IA32_NR_syscalls-1),%rax
6729 ja ia32_badsys
6730 @@ -252,6 +288,9 @@ sysenter_tracesys:
6731 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6732 movq %rsp,%rdi /* &pt_regs -> arg1 */
6733 call syscall_trace_enter
6734 +
6735 + pax_erase_kstack
6736 +
6737 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6738 RESTORE_REST
6739 cmpq $(IA32_NR_syscalls-1),%rax
6740 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6741 ENTRY(ia32_cstar_target)
6742 CFI_STARTPROC32 simple
6743 CFI_SIGNAL_FRAME
6744 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6745 + CFI_DEF_CFA rsp,0
6746 CFI_REGISTER rip,rcx
6747 /*CFI_REGISTER rflags,r11*/
6748 SWAPGS_UNSAFE_STACK
6749 movl %esp,%r8d
6750 CFI_REGISTER rsp,r8
6751 movq PER_CPU_VAR(kernel_stack),%rsp
6752 +
6753 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6754 + pax_enter_kernel_user
6755 +#endif
6756 +
6757 /*
6758 * No need to follow this irqs on/off section: the syscall
6759 * disabled irqs and here we enable it straight after entry:
6760 */
6761 ENABLE_INTERRUPTS(CLBR_NONE)
6762 - SAVE_ARGS 8,1,1
6763 + SAVE_ARGS 8*6,1,1
6764 movl %eax,%eax /* zero extension */
6765 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6766 movq %rcx,RIP-ARGOFFSET(%rsp)
6767 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6768 /* no need to do an access_ok check here because r8 has been
6769 32bit zero extended */
6770 /* hardware stack frame is complete now */
6771 +
6772 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6773 + mov $PAX_USER_SHADOW_BASE,%r10
6774 + add %r10,%r8
6775 +#endif
6776 +
6777 1: movl (%r8),%r9d
6778 .section __ex_table,"a"
6779 .quad 1b,ia32_badarg
6780 @@ -333,6 +383,7 @@ cstar_dispatch:
6781 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6782 jnz sysretl_audit
6783 sysretl_from_sys_call:
6784 + pax_exit_kernel_user
6785 andl $~TS_COMPAT,TI_status(%r10)
6786 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6787 movl RIP-ARGOFFSET(%rsp),%ecx
6788 @@ -370,6 +421,9 @@ cstar_tracesys:
6789 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6790 movq %rsp,%rdi /* &pt_regs -> arg1 */
6791 call syscall_trace_enter
6792 +
6793 + pax_erase_kstack
6794 +
6795 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6796 RESTORE_REST
6797 xchgl %ebp,%r9d
6798 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6799 CFI_REL_OFFSET rip,RIP-RIP
6800 PARAVIRT_ADJUST_EXCEPTION_FRAME
6801 SWAPGS
6802 + pax_enter_kernel_user
6803 /*
6804 * No need to follow this irqs on/off section: the syscall
6805 * disabled irqs and here we enable it straight after entry:
6806 @@ -448,6 +503,9 @@ ia32_tracesys:
6807 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6808 movq %rsp,%rdi /* &pt_regs -> arg1 */
6809 call syscall_trace_enter
6810 +
6811 + pax_erase_kstack
6812 +
6813 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6814 RESTORE_REST
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6817 --- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6818 +++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6819 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6820 sp -= frame_size;
6821 /* Align the stack pointer according to the i386 ABI,
6822 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6823 - sp = ((sp + 4) & -16ul) - 4;
6824 + sp = ((sp - 12) & -16ul) - 4;
6825 return (void __user *) sp;
6826 }
6827
6828 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6829 * These are actually not used anymore, but left because some
6830 * gdb versions depend on them as a marker.
6831 */
6832 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6833 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6834 } put_user_catch(err);
6835
6836 if (err)
6837 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6838 0xb8,
6839 __NR_ia32_rt_sigreturn,
6840 0x80cd,
6841 - 0,
6842 + 0
6843 };
6844
6845 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6846 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6847
6848 if (ka->sa.sa_flags & SA_RESTORER)
6849 restorer = ka->sa.sa_restorer;
6850 + else if (current->mm->context.vdso)
6851 + /* Return stub is in 32bit vsyscall page */
6852 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6853 else
6854 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6855 - rt_sigreturn);
6856 + restorer = &frame->retcode;
6857 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6858
6859 /*
6860 * Not actually used anymore, but left because some gdb
6861 * versions need it.
6862 */
6863 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6864 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6865 } put_user_catch(err);
6866
6867 if (err)
6868 diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6869 --- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6870 +++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6871 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6872 " .byte 662b-661b\n" /* sourcelen */ \
6873 " .byte 664f-663f\n" /* replacementlen */ \
6874 ".previous\n" \
6875 - ".section .altinstr_replacement, \"ax\"\n" \
6876 + ".section .altinstr_replacement, \"a\"\n" \
6877 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6878 ".previous"
6879
6880 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6881 --- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
6882 +++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
6883 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
6884
6885 #ifdef CONFIG_X86_LOCAL_APIC
6886
6887 -extern unsigned int apic_verbosity;
6888 +extern int apic_verbosity;
6889 extern int local_apic_timer_c2_ok;
6890
6891 extern int disable_apic;
6892 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
6893 --- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6894 +++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6895 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6896 __asm__ __volatile__(APM_DO_ZERO_SEGS
6897 "pushl %%edi\n\t"
6898 "pushl %%ebp\n\t"
6899 - "lcall *%%cs:apm_bios_entry\n\t"
6900 + "lcall *%%ss:apm_bios_entry\n\t"
6901 "setc %%al\n\t"
6902 "popl %%ebp\n\t"
6903 "popl %%edi\n\t"
6904 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6905 __asm__ __volatile__(APM_DO_ZERO_SEGS
6906 "pushl %%edi\n\t"
6907 "pushl %%ebp\n\t"
6908 - "lcall *%%cs:apm_bios_entry\n\t"
6909 + "lcall *%%ss:apm_bios_entry\n\t"
6910 "setc %%bl\n\t"
6911 "popl %%ebp\n\t"
6912 "popl %%edi\n\t"
6913 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
6914 --- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6915 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6916 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6917 }
6918
6919 /**
6920 + * atomic_read_unchecked - read atomic variable
6921 + * @v: pointer of type atomic_unchecked_t
6922 + *
6923 + * Atomically reads the value of @v.
6924 + */
6925 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6926 +{
6927 + return v->counter;
6928 +}
6929 +
6930 +/**
6931 * atomic_set - set atomic variable
6932 * @v: pointer of type atomic_t
6933 * @i: required value
6934 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6935 }
6936
6937 /**
6938 + * atomic_set_unchecked - set atomic variable
6939 + * @v: pointer of type atomic_unchecked_t
6940 + * @i: required value
6941 + *
6942 + * Atomically sets the value of @v to @i.
6943 + */
6944 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6945 +{
6946 + v->counter = i;
6947 +}
6948 +
6949 +/**
6950 * atomic_add - add integer to atomic variable
6951 * @i: integer value to add
6952 * @v: pointer of type atomic_t
6953 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6954 */
6955 static inline void atomic_add(int i, atomic_t *v)
6956 {
6957 - asm volatile(LOCK_PREFIX "addl %1,%0"
6958 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6959 +
6960 +#ifdef CONFIG_PAX_REFCOUNT
6961 + "jno 0f\n"
6962 + LOCK_PREFIX "subl %1,%0\n"
6963 + "int $4\n0:\n"
6964 + _ASM_EXTABLE(0b, 0b)
6965 +#endif
6966 +
6967 + : "+m" (v->counter)
6968 + : "ir" (i));
6969 +}
6970 +
6971 +/**
6972 + * atomic_add_unchecked - add integer to atomic variable
6973 + * @i: integer value to add
6974 + * @v: pointer of type atomic_unchecked_t
6975 + *
6976 + * Atomically adds @i to @v.
6977 + */
6978 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6979 +{
6980 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6981 : "+m" (v->counter)
6982 : "ir" (i));
6983 }
6984 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6985 */
6986 static inline void atomic_sub(int i, atomic_t *v)
6987 {
6988 - asm volatile(LOCK_PREFIX "subl %1,%0"
6989 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6990 +
6991 +#ifdef CONFIG_PAX_REFCOUNT
6992 + "jno 0f\n"
6993 + LOCK_PREFIX "addl %1,%0\n"
6994 + "int $4\n0:\n"
6995 + _ASM_EXTABLE(0b, 0b)
6996 +#endif
6997 +
6998 + : "+m" (v->counter)
6999 + : "ir" (i));
7000 +}
7001 +
7002 +/**
7003 + * atomic_sub_unchecked - subtract integer from atomic variable
7004 + * @i: integer value to subtract
7005 + * @v: pointer of type atomic_unchecked_t
7006 + *
7007 + * Atomically subtracts @i from @v.
7008 + */
7009 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7010 +{
7011 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7012 : "+m" (v->counter)
7013 : "ir" (i));
7014 }
7015 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7016 {
7017 unsigned char c;
7018
7019 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7020 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7021 +
7022 +#ifdef CONFIG_PAX_REFCOUNT
7023 + "jno 0f\n"
7024 + LOCK_PREFIX "addl %2,%0\n"
7025 + "int $4\n0:\n"
7026 + _ASM_EXTABLE(0b, 0b)
7027 +#endif
7028 +
7029 + "sete %1\n"
7030 : "+m" (v->counter), "=qm" (c)
7031 : "ir" (i) : "memory");
7032 return c;
7033 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7034 */
7035 static inline void atomic_inc(atomic_t *v)
7036 {
7037 - asm volatile(LOCK_PREFIX "incl %0"
7038 + asm volatile(LOCK_PREFIX "incl %0\n"
7039 +
7040 +#ifdef CONFIG_PAX_REFCOUNT
7041 + "jno 0f\n"
7042 + LOCK_PREFIX "decl %0\n"
7043 + "int $4\n0:\n"
7044 + _ASM_EXTABLE(0b, 0b)
7045 +#endif
7046 +
7047 + : "+m" (v->counter));
7048 +}
7049 +
7050 +/**
7051 + * atomic_inc_unchecked - increment atomic variable
7052 + * @v: pointer of type atomic_unchecked_t
7053 + *
7054 + * Atomically increments @v by 1.
7055 + */
7056 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7057 +{
7058 + asm volatile(LOCK_PREFIX "incl %0\n"
7059 : "+m" (v->counter));
7060 }
7061
7062 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7063 */
7064 static inline void atomic_dec(atomic_t *v)
7065 {
7066 - asm volatile(LOCK_PREFIX "decl %0"
7067 + asm volatile(LOCK_PREFIX "decl %0\n"
7068 +
7069 +#ifdef CONFIG_PAX_REFCOUNT
7070 + "jno 0f\n"
7071 + LOCK_PREFIX "incl %0\n"
7072 + "int $4\n0:\n"
7073 + _ASM_EXTABLE(0b, 0b)
7074 +#endif
7075 +
7076 + : "+m" (v->counter));
7077 +}
7078 +
7079 +/**
7080 + * atomic_dec_unchecked - decrement atomic variable
7081 + * @v: pointer of type atomic_unchecked_t
7082 + *
7083 + * Atomically decrements @v by 1.
7084 + */
7085 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7086 +{
7087 + asm volatile(LOCK_PREFIX "decl %0\n"
7088 : "+m" (v->counter));
7089 }
7090
7091 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7092 {
7093 unsigned char c;
7094
7095 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7096 + asm volatile(LOCK_PREFIX "decl %0\n"
7097 +
7098 +#ifdef CONFIG_PAX_REFCOUNT
7099 + "jno 0f\n"
7100 + LOCK_PREFIX "incl %0\n"
7101 + "int $4\n0:\n"
7102 + _ASM_EXTABLE(0b, 0b)
7103 +#endif
7104 +
7105 + "sete %1\n"
7106 : "+m" (v->counter), "=qm" (c)
7107 : : "memory");
7108 return c != 0;
7109 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7110 {
7111 unsigned char c;
7112
7113 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7114 + asm volatile(LOCK_PREFIX "incl %0\n"
7115 +
7116 +#ifdef CONFIG_PAX_REFCOUNT
7117 + "jno 0f\n"
7118 + LOCK_PREFIX "decl %0\n"
7119 + "into\n0:\n"
7120 + _ASM_EXTABLE(0b, 0b)
7121 +#endif
7122 +
7123 + "sete %1\n"
7124 + : "+m" (v->counter), "=qm" (c)
7125 + : : "memory");
7126 + return c != 0;
7127 +}
7128 +
7129 +/**
7130 + * atomic_inc_and_test_unchecked - increment and test
7131 + * @v: pointer of type atomic_unchecked_t
7132 + *
7133 + * Atomically increments @v by 1
7134 + * and returns true if the result is zero, or false for all
7135 + * other cases.
7136 + */
7137 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7138 +{
7139 + unsigned char c;
7140 +
7141 + asm volatile(LOCK_PREFIX "incl %0\n"
7142 + "sete %1\n"
7143 : "+m" (v->counter), "=qm" (c)
7144 : : "memory");
7145 return c != 0;
7146 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7147 {
7148 unsigned char c;
7149
7150 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7151 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7152 +
7153 +#ifdef CONFIG_PAX_REFCOUNT
7154 + "jno 0f\n"
7155 + LOCK_PREFIX "subl %2,%0\n"
7156 + "int $4\n0:\n"
7157 + _ASM_EXTABLE(0b, 0b)
7158 +#endif
7159 +
7160 + "sets %1\n"
7161 : "+m" (v->counter), "=qm" (c)
7162 : "ir" (i) : "memory");
7163 return c;
7164 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7165 #endif
7166 /* Modern 486+ processor */
7167 __i = i;
7168 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7169 +
7170 +#ifdef CONFIG_PAX_REFCOUNT
7171 + "jno 0f\n"
7172 + "movl %0, %1\n"
7173 + "int $4\n0:\n"
7174 + _ASM_EXTABLE(0b, 0b)
7175 +#endif
7176 +
7177 + : "+r" (i), "+m" (v->counter)
7178 + : : "memory");
7179 + return i + __i;
7180 +
7181 +#ifdef CONFIG_M386
7182 +no_xadd: /* Legacy 386 processor */
7183 + local_irq_save(flags);
7184 + __i = atomic_read(v);
7185 + atomic_set(v, i + __i);
7186 + local_irq_restore(flags);
7187 + return i + __i;
7188 +#endif
7189 +}
7190 +
7191 +/**
7192 + * atomic_add_return_unchecked - add integer and return
7193 + * @v: pointer of type atomic_unchecked_t
7194 + * @i: integer value to add
7195 + *
7196 + * Atomically adds @i to @v and returns @i + @v
7197 + */
7198 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7199 +{
7200 + int __i;
7201 +#ifdef CONFIG_M386
7202 + unsigned long flags;
7203 + if (unlikely(boot_cpu_data.x86 <= 3))
7204 + goto no_xadd;
7205 +#endif
7206 + /* Modern 486+ processor */
7207 + __i = i;
7208 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7209 : "+r" (i), "+m" (v->counter)
7210 : : "memory");
7211 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7212 return cmpxchg(&v->counter, old, new);
7213 }
7214
7215 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7216 +{
7217 + return cmpxchg(&v->counter, old, new);
7218 +}
7219 +
7220 static inline int atomic_xchg(atomic_t *v, int new)
7221 {
7222 return xchg(&v->counter, new);
7223 }
7224
7225 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7226 +{
7227 + return xchg(&v->counter, new);
7228 +}
7229 +
7230 /**
7231 * atomic_add_unless - add unless the number is already a given value
7232 * @v: pointer of type atomic_t
7233 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7234 */
7235 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7236 {
7237 - int c, old;
7238 + int c, old, new;
7239 c = atomic_read(v);
7240 for (;;) {
7241 - if (unlikely(c == (u)))
7242 + if (unlikely(c == u))
7243 break;
7244 - old = atomic_cmpxchg((v), c, c + (a));
7245 +
7246 + asm volatile("addl %2,%0\n"
7247 +
7248 +#ifdef CONFIG_PAX_REFCOUNT
7249 + "jno 0f\n"
7250 + "subl %2,%0\n"
7251 + "int $4\n0:\n"
7252 + _ASM_EXTABLE(0b, 0b)
7253 +#endif
7254 +
7255 + : "=r" (new)
7256 + : "0" (c), "ir" (a));
7257 +
7258 + old = atomic_cmpxchg(v, c, new);
7259 if (likely(old == c))
7260 break;
7261 c = old;
7262 }
7263 - return c != (u);
7264 + return c != u;
7265 }
7266
7267 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7268
7269 #define atomic_inc_return(v) (atomic_add_return(1, v))
7270 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7271 +{
7272 + return atomic_add_return_unchecked(1, v);
7273 +}
7274 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7275
7276 /* These are x86-specific, used by some header files */
7277 @@ -266,9 +495,18 @@ typedef struct {
7278 u64 __aligned(8) counter;
7279 } atomic64_t;
7280
7281 +#ifdef CONFIG_PAX_REFCOUNT
7282 +typedef struct {
7283 + u64 __aligned(8) counter;
7284 +} atomic64_unchecked_t;
7285 +#else
7286 +typedef atomic64_t atomic64_unchecked_t;
7287 +#endif
7288 +
7289 #define ATOMIC64_INIT(val) { (val) }
7290
7291 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7292 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7293
7294 /**
7295 * atomic64_xchg - xchg atomic64 variable
7296 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7297 * the old value.
7298 */
7299 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7300 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7301
7302 /**
7303 * atomic64_set - set atomic64 variable
7304 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7305 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7306
7307 /**
7308 + * atomic64_unchecked_set - set atomic64 variable
7309 + * @ptr: pointer to type atomic64_unchecked_t
7310 + * @new_val: value to assign
7311 + *
7312 + * Atomically sets the value of @ptr to @new_val.
7313 + */
7314 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7315 +
7316 +/**
7317 * atomic64_read - read atomic64 variable
7318 * @ptr: pointer to type atomic64_t
7319 *
7320 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7321 return res;
7322 }
7323
7324 -extern u64 atomic64_read(atomic64_t *ptr);
7325 +/**
7326 + * atomic64_read_unchecked - read atomic64 variable
7327 + * @ptr: pointer to type atomic64_unchecked_t
7328 + *
7329 + * Atomically reads the value of @ptr and returns it.
7330 + */
7331 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7332 +{
7333 + u64 res;
7334 +
7335 + /*
7336 + * Note, we inline this atomic64_unchecked_t primitive because
7337 + * it only clobbers EAX/EDX and leaves the others
7338 + * untouched. We also (somewhat subtly) rely on the
7339 + * fact that cmpxchg8b returns the current 64-bit value
7340 + * of the memory location we are touching:
7341 + */
7342 + asm volatile(
7343 + "mov %%ebx, %%eax\n\t"
7344 + "mov %%ecx, %%edx\n\t"
7345 + LOCK_PREFIX "cmpxchg8b %1\n"
7346 + : "=&A" (res)
7347 + : "m" (*ptr)
7348 + );
7349 +
7350 + return res;
7351 +}
7352
7353 /**
7354 * atomic64_add_return - add and return
7355 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7356 * Other variants with different arithmetic operators:
7357 */
7358 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7359 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7360 extern u64 atomic64_inc_return(atomic64_t *ptr);
7361 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7362 extern u64 atomic64_dec_return(atomic64_t *ptr);
7363 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7364
7365 /**
7366 * atomic64_add - add integer to atomic64 variable
7367 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7368 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7369
7370 /**
7371 + * atomic64_add_unchecked - add integer to atomic64 variable
7372 + * @delta: integer value to add
7373 + * @ptr: pointer to type atomic64_unchecked_t
7374 + *
7375 + * Atomically adds @delta to @ptr.
7376 + */
7377 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7378 +
7379 +/**
7380 * atomic64_sub - subtract the atomic64 variable
7381 * @delta: integer value to subtract
7382 * @ptr: pointer to type atomic64_t
7383 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7384 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7385
7386 /**
7387 + * atomic64_sub_unchecked - subtract the atomic64 variable
7388 + * @delta: integer value to subtract
7389 + * @ptr: pointer to type atomic64_unchecked_t
7390 + *
7391 + * Atomically subtracts @delta from @ptr.
7392 + */
7393 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7394 +
7395 +/**
7396 * atomic64_sub_and_test - subtract value from variable and test result
7397 * @delta: integer value to subtract
7398 * @ptr: pointer to type atomic64_t
7399 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7400 extern void atomic64_inc(atomic64_t *ptr);
7401
7402 /**
7403 + * atomic64_inc_unchecked - increment atomic64 variable
7404 + * @ptr: pointer to type atomic64_unchecked_t
7405 + *
7406 + * Atomically increments @ptr by 1.
7407 + */
7408 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7409 +
7410 +/**
7411 * atomic64_dec - decrement atomic64 variable
7412 * @ptr: pointer to type atomic64_t
7413 *
7414 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7415 extern void atomic64_dec(atomic64_t *ptr);
7416
7417 /**
7418 + * atomic64_dec_unchecked - decrement atomic64 variable
7419 + * @ptr: pointer to type atomic64_unchecked_t
7420 + *
7421 + * Atomically decrements @ptr by 1.
7422 + */
7423 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7424 +
7425 +/**
7426 * atomic64_dec_and_test - decrement and test
7427 * @ptr: pointer to type atomic64_t
7428 *
7429 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7430 --- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7431 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7432 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7433 }
7434
7435 /**
7436 + * atomic_read_unchecked - read atomic variable
7437 + * @v: pointer of type atomic_unchecked_t
7438 + *
7439 + * Atomically reads the value of @v.
7440 + */
7441 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7442 +{
7443 + return v->counter;
7444 +}
7445 +
7446 +/**
7447 * atomic_set - set atomic variable
7448 * @v: pointer of type atomic_t
7449 * @i: required value
7450 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7451 }
7452
7453 /**
7454 + * atomic_set_unchecked - set atomic variable
7455 + * @v: pointer of type atomic_unchecked_t
7456 + * @i: required value
7457 + *
7458 + * Atomically sets the value of @v to @i.
7459 + */
7460 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7461 +{
7462 + v->counter = i;
7463 +}
7464 +
7465 +/**
7466 * atomic_add - add integer to atomic variable
7467 * @i: integer value to add
7468 * @v: pointer of type atomic_t
7469 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7470 */
7471 static inline void atomic_add(int i, atomic_t *v)
7472 {
7473 - asm volatile(LOCK_PREFIX "addl %1,%0"
7474 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7475 +
7476 +#ifdef CONFIG_PAX_REFCOUNT
7477 + "jno 0f\n"
7478 + LOCK_PREFIX "subl %1,%0\n"
7479 + "int $4\n0:\n"
7480 + _ASM_EXTABLE(0b, 0b)
7481 +#endif
7482 +
7483 + : "=m" (v->counter)
7484 + : "ir" (i), "m" (v->counter));
7485 +}
7486 +
7487 +/**
7488 + * atomic_add_unchecked - add integer to atomic variable
7489 + * @i: integer value to add
7490 + * @v: pointer of type atomic_unchecked_t
7491 + *
7492 + * Atomically adds @i to @v.
7493 + */
7494 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7495 +{
7496 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7497 : "=m" (v->counter)
7498 : "ir" (i), "m" (v->counter));
7499 }
7500 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7501 */
7502 static inline void atomic_sub(int i, atomic_t *v)
7503 {
7504 - asm volatile(LOCK_PREFIX "subl %1,%0"
7505 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + LOCK_PREFIX "addl %1,%0\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 + : "=m" (v->counter)
7515 + : "ir" (i), "m" (v->counter));
7516 +}
7517 +
7518 +/**
7519 + * atomic_sub_unchecked - subtract the atomic variable
7520 + * @i: integer value to subtract
7521 + * @v: pointer of type atomic_unchecked_t
7522 + *
7523 + * Atomically subtracts @i from @v.
7524 + */
7525 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7526 +{
7527 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7528 : "=m" (v->counter)
7529 : "ir" (i), "m" (v->counter));
7530 }
7531 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7536 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + LOCK_PREFIX "addl %2,%0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sete %1\n"
7546 : "=m" (v->counter), "=qm" (c)
7547 : "ir" (i), "m" (v->counter) : "memory");
7548 return c;
7549 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7550 */
7551 static inline void atomic_inc(atomic_t *v)
7552 {
7553 - asm volatile(LOCK_PREFIX "incl %0"
7554 + asm volatile(LOCK_PREFIX "incl %0\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + LOCK_PREFIX "decl %0\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 + : "=m" (v->counter)
7564 + : "m" (v->counter));
7565 +}
7566 +
7567 +/**
7568 + * atomic_inc_unchecked - increment atomic variable
7569 + * @v: pointer of type atomic_unchecked_t
7570 + *
7571 + * Atomically increments @v by 1.
7572 + */
7573 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7574 +{
7575 + asm volatile(LOCK_PREFIX "incl %0\n"
7576 : "=m" (v->counter)
7577 : "m" (v->counter));
7578 }
7579 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7580 */
7581 static inline void atomic_dec(atomic_t *v)
7582 {
7583 - asm volatile(LOCK_PREFIX "decl %0"
7584 + asm volatile(LOCK_PREFIX "decl %0\n"
7585 +
7586 +#ifdef CONFIG_PAX_REFCOUNT
7587 + "jno 0f\n"
7588 + LOCK_PREFIX "incl %0\n"
7589 + "int $4\n0:\n"
7590 + _ASM_EXTABLE(0b, 0b)
7591 +#endif
7592 +
7593 + : "=m" (v->counter)
7594 + : "m" (v->counter));
7595 +}
7596 +
7597 +/**
7598 + * atomic_dec_unchecked - decrement atomic variable
7599 + * @v: pointer of type atomic_unchecked_t
7600 + *
7601 + * Atomically decrements @v by 1.
7602 + */
7603 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7604 +{
7605 + asm volatile(LOCK_PREFIX "decl %0\n"
7606 : "=m" (v->counter)
7607 : "m" (v->counter));
7608 }
7609 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7610 {
7611 unsigned char c;
7612
7613 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7614 + asm volatile(LOCK_PREFIX "decl %0\n"
7615 +
7616 +#ifdef CONFIG_PAX_REFCOUNT
7617 + "jno 0f\n"
7618 + LOCK_PREFIX "incl %0\n"
7619 + "int $4\n0:\n"
7620 + _ASM_EXTABLE(0b, 0b)
7621 +#endif
7622 +
7623 + "sete %1\n"
7624 : "=m" (v->counter), "=qm" (c)
7625 : "m" (v->counter) : "memory");
7626 return c != 0;
7627 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7628 {
7629 unsigned char c;
7630
7631 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7632 + asm volatile(LOCK_PREFIX "incl %0\n"
7633 +
7634 +#ifdef CONFIG_PAX_REFCOUNT
7635 + "jno 0f\n"
7636 + LOCK_PREFIX "decl %0\n"
7637 + "int $4\n0:\n"
7638 + _ASM_EXTABLE(0b, 0b)
7639 +#endif
7640 +
7641 + "sete %1\n"
7642 + : "=m" (v->counter), "=qm" (c)
7643 + : "m" (v->counter) : "memory");
7644 + return c != 0;
7645 +}
7646 +
7647 +/**
7648 + * atomic_inc_and_test_unchecked - increment and test
7649 + * @v: pointer of type atomic_unchecked_t
7650 + *
7651 + * Atomically increments @v by 1
7652 + * and returns true if the result is zero, or false for all
7653 + * other cases.
7654 + */
7655 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7656 +{
7657 + unsigned char c;
7658 +
7659 + asm volatile(LOCK_PREFIX "incl %0\n"
7660 + "sete %1\n"
7661 : "=m" (v->counter), "=qm" (c)
7662 : "m" (v->counter) : "memory");
7663 return c != 0;
7664 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7665 {
7666 unsigned char c;
7667
7668 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7669 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7670 +
7671 +#ifdef CONFIG_PAX_REFCOUNT
7672 + "jno 0f\n"
7673 + LOCK_PREFIX "subl %2,%0\n"
7674 + "int $4\n0:\n"
7675 + _ASM_EXTABLE(0b, 0b)
7676 +#endif
7677 +
7678 + "sets %1\n"
7679 : "=m" (v->counter), "=qm" (c)
7680 : "ir" (i), "m" (v->counter) : "memory");
7681 return c;
7682 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7683 static inline int atomic_add_return(int i, atomic_t *v)
7684 {
7685 int __i = i;
7686 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7687 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7688 +
7689 +#ifdef CONFIG_PAX_REFCOUNT
7690 + "jno 0f\n"
7691 + "movl %0, %1\n"
7692 + "int $4\n0:\n"
7693 + _ASM_EXTABLE(0b, 0b)
7694 +#endif
7695 +
7696 + : "+r" (i), "+m" (v->counter)
7697 + : : "memory");
7698 + return i + __i;
7699 +}
7700 +
7701 +/**
7702 + * atomic_add_return_unchecked - add and return
7703 + * @i: integer value to add
7704 + * @v: pointer of type atomic_unchecked_t
7705 + *
7706 + * Atomically adds @i to @v and returns @i + @v
7707 + */
7708 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7709 +{
7710 + int __i = i;
7711 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7712 : "+r" (i), "+m" (v->counter)
7713 : : "memory");
7714 return i + __i;
7715 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7716 }
7717
7718 #define atomic_inc_return(v) (atomic_add_return(1, v))
7719 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7720 +{
7721 + return atomic_add_return_unchecked(1, v);
7722 +}
7723 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7724
7725 /* The 64-bit atomic type */
7726 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7727 }
7728
7729 /**
7730 + * atomic64_read_unchecked - read atomic64 variable
7731 + * @v: pointer of type atomic64_unchecked_t
7732 + *
7733 + * Atomically reads the value of @v.
7734 + * Doesn't imply a read memory barrier.
7735 + */
7736 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7737 +{
7738 + return v->counter;
7739 +}
7740 +
7741 +/**
7742 * atomic64_set - set atomic64 variable
7743 * @v: pointer to type atomic64_t
7744 * @i: required value
7745 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7746 }
7747
7748 /**
7749 + * atomic64_set_unchecked - set atomic64 variable
7750 + * @v: pointer to type atomic64_unchecked_t
7751 + * @i: required value
7752 + *
7753 + * Atomically sets the value of @v to @i.
7754 + */
7755 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7756 +{
7757 + v->counter = i;
7758 +}
7759 +
7760 +/**
7761 * atomic64_add - add integer to atomic64 variable
7762 * @i: integer value to add
7763 * @v: pointer to type atomic64_t
7764 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7765 */
7766 static inline void atomic64_add(long i, atomic64_t *v)
7767 {
7768 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7769 +
7770 +#ifdef CONFIG_PAX_REFCOUNT
7771 + "jno 0f\n"
7772 + LOCK_PREFIX "subq %1,%0\n"
7773 + "int $4\n0:\n"
7774 + _ASM_EXTABLE(0b, 0b)
7775 +#endif
7776 +
7777 + : "=m" (v->counter)
7778 + : "er" (i), "m" (v->counter));
7779 +}
7780 +
7781 +/**
7782 + * atomic64_add_unchecked - add integer to atomic64 variable
7783 + * @i: integer value to add
7784 + * @v: pointer to type atomic64_unchecked_t
7785 + *
7786 + * Atomically adds @i to @v.
7787 + */
7788 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7789 +{
7790 asm volatile(LOCK_PREFIX "addq %1,%0"
7791 : "=m" (v->counter)
7792 : "er" (i), "m" (v->counter));
7793 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7794 */
7795 static inline void atomic64_sub(long i, atomic64_t *v)
7796 {
7797 - asm volatile(LOCK_PREFIX "subq %1,%0"
7798 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7799 +
7800 +#ifdef CONFIG_PAX_REFCOUNT
7801 + "jno 0f\n"
7802 + LOCK_PREFIX "addq %1,%0\n"
7803 + "int $4\n0:\n"
7804 + _ASM_EXTABLE(0b, 0b)
7805 +#endif
7806 +
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809 }
7810 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7811 {
7812 unsigned char c;
7813
7814 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7815 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7816 +
7817 +#ifdef CONFIG_PAX_REFCOUNT
7818 + "jno 0f\n"
7819 + LOCK_PREFIX "addq %2,%0\n"
7820 + "int $4\n0:\n"
7821 + _ASM_EXTABLE(0b, 0b)
7822 +#endif
7823 +
7824 + "sete %1\n"
7825 : "=m" (v->counter), "=qm" (c)
7826 : "er" (i), "m" (v->counter) : "memory");
7827 return c;
7828 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7829 */
7830 static inline void atomic64_inc(atomic64_t *v)
7831 {
7832 + asm volatile(LOCK_PREFIX "incq %0\n"
7833 +
7834 +#ifdef CONFIG_PAX_REFCOUNT
7835 + "jno 0f\n"
7836 + LOCK_PREFIX "decq %0\n"
7837 + "int $4\n0:\n"
7838 + _ASM_EXTABLE(0b, 0b)
7839 +#endif
7840 +
7841 + : "=m" (v->counter)
7842 + : "m" (v->counter));
7843 +}
7844 +
7845 +/**
7846 + * atomic64_inc_unchecked - increment atomic64 variable
7847 + * @v: pointer to type atomic64_unchecked_t
7848 + *
7849 + * Atomically increments @v by 1.
7850 + */
7851 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7852 +{
7853 asm volatile(LOCK_PREFIX "incq %0"
7854 : "=m" (v->counter)
7855 : "m" (v->counter));
7856 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7857 */
7858 static inline void atomic64_dec(atomic64_t *v)
7859 {
7860 - asm volatile(LOCK_PREFIX "decq %0"
7861 + asm volatile(LOCK_PREFIX "decq %0\n"
7862 +
7863 +#ifdef CONFIG_PAX_REFCOUNT
7864 + "jno 0f\n"
7865 + LOCK_PREFIX "incq %0\n"
7866 + "int $4\n0:\n"
7867 + _ASM_EXTABLE(0b, 0b)
7868 +#endif
7869 +
7870 + : "=m" (v->counter)
7871 + : "m" (v->counter));
7872 +}
7873 +
7874 +/**
7875 + * atomic64_dec_unchecked - decrement atomic64 variable
7876 + * @v: pointer to type atomic64_t
7877 + *
7878 + * Atomically decrements @v by 1.
7879 + */
7880 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7881 +{
7882 + asm volatile(LOCK_PREFIX "decq %0\n"
7883 : "=m" (v->counter)
7884 : "m" (v->counter));
7885 }
7886 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7887 {
7888 unsigned char c;
7889
7890 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7891 + asm volatile(LOCK_PREFIX "decq %0\n"
7892 +
7893 +#ifdef CONFIG_PAX_REFCOUNT
7894 + "jno 0f\n"
7895 + LOCK_PREFIX "incq %0\n"
7896 + "int $4\n0:\n"
7897 + _ASM_EXTABLE(0b, 0b)
7898 +#endif
7899 +
7900 + "sete %1\n"
7901 : "=m" (v->counter), "=qm" (c)
7902 : "m" (v->counter) : "memory");
7903 return c != 0;
7904 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7905 {
7906 unsigned char c;
7907
7908 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7909 + asm volatile(LOCK_PREFIX "incq %0\n"
7910 +
7911 +#ifdef CONFIG_PAX_REFCOUNT
7912 + "jno 0f\n"
7913 + LOCK_PREFIX "decq %0\n"
7914 + "int $4\n0:\n"
7915 + _ASM_EXTABLE(0b, 0b)
7916 +#endif
7917 +
7918 + "sete %1\n"
7919 : "=m" (v->counter), "=qm" (c)
7920 : "m" (v->counter) : "memory");
7921 return c != 0;
7922 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7923 {
7924 unsigned char c;
7925
7926 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7927 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7928 +
7929 +#ifdef CONFIG_PAX_REFCOUNT
7930 + "jno 0f\n"
7931 + LOCK_PREFIX "subq %2,%0\n"
7932 + "int $4\n0:\n"
7933 + _ASM_EXTABLE(0b, 0b)
7934 +#endif
7935 +
7936 + "sets %1\n"
7937 : "=m" (v->counter), "=qm" (c)
7938 : "er" (i), "m" (v->counter) : "memory");
7939 return c;
7940 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7941 static inline long atomic64_add_return(long i, atomic64_t *v)
7942 {
7943 long __i = i;
7944 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7945 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7946 +
7947 +#ifdef CONFIG_PAX_REFCOUNT
7948 + "jno 0f\n"
7949 + "movq %0, %1\n"
7950 + "int $4\n0:\n"
7951 + _ASM_EXTABLE(0b, 0b)
7952 +#endif
7953 +
7954 + : "+r" (i), "+m" (v->counter)
7955 + : : "memory");
7956 + return i + __i;
7957 +}
7958 +
7959 +/**
7960 + * atomic64_add_return_unchecked - add and return
7961 + * @i: integer value to add
7962 + * @v: pointer to type atomic64_unchecked_t
7963 + *
7964 + * Atomically adds @i to @v and returns @i + @v
7965 + */
7966 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7967 +{
7968 + long __i = i;
7969 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7970 : "+r" (i), "+m" (v->counter)
7971 : : "memory");
7972 return i + __i;
7973 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7974 }
7975
7976 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7977 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7978 +{
7979 + return atomic64_add_return_unchecked(1, v);
7980 +}
7981 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7982
7983 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7984 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7985 return cmpxchg(&v->counter, old, new);
7986 }
7987
7988 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7989 +{
7990 + return cmpxchg(&v->counter, old, new);
7991 +}
7992 +
7993 static inline long atomic64_xchg(atomic64_t *v, long new)
7994 {
7995 return xchg(&v->counter, new);
7996 }
7997
7998 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7999 +{
8000 + return xchg(&v->counter, new);
8001 +}
8002 +
8003 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8004 {
8005 return cmpxchg(&v->counter, old, new);
8006 }
8007
8008 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8009 +{
8010 + return cmpxchg(&v->counter, old, new);
8011 +}
8012 +
8013 static inline long atomic_xchg(atomic_t *v, int new)
8014 {
8015 return xchg(&v->counter, new);
8016 }
8017
8018 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8019 +{
8020 + return xchg(&v->counter, new);
8021 +}
8022 +
8023 /**
8024 * atomic_add_unless - add unless the number is a given value
8025 * @v: pointer of type atomic_t
8026 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8027 */
8028 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8029 {
8030 - int c, old;
8031 + int c, old, new;
8032 c = atomic_read(v);
8033 for (;;) {
8034 - if (unlikely(c == (u)))
8035 + if (unlikely(c == u))
8036 break;
8037 - old = atomic_cmpxchg((v), c, c + (a));
8038 +
8039 + asm volatile("addl %2,%0\n"
8040 +
8041 +#ifdef CONFIG_PAX_REFCOUNT
8042 + "jno 0f\n"
8043 + "subl %2,%0\n"
8044 + "int $4\n0:\n"
8045 + _ASM_EXTABLE(0b, 0b)
8046 +#endif
8047 +
8048 + : "=r" (new)
8049 + : "0" (c), "ir" (a));
8050 +
8051 + old = atomic_cmpxchg(v, c, new);
8052 if (likely(old == c))
8053 break;
8054 c = old;
8055 }
8056 - return c != (u);
8057 + return c != u;
8058 }
8059
8060 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8061 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8062 */
8063 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8064 {
8065 - long c, old;
8066 + long c, old, new;
8067 c = atomic64_read(v);
8068 for (;;) {
8069 - if (unlikely(c == (u)))
8070 + if (unlikely(c == u))
8071 break;
8072 - old = atomic64_cmpxchg((v), c, c + (a));
8073 +
8074 + asm volatile("addq %2,%0\n"
8075 +
8076 +#ifdef CONFIG_PAX_REFCOUNT
8077 + "jno 0f\n"
8078 + "subq %2,%0\n"
8079 + "int $4\n0:\n"
8080 + _ASM_EXTABLE(0b, 0b)
8081 +#endif
8082 +
8083 + : "=r" (new)
8084 + : "0" (c), "er" (a));
8085 +
8086 + old = atomic64_cmpxchg(v, c, new);
8087 if (likely(old == c))
8088 break;
8089 c = old;
8090 }
8091 - return c != (u);
8092 + return c != u;
8093 }
8094
8095 /**
8096 diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8097 --- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8098 +++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8099 @@ -38,7 +38,7 @@
8100 * a mask operation on a byte.
8101 */
8102 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8103 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8104 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8105 #define CONST_MASK(nr) (1 << ((nr) & 7))
8106
8107 /**
8108 diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8109 --- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8110 +++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8111 @@ -11,10 +11,15 @@
8112 #include <asm/pgtable_types.h>
8113
8114 /* Physical address where kernel should be loaded. */
8115 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8116 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8117 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8118 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8119
8120 +#ifndef __ASSEMBLY__
8121 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8122 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8123 +#endif
8124 +
8125 /* Minimum kernel alignment, as a power of two */
8126 #ifdef CONFIG_X86_64
8127 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8128 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8129 --- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8130 +++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8131 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8132 static inline unsigned long get_page_memtype(struct page *pg)
8133 {
8134 if (!PageUncached(pg) && !PageWC(pg))
8135 - return -1;
8136 + return ~0UL;
8137 else if (!PageUncached(pg) && PageWC(pg))
8138 return _PAGE_CACHE_WC;
8139 else if (PageUncached(pg) && !PageWC(pg))
8140 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8141 SetPageWC(pg);
8142 break;
8143 default:
8144 - case -1:
8145 + case ~0UL:
8146 ClearPageUncached(pg);
8147 ClearPageWC(pg);
8148 break;
8149 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8150 --- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8151 +++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8152 @@ -5,9 +5,10 @@
8153
8154 /* L1 cache line size */
8155 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8156 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8157 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8158
8159 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8160 +#define __read_only __attribute__((__section__(".data.read_only")))
8161
8162 #ifdef CONFIG_X86_VSMP
8163 /* vSMP Internode cacheline shift */
8164 diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8165 --- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8166 +++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8167 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8168 int len, __wsum sum,
8169 int *src_err_ptr, int *dst_err_ptr);
8170
8171 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8172 + int len, __wsum sum,
8173 + int *src_err_ptr, int *dst_err_ptr);
8174 +
8175 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8176 + int len, __wsum sum,
8177 + int *src_err_ptr, int *dst_err_ptr);
8178 +
8179 /*
8180 * Note: when you get a NULL pointer exception here this means someone
8181 * passed in an incorrect kernel address to one of these functions.
8182 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8183 int *err_ptr)
8184 {
8185 might_sleep();
8186 - return csum_partial_copy_generic((__force void *)src, dst,
8187 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8188 len, sum, err_ptr, NULL);
8189 }
8190
8191 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8192 {
8193 might_sleep();
8194 if (access_ok(VERIFY_WRITE, dst, len))
8195 - return csum_partial_copy_generic(src, (__force void *)dst,
8196 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8197 len, sum, NULL, err_ptr);
8198
8199 if (len)
8200 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8201 --- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8202 +++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8203 @@ -31,6 +31,12 @@ struct desc_struct {
8204 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8205 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8206 };
8207 + struct {
8208 + u16 offset_low;
8209 + u16 seg;
8210 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8211 + unsigned offset_high: 16;
8212 + } gate;
8213 };
8214 } __attribute__((packed));
8215
8216 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8217 --- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8218 +++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8219 @@ -4,6 +4,7 @@
8220 #include <asm/desc_defs.h>
8221 #include <asm/ldt.h>
8222 #include <asm/mmu.h>
8223 +#include <asm/pgtable.h>
8224 #include <linux/smp.h>
8225
8226 static inline void fill_ldt(struct desc_struct *desc,
8227 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8228 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8229 desc->type = (info->read_exec_only ^ 1) << 1;
8230 desc->type |= info->contents << 2;
8231 + desc->type |= info->seg_not_present ^ 1;
8232 desc->s = 1;
8233 desc->dpl = 0x3;
8234 desc->p = info->seg_not_present ^ 1;
8235 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8236 }
8237
8238 extern struct desc_ptr idt_descr;
8239 -extern gate_desc idt_table[];
8240 -
8241 -struct gdt_page {
8242 - struct desc_struct gdt[GDT_ENTRIES];
8243 -} __attribute__((aligned(PAGE_SIZE)));
8244 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8245 +extern gate_desc idt_table[256];
8246
8247 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8248 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8249 {
8250 - return per_cpu(gdt_page, cpu).gdt;
8251 + return cpu_gdt_table[cpu];
8252 }
8253
8254 #ifdef CONFIG_X86_64
8255 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8256 unsigned long base, unsigned dpl, unsigned flags,
8257 unsigned short seg)
8258 {
8259 - gate->a = (seg << 16) | (base & 0xffff);
8260 - gate->b = (base & 0xffff0000) |
8261 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8262 + gate->gate.offset_low = base;
8263 + gate->gate.seg = seg;
8264 + gate->gate.reserved = 0;
8265 + gate->gate.type = type;
8266 + gate->gate.s = 0;
8267 + gate->gate.dpl = dpl;
8268 + gate->gate.p = 1;
8269 + gate->gate.offset_high = base >> 16;
8270 }
8271
8272 #endif
8273 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8274 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8275 const gate_desc *gate)
8276 {
8277 + pax_open_kernel();
8278 memcpy(&idt[entry], gate, sizeof(*gate));
8279 + pax_close_kernel();
8280 }
8281
8282 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8283 const void *desc)
8284 {
8285 + pax_open_kernel();
8286 memcpy(&ldt[entry], desc, 8);
8287 + pax_close_kernel();
8288 }
8289
8290 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8291 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8292 size = sizeof(struct desc_struct);
8293 break;
8294 }
8295 +
8296 + pax_open_kernel();
8297 memcpy(&gdt[entry], desc, size);
8298 + pax_close_kernel();
8299 }
8300
8301 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8302 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8303
8304 static inline void native_load_tr_desc(void)
8305 {
8306 + pax_open_kernel();
8307 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8308 + pax_close_kernel();
8309 }
8310
8311 static inline void native_load_gdt(const struct desc_ptr *dtr)
8312 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8313 unsigned int i;
8314 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8315
8316 + pax_open_kernel();
8317 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8318 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8319 + pax_close_kernel();
8320 }
8321
8322 #define _LDT_empty(info) \
8323 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8324 desc->limit = (limit >> 16) & 0xf;
8325 }
8326
8327 -static inline void _set_gate(int gate, unsigned type, void *addr,
8328 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8329 unsigned dpl, unsigned ist, unsigned seg)
8330 {
8331 gate_desc s;
8332 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8333 * Pentium F0 0F bugfix can have resulted in the mapped
8334 * IDT being write-protected.
8335 */
8336 -static inline void set_intr_gate(unsigned int n, void *addr)
8337 +static inline void set_intr_gate(unsigned int n, const void *addr)
8338 {
8339 BUG_ON((unsigned)n > 0xFF);
8340 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8341 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8342 /*
8343 * This routine sets up an interrupt gate at directory privilege level 3.
8344 */
8345 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8346 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8347 {
8348 BUG_ON((unsigned)n > 0xFF);
8349 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8350 }
8351
8352 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8353 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8354 {
8355 BUG_ON((unsigned)n > 0xFF);
8356 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8357 }
8358
8359 -static inline void set_trap_gate(unsigned int n, void *addr)
8360 +static inline void set_trap_gate(unsigned int n, const void *addr)
8361 {
8362 BUG_ON((unsigned)n > 0xFF);
8363 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8364 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8365 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8366 {
8367 BUG_ON((unsigned)n > 0xFF);
8368 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8369 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8370 }
8371
8372 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8373 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8374 {
8375 BUG_ON((unsigned)n > 0xFF);
8376 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8377 }
8378
8379 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8380 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8381 {
8382 BUG_ON((unsigned)n > 0xFF);
8383 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8384 }
8385
8386 +#ifdef CONFIG_X86_32
8387 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8388 +{
8389 + struct desc_struct d;
8390 +
8391 + if (likely(limit))
8392 + limit = (limit - 1UL) >> PAGE_SHIFT;
8393 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8394 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8395 +}
8396 +#endif
8397 +
8398 #endif /* _ASM_X86_DESC_H */
8399 diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8400 --- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8401 +++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8402 @@ -6,7 +6,7 @@ struct dev_archdata {
8403 void *acpi_handle;
8404 #endif
8405 #ifdef CONFIG_X86_64
8406 -struct dma_map_ops *dma_ops;
8407 + const struct dma_map_ops *dma_ops;
8408 #endif
8409 #ifdef CONFIG_DMAR
8410 void *iommu; /* hook for IOMMU specific extension */
8411 diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8412 --- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8413 +++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8414 @@ -25,9 +25,9 @@ extern int iommu_merge;
8415 extern struct device x86_dma_fallback_dev;
8416 extern int panic_on_overflow;
8417
8418 -extern struct dma_map_ops *dma_ops;
8419 +extern const struct dma_map_ops *dma_ops;
8420
8421 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8422 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8423 {
8424 #ifdef CONFIG_X86_32
8425 return dma_ops;
8426 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8427 /* Make sure we keep the same behaviour */
8428 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8429 {
8430 - struct dma_map_ops *ops = get_dma_ops(dev);
8431 + const struct dma_map_ops *ops = get_dma_ops(dev);
8432 if (ops->mapping_error)
8433 return ops->mapping_error(dev, dma_addr);
8434
8435 @@ -122,7 +122,7 @@ static inline void *
8436 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8437 gfp_t gfp)
8438 {
8439 - struct dma_map_ops *ops = get_dma_ops(dev);
8440 + const struct dma_map_ops *ops = get_dma_ops(dev);
8441 void *memory;
8442
8443 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8444 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8445 static inline void dma_free_coherent(struct device *dev, size_t size,
8446 void *vaddr, dma_addr_t bus)
8447 {
8448 - struct dma_map_ops *ops = get_dma_ops(dev);
8449 + const struct dma_map_ops *ops = get_dma_ops(dev);
8450
8451 WARN_ON(irqs_disabled()); /* for portability */
8452
8453 diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8454 --- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8455 +++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8456 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8457 #define ISA_END_ADDRESS 0x100000
8458 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8459
8460 -#define BIOS_BEGIN 0x000a0000
8461 +#define BIOS_BEGIN 0x000c0000
8462 #define BIOS_END 0x00100000
8463
8464 #ifdef __KERNEL__
8465 diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8466 --- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8467 +++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8468 @@ -257,7 +257,25 @@ extern int force_personality32;
8469 the loader. We need to make sure that it is out of the way of the program
8470 that it will "exec", and that there is sufficient room for the brk. */
8471
8472 +#ifdef CONFIG_PAX_SEGMEXEC
8473 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8474 +#else
8475 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8476 +#endif
8477 +
8478 +#ifdef CONFIG_PAX_ASLR
8479 +#ifdef CONFIG_X86_32
8480 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8481 +
8482 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8483 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8484 +#else
8485 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8486 +
8487 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8488 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8489 +#endif
8490 +#endif
8491
8492 /* This yields a mask that user programs can use to figure out what
8493 instruction set this CPU supports. This could be done in user space,
8494 @@ -311,8 +329,7 @@ do { \
8495 #define ARCH_DLINFO \
8496 do { \
8497 if (vdso_enabled) \
8498 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8499 - (unsigned long)current->mm->context.vdso); \
8500 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8501 } while (0)
8502
8503 #define AT_SYSINFO 32
8504 @@ -323,7 +340,7 @@ do { \
8505
8506 #endif /* !CONFIG_X86_32 */
8507
8508 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8509 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8510
8511 #define VDSO_ENTRY \
8512 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8513 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8514 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8515 #define compat_arch_setup_additional_pages syscall32_setup_pages
8516
8517 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8518 -#define arch_randomize_brk arch_randomize_brk
8519 -
8520 #endif /* _ASM_X86_ELF_H */
8521 diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8522 --- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8523 +++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8524 @@ -15,6 +15,6 @@ enum reboot_type {
8525
8526 extern enum reboot_type reboot_type;
8527
8528 -extern void machine_emergency_restart(void);
8529 +extern void machine_emergency_restart(void) __noreturn;
8530
8531 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8532 diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8533 --- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8534 +++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8535 @@ -12,16 +12,18 @@
8536 #include <asm/system.h>
8537
8538 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8539 + typecheck(u32 *, uaddr); \
8540 asm volatile("1:\t" insn "\n" \
8541 "2:\t.section .fixup,\"ax\"\n" \
8542 "3:\tmov\t%3, %1\n" \
8543 "\tjmp\t2b\n" \
8544 "\t.previous\n" \
8545 _ASM_EXTABLE(1b, 3b) \
8546 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8547 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8548 : "i" (-EFAULT), "0" (oparg), "1" (0))
8549
8550 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8551 + typecheck(u32 *, uaddr); \
8552 asm volatile("1:\tmovl %2, %0\n" \
8553 "\tmovl\t%0, %3\n" \
8554 "\t" insn "\n" \
8555 @@ -34,10 +36,10 @@
8556 _ASM_EXTABLE(1b, 4b) \
8557 _ASM_EXTABLE(2b, 4b) \
8558 : "=&a" (oldval), "=&r" (ret), \
8559 - "+m" (*uaddr), "=&r" (tem) \
8560 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8561 : "r" (oparg), "i" (-EFAULT), "1" (0))
8562
8563 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8564 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8565 {
8566 int op = (encoded_op >> 28) & 7;
8567 int cmp = (encoded_op >> 24) & 15;
8568 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8569
8570 switch (op) {
8571 case FUTEX_OP_SET:
8572 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8573 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8574 break;
8575 case FUTEX_OP_ADD:
8576 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8577 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8578 uaddr, oparg);
8579 break;
8580 case FUTEX_OP_OR:
8581 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8582 return ret;
8583 }
8584
8585 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8586 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8587 int newval)
8588 {
8589
8590 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8591 return -ENOSYS;
8592 #endif
8593
8594 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8595 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8596 return -EFAULT;
8597
8598 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8599 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8600 "2:\t.section .fixup, \"ax\"\n"
8601 "3:\tmov %2, %0\n"
8602 "\tjmp 2b\n"
8603 "\t.previous\n"
8604 _ASM_EXTABLE(1b, 3b)
8605 - : "=a" (oldval), "+m" (*uaddr)
8606 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8607 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8608 : "memory"
8609 );
8610 diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8611 --- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8612 +++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8613 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8614 extern void enable_IO_APIC(void);
8615
8616 /* Statistics */
8617 -extern atomic_t irq_err_count;
8618 -extern atomic_t irq_mis_count;
8619 +extern atomic_unchecked_t irq_err_count;
8620 +extern atomic_unchecked_t irq_mis_count;
8621
8622 /* EISA */
8623 extern void eisa_set_level_irq(unsigned int irq);
8624 diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8625 --- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8626 +++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8627 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8628 {
8629 int err;
8630
8631 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8632 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8633 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8634 +#endif
8635 +
8636 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8637 "2:\n"
8638 ".section .fixup,\"ax\"\n"
8639 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8640 {
8641 int err;
8642
8643 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8644 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8645 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8646 +#endif
8647 +
8648 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8649 "2:\n"
8650 ".section .fixup,\"ax\"\n"
8651 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8652 }
8653
8654 /* We need a safe address that is cheap to find and that is already
8655 - in L1 during context switch. The best choices are unfortunately
8656 - different for UP and SMP */
8657 -#ifdef CONFIG_SMP
8658 -#define safe_address (__per_cpu_offset[0])
8659 -#else
8660 -#define safe_address (kstat_cpu(0).cpustat.user)
8661 -#endif
8662 + in L1 during context switch. */
8663 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8664
8665 /*
8666 * These must be called with preempt disabled
8667 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8668 struct thread_info *me = current_thread_info();
8669 preempt_disable();
8670 if (me->status & TS_USEDFPU)
8671 - __save_init_fpu(me->task);
8672 + __save_init_fpu(current);
8673 else
8674 clts();
8675 }
8676 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8677 --- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8678 +++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8679 @@ -3,6 +3,7 @@
8680
8681 #include <linux/string.h>
8682 #include <linux/compiler.h>
8683 +#include <asm/processor.h>
8684
8685 /*
8686 * This file contains the definitions for the x86 IO instructions
8687 @@ -42,6 +43,17 @@
8688
8689 #ifdef __KERNEL__
8690
8691 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8692 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8693 +{
8694 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8695 +}
8696 +
8697 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8698 +{
8699 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8700 +}
8701 +
8702 #include <asm-generic/iomap.h>
8703
8704 #include <linux/vmalloc.h>
8705 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8706 --- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8707 +++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8708 @@ -140,6 +140,17 @@ __OUTS(l)
8709
8710 #include <linux/vmalloc.h>
8711
8712 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8713 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8714 +{
8715 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8716 +}
8717 +
8718 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8719 +{
8720 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8721 +}
8722 +
8723 #include <asm-generic/iomap.h>
8724
8725 void __memcpy_fromio(void *, unsigned long, unsigned);
8726 diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8727 --- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8728 +++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8729 @@ -3,7 +3,7 @@
8730
8731 extern void pci_iommu_shutdown(void);
8732 extern void no_iommu_init(void);
8733 -extern struct dma_map_ops nommu_dma_ops;
8734 +extern const struct dma_map_ops nommu_dma_ops;
8735 extern int force_iommu, no_iommu;
8736 extern int iommu_detected;
8737 extern int iommu_pass_through;
8738 diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8739 --- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8740 +++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8741 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8742 sti; \
8743 sysexit
8744
8745 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8746 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8747 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8748 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8749 +
8750 #else
8751 #define INTERRUPT_RETURN iret
8752 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8753 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8754 --- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8755 +++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8756 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8757 #define BREAKPOINT_INSTRUCTION 0xcc
8758 #define RELATIVEJUMP_INSTRUCTION 0xe9
8759 #define MAX_INSN_SIZE 16
8760 -#define MAX_STACK_SIZE 64
8761 -#define MIN_STACK_SIZE(ADDR) \
8762 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8763 - THREAD_SIZE - (unsigned long)(ADDR))) \
8764 - ? (MAX_STACK_SIZE) \
8765 - : (((unsigned long)current_thread_info()) + \
8766 - THREAD_SIZE - (unsigned long)(ADDR)))
8767 +#define MAX_STACK_SIZE 64UL
8768 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8769
8770 #define flush_insn_slot(p) do { } while (0)
8771
8772 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8773 --- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8774 +++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8775 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8776 const struct trace_print_flags *exit_reasons_str;
8777 };
8778
8779 -extern struct kvm_x86_ops *kvm_x86_ops;
8780 +extern const struct kvm_x86_ops *kvm_x86_ops;
8781
8782 int kvm_mmu_module_init(void);
8783 void kvm_mmu_module_exit(void);
8784 diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8785 --- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8786 +++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8787 @@ -18,26 +18,58 @@ typedef struct {
8788
8789 static inline void local_inc(local_t *l)
8790 {
8791 - asm volatile(_ASM_INC "%0"
8792 + asm volatile(_ASM_INC "%0\n"
8793 +
8794 +#ifdef CONFIG_PAX_REFCOUNT
8795 + "jno 0f\n"
8796 + _ASM_DEC "%0\n"
8797 + "int $4\n0:\n"
8798 + _ASM_EXTABLE(0b, 0b)
8799 +#endif
8800 +
8801 : "+m" (l->a.counter));
8802 }
8803
8804 static inline void local_dec(local_t *l)
8805 {
8806 - asm volatile(_ASM_DEC "%0"
8807 + asm volatile(_ASM_DEC "%0\n"
8808 +
8809 +#ifdef CONFIG_PAX_REFCOUNT
8810 + "jno 0f\n"
8811 + _ASM_INC "%0\n"
8812 + "int $4\n0:\n"
8813 + _ASM_EXTABLE(0b, 0b)
8814 +#endif
8815 +
8816 : "+m" (l->a.counter));
8817 }
8818
8819 static inline void local_add(long i, local_t *l)
8820 {
8821 - asm volatile(_ASM_ADD "%1,%0"
8822 + asm volatile(_ASM_ADD "%1,%0\n"
8823 +
8824 +#ifdef CONFIG_PAX_REFCOUNT
8825 + "jno 0f\n"
8826 + _ASM_SUB "%1,%0\n"
8827 + "int $4\n0:\n"
8828 + _ASM_EXTABLE(0b, 0b)
8829 +#endif
8830 +
8831 : "+m" (l->a.counter)
8832 : "ir" (i));
8833 }
8834
8835 static inline void local_sub(long i, local_t *l)
8836 {
8837 - asm volatile(_ASM_SUB "%1,%0"
8838 + asm volatile(_ASM_SUB "%1,%0\n"
8839 +
8840 +#ifdef CONFIG_PAX_REFCOUNT
8841 + "jno 0f\n"
8842 + _ASM_ADD "%1,%0\n"
8843 + "int $4\n0:\n"
8844 + _ASM_EXTABLE(0b, 0b)
8845 +#endif
8846 +
8847 : "+m" (l->a.counter)
8848 : "ir" (i));
8849 }
8850 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8851 {
8852 unsigned char c;
8853
8854 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8855 + asm volatile(_ASM_SUB "%2,%0\n"
8856 +
8857 +#ifdef CONFIG_PAX_REFCOUNT
8858 + "jno 0f\n"
8859 + _ASM_ADD "%2,%0\n"
8860 + "int $4\n0:\n"
8861 + _ASM_EXTABLE(0b, 0b)
8862 +#endif
8863 +
8864 + "sete %1\n"
8865 : "+m" (l->a.counter), "=qm" (c)
8866 : "ir" (i) : "memory");
8867 return c;
8868 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8869 {
8870 unsigned char c;
8871
8872 - asm volatile(_ASM_DEC "%0; sete %1"
8873 + asm volatile(_ASM_DEC "%0\n"
8874 +
8875 +#ifdef CONFIG_PAX_REFCOUNT
8876 + "jno 0f\n"
8877 + _ASM_INC "%0\n"
8878 + "int $4\n0:\n"
8879 + _ASM_EXTABLE(0b, 0b)
8880 +#endif
8881 +
8882 + "sete %1\n"
8883 : "+m" (l->a.counter), "=qm" (c)
8884 : : "memory");
8885 return c != 0;
8886 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8887 {
8888 unsigned char c;
8889
8890 - asm volatile(_ASM_INC "%0; sete %1"
8891 + asm volatile(_ASM_INC "%0\n"
8892 +
8893 +#ifdef CONFIG_PAX_REFCOUNT
8894 + "jno 0f\n"
8895 + _ASM_DEC "%0\n"
8896 + "int $4\n0:\n"
8897 + _ASM_EXTABLE(0b, 0b)
8898 +#endif
8899 +
8900 + "sete %1\n"
8901 : "+m" (l->a.counter), "=qm" (c)
8902 : : "memory");
8903 return c != 0;
8904 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8905 {
8906 unsigned char c;
8907
8908 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8909 + asm volatile(_ASM_ADD "%2,%0\n"
8910 +
8911 +#ifdef CONFIG_PAX_REFCOUNT
8912 + "jno 0f\n"
8913 + _ASM_SUB "%2,%0\n"
8914 + "int $4\n0:\n"
8915 + _ASM_EXTABLE(0b, 0b)
8916 +#endif
8917 +
8918 + "sets %1\n"
8919 : "+m" (l->a.counter), "=qm" (c)
8920 : "ir" (i) : "memory");
8921 return c;
8922 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8923 #endif
8924 /* Modern 486+ processor */
8925 __i = i;
8926 - asm volatile(_ASM_XADD "%0, %1;"
8927 + asm volatile(_ASM_XADD "%0, %1\n"
8928 +
8929 +#ifdef CONFIG_PAX_REFCOUNT
8930 + "jno 0f\n"
8931 + _ASM_MOV "%0,%1\n"
8932 + "int $4\n0:\n"
8933 + _ASM_EXTABLE(0b, 0b)
8934 +#endif
8935 +
8936 : "+r" (i), "+m" (l->a.counter)
8937 : : "memory");
8938 return i + __i;
8939 diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
8940 --- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8941 +++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8942 @@ -12,13 +12,13 @@ struct device;
8943 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8944
8945 struct microcode_ops {
8946 - enum ucode_state (*request_microcode_user) (int cpu,
8947 + enum ucode_state (* const request_microcode_user) (int cpu,
8948 const void __user *buf, size_t size);
8949
8950 - enum ucode_state (*request_microcode_fw) (int cpu,
8951 + enum ucode_state (* const request_microcode_fw) (int cpu,
8952 struct device *device);
8953
8954 - void (*microcode_fini_cpu) (int cpu);
8955 + void (* const microcode_fini_cpu) (int cpu);
8956
8957 /*
8958 * The generic 'microcode_core' part guarantees that
8959 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8960 extern struct ucode_cpu_info ucode_cpu_info[];
8961
8962 #ifdef CONFIG_MICROCODE_INTEL
8963 -extern struct microcode_ops * __init init_intel_microcode(void);
8964 +extern const struct microcode_ops * __init init_intel_microcode(void);
8965 #else
8966 -static inline struct microcode_ops * __init init_intel_microcode(void)
8967 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8968 {
8969 return NULL;
8970 }
8971 #endif /* CONFIG_MICROCODE_INTEL */
8972
8973 #ifdef CONFIG_MICROCODE_AMD
8974 -extern struct microcode_ops * __init init_amd_microcode(void);
8975 +extern const struct microcode_ops * __init init_amd_microcode(void);
8976 #else
8977 -static inline struct microcode_ops * __init init_amd_microcode(void)
8978 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8979 {
8980 return NULL;
8981 }
8982 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
8983 --- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8984 +++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8985 @@ -5,4 +5,14 @@
8986
8987 #include <asm-generic/mman.h>
8988
8989 +#ifdef __KERNEL__
8990 +#ifndef __ASSEMBLY__
8991 +#ifdef CONFIG_X86_32
8992 +#define arch_mmap_check i386_mmap_check
8993 +int i386_mmap_check(unsigned long addr, unsigned long len,
8994 + unsigned long flags);
8995 +#endif
8996 +#endif
8997 +#endif
8998 +
8999 #endif /* _ASM_X86_MMAN_H */
9000 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9001 --- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9002 +++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-17 19:46:53.000000000 -0400
9003 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
9004
9005 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9006 {
9007 +
9008 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9009 + unsigned int i;
9010 + pgd_t *pgd;
9011 +
9012 + pax_open_kernel();
9013 + pgd = get_cpu_pgd(smp_processor_id());
9014 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9015 + if (paravirt_enabled())
9016 + set_pgd(pgd+i, native_make_pgd(0));
9017 + else
9018 + pgd[i] = native_make_pgd(0);
9019 + pax_close_kernel();
9020 +#endif
9021 +
9022 #ifdef CONFIG_SMP
9023 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9024 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9025 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
9026 struct task_struct *tsk)
9027 {
9028 unsigned cpu = smp_processor_id();
9029 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9030 + int tlbstate = TLBSTATE_OK;
9031 +#endif
9032
9033 if (likely(prev != next)) {
9034 #ifdef CONFIG_SMP
9035 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9036 + tlbstate = percpu_read(cpu_tlbstate.state);
9037 +#endif
9038 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9039 percpu_write(cpu_tlbstate.active_mm, next);
9040 #endif
9041 cpumask_set_cpu(cpu, mm_cpumask(next));
9042
9043 /* Re-load page tables */
9044 +#ifdef CONFIG_PAX_PER_CPU_PGD
9045 + pax_open_kernel();
9046 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9047 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9048 + pax_close_kernel();
9049 + load_cr3(get_cpu_pgd(cpu));
9050 +#else
9051 load_cr3(next->pgd);
9052 +#endif
9053
9054 /* stop flush ipis for the previous mm */
9055 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9056 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9057 */
9058 if (unlikely(prev->context.ldt != next->context.ldt))
9059 load_LDT_nolock(&next->context);
9060 - }
9061 +
9062 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9063 + if (!nx_enabled) {
9064 + smp_mb__before_clear_bit();
9065 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9066 + smp_mb__after_clear_bit();
9067 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9068 + }
9069 +#endif
9070 +
9071 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9072 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9073 + prev->context.user_cs_limit != next->context.user_cs_limit))
9074 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9075 #ifdef CONFIG_SMP
9076 + else if (unlikely(tlbstate != TLBSTATE_OK))
9077 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9078 +#endif
9079 +#endif
9080 +
9081 + }
9082 else {
9083 +
9084 +#ifdef CONFIG_PAX_PER_CPU_PGD
9085 + pax_open_kernel();
9086 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9087 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9088 + pax_close_kernel();
9089 + load_cr3(get_cpu_pgd(cpu));
9090 +#endif
9091 +
9092 +#ifdef CONFIG_SMP
9093 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9094 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9095
9096 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9097 * tlb flush IPI delivery. We must reload CR3
9098 * to make sure to use no freed page tables.
9099 */
9100 +
9101 +#ifndef CONFIG_PAX_PER_CPU_PGD
9102 load_cr3(next->pgd);
9103 +#endif
9104 +
9105 load_LDT_nolock(&next->context);
9106 +
9107 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9108 + if (!nx_enabled)
9109 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9110 +#endif
9111 +
9112 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9113 +#ifdef CONFIG_PAX_PAGEEXEC
9114 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9115 +#endif
9116 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9117 +#endif
9118 +
9119 }
9120 - }
9121 #endif
9122 + }
9123 }
9124
9125 #define activate_mm(prev, next) \
9126 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9127 --- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9128 +++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9129 @@ -9,10 +9,23 @@
9130 * we put the segment information here.
9131 */
9132 typedef struct {
9133 - void *ldt;
9134 + struct desc_struct *ldt;
9135 int size;
9136 struct mutex lock;
9137 - void *vdso;
9138 + unsigned long vdso;
9139 +
9140 +#ifdef CONFIG_X86_32
9141 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9142 + unsigned long user_cs_base;
9143 + unsigned long user_cs_limit;
9144 +
9145 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9146 + cpumask_t cpu_user_cs_mask;
9147 +#endif
9148 +
9149 +#endif
9150 +#endif
9151 +
9152 } mm_context_t;
9153
9154 #ifdef CONFIG_SMP
9155 diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9156 --- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9157 +++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9158 @@ -5,6 +5,7 @@
9159
9160 #ifdef CONFIG_X86_64
9161 /* X86_64 does not define MODULE_PROC_FAMILY */
9162 +#define MODULE_PROC_FAMILY ""
9163 #elif defined CONFIG_M386
9164 #define MODULE_PROC_FAMILY "386 "
9165 #elif defined CONFIG_M486
9166 @@ -59,13 +60,36 @@
9167 #error unknown processor family
9168 #endif
9169
9170 -#ifdef CONFIG_X86_32
9171 -# ifdef CONFIG_4KSTACKS
9172 -# define MODULE_STACKSIZE "4KSTACKS "
9173 -# else
9174 -# define MODULE_STACKSIZE ""
9175 -# endif
9176 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9177 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9178 +#define MODULE_PAX_UDEREF "UDEREF "
9179 +#else
9180 +#define MODULE_PAX_UDEREF ""
9181 +#endif
9182 +
9183 +#ifdef CONFIG_PAX_KERNEXEC
9184 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9185 +#else
9186 +#define MODULE_PAX_KERNEXEC ""
9187 +#endif
9188 +
9189 +#ifdef CONFIG_PAX_REFCOUNT
9190 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9191 +#else
9192 +#define MODULE_PAX_REFCOUNT ""
9193 #endif
9194
9195 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9196 +#define MODULE_STACKSIZE "4KSTACKS "
9197 +#else
9198 +#define MODULE_STACKSIZE ""
9199 +#endif
9200 +
9201 +#ifdef CONFIG_GRKERNSEC
9202 +#define MODULE_GRSEC "GRSECURITY "
9203 +#else
9204 +#define MODULE_GRSEC ""
9205 +#endif
9206 +
9207 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9208 +
9209 #endif /* _ASM_X86_MODULE_H */
9210 diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9211 --- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9212 +++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9213 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9214
9215 /* duplicated to the one in bootmem.h */
9216 extern unsigned long max_pfn;
9217 -extern unsigned long phys_base;
9218 +extern const unsigned long phys_base;
9219
9220 extern unsigned long __phys_addr(unsigned long);
9221 #define __phys_reloc_hide(x) (x)
9222 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9223 --- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9224 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9225 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9226 pv_mmu_ops.set_fixmap(idx, phys, flags);
9227 }
9228
9229 +#ifdef CONFIG_PAX_KERNEXEC
9230 +static inline unsigned long pax_open_kernel(void)
9231 +{
9232 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9233 +}
9234 +
9235 +static inline unsigned long pax_close_kernel(void)
9236 +{
9237 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9238 +}
9239 +#else
9240 +static inline unsigned long pax_open_kernel(void) { return 0; }
9241 +static inline unsigned long pax_close_kernel(void) { return 0; }
9242 +#endif
9243 +
9244 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9245
9246 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9247 @@ -945,7 +960,7 @@ extern void default_banner(void);
9248
9249 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9250 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9251 -#define PARA_INDIRECT(addr) *%cs:addr
9252 +#define PARA_INDIRECT(addr) *%ss:addr
9253 #endif
9254
9255 #define INTERRUPT_RETURN \
9256 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9257 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9258 CLBR_NONE, \
9259 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9260 +
9261 +#define GET_CR0_INTO_RDI \
9262 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9263 + mov %rax,%rdi
9264 +
9265 +#define SET_RDI_INTO_CR0 \
9266 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9267 +
9268 +#define GET_CR3_INTO_RDI \
9269 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9270 + mov %rax,%rdi
9271 +
9272 +#define SET_RDI_INTO_CR3 \
9273 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9274 +
9275 #endif /* CONFIG_X86_32 */
9276
9277 #endif /* __ASSEMBLY__ */
9278 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9279 --- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9280 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9281 @@ -78,19 +78,19 @@ struct pv_init_ops {
9282 */
9283 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9284 unsigned long addr, unsigned len);
9285 -};
9286 +} __no_const;
9287
9288
9289 struct pv_lazy_ops {
9290 /* Set deferred update mode, used for batching operations. */
9291 void (*enter)(void);
9292 void (*leave)(void);
9293 -};
9294 +} __no_const;
9295
9296 struct pv_time_ops {
9297 unsigned long long (*sched_clock)(void);
9298 unsigned long (*get_tsc_khz)(void);
9299 -};
9300 +} __no_const;
9301
9302 struct pv_cpu_ops {
9303 /* hooks for various privileged instructions */
9304 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
9305
9306 void (*start_context_switch)(struct task_struct *prev);
9307 void (*end_context_switch)(struct task_struct *next);
9308 -};
9309 +} __no_const;
9310
9311 struct pv_irq_ops {
9312 /*
9313 @@ -217,7 +217,7 @@ struct pv_apic_ops {
9314 unsigned long start_eip,
9315 unsigned long start_esp);
9316 #endif
9317 -};
9318 +} __no_const;
9319
9320 struct pv_mmu_ops {
9321 unsigned long (*read_cr2)(void);
9322 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9323 an mfn. We can tell which is which from the index. */
9324 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9325 phys_addr_t phys, pgprot_t flags);
9326 +
9327 +#ifdef CONFIG_PAX_KERNEXEC
9328 + unsigned long (*pax_open_kernel)(void);
9329 + unsigned long (*pax_close_kernel)(void);
9330 +#endif
9331 +
9332 };
9333
9334 struct raw_spinlock;
9335 @@ -326,7 +332,7 @@ struct pv_lock_ops {
9336 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9337 int (*spin_trylock)(struct raw_spinlock *lock);
9338 void (*spin_unlock)(struct raw_spinlock *lock);
9339 -};
9340 +} __no_const;
9341
9342 /* This contains all the paravirt structures: we get a convenient
9343 * number for each function using the offset which we use to indicate
9344 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9345 --- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9346 +++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9347 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9348 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9349
9350 struct pci_raw_ops {
9351 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9352 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9353 int reg, int len, u32 *val);
9354 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9355 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9356 int reg, int len, u32 val);
9357 };
9358
9359 -extern struct pci_raw_ops *raw_pci_ops;
9360 -extern struct pci_raw_ops *raw_pci_ext_ops;
9361 +extern const struct pci_raw_ops *raw_pci_ops;
9362 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9363
9364 -extern struct pci_raw_ops pci_direct_conf1;
9365 +extern const struct pci_raw_ops pci_direct_conf1;
9366 extern bool port_cf9_safe;
9367
9368 /* arch_initcall level */
9369 diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9370 --- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9371 +++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9372 @@ -78,6 +78,7 @@ do { \
9373 if (0) { \
9374 T__ tmp__; \
9375 tmp__ = (val); \
9376 + (void)tmp__; \
9377 } \
9378 switch (sizeof(var)) { \
9379 case 1: \
9380 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9381 --- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9382 +++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9383 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9384 pmd_t *pmd, pte_t *pte)
9385 {
9386 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9387 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9388 +}
9389 +
9390 +static inline void pmd_populate_user(struct mm_struct *mm,
9391 + pmd_t *pmd, pte_t *pte)
9392 +{
9393 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9394 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9395 }
9396
9397 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9398 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9399 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9400 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9401
9402 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9403 {
9404 + pax_open_kernel();
9405 *pmdp = pmd;
9406 + pax_close_kernel();
9407 }
9408
9409 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9410 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9411 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9412 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9413 @@ -26,9 +26,6 @@
9414 struct mm_struct;
9415 struct vm_area_struct;
9416
9417 -extern pgd_t swapper_pg_dir[1024];
9418 -extern pgd_t trampoline_pg_dir[1024];
9419 -
9420 static inline void pgtable_cache_init(void) { }
9421 static inline void check_pgt_cache(void) { }
9422 void paging_init(void);
9423 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9424 # include <asm/pgtable-2level.h>
9425 #endif
9426
9427 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9428 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9429 +#ifdef CONFIG_X86_PAE
9430 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9431 +#endif
9432 +
9433 #if defined(CONFIG_HIGHPTE)
9434 #define __KM_PTE \
9435 (in_nmi() ? KM_NMI_PTE : \
9436 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9437 /* Clear a kernel PTE and flush it from the TLB */
9438 #define kpte_clear_flush(ptep, vaddr) \
9439 do { \
9440 + pax_open_kernel(); \
9441 pte_clear(&init_mm, (vaddr), (ptep)); \
9442 + pax_close_kernel(); \
9443 __flush_tlb_one((vaddr)); \
9444 } while (0)
9445
9446 @@ -85,6 +90,9 @@ do { \
9447
9448 #endif /* !__ASSEMBLY__ */
9449
9450 +#define HAVE_ARCH_UNMAPPED_AREA
9451 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9452 +
9453 /*
9454 * kern_addr_valid() is (1) for FLATMEM and (0) for
9455 * SPARSEMEM and DISCONTIGMEM
9456 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9457 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9458 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9459 @@ -8,7 +8,7 @@
9460 */
9461 #ifdef CONFIG_X86_PAE
9462 # include <asm/pgtable-3level_types.h>
9463 -# define PMD_SIZE (1UL << PMD_SHIFT)
9464 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9465 # define PMD_MASK (~(PMD_SIZE - 1))
9466 #else
9467 # include <asm/pgtable-2level_types.h>
9468 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9469 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9470 #endif
9471
9472 +#ifdef CONFIG_PAX_KERNEXEC
9473 +#ifndef __ASSEMBLY__
9474 +extern unsigned char MODULES_EXEC_VADDR[];
9475 +extern unsigned char MODULES_EXEC_END[];
9476 +#endif
9477 +#include <asm/boot.h>
9478 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9479 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9480 +#else
9481 +#define ktla_ktva(addr) (addr)
9482 +#define ktva_ktla(addr) (addr)
9483 +#endif
9484 +
9485 #define MODULES_VADDR VMALLOC_START
9486 #define MODULES_END VMALLOC_END
9487 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9488 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9489 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9490 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9491 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9492
9493 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9494 {
9495 + pax_open_kernel();
9496 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9497 + pax_close_kernel();
9498 }
9499
9500 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9501 {
9502 + pax_open_kernel();
9503 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9504 + pax_close_kernel();
9505 }
9506
9507 /*
9508 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9509 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9510 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9511 @@ -16,10 +16,13 @@
9512
9513 extern pud_t level3_kernel_pgt[512];
9514 extern pud_t level3_ident_pgt[512];
9515 +extern pud_t level3_vmalloc_pgt[512];
9516 +extern pud_t level3_vmemmap_pgt[512];
9517 +extern pud_t level2_vmemmap_pgt[512];
9518 extern pmd_t level2_kernel_pgt[512];
9519 extern pmd_t level2_fixmap_pgt[512];
9520 -extern pmd_t level2_ident_pgt[512];
9521 -extern pgd_t init_level4_pgt[];
9522 +extern pmd_t level2_ident_pgt[512*2];
9523 +extern pgd_t init_level4_pgt[512];
9524
9525 #define swapper_pg_dir init_level4_pgt
9526
9527 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9528
9529 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9530 {
9531 + pax_open_kernel();
9532 *pmdp = pmd;
9533 + pax_close_kernel();
9534 }
9535
9536 static inline void native_pmd_clear(pmd_t *pmd)
9537 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9538
9539 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9540 {
9541 + pax_open_kernel();
9542 *pgdp = pgd;
9543 + pax_close_kernel();
9544 }
9545
9546 static inline void native_pgd_clear(pgd_t *pgd)
9547 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9548 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9549 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9550 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9551 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9552 #define MODULES_END _AC(0xffffffffff000000, UL)
9553 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9554 +#define MODULES_EXEC_VADDR MODULES_VADDR
9555 +#define MODULES_EXEC_END MODULES_END
9556 +
9557 +#define ktla_ktva(addr) (addr)
9558 +#define ktva_ktla(addr) (addr)
9559
9560 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9561 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9562 --- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9563 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9564 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9565
9566 #define arch_end_context_switch(prev) do {} while(0)
9567
9568 +#define pax_open_kernel() native_pax_open_kernel()
9569 +#define pax_close_kernel() native_pax_close_kernel()
9570 #endif /* CONFIG_PARAVIRT */
9571
9572 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9573 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9574 +
9575 +#ifdef CONFIG_PAX_KERNEXEC
9576 +static inline unsigned long native_pax_open_kernel(void)
9577 +{
9578 + unsigned long cr0;
9579 +
9580 + preempt_disable();
9581 + barrier();
9582 + cr0 = read_cr0() ^ X86_CR0_WP;
9583 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9584 + write_cr0(cr0);
9585 + return cr0 ^ X86_CR0_WP;
9586 +}
9587 +
9588 +static inline unsigned long native_pax_close_kernel(void)
9589 +{
9590 + unsigned long cr0;
9591 +
9592 + cr0 = read_cr0() ^ X86_CR0_WP;
9593 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9594 + write_cr0(cr0);
9595 + barrier();
9596 + preempt_enable_no_resched();
9597 + return cr0 ^ X86_CR0_WP;
9598 +}
9599 +#else
9600 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9601 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9602 +#endif
9603 +
9604 /*
9605 * The following only work if pte_present() is true.
9606 * Undefined behaviour if not..
9607 */
9608 +static inline int pte_user(pte_t pte)
9609 +{
9610 + return pte_val(pte) & _PAGE_USER;
9611 +}
9612 +
9613 static inline int pte_dirty(pte_t pte)
9614 {
9615 return pte_flags(pte) & _PAGE_DIRTY;
9616 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9617 return pte_clear_flags(pte, _PAGE_RW);
9618 }
9619
9620 +static inline pte_t pte_mkread(pte_t pte)
9621 +{
9622 + return __pte(pte_val(pte) | _PAGE_USER);
9623 +}
9624 +
9625 static inline pte_t pte_mkexec(pte_t pte)
9626 {
9627 - return pte_clear_flags(pte, _PAGE_NX);
9628 +#ifdef CONFIG_X86_PAE
9629 + if (__supported_pte_mask & _PAGE_NX)
9630 + return pte_clear_flags(pte, _PAGE_NX);
9631 + else
9632 +#endif
9633 + return pte_set_flags(pte, _PAGE_USER);
9634 +}
9635 +
9636 +static inline pte_t pte_exprotect(pte_t pte)
9637 +{
9638 +#ifdef CONFIG_X86_PAE
9639 + if (__supported_pte_mask & _PAGE_NX)
9640 + return pte_set_flags(pte, _PAGE_NX);
9641 + else
9642 +#endif
9643 + return pte_clear_flags(pte, _PAGE_USER);
9644 }
9645
9646 static inline pte_t pte_mkdirty(pte_t pte)
9647 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9648 #endif
9649
9650 #ifndef __ASSEMBLY__
9651 +
9652 +#ifdef CONFIG_PAX_PER_CPU_PGD
9653 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9654 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9655 +{
9656 + return cpu_pgd[cpu];
9657 +}
9658 +#endif
9659 +
9660 #include <linux/mm_types.h>
9661
9662 static inline int pte_none(pte_t pte)
9663 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9664
9665 static inline int pgd_bad(pgd_t pgd)
9666 {
9667 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9668 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9669 }
9670
9671 static inline int pgd_none(pgd_t pgd)
9672 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9673 * pgd_offset() returns a (pgd_t *)
9674 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9675 */
9676 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9677 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9678 +
9679 +#ifdef CONFIG_PAX_PER_CPU_PGD
9680 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9681 +#endif
9682 +
9683 /*
9684 * a shortcut which implies the use of the kernel's pgd, instead
9685 * of a process's
9686 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9687 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9688 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9689
9690 +#ifdef CONFIG_X86_32
9691 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9692 +#else
9693 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9694 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9695 +
9696 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9697 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9698 +#else
9699 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9700 +#endif
9701 +
9702 +#endif
9703 +
9704 #ifndef __ASSEMBLY__
9705
9706 extern int direct_gbpages;
9707 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9708 * dst and src can be on the same page, but the range must not overlap,
9709 * and must not cross a page boundary.
9710 */
9711 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9712 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9713 {
9714 - memcpy(dst, src, count * sizeof(pgd_t));
9715 + pax_open_kernel();
9716 + while (count--)
9717 + *dst++ = *src++;
9718 + pax_close_kernel();
9719 }
9720
9721 +#ifdef CONFIG_PAX_PER_CPU_PGD
9722 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9723 +#endif
9724 +
9725 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9726 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9727 +#else
9728 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9729 +#endif
9730
9731 #include <asm-generic/pgtable.h>
9732 #endif /* __ASSEMBLY__ */
9733 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9734 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9735 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9736 @@ -16,12 +16,11 @@
9737 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9738 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9739 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9740 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9741 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9742 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9743 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9744 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9745 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9746 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9747 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9748 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9749
9750 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9751 @@ -39,7 +38,6 @@
9752 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9753 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9754 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9755 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9756 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9757 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9758 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9759 @@ -55,8 +53,10 @@
9760
9761 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9762 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9763 -#else
9764 +#elif defined(CONFIG_KMEMCHECK)
9765 #define _PAGE_NX (_AT(pteval_t, 0))
9766 +#else
9767 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9768 #endif
9769
9770 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9771 @@ -93,6 +93,9 @@
9772 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9773 _PAGE_ACCESSED)
9774
9775 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9776 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9777 +
9778 #define __PAGE_KERNEL_EXEC \
9779 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9780 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9781 @@ -103,8 +106,8 @@
9782 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9783 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9784 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9785 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9786 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9787 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9788 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9789 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9790 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9791 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9792 @@ -163,8 +166,8 @@
9793  * bits are combined, this will allow user to access the high address mapped
9794 * VDSO in the presence of CONFIG_COMPAT_VDSO
9795 */
9796 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9797 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9798 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9799 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9800 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9801 #endif
9802
9803 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9804 {
9805 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9806 }
9807 +#endif
9808
9809 +#if PAGETABLE_LEVELS == 3
9810 +#include <asm-generic/pgtable-nopud.h>
9811 +#endif
9812 +
9813 +#if PAGETABLE_LEVELS == 2
9814 +#include <asm-generic/pgtable-nopmd.h>
9815 +#endif
9816 +
9817 +#ifndef __ASSEMBLY__
9818 #if PAGETABLE_LEVELS > 3
9819 typedef struct { pudval_t pud; } pud_t;
9820
9821 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9822 return pud.pud;
9823 }
9824 #else
9825 -#include <asm-generic/pgtable-nopud.h>
9826 -
9827 static inline pudval_t native_pud_val(pud_t pud)
9828 {
9829 return native_pgd_val(pud.pgd);
9830 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9831 return pmd.pmd;
9832 }
9833 #else
9834 -#include <asm-generic/pgtable-nopmd.h>
9835 -
9836 static inline pmdval_t native_pmd_val(pmd_t pmd)
9837 {
9838 return native_pgd_val(pmd.pud.pgd);
9839 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9840
9841 extern pteval_t __supported_pte_mask;
9842 extern void set_nx(void);
9843 +
9844 +#ifdef CONFIG_X86_32
9845 +#ifdef CONFIG_X86_PAE
9846 extern int nx_enabled;
9847 +#else
9848 +#define nx_enabled (0)
9849 +#endif
9850 +#else
9851 +#define nx_enabled (1)
9852 +#endif
9853
9854 #define pgprot_writecombine pgprot_writecombine
9855 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9856 diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
9857 --- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9858 +++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9859 @@ -272,7 +272,7 @@ struct tss_struct {
9860
9861 } ____cacheline_aligned;
9862
9863 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9864 +extern struct tss_struct init_tss[NR_CPUS];
9865
9866 /*
9867 * Save the original ist values for checking stack pointers during debugging
9868 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9869 */
9870 #define TASK_SIZE PAGE_OFFSET
9871 #define TASK_SIZE_MAX TASK_SIZE
9872 +
9873 +#ifdef CONFIG_PAX_SEGMEXEC
9874 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9875 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9876 +#else
9877 #define STACK_TOP TASK_SIZE
9878 -#define STACK_TOP_MAX STACK_TOP
9879 +#endif
9880 +
9881 +#define STACK_TOP_MAX TASK_SIZE
9882
9883 #define INIT_THREAD { \
9884 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9885 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9886 .vm86_info = NULL, \
9887 .sysenter_cs = __KERNEL_CS, \
9888 .io_bitmap_ptr = NULL, \
9889 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9890 */
9891 #define INIT_TSS { \
9892 .x86_tss = { \
9893 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9894 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9895 .ss0 = __KERNEL_DS, \
9896 .ss1 = __KERNEL_CS, \
9897 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9898 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9899 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9900
9901 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9902 -#define KSTK_TOP(info) \
9903 -({ \
9904 - unsigned long *__ptr = (unsigned long *)(info); \
9905 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9906 -})
9907 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9908
9909 /*
9910 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9911 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9912 #define task_pt_regs(task) \
9913 ({ \
9914 struct pt_regs *__regs__; \
9915 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9916 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9917 __regs__ - 1; \
9918 })
9919
9920 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9921 /*
9922 * User space process size. 47bits minus one guard page.
9923 */
9924 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9925 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9926
9927 /* This decides where the kernel will search for a free chunk of vm
9928 * space during mmap's.
9929 */
9930 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9931 - 0xc0000000 : 0xFFFFe000)
9932 + 0xc0000000 : 0xFFFFf000)
9933
9934 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9935 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9936 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9937 #define STACK_TOP_MAX TASK_SIZE_MAX
9938
9939 #define INIT_THREAD { \
9940 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9941 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9942 }
9943
9944 #define INIT_TSS { \
9945 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9946 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9947 }
9948
9949 /*
9950 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9951 */
9952 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9953
9954 +#ifdef CONFIG_PAX_SEGMEXEC
9955 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9956 +#endif
9957 +
9958 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9959
9960 /* Get/set a process' ability to use the timestamp counter instruction */
9961 diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
9962 --- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9963 +++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9964 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9965 }
9966
9967 /*
9968 - * user_mode_vm(regs) determines whether a register set came from user mode.
9969 + * user_mode(regs) determines whether a register set came from user mode.
9970 * This is true if V8086 mode was enabled OR if the register set was from
9971 * protected mode with RPL-3 CS value. This tricky test checks that with
9972 * one comparison. Many places in the kernel can bypass this full check
9973 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9974 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9975 + * be used.
9976 */
9977 -static inline int user_mode(struct pt_regs *regs)
9978 +static inline int user_mode_novm(struct pt_regs *regs)
9979 {
9980 #ifdef CONFIG_X86_32
9981 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9982 #else
9983 - return !!(regs->cs & 3);
9984 + return !!(regs->cs & SEGMENT_RPL_MASK);
9985 #endif
9986 }
9987
9988 -static inline int user_mode_vm(struct pt_regs *regs)
9989 +static inline int user_mode(struct pt_regs *regs)
9990 {
9991 #ifdef CONFIG_X86_32
9992 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9993 USER_RPL;
9994 #else
9995 - return user_mode(regs);
9996 + return user_mode_novm(regs);
9997 #endif
9998 }
9999
10000 diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10001 --- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10002 +++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10003 @@ -6,19 +6,19 @@
10004 struct pt_regs;
10005
10006 struct machine_ops {
10007 - void (*restart)(char *cmd);
10008 - void (*halt)(void);
10009 - void (*power_off)(void);
10010 + void (* __noreturn restart)(char *cmd);
10011 + void (* __noreturn halt)(void);
10012 + void (* __noreturn power_off)(void);
10013 void (*shutdown)(void);
10014 void (*crash_shutdown)(struct pt_regs *);
10015 - void (*emergency_restart)(void);
10016 -};
10017 + void (* __noreturn emergency_restart)(void);
10018 +} __no_const;
10019
10020 extern struct machine_ops machine_ops;
10021
10022 void native_machine_crash_shutdown(struct pt_regs *regs);
10023 void native_machine_shutdown(void);
10024 -void machine_real_restart(const unsigned char *code, int length);
10025 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10026
10027 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10028 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10029 diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10030 --- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10031 +++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10032 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10033 {
10034 asm volatile("# beginning down_read\n\t"
10035 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10036 +
10037 +#ifdef CONFIG_PAX_REFCOUNT
10038 + "jno 0f\n"
10039 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10040 + "int $4\n0:\n"
10041 + _ASM_EXTABLE(0b, 0b)
10042 +#endif
10043 +
10044 /* adds 0x00000001, returns the old value */
10045 " jns 1f\n"
10046 " call call_rwsem_down_read_failed\n"
10047 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10048 "1:\n\t"
10049 " mov %1,%2\n\t"
10050 " add %3,%2\n\t"
10051 +
10052 +#ifdef CONFIG_PAX_REFCOUNT
10053 + "jno 0f\n"
10054 + "sub %3,%2\n"
10055 + "int $4\n0:\n"
10056 + _ASM_EXTABLE(0b, 0b)
10057 +#endif
10058 +
10059 " jle 2f\n\t"
10060 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10061 " jnz 1b\n\t"
10062 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10063 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10064 asm volatile("# beginning down_write\n\t"
10065 LOCK_PREFIX " xadd %1,(%2)\n\t"
10066 +
10067 +#ifdef CONFIG_PAX_REFCOUNT
10068 + "jno 0f\n"
10069 + "mov %1,(%2)\n"
10070 + "int $4\n0:\n"
10071 + _ASM_EXTABLE(0b, 0b)
10072 +#endif
10073 +
10074 /* subtract 0x0000ffff, returns the old value */
10075 " test %1,%1\n\t"
10076 /* was the count 0 before? */
10077 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10078 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10079 asm volatile("# beginning __up_read\n\t"
10080 LOCK_PREFIX " xadd %1,(%2)\n\t"
10081 +
10082 +#ifdef CONFIG_PAX_REFCOUNT
10083 + "jno 0f\n"
10084 + "mov %1,(%2)\n"
10085 + "int $4\n0:\n"
10086 + _ASM_EXTABLE(0b, 0b)
10087 +#endif
10088 +
10089 /* subtracts 1, returns the old value */
10090 " jns 1f\n\t"
10091 " call call_rwsem_wake\n"
10092 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10093 rwsem_count_t tmp;
10094 asm volatile("# beginning __up_write\n\t"
10095 LOCK_PREFIX " xadd %1,(%2)\n\t"
10096 +
10097 +#ifdef CONFIG_PAX_REFCOUNT
10098 + "jno 0f\n"
10099 + "mov %1,(%2)\n"
10100 + "int $4\n0:\n"
10101 + _ASM_EXTABLE(0b, 0b)
10102 +#endif
10103 +
10104 /* tries to transition
10105 0xffff0001 -> 0x00000000 */
10106 " jz 1f\n"
10107 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10108 {
10109 asm volatile("# beginning __downgrade_write\n\t"
10110 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10111 +
10112 +#ifdef CONFIG_PAX_REFCOUNT
10113 + "jno 0f\n"
10114 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10115 + "int $4\n0:\n"
10116 + _ASM_EXTABLE(0b, 0b)
10117 +#endif
10118 +
10119 /*
10120 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10121 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10122 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10123 static inline void rwsem_atomic_add(rwsem_count_t delta,
10124 struct rw_semaphore *sem)
10125 {
10126 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10127 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10128 +
10129 +#ifdef CONFIG_PAX_REFCOUNT
10130 + "jno 0f\n"
10131 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10132 + "int $4\n0:\n"
10133 + _ASM_EXTABLE(0b, 0b)
10134 +#endif
10135 +
10136 : "+m" (sem->count)
10137 : "er" (delta));
10138 }
10139 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10140 {
10141 rwsem_count_t tmp = delta;
10142
10143 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10144 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10145 +
10146 +#ifdef CONFIG_PAX_REFCOUNT
10147 + "jno 0f\n"
10148 + "mov %0,%1\n"
10149 + "int $4\n0:\n"
10150 + _ASM_EXTABLE(0b, 0b)
10151 +#endif
10152 +
10153 : "+r" (tmp), "+m" (sem->count)
10154 : : "memory");
10155
10156 diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10157 --- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10158 +++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10159 @@ -62,8 +62,8 @@
10160 * 26 - ESPFIX small SS
10161 * 27 - per-cpu [ offset to per-cpu data area ]
10162 * 28 - stack_canary-20 [ for stack protector ]
10163 - * 29 - unused
10164 - * 30 - unused
10165 + * 29 - PCI BIOS CS
10166 + * 30 - PCI BIOS DS
10167 * 31 - TSS for double fault handler
10168 */
10169 #define GDT_ENTRY_TLS_MIN 6
10170 @@ -77,6 +77,8 @@
10171
10172 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10173
10174 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10175 +
10176 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10177
10178 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10179 @@ -88,7 +90,7 @@
10180 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10181 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10182
10183 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10184 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10185 #ifdef CONFIG_SMP
10186 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10187 #else
10188 @@ -102,6 +104,12 @@
10189 #define __KERNEL_STACK_CANARY 0
10190 #endif
10191
10192 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10193 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10194 +
10195 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10196 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10197 +
10198 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10199
10200 /*
10201 @@ -139,7 +147,7 @@
10202 */
10203
10204 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10205 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10206 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10207
10208
10209 #else
10210 @@ -163,6 +171,8 @@
10211 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10212 #define __USER32_DS __USER_DS
10213
10214 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10215 +
10216 #define GDT_ENTRY_TSS 8 /* needs two entries */
10217 #define GDT_ENTRY_LDT 10 /* needs two entries */
10218 #define GDT_ENTRY_TLS_MIN 12
10219 @@ -183,6 +193,7 @@
10220 #endif
10221
10222 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10223 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10224 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10225 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10226 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10227 diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10228 --- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10229 +++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10230 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10231 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10232 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10233 DECLARE_PER_CPU(u16, cpu_llc_id);
10234 -DECLARE_PER_CPU(int, cpu_number);
10235 +DECLARE_PER_CPU(unsigned int, cpu_number);
10236
10237 static inline struct cpumask *cpu_sibling_mask(int cpu)
10238 {
10239 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10240 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10241
10242 /* Static state in head.S used to set up a CPU */
10243 -extern struct {
10244 - void *sp;
10245 - unsigned short ss;
10246 -} stack_start;
10247 +extern unsigned long stack_start; /* Initial stack pointer address */
10248
10249 struct smp_ops {
10250 void (*smp_prepare_boot_cpu)(void);
10251 @@ -60,7 +57,7 @@ struct smp_ops {
10252
10253 void (*send_call_func_ipi)(const struct cpumask *mask);
10254 void (*send_call_func_single_ipi)(int cpu);
10255 -};
10256 +} __no_const;
10257
10258 /* Globals due to paravirt */
10259 extern void set_cpu_sibling_map(int cpu);
10260 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10261 extern int safe_smp_processor_id(void);
10262
10263 #elif defined(CONFIG_X86_64_SMP)
10264 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10265 -
10266 -#define stack_smp_processor_id() \
10267 -({ \
10268 - struct thread_info *ti; \
10269 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10270 - ti->cpu; \
10271 -})
10272 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10273 +#define stack_smp_processor_id() raw_smp_processor_id()
10274 #define safe_smp_processor_id() smp_processor_id()
10275
10276 #endif
10277 diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10278 --- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10279 +++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10280 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10281 static inline void __raw_read_lock(raw_rwlock_t *rw)
10282 {
10283 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10284 +
10285 +#ifdef CONFIG_PAX_REFCOUNT
10286 + "jno 0f\n"
10287 + LOCK_PREFIX " addl $1,(%0)\n"
10288 + "int $4\n0:\n"
10289 + _ASM_EXTABLE(0b, 0b)
10290 +#endif
10291 +
10292 "jns 1f\n"
10293 "call __read_lock_failed\n\t"
10294 "1:\n"
10295 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10296 static inline void __raw_write_lock(raw_rwlock_t *rw)
10297 {
10298 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10299 +
10300 +#ifdef CONFIG_PAX_REFCOUNT
10301 + "jno 0f\n"
10302 + LOCK_PREFIX " addl %1,(%0)\n"
10303 + "int $4\n0:\n"
10304 + _ASM_EXTABLE(0b, 0b)
10305 +#endif
10306 +
10307 "jz 1f\n"
10308 "call __write_lock_failed\n\t"
10309 "1:\n"
10310 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10311
10312 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10313 {
10314 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10315 + asm volatile(LOCK_PREFIX "incl %0\n"
10316 +
10317 +#ifdef CONFIG_PAX_REFCOUNT
10318 + "jno 0f\n"
10319 + LOCK_PREFIX "decl %0\n"
10320 + "int $4\n0:\n"
10321 + _ASM_EXTABLE(0b, 0b)
10322 +#endif
10323 +
10324 + :"+m" (rw->lock) : : "memory");
10325 }
10326
10327 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10328 {
10329 - asm volatile(LOCK_PREFIX "addl %1, %0"
10330 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10331 +
10332 +#ifdef CONFIG_PAX_REFCOUNT
10333 + "jno 0f\n"
10334 + LOCK_PREFIX "subl %1, %0\n"
10335 + "int $4\n0:\n"
10336 + _ASM_EXTABLE(0b, 0b)
10337 +#endif
10338 +
10339 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10340 }
10341
10342 diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10343 --- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10344 +++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10345 @@ -48,7 +48,7 @@
10346 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10347 */
10348 #define GDT_STACK_CANARY_INIT \
10349 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10350 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10351
10352 /*
10353 * Initialize the stackprotector canary value.
10354 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10355
10356 static inline void load_stack_canary_segment(void)
10357 {
10358 -#ifdef CONFIG_X86_32
10359 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10360 asm volatile ("mov %0, %%gs" : : "r" (0));
10361 #endif
10362 }
10363 diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10364 --- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10365 +++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10366 @@ -132,7 +132,7 @@ do { \
10367 "thread_return:\n\t" \
10368 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10369 __switch_canary \
10370 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10371 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10372 "movq %%rax,%%rdi\n\t" \
10373 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10374 "jnz ret_from_fork\n\t" \
10375 @@ -143,7 +143,7 @@ do { \
10376 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10377 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10378 [_tif_fork] "i" (_TIF_FORK), \
10379 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10380 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10381 [current_task] "m" (per_cpu_var(current_task)) \
10382 __switch_canary_iparam \
10383 : "memory", "cc" __EXTRA_CLOBBER)
10384 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10385 {
10386 unsigned long __limit;
10387 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10388 - return __limit + 1;
10389 + return __limit;
10390 }
10391
10392 static inline void native_clts(void)
10393 @@ -340,12 +340,12 @@ void enable_hlt(void);
10394
10395 void cpu_idle_wait(void);
10396
10397 -extern unsigned long arch_align_stack(unsigned long sp);
10398 +#define arch_align_stack(x) ((x) & ~0xfUL)
10399 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10400
10401 void default_idle(void);
10402
10403 -void stop_this_cpu(void *dummy);
10404 +void stop_this_cpu(void *dummy) __noreturn;
10405
10406 /*
10407 * Force strict CPU ordering.
10408 diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10409 --- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10410 +++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10411 @@ -10,6 +10,7 @@
10412 #include <linux/compiler.h>
10413 #include <asm/page.h>
10414 #include <asm/types.h>
10415 +#include <asm/percpu.h>
10416
10417 /*
10418 * low level task data that entry.S needs immediate access to
10419 @@ -24,7 +25,6 @@ struct exec_domain;
10420 #include <asm/atomic.h>
10421
10422 struct thread_info {
10423 - struct task_struct *task; /* main task structure */
10424 struct exec_domain *exec_domain; /* execution domain */
10425 __u32 flags; /* low level flags */
10426 __u32 status; /* thread synchronous flags */
10427 @@ -34,18 +34,12 @@ struct thread_info {
10428 mm_segment_t addr_limit;
10429 struct restart_block restart_block;
10430 void __user *sysenter_return;
10431 -#ifdef CONFIG_X86_32
10432 - unsigned long previous_esp; /* ESP of the previous stack in
10433 - case of nested (IRQ) stacks
10434 - */
10435 - __u8 supervisor_stack[0];
10436 -#endif
10437 + unsigned long lowest_stack;
10438 int uaccess_err;
10439 };
10440
10441 -#define INIT_THREAD_INFO(tsk) \
10442 +#define INIT_THREAD_INFO \
10443 { \
10444 - .task = &tsk, \
10445 .exec_domain = &default_exec_domain, \
10446 .flags = 0, \
10447 .cpu = 0, \
10448 @@ -56,7 +50,7 @@ struct thread_info {
10449 }, \
10450 }
10451
10452 -#define init_thread_info (init_thread_union.thread_info)
10453 +#define init_thread_info (init_thread_union.stack)
10454 #define init_stack (init_thread_union.stack)
10455
10456 #else /* !__ASSEMBLY__ */
10457 @@ -163,6 +157,23 @@ struct thread_info {
10458 #define alloc_thread_info(tsk) \
10459 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10460
10461 +#ifdef __ASSEMBLY__
10462 +/* how to get the thread information struct from ASM */
10463 +#define GET_THREAD_INFO(reg) \
10464 + mov PER_CPU_VAR(current_tinfo), reg
10465 +
10466 +/* use this one if reg already contains %esp */
10467 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10468 +#else
10469 +/* how to get the thread information struct from C */
10470 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10471 +
10472 +static __always_inline struct thread_info *current_thread_info(void)
10473 +{
10474 + return percpu_read_stable(current_tinfo);
10475 +}
10476 +#endif
10477 +
10478 #ifdef CONFIG_X86_32
10479
10480 #define STACK_WARN (THREAD_SIZE/8)
10481 @@ -173,35 +184,13 @@ struct thread_info {
10482 */
10483 #ifndef __ASSEMBLY__
10484
10485 -
10486 /* how to get the current stack pointer from C */
10487 register unsigned long current_stack_pointer asm("esp") __used;
10488
10489 -/* how to get the thread information struct from C */
10490 -static inline struct thread_info *current_thread_info(void)
10491 -{
10492 - return (struct thread_info *)
10493 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10494 -}
10495 -
10496 -#else /* !__ASSEMBLY__ */
10497 -
10498 -/* how to get the thread information struct from ASM */
10499 -#define GET_THREAD_INFO(reg) \
10500 - movl $-THREAD_SIZE, reg; \
10501 - andl %esp, reg
10502 -
10503 -/* use this one if reg already contains %esp */
10504 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10505 - andl $-THREAD_SIZE, reg
10506 -
10507 #endif
10508
10509 #else /* X86_32 */
10510
10511 -#include <asm/percpu.h>
10512 -#define KERNEL_STACK_OFFSET (5*8)
10513 -
10514 /*
10515 * macros/functions for gaining access to the thread information structure
10516 * preempt_count needs to be 1 initially, until the scheduler is functional.
10517 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10518 #ifndef __ASSEMBLY__
10519 DECLARE_PER_CPU(unsigned long, kernel_stack);
10520
10521 -static inline struct thread_info *current_thread_info(void)
10522 -{
10523 - struct thread_info *ti;
10524 - ti = (void *)(percpu_read_stable(kernel_stack) +
10525 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10526 - return ti;
10527 -}
10528 -
10529 -#else /* !__ASSEMBLY__ */
10530 -
10531 -/* how to get the thread information struct from ASM */
10532 -#define GET_THREAD_INFO(reg) \
10533 - movq PER_CPU_VAR(kernel_stack),reg ; \
10534 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10535 -
10536 +/* how to get the current stack pointer from C */
10537 +register unsigned long current_stack_pointer asm("rsp") __used;
10538 #endif
10539
10540 #endif /* !X86_32 */
10541 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10542 extern void free_thread_info(struct thread_info *ti);
10543 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10544 #define arch_task_cache_init arch_task_cache_init
10545 +
10546 +#define __HAVE_THREAD_FUNCTIONS
10547 +#define task_thread_info(task) (&(task)->tinfo)
10548 +#define task_stack_page(task) ((task)->stack)
10549 +#define setup_thread_stack(p, org) do {} while (0)
10550 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10551 +
10552 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10553 +extern struct task_struct *alloc_task_struct(void);
10554 +extern void free_task_struct(struct task_struct *);
10555 +
10556 #endif
10557 #endif /* _ASM_X86_THREAD_INFO_H */
10558 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10559 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10560 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10561 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10562 static __always_inline unsigned long __must_check
10563 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10564 {
10565 + pax_track_stack();
10566 +
10567 + if ((long)n < 0)
10568 + return n;
10569 +
10570 if (__builtin_constant_p(n)) {
10571 unsigned long ret;
10572
10573 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10574 return ret;
10575 }
10576 }
10577 + if (!__builtin_constant_p(n))
10578 + check_object_size(from, n, true);
10579 return __copy_to_user_ll(to, from, n);
10580 }
10581
10582 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10583 __copy_to_user(void __user *to, const void *from, unsigned long n)
10584 {
10585 might_fault();
10586 +
10587 return __copy_to_user_inatomic(to, from, n);
10588 }
10589
10590 static __always_inline unsigned long
10591 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10592 {
10593 + if ((long)n < 0)
10594 + return n;
10595 +
10596 /* Avoid zeroing the tail if the copy fails..
10597 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10598 * but as the zeroing behaviour is only significant when n is not
10599 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10600 __copy_from_user(void *to, const void __user *from, unsigned long n)
10601 {
10602 might_fault();
10603 +
10604 + pax_track_stack();
10605 +
10606 + if ((long)n < 0)
10607 + return n;
10608 +
10609 if (__builtin_constant_p(n)) {
10610 unsigned long ret;
10611
10612 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10613 return ret;
10614 }
10615 }
10616 + if (!__builtin_constant_p(n))
10617 + check_object_size(to, n, false);
10618 return __copy_from_user_ll(to, from, n);
10619 }
10620
10621 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10622 const void __user *from, unsigned long n)
10623 {
10624 might_fault();
10625 +
10626 + if ((long)n < 0)
10627 + return n;
10628 +
10629 if (__builtin_constant_p(n)) {
10630 unsigned long ret;
10631
10632 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10633 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10634 unsigned long n)
10635 {
10636 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10637 + if ((long)n < 0)
10638 + return n;
10639 +
10640 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10641 +}
10642 +
10643 +/**
10644 + * copy_to_user: - Copy a block of data into user space.
10645 + * @to: Destination address, in user space.
10646 + * @from: Source address, in kernel space.
10647 + * @n: Number of bytes to copy.
10648 + *
10649 + * Context: User context only. This function may sleep.
10650 + *
10651 + * Copy data from kernel space to user space.
10652 + *
10653 + * Returns number of bytes that could not be copied.
10654 + * On success, this will be zero.
10655 + */
10656 +static __always_inline unsigned long __must_check
10657 +copy_to_user(void __user *to, const void *from, unsigned long n)
10658 +{
10659 + if (access_ok(VERIFY_WRITE, to, n))
10660 + n = __copy_to_user(to, from, n);
10661 + return n;
10662 +}
10663 +
10664 +/**
10665 + * copy_from_user: - Copy a block of data from user space.
10666 + * @to: Destination address, in kernel space.
10667 + * @from: Source address, in user space.
10668 + * @n: Number of bytes to copy.
10669 + *
10670 + * Context: User context only. This function may sleep.
10671 + *
10672 + * Copy data from user space to kernel space.
10673 + *
10674 + * Returns number of bytes that could not be copied.
10675 + * On success, this will be zero.
10676 + *
10677 + * If some data could not be copied, this function will pad the copied
10678 + * data to the requested size using zero bytes.
10679 + */
10680 +static __always_inline unsigned long __must_check
10681 +copy_from_user(void *to, const void __user *from, unsigned long n)
10682 +{
10683 + if (access_ok(VERIFY_READ, from, n))
10684 + n = __copy_from_user(to, from, n);
10685 + else if ((long)n > 0) {
10686 + if (!__builtin_constant_p(n))
10687 + check_object_size(to, n, false);
10688 + memset(to, 0, n);
10689 + }
10690 + return n;
10691 }
10692
10693 -unsigned long __must_check copy_to_user(void __user *to,
10694 - const void *from, unsigned long n);
10695 -unsigned long __must_check copy_from_user(void *to,
10696 - const void __user *from,
10697 - unsigned long n);
10698 long __must_check strncpy_from_user(char *dst, const char __user *src,
10699 long count);
10700 long __must_check __strncpy_from_user(char *dst,
10701 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10702 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10703 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10704 @@ -9,6 +9,9 @@
10705 #include <linux/prefetch.h>
10706 #include <linux/lockdep.h>
10707 #include <asm/page.h>
10708 +#include <asm/pgtable.h>
10709 +
10710 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10711
10712 /*
10713 * Copy To/From Userspace
10714 @@ -19,113 +22,203 @@ __must_check unsigned long
10715 copy_user_generic(void *to, const void *from, unsigned len);
10716
10717 __must_check unsigned long
10718 -copy_to_user(void __user *to, const void *from, unsigned len);
10719 -__must_check unsigned long
10720 -copy_from_user(void *to, const void __user *from, unsigned len);
10721 -__must_check unsigned long
10722 copy_in_user(void __user *to, const void __user *from, unsigned len);
10723
10724 static __always_inline __must_check
10725 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10726 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10727 {
10728 - int ret = 0;
10729 + unsigned ret = 0;
10730
10731 might_fault();
10732 - if (!__builtin_constant_p(size))
10733 - return copy_user_generic(dst, (__force void *)src, size);
10734 +
10735 + if ((int)size < 0)
10736 + return size;
10737 +
10738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10739 + if (!__access_ok(VERIFY_READ, src, size))
10740 + return size;
10741 +#endif
10742 +
10743 + if (!__builtin_constant_p(size)) {
10744 + check_object_size(dst, size, false);
10745 +
10746 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10747 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10748 + src += PAX_USER_SHADOW_BASE;
10749 +#endif
10750 +
10751 + return copy_user_generic(dst, (__force const void *)src, size);
10752 + }
10753 switch (size) {
10754 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10755 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10756 ret, "b", "b", "=q", 1);
10757 return ret;
10758 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10759 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10760 ret, "w", "w", "=r", 2);
10761 return ret;
10762 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10763 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10764 ret, "l", "k", "=r", 4);
10765 return ret;
10766 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10767 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10768 ret, "q", "", "=r", 8);
10769 return ret;
10770 case 10:
10771 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10772 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10773 ret, "q", "", "=r", 10);
10774 if (unlikely(ret))
10775 return ret;
10776 __get_user_asm(*(u16 *)(8 + (char *)dst),
10777 - (u16 __user *)(8 + (char __user *)src),
10778 + (const u16 __user *)(8 + (const char __user *)src),
10779 ret, "w", "w", "=r", 2);
10780 return ret;
10781 case 16:
10782 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10783 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10784 ret, "q", "", "=r", 16);
10785 if (unlikely(ret))
10786 return ret;
10787 __get_user_asm(*(u64 *)(8 + (char *)dst),
10788 - (u64 __user *)(8 + (char __user *)src),
10789 + (const u64 __user *)(8 + (const char __user *)src),
10790 ret, "q", "", "=r", 8);
10791 return ret;
10792 default:
10793 - return copy_user_generic(dst, (__force void *)src, size);
10794 +
10795 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10796 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10797 + src += PAX_USER_SHADOW_BASE;
10798 +#endif
10799 +
10800 + return copy_user_generic(dst, (__force const void *)src, size);
10801 }
10802 }
10803
10804 static __always_inline __must_check
10805 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10806 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10807 {
10808 - int ret = 0;
10809 + unsigned ret = 0;
10810
10811 might_fault();
10812 - if (!__builtin_constant_p(size))
10813 +
10814 + pax_track_stack();
10815 +
10816 + if ((int)size < 0)
10817 + return size;
10818 +
10819 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10820 + if (!__access_ok(VERIFY_WRITE, dst, size))
10821 + return size;
10822 +#endif
10823 +
10824 + if (!__builtin_constant_p(size)) {
10825 + check_object_size(src, size, true);
10826 +
10827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10828 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10829 + dst += PAX_USER_SHADOW_BASE;
10830 +#endif
10831 +
10832 return copy_user_generic((__force void *)dst, src, size);
10833 + }
10834 switch (size) {
10835 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10836 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10837 ret, "b", "b", "iq", 1);
10838 return ret;
10839 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10840 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10841 ret, "w", "w", "ir", 2);
10842 return ret;
10843 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10844 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10845 ret, "l", "k", "ir", 4);
10846 return ret;
10847 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10848 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10849 ret, "q", "", "er", 8);
10850 return ret;
10851 case 10:
10852 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10853 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10854 ret, "q", "", "er", 10);
10855 if (unlikely(ret))
10856 return ret;
10857 asm("":::"memory");
10858 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10859 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10860 ret, "w", "w", "ir", 2);
10861 return ret;
10862 case 16:
10863 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10864 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10865 ret, "q", "", "er", 16);
10866 if (unlikely(ret))
10867 return ret;
10868 asm("":::"memory");
10869 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10870 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10871 ret, "q", "", "er", 8);
10872 return ret;
10873 default:
10874 +
10875 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10876 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10877 + dst += PAX_USER_SHADOW_BASE;
10878 +#endif
10879 +
10880 return copy_user_generic((__force void *)dst, src, size);
10881 }
10882 }
10883
10884 static __always_inline __must_check
10885 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10886 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10887 +{
10888 + if (access_ok(VERIFY_WRITE, to, len))
10889 + len = __copy_to_user(to, from, len);
10890 + return len;
10891 +}
10892 +
10893 +static __always_inline __must_check
10894 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10895 +{
10896 + if ((int)len < 0)
10897 + return len;
10898 +
10899 + if (access_ok(VERIFY_READ, from, len))
10900 + len = __copy_from_user(to, from, len);
10901 + else if ((int)len > 0) {
10902 + if (!__builtin_constant_p(len))
10903 + check_object_size(to, len, false);
10904 + memset(to, 0, len);
10905 + }
10906 + return len;
10907 +}
10908 +
10909 +static __always_inline __must_check
10910 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10911 {
10912 - int ret = 0;
10913 + unsigned ret = 0;
10914
10915 might_fault();
10916 - if (!__builtin_constant_p(size))
10917 +
10918 + pax_track_stack();
10919 +
10920 + if ((int)size < 0)
10921 + return size;
10922 +
10923 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10924 + if (!__access_ok(VERIFY_READ, src, size))
10925 + return size;
10926 + if (!__access_ok(VERIFY_WRITE, dst, size))
10927 + return size;
10928 +#endif
10929 +
10930 + if (!__builtin_constant_p(size)) {
10931 +
10932 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10933 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10934 + src += PAX_USER_SHADOW_BASE;
10935 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10936 + dst += PAX_USER_SHADOW_BASE;
10937 +#endif
10938 +
10939 return copy_user_generic((__force void *)dst,
10940 - (__force void *)src, size);
10941 + (__force const void *)src, size);
10942 + }
10943 switch (size) {
10944 case 1: {
10945 u8 tmp;
10946 - __get_user_asm(tmp, (u8 __user *)src,
10947 + __get_user_asm(tmp, (const u8 __user *)src,
10948 ret, "b", "b", "=q", 1);
10949 if (likely(!ret))
10950 __put_user_asm(tmp, (u8 __user *)dst,
10951 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10952 }
10953 case 2: {
10954 u16 tmp;
10955 - __get_user_asm(tmp, (u16 __user *)src,
10956 + __get_user_asm(tmp, (const u16 __user *)src,
10957 ret, "w", "w", "=r", 2);
10958 if (likely(!ret))
10959 __put_user_asm(tmp, (u16 __user *)dst,
10960 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10961
10962 case 4: {
10963 u32 tmp;
10964 - __get_user_asm(tmp, (u32 __user *)src,
10965 + __get_user_asm(tmp, (const u32 __user *)src,
10966 ret, "l", "k", "=r", 4);
10967 if (likely(!ret))
10968 __put_user_asm(tmp, (u32 __user *)dst,
10969 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10970 }
10971 case 8: {
10972 u64 tmp;
10973 - __get_user_asm(tmp, (u64 __user *)src,
10974 + __get_user_asm(tmp, (const u64 __user *)src,
10975 ret, "q", "", "=r", 8);
10976 if (likely(!ret))
10977 __put_user_asm(tmp, (u64 __user *)dst,
10978 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10979 return ret;
10980 }
10981 default:
10982 +
10983 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10984 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10985 + src += PAX_USER_SHADOW_BASE;
10986 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10987 + dst += PAX_USER_SHADOW_BASE;
10988 +#endif
10989 +
10990 return copy_user_generic((__force void *)dst,
10991 - (__force void *)src, size);
10992 + (__force const void *)src, size);
10993 }
10994 }
10995
10996 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10997 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10998 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10999
11000 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11001 - unsigned size);
11002 +static __must_check __always_inline unsigned long
11003 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11004 +{
11005 + pax_track_stack();
11006 +
11007 + if ((int)size < 0)
11008 + return size;
11009
11010 -static __must_check __always_inline int
11011 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11012 + if (!__access_ok(VERIFY_READ, src, size))
11013 + return size;
11014 +
11015 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11016 + src += PAX_USER_SHADOW_BASE;
11017 +#endif
11018 +
11019 + return copy_user_generic(dst, (__force const void *)src, size);
11020 +}
11021 +
11022 +static __must_check __always_inline unsigned long
11023 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11024 {
11025 + if ((int)size < 0)
11026 + return size;
11027 +
11028 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11029 + if (!__access_ok(VERIFY_WRITE, dst, size))
11030 + return size;
11031 +
11032 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11033 + dst += PAX_USER_SHADOW_BASE;
11034 +#endif
11035 +
11036 return copy_user_generic((__force void *)dst, src, size);
11037 }
11038
11039 -extern long __copy_user_nocache(void *dst, const void __user *src,
11040 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11041 unsigned size, int zerorest);
11042
11043 -static inline int
11044 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11045 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11046 {
11047 might_sleep();
11048 +
11049 + if ((int)size < 0)
11050 + return size;
11051 +
11052 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11053 + if (!__access_ok(VERIFY_READ, src, size))
11054 + return size;
11055 +#endif
11056 +
11057 return __copy_user_nocache(dst, src, size, 1);
11058 }
11059
11060 -static inline int
11061 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11062 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11063 unsigned size)
11064 {
11065 + if ((int)size < 0)
11066 + return size;
11067 +
11068 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11069 + if (!__access_ok(VERIFY_READ, src, size))
11070 + return size;
11071 +#endif
11072 +
11073 return __copy_user_nocache(dst, src, size, 0);
11074 }
11075
11076 -unsigned long
11077 +extern unsigned long
11078 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11079
11080 #endif /* _ASM_X86_UACCESS_64_H */
11081 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11082 --- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11083 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11084 @@ -8,12 +8,15 @@
11085 #include <linux/thread_info.h>
11086 #include <linux/prefetch.h>
11087 #include <linux/string.h>
11088 +#include <linux/sched.h>
11089 #include <asm/asm.h>
11090 #include <asm/page.h>
11091
11092 #define VERIFY_READ 0
11093 #define VERIFY_WRITE 1
11094
11095 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11096 +
11097 /*
11098 * The fs value determines whether argument validity checking should be
11099 * performed or not. If get_fs() == USER_DS, checking is performed, with
11100 @@ -29,7 +32,12 @@
11101
11102 #define get_ds() (KERNEL_DS)
11103 #define get_fs() (current_thread_info()->addr_limit)
11104 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11105 +void __set_fs(mm_segment_t x);
11106 +void set_fs(mm_segment_t x);
11107 +#else
11108 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11109 +#endif
11110
11111 #define segment_eq(a, b) ((a).seg == (b).seg)
11112
11113 @@ -77,7 +85,33 @@
11114 * checks that the pointer is in the user space range - after calling
11115 * this function, memory access functions may still return -EFAULT.
11116 */
11117 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11118 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11119 +#define access_ok(type, addr, size) \
11120 +({ \
11121 + long __size = size; \
11122 + unsigned long __addr = (unsigned long)addr; \
11123 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11124 + unsigned long __end_ao = __addr + __size - 1; \
11125 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11126 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11127 + while(__addr_ao <= __end_ao) { \
11128 + char __c_ao; \
11129 + __addr_ao += PAGE_SIZE; \
11130 + if (__size > PAGE_SIZE) \
11131 + cond_resched(); \
11132 + if (__get_user(__c_ao, (char __user *)__addr)) \
11133 + break; \
11134 + if (type != VERIFY_WRITE) { \
11135 + __addr = __addr_ao; \
11136 + continue; \
11137 + } \
11138 + if (__put_user(__c_ao, (char __user *)__addr)) \
11139 + break; \
11140 + __addr = __addr_ao; \
11141 + } \
11142 + } \
11143 + __ret_ao; \
11144 +})
11145
11146 /*
11147 * The exception table consists of pairs of addresses: the first is the
11148 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11149 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11150 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11151
11152 -
11153 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11154 +#define __copyuser_seg "gs;"
11155 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11156 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11157 +#else
11158 +#define __copyuser_seg
11159 +#define __COPYUSER_SET_ES
11160 +#define __COPYUSER_RESTORE_ES
11161 +#endif
11162
11163 #ifdef CONFIG_X86_32
11164 #define __put_user_asm_u64(x, addr, err, errret) \
11165 - asm volatile("1: movl %%eax,0(%2)\n" \
11166 - "2: movl %%edx,4(%2)\n" \
11167 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11168 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11169 "3:\n" \
11170 ".section .fixup,\"ax\"\n" \
11171 "4: movl %3,%0\n" \
11172 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11173 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11174
11175 #define __put_user_asm_ex_u64(x, addr) \
11176 - asm volatile("1: movl %%eax,0(%1)\n" \
11177 - "2: movl %%edx,4(%1)\n" \
11178 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11179 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11180 "3:\n" \
11181 _ASM_EXTABLE(1b, 2b - 1b) \
11182 _ASM_EXTABLE(2b, 3b - 2b) \
11183 @@ -374,7 +416,7 @@ do { \
11184 } while (0)
11185
11186 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11187 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11188 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11189 "2:\n" \
11190 ".section .fixup,\"ax\"\n" \
11191 "3: mov %3,%0\n" \
11192 @@ -382,7 +424,7 @@ do { \
11193 " jmp 2b\n" \
11194 ".previous\n" \
11195 _ASM_EXTABLE(1b, 3b) \
11196 - : "=r" (err), ltype(x) \
11197 + : "=r" (err), ltype (x) \
11198 : "m" (__m(addr)), "i" (errret), "0" (err))
11199
11200 #define __get_user_size_ex(x, ptr, size) \
11201 @@ -407,7 +449,7 @@ do { \
11202 } while (0)
11203
11204 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11205 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11206 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11207 "2:\n" \
11208 _ASM_EXTABLE(1b, 2b - 1b) \
11209 : ltype(x) : "m" (__m(addr)))
11210 @@ -424,13 +466,24 @@ do { \
11211 int __gu_err; \
11212 unsigned long __gu_val; \
11213 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11214 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11215 + (x) = (__typeof__(*(ptr)))__gu_val; \
11216 __gu_err; \
11217 })
11218
11219 /* FIXME: this hack is definitely wrong -AK */
11220 struct __large_struct { unsigned long buf[100]; };
11221 -#define __m(x) (*(struct __large_struct __user *)(x))
11222 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11223 +#define ____m(x) \
11224 +({ \
11225 + unsigned long ____x = (unsigned long)(x); \
11226 + if (____x < PAX_USER_SHADOW_BASE) \
11227 + ____x += PAX_USER_SHADOW_BASE; \
11228 + (void __user *)____x; \
11229 +})
11230 +#else
11231 +#define ____m(x) (x)
11232 +#endif
11233 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11234
11235 /*
11236 * Tell gcc we read from memory instead of writing: this is because
11237 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11238 * aliasing issues.
11239 */
11240 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11241 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11242 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11243 "2:\n" \
11244 ".section .fixup,\"ax\"\n" \
11245 "3: mov %3,%0\n" \
11246 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11247 ".previous\n" \
11248 _ASM_EXTABLE(1b, 3b) \
11249 : "=r"(err) \
11250 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11251 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11252
11253 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11254 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11255 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11256 "2:\n" \
11257 _ASM_EXTABLE(1b, 2b - 1b) \
11258 : : ltype(x), "m" (__m(addr)))
11259 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11260 * On error, the variable @x is set to zero.
11261 */
11262
11263 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11264 +#define __get_user(x, ptr) get_user((x), (ptr))
11265 +#else
11266 #define __get_user(x, ptr) \
11267 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11268 +#endif
11269
11270 /**
11271 * __put_user: - Write a simple value into user space, with less checking.
11272 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11273 * Returns zero on success, or -EFAULT on error.
11274 */
11275
11276 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11277 +#define __put_user(x, ptr) put_user((x), (ptr))
11278 +#else
11279 #define __put_user(x, ptr) \
11280 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11281 +#endif
11282
11283 #define __get_user_unaligned __get_user
11284 #define __put_user_unaligned __put_user
11285 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11286 #define get_user_ex(x, ptr) do { \
11287 unsigned long __gue_val; \
11288 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11289 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11290 + (x) = (__typeof__(*(ptr)))__gue_val; \
11291 } while (0)
11292
11293 #ifdef CONFIG_X86_WP_WORKS_OK
11294 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11295
11296 #define ARCH_HAS_NOCACHE_UACCESS 1
11297
11298 +#define ARCH_HAS_SORT_EXTABLE
11299 #ifdef CONFIG_X86_32
11300 # include "uaccess_32.h"
11301 #else
11302 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11303 --- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11304 +++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11305 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11306 int sysctl_enabled;
11307 struct timezone sys_tz;
11308 struct { /* extract of a clocksource struct */
11309 + char name[8];
11310 cycle_t (*vread)(void);
11311 cycle_t cycle_last;
11312 cycle_t mask;
11313 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11314 --- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11315 +++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11316 @@ -191,6 +191,7 @@ struct vrom_header {
11317 u8 reserved[96]; /* Reserved for headers */
11318 char vmi_init[8]; /* VMI_Init jump point */
11319 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11320 + char rom_data[8048]; /* rest of the option ROM */
11321 } __attribute__((packed));
11322
11323 struct pnp_header {
11324 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11325 --- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11326 +++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11327 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11328 int (*wallclock_updated)(void);
11329 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11330 void (*cancel_alarm)(u32 flags);
11331 -} vmi_timer_ops;
11332 +} __no_const vmi_timer_ops;
11333
11334 /* Prototypes */
11335 extern void __init vmi_time_init(void);
11336 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11337 --- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11338 +++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11339 @@ -15,9 +15,10 @@ enum vsyscall_num {
11340
11341 #ifdef __KERNEL__
11342 #include <linux/seqlock.h>
11343 +#include <linux/getcpu.h>
11344 +#include <linux/time.h>
11345
11346 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11347 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11348
11349 /* Definitions for CONFIG_GENERIC_TIME definitions */
11350 #define __section_vsyscall_gtod_data __attribute__ \
11351 @@ -31,7 +32,6 @@ enum vsyscall_num {
11352 #define VGETCPU_LSL 2
11353
11354 extern int __vgetcpu_mode;
11355 -extern volatile unsigned long __jiffies;
11356
11357 /* kernel space (writeable) */
11358 extern int vgetcpu_mode;
11359 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11360
11361 extern void map_vsyscall(void);
11362
11363 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11364 +extern time_t vtime(time_t *t);
11365 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11366 #endif /* __KERNEL__ */
11367
11368 #endif /* _ASM_X86_VSYSCALL_H */
11369 diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11370 --- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11371 +++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11372 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11373 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11374 void (*find_smp_config)(unsigned int reserve);
11375 void (*get_smp_config)(unsigned int early);
11376 -};
11377 +} __no_const;
11378
11379 /**
11380 * struct x86_init_resources - platform specific resource related ops
11381 @@ -42,7 +42,7 @@ struct x86_init_resources {
11382 void (*probe_roms)(void);
11383 void (*reserve_resources)(void);
11384 char *(*memory_setup)(void);
11385 -};
11386 +} __no_const;
11387
11388 /**
11389 * struct x86_init_irqs - platform specific interrupt setup
11390 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11391 void (*pre_vector_init)(void);
11392 void (*intr_init)(void);
11393 void (*trap_init)(void);
11394 -};
11395 +} __no_const;
11396
11397 /**
11398 * struct x86_init_oem - oem platform specific customizing functions
11399 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11400 struct x86_init_oem {
11401 void (*arch_setup)(void);
11402 void (*banner)(void);
11403 -};
11404 +} __no_const;
11405
11406 /**
11407 * struct x86_init_paging - platform specific paging functions
11408 @@ -75,7 +75,7 @@ struct x86_init_oem {
11409 struct x86_init_paging {
11410 void (*pagetable_setup_start)(pgd_t *base);
11411 void (*pagetable_setup_done)(pgd_t *base);
11412 -};
11413 +} __no_const;
11414
11415 /**
11416 * struct x86_init_timers - platform specific timer setup
11417 @@ -88,7 +88,7 @@ struct x86_init_timers {
11418 void (*setup_percpu_clockev)(void);
11419 void (*tsc_pre_init)(void);
11420 void (*timer_init)(void);
11421 -};
11422 +} __no_const;
11423
11424 /**
11425 * struct x86_init_ops - functions for platform specific setup
11426 @@ -101,7 +101,7 @@ struct x86_init_ops {
11427 struct x86_init_oem oem;
11428 struct x86_init_paging paging;
11429 struct x86_init_timers timers;
11430 -};
11431 +} __no_const;
11432
11433 /**
11434 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11435 @@ -109,7 +109,7 @@ struct x86_init_ops {
11436 */
11437 struct x86_cpuinit_ops {
11438 void (*setup_percpu_clockev)(void);
11439 -};
11440 +} __no_const;
11441
11442 /**
11443 * struct x86_platform_ops - platform specific runtime functions
11444 @@ -121,7 +121,7 @@ struct x86_platform_ops {
11445 unsigned long (*calibrate_tsc)(void);
11446 unsigned long (*get_wallclock)(void);
11447 int (*set_wallclock)(unsigned long nowtime);
11448 -};
11449 +} __no_const;
11450
11451 extern struct x86_init_ops x86_init;
11452 extern struct x86_cpuinit_ops x86_cpuinit;
11453 diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11454 --- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11455 +++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11456 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11457 static inline int xsave_user(struct xsave_struct __user *buf)
11458 {
11459 int err;
11460 +
11461 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11462 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11463 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11464 +#endif
11465 +
11466 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11467 "2:\n"
11468 ".section .fixup,\"ax\"\n"
11469 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11470 u32 lmask = mask;
11471 u32 hmask = mask >> 32;
11472
11473 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11474 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11475 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11476 +#endif
11477 +
11478 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11479 "2:\n"
11480 ".section .fixup,\"ax\"\n"
11481 diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11482 --- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11483 +++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11484 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11485
11486 config X86_32_LAZY_GS
11487 def_bool y
11488 - depends on X86_32 && !CC_STACKPROTECTOR
11489 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11490
11491 config KTIME_SCALAR
11492 def_bool X86_32
11493 @@ -1008,7 +1008,7 @@ choice
11494
11495 config NOHIGHMEM
11496 bool "off"
11497 - depends on !X86_NUMAQ
11498 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11499 ---help---
11500 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11501 However, the address space of 32-bit x86 processors is only 4
11502 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11503
11504 config HIGHMEM4G
11505 bool "4GB"
11506 - depends on !X86_NUMAQ
11507 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11508 ---help---
11509 Select this if you have a 32-bit processor and between 1 and 4
11510 gigabytes of physical RAM.
11511 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11512 hex
11513 default 0xB0000000 if VMSPLIT_3G_OPT
11514 default 0x80000000 if VMSPLIT_2G
11515 - default 0x78000000 if VMSPLIT_2G_OPT
11516 + default 0x70000000 if VMSPLIT_2G_OPT
11517 default 0x40000000 if VMSPLIT_1G
11518 default 0xC0000000
11519 depends on X86_32
11520 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11521
11522 config EFI
11523 bool "EFI runtime service support"
11524 - depends on ACPI
11525 + depends on ACPI && !PAX_KERNEXEC
11526 ---help---
11527 This enables the kernel to use EFI runtime services that are
11528 available (such as the EFI variable services).
11529 @@ -1460,6 +1460,7 @@ config SECCOMP
11530
11531 config CC_STACKPROTECTOR
11532 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11533 + depends on X86_64 || !PAX_MEMORY_UDEREF
11534 ---help---
11535 This option turns on the -fstack-protector GCC feature. This
11536 feature puts, at the beginning of functions, a canary value on
11537 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11538 config PHYSICAL_START
11539 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11540 default "0x1000000"
11541 + range 0x400000 0x40000000
11542 ---help---
11543 This gives the physical address where the kernel is loaded.
11544
11545 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11546 hex
11547 prompt "Alignment value to which kernel should be aligned" if X86_32
11548 default "0x1000000"
11549 + range 0x400000 0x1000000 if PAX_KERNEXEC
11550 range 0x2000 0x1000000
11551 ---help---
11552 This value puts the alignment restrictions on physical address
11553 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11554 Say N if you want to disable CPU hotplug.
11555
11556 config COMPAT_VDSO
11557 - def_bool y
11558 + def_bool n
11559 prompt "Compat VDSO support"
11560 depends on X86_32 || IA32_EMULATION
11561 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11562 ---help---
11563 Map the 32-bit VDSO to the predictable old-style address too.
11564 ---help---
11565 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11566 --- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11567 +++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11568 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11569
11570 config X86_F00F_BUG
11571 def_bool y
11572 - depends on M586MMX || M586TSC || M586 || M486 || M386
11573 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11574
11575 config X86_WP_WORKS_OK
11576 def_bool y
11577 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11578
11579 config X86_ALIGNMENT_16
11580 def_bool y
11581 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11582 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11583
11584 config X86_INTEL_USERCOPY
11585 def_bool y
11586 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11587 # generates cmov.
11588 config X86_CMOV
11589 def_bool y
11590 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11591 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11592
11593 config X86_MINIMUM_CPU_FAMILY
11594 int
11595 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11596 --- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11597 +++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11598 @@ -99,7 +99,7 @@ config X86_PTDUMP
11599 config DEBUG_RODATA
11600 bool "Write protect kernel read-only data structures"
11601 default y
11602 - depends on DEBUG_KERNEL
11603 + depends on DEBUG_KERNEL && BROKEN
11604 ---help---
11605 Mark the kernel read-only data as write-protected in the pagetables,
11606 in order to catch accidental (and incorrect) writes to such const
11607 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11608 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11609 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11610 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11611 $(call cc-option, -fno-stack-protector) \
11612 $(call cc-option, -mpreferred-stack-boundary=2)
11613 KBUILD_CFLAGS += $(call cc-option, -m32)
11614 +ifdef CONSTIFY_PLUGIN
11615 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11616 +endif
11617 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11618 GCOV_PROFILE := n
11619
11620 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11621 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11622 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11623 @@ -91,6 +91,9 @@ _start:
11624 /* Do any other stuff... */
11625
11626 #ifndef CONFIG_64BIT
11627 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11628 + call verify_cpu
11629 +
11630 /* This could also be done in C code... */
11631 movl pmode_cr3, %eax
11632 movl %eax, %cr3
11633 @@ -104,7 +107,7 @@ _start:
11634 movl %eax, %ecx
11635 orl %edx, %ecx
11636 jz 1f
11637 - movl $0xc0000080, %ecx
11638 + mov $MSR_EFER, %ecx
11639 wrmsr
11640 1:
11641
11642 @@ -114,6 +117,7 @@ _start:
11643 movl pmode_cr0, %eax
11644 movl %eax, %cr0
11645 jmp pmode_return
11646 +# include "../../verify_cpu.S"
11647 #else
11648 pushw $0
11649 pushw trampoline_segment
11650 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11651 --- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11652 +++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11653 @@ -11,11 +11,12 @@
11654 #include <linux/cpumask.h>
11655 #include <asm/segment.h>
11656 #include <asm/desc.h>
11657 +#include <asm/e820.h>
11658
11659 #include "realmode/wakeup.h"
11660 #include "sleep.h"
11661
11662 -unsigned long acpi_wakeup_address;
11663 +unsigned long acpi_wakeup_address = 0x2000;
11664 unsigned long acpi_realmode_flags;
11665
11666 /* address in low memory of the wakeup routine. */
11667 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11668 #else /* CONFIG_64BIT */
11669 header->trampoline_segment = setup_trampoline() >> 4;
11670 #ifdef CONFIG_SMP
11671 - stack_start.sp = temp_stack + sizeof(temp_stack);
11672 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11673 +
11674 + pax_open_kernel();
11675 early_gdt_descr.address =
11676 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11677 + pax_close_kernel();
11678 +
11679 initial_gs = per_cpu_offset(smp_processor_id());
11680 #endif
11681 initial_code = (unsigned long)wakeup_long64;
11682 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11683 return;
11684 }
11685
11686 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11687 -
11688 - if (!acpi_realmode) {
11689 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11690 - return;
11691 - }
11692 -
11693 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11694 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11695 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11696 }
11697
11698
11699 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11700 --- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11701 +++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11702 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11703 # and restore the stack ... but you need gdt for this to work
11704 movl saved_context_esp, %esp
11705
11706 - movl %cs:saved_magic, %eax
11707 - cmpl $0x12345678, %eax
11708 + cmpl $0x12345678, saved_magic
11709 jne bogus_magic
11710
11711 # jump to place where we left off
11712 - movl saved_eip, %eax
11713 - jmp *%eax
11714 + jmp *(saved_eip)
11715
11716 bogus_magic:
11717 jmp bogus_magic
11718 diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11719 --- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11720 +++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11721 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11722
11723 BUG_ON(p->len > MAX_PATCH_LEN);
11724 /* prep the buffer with the original instructions */
11725 - memcpy(insnbuf, p->instr, p->len);
11726 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11727 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11728 (unsigned long)p->instr, p->len);
11729
11730 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11731 if (smp_alt_once)
11732 free_init_pages("SMP alternatives",
11733 (unsigned long)__smp_locks,
11734 - (unsigned long)__smp_locks_end);
11735 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11736
11737 restart_nmi();
11738 }
11739 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11740 * instructions. And on the local CPU you need to be protected again NMI or MCE
11741 * handlers seeing an inconsistent instruction while you patch.
11742 */
11743 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11744 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11745 size_t len)
11746 {
11747 unsigned long flags;
11748 local_irq_save(flags);
11749 - memcpy(addr, opcode, len);
11750 +
11751 + pax_open_kernel();
11752 + memcpy(ktla_ktva(addr), opcode, len);
11753 sync_core();
11754 + pax_close_kernel();
11755 +
11756 local_irq_restore(flags);
11757 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11758 that causes hangs on some VIA CPUs. */
11759 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11760 */
11761 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11762 {
11763 - unsigned long flags;
11764 - char *vaddr;
11765 + unsigned char *vaddr = ktla_ktva(addr);
11766 struct page *pages[2];
11767 - int i;
11768 + size_t i;
11769
11770 if (!core_kernel_text((unsigned long)addr)) {
11771 - pages[0] = vmalloc_to_page(addr);
11772 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11773 + pages[0] = vmalloc_to_page(vaddr);
11774 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11775 } else {
11776 - pages[0] = virt_to_page(addr);
11777 + pages[0] = virt_to_page(vaddr);
11778 WARN_ON(!PageReserved(pages[0]));
11779 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11780 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11781 }
11782 BUG_ON(!pages[0]);
11783 - local_irq_save(flags);
11784 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11785 - if (pages[1])
11786 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11787 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11788 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11789 - clear_fixmap(FIX_TEXT_POKE0);
11790 - if (pages[1])
11791 - clear_fixmap(FIX_TEXT_POKE1);
11792 - local_flush_tlb();
11793 - sync_core();
11794 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11795 - that causes hangs on some VIA CPUs. */
11796 + text_poke_early(addr, opcode, len);
11797 for (i = 0; i < len; i++)
11798 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11799 - local_irq_restore(flags);
11800 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11801 return addr;
11802 }
11803 diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11804 --- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11805 +++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11806 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11807 }
11808 }
11809
11810 -static struct dma_map_ops amd_iommu_dma_ops = {
11811 +static const struct dma_map_ops amd_iommu_dma_ops = {
11812 .alloc_coherent = alloc_coherent,
11813 .free_coherent = free_coherent,
11814 .map_page = map_page,
11815 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11816 --- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11817 +++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11818 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11819 /*
11820 * Debug level, exported for io_apic.c
11821 */
11822 -unsigned int apic_verbosity;
11823 +int apic_verbosity;
11824
11825 int pic_mode;
11826
11827 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11828 apic_write(APIC_ESR, 0);
11829 v1 = apic_read(APIC_ESR);
11830 ack_APIC_irq();
11831 - atomic_inc(&irq_err_count);
11832 + atomic_inc_unchecked(&irq_err_count);
11833
11834 /*
11835 * Here is what the APIC error bits mean:
11836 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11837 u16 *bios_cpu_apicid;
11838 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11839
11840 + pax_track_stack();
11841 +
11842 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11843 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11844
11845 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
11846 --- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11847 +++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11848 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11849 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11850 GFP_ATOMIC);
11851 if (!ioapic_entries)
11852 - return 0;
11853 + return NULL;
11854
11855 for (apic = 0; apic < nr_ioapics; apic++) {
11856 ioapic_entries[apic] =
11857 @@ -733,7 +733,7 @@ nomem:
11858 kfree(ioapic_entries[apic]);
11859 kfree(ioapic_entries);
11860
11861 - return 0;
11862 + return NULL;
11863 }
11864
11865 /*
11866 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11867 }
11868 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11869
11870 -void lock_vector_lock(void)
11871 +void lock_vector_lock(void) __acquires(vector_lock)
11872 {
11873 /* Used to the online set of cpus does not change
11874 * during assign_irq_vector.
11875 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11876 spin_lock(&vector_lock);
11877 }
11878
11879 -void unlock_vector_lock(void)
11880 +void unlock_vector_lock(void) __releases(vector_lock)
11881 {
11882 spin_unlock(&vector_lock);
11883 }
11884 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11885 ack_APIC_irq();
11886 }
11887
11888 -atomic_t irq_mis_count;
11889 +atomic_unchecked_t irq_mis_count;
11890
11891 static void ack_apic_level(unsigned int irq)
11892 {
11893 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11894
11895 /* Tail end of version 0x11 I/O APIC bug workaround */
11896 if (!(v & (1 << (i & 0x1f)))) {
11897 - atomic_inc(&irq_mis_count);
11898 + atomic_inc_unchecked(&irq_mis_count);
11899 spin_lock(&ioapic_lock);
11900 __mask_and_edge_IO_APIC_irq(cfg);
11901 __unmask_and_level_IO_APIC_irq(cfg);
11902 diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
11903 --- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11904 +++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11905 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11906 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11907 * even though they are called in protected mode.
11908 */
11909 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11910 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11911 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11912
11913 static const char driver_version[] = "1.16ac"; /* no spaces */
11914 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11915 BUG_ON(cpu != 0);
11916 gdt = get_cpu_gdt_table(cpu);
11917 save_desc_40 = gdt[0x40 / 8];
11918 +
11919 + pax_open_kernel();
11920 gdt[0x40 / 8] = bad_bios_desc;
11921 + pax_close_kernel();
11922
11923 apm_irq_save(flags);
11924 APM_DO_SAVE_SEGS;
11925 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11926 &call->esi);
11927 APM_DO_RESTORE_SEGS;
11928 apm_irq_restore(flags);
11929 +
11930 + pax_open_kernel();
11931 gdt[0x40 / 8] = save_desc_40;
11932 + pax_close_kernel();
11933 +
11934 put_cpu();
11935
11936 return call->eax & 0xff;
11937 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11938 BUG_ON(cpu != 0);
11939 gdt = get_cpu_gdt_table(cpu);
11940 save_desc_40 = gdt[0x40 / 8];
11941 +
11942 + pax_open_kernel();
11943 gdt[0x40 / 8] = bad_bios_desc;
11944 + pax_close_kernel();
11945
11946 apm_irq_save(flags);
11947 APM_DO_SAVE_SEGS;
11948 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11949 &call->eax);
11950 APM_DO_RESTORE_SEGS;
11951 apm_irq_restore(flags);
11952 +
11953 + pax_open_kernel();
11954 gdt[0x40 / 8] = save_desc_40;
11955 + pax_close_kernel();
11956 +
11957 put_cpu();
11958 return error;
11959 }
11960 @@ -975,7 +989,7 @@ recalc:
11961
11962 static void apm_power_off(void)
11963 {
11964 - unsigned char po_bios_call[] = {
11965 + const unsigned char po_bios_call[] = {
11966 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11967 0x8e, 0xd0, /* movw ax,ss */
11968 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11969 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11970 * code to that CPU.
11971 */
11972 gdt = get_cpu_gdt_table(0);
11973 +
11974 + pax_open_kernel();
11975 set_desc_base(&gdt[APM_CS >> 3],
11976 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11977 set_desc_base(&gdt[APM_CS_16 >> 3],
11978 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11979 set_desc_base(&gdt[APM_DS >> 3],
11980 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11981 + pax_close_kernel();
11982
11983 proc_create("apm", 0, NULL, &apm_file_ops);
11984
11985 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
11986 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11987 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11988 @@ -51,7 +51,6 @@ void foo(void)
11989 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11990 BLANK();
11991
11992 - OFFSET(TI_task, thread_info, task);
11993 OFFSET(TI_exec_domain, thread_info, exec_domain);
11994 OFFSET(TI_flags, thread_info, flags);
11995 OFFSET(TI_status, thread_info, status);
11996 @@ -60,6 +59,8 @@ void foo(void)
11997 OFFSET(TI_restart_block, thread_info, restart_block);
11998 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11999 OFFSET(TI_cpu, thread_info, cpu);
12000 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12001 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12002 BLANK();
12003
12004 OFFSET(GDS_size, desc_ptr, size);
12005 @@ -99,6 +100,7 @@ void foo(void)
12006
12007 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12008 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12009 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12010 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12011 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12012 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12013 @@ -115,6 +117,11 @@ void foo(void)
12014 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12015 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12016 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12017 +
12018 +#ifdef CONFIG_PAX_KERNEXEC
12019 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12020 +#endif
12021 +
12022 #endif
12023
12024 #ifdef CONFIG_XEN
12025 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12026 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12027 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
12028 @@ -44,6 +44,8 @@ int main(void)
12029 ENTRY(addr_limit);
12030 ENTRY(preempt_count);
12031 ENTRY(status);
12032 + ENTRY(lowest_stack);
12033 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12034 #ifdef CONFIG_IA32_EMULATION
12035 ENTRY(sysenter_return);
12036 #endif
12037 @@ -63,6 +65,18 @@ int main(void)
12038 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12039 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12040 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12041 +
12042 +#ifdef CONFIG_PAX_KERNEXEC
12043 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12044 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12045 +#endif
12046 +
12047 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12048 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12049 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12050 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
12051 +#endif
12052 +
12053 #endif
12054
12055
12056 @@ -115,6 +129,7 @@ int main(void)
12057 ENTRY(cr8);
12058 BLANK();
12059 #undef ENTRY
12060 + DEFINE(TSS_size, sizeof(struct tss_struct));
12061 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12062 BLANK();
12063 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12064 @@ -130,6 +145,7 @@ int main(void)
12065
12066 BLANK();
12067 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12068 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12069 #ifdef CONFIG_XEN
12070 BLANK();
12071 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12072 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12073 --- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12074 +++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12075 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12076 unsigned int size)
12077 {
12078 /* AMD errata T13 (order #21922) */
12079 - if ((c->x86 == 6)) {
12080 + if (c->x86 == 6) {
12081 /* Duron Rev A0 */
12082 if (c->x86_model == 3 && c->x86_mask == 0)
12083 size = 64;
12084 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12085 --- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12086 +++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12087 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12088
12089 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12090
12091 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12092 -#ifdef CONFIG_X86_64
12093 - /*
12094 - * We need valid kernel segments for data and code in long mode too
12095 - * IRET will check the segment types kkeil 2000/10/28
12096 - * Also sysret mandates a special GDT layout
12097 - *
12098 - * TLS descriptors are currently at a different place compared to i386.
12099 - * Hopefully nobody expects them at a fixed place (Wine?)
12100 - */
12101 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12102 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12103 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12104 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12105 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12106 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12107 -#else
12108 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12109 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12110 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12111 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12112 - /*
12113 - * Segments used for calling PnP BIOS have byte granularity.
12114 - * They code segments and data segments have fixed 64k limits,
12115 - * the transfer segment sizes are set at run time.
12116 - */
12117 - /* 32-bit code */
12118 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12119 - /* 16-bit code */
12120 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12121 - /* 16-bit data */
12122 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12123 - /* 16-bit data */
12124 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12125 - /* 16-bit data */
12126 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12127 - /*
12128 - * The APM segments have byte granularity and their bases
12129 - * are set at run time. All have 64k limits.
12130 - */
12131 - /* 32-bit code */
12132 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12133 - /* 16-bit code */
12134 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12135 - /* data */
12136 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12137 -
12138 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12139 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12140 - GDT_STACK_CANARY_INIT
12141 -#endif
12142 -} };
12143 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12144 -
12145 static int __init x86_xsave_setup(char *s)
12146 {
12147 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12148 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12149 {
12150 struct desc_ptr gdt_descr;
12151
12152 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12153 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12154 gdt_descr.size = GDT_SIZE - 1;
12155 load_gdt(&gdt_descr);
12156 /* Reload the per-cpu base */
12157 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12158 /* Filter out anything that depends on CPUID levels we don't have */
12159 filter_cpuid_features(c, true);
12160
12161 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12162 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12163 +#endif
12164 +
12165 /* If the model name is still unset, do table lookup. */
12166 if (!c->x86_model_id[0]) {
12167 const char *p;
12168 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12169 }
12170 __setup("clearcpuid=", setup_disablecpuid);
12171
12172 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12173 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12174 +
12175 #ifdef CONFIG_X86_64
12176 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12177
12178 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12179 EXPORT_PER_CPU_SYMBOL(current_task);
12180
12181 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12182 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12183 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12184 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12185
12186 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12187 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12188 {
12189 memset(regs, 0, sizeof(struct pt_regs));
12190 regs->fs = __KERNEL_PERCPU;
12191 - regs->gs = __KERNEL_STACK_CANARY;
12192 + savesegment(gs, regs->gs);
12193
12194 return regs;
12195 }
12196 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12197 int i;
12198
12199 cpu = stack_smp_processor_id();
12200 - t = &per_cpu(init_tss, cpu);
12201 + t = init_tss + cpu;
12202 orig_ist = &per_cpu(orig_ist, cpu);
12203
12204 #ifdef CONFIG_NUMA
12205 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12206 switch_to_new_gdt(cpu);
12207 loadsegment(fs, 0);
12208
12209 - load_idt((const struct desc_ptr *)&idt_descr);
12210 + load_idt(&idt_descr);
12211
12212 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12213 syscall_init();
12214 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12215 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12216 barrier();
12217
12218 - check_efer();
12219 if (cpu != 0)
12220 enable_x2apic();
12221
12222 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12223 {
12224 int cpu = smp_processor_id();
12225 struct task_struct *curr = current;
12226 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12227 + struct tss_struct *t = init_tss + cpu;
12228 struct thread_struct *thread = &curr->thread;
12229
12230 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12231 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12232 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12233 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12234 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12235 * Update the IDT descriptor and reload the IDT so that
12236 * it uses the read-only mapped virtual address.
12237 */
12238 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12239 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12240 load_idt(&idt_descr);
12241 }
12242 #endif
12243 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12244 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12245 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12246 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12247 return ret;
12248 }
12249
12250 -static struct sysfs_ops sysfs_ops = {
12251 +static const struct sysfs_ops sysfs_ops = {
12252 .show = show,
12253 .store = store,
12254 };
12255 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12256 --- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12257 +++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12258 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12259 CFLAGS_REMOVE_common.o = -pg
12260 endif
12261
12262 -# Make sure load_percpu_segment has no stackprotector
12263 -nostackp := $(call cc-option, -fno-stack-protector)
12264 -CFLAGS_common.o := $(nostackp)
12265 -
12266 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12267 obj-y += proc.o capflags.o powerflags.o common.o
12268 obj-y += vmware.o hypervisor.o sched.o
12269 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12270 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12271 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12272 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12273 return ret;
12274 }
12275
12276 -static struct sysfs_ops threshold_ops = {
12277 +static const struct sysfs_ops threshold_ops = {
12278 .show = show,
12279 .store = store,
12280 };
12281 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12282 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12283 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12284 @@ -43,6 +43,7 @@
12285 #include <asm/ipi.h>
12286 #include <asm/mce.h>
12287 #include <asm/msr.h>
12288 +#include <asm/local.h>
12289
12290 #include "mce-internal.h"
12291
12292 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12293 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12294 m->cs, m->ip);
12295
12296 - if (m->cs == __KERNEL_CS)
12297 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12298 print_symbol("{%s}", m->ip);
12299 pr_cont("\n");
12300 }
12301 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12302
12303 #define PANIC_TIMEOUT 5 /* 5 seconds */
12304
12305 -static atomic_t mce_paniced;
12306 +static atomic_unchecked_t mce_paniced;
12307
12308 static int fake_panic;
12309 -static atomic_t mce_fake_paniced;
12310 +static atomic_unchecked_t mce_fake_paniced;
12311
12312 /* Panic in progress. Enable interrupts and wait for final IPI */
12313 static void wait_for_panic(void)
12314 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12315 /*
12316 * Make sure only one CPU runs in machine check panic
12317 */
12318 - if (atomic_inc_return(&mce_paniced) > 1)
12319 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12320 wait_for_panic();
12321 barrier();
12322
12323 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12324 console_verbose();
12325 } else {
12326 /* Don't log too much for fake panic */
12327 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12328 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12329 return;
12330 }
12331 print_mce_head();
12332 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12333 * might have been modified by someone else.
12334 */
12335 rmb();
12336 - if (atomic_read(&mce_paniced))
12337 + if (atomic_read_unchecked(&mce_paniced))
12338 wait_for_panic();
12339 if (!monarch_timeout)
12340 goto out;
12341 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12342 */
12343
12344 static DEFINE_SPINLOCK(mce_state_lock);
12345 -static int open_count; /* #times opened */
12346 +static local_t open_count; /* #times opened */
12347 static int open_exclu; /* already open exclusive? */
12348
12349 static int mce_open(struct inode *inode, struct file *file)
12350 {
12351 spin_lock(&mce_state_lock);
12352
12353 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12354 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12355 spin_unlock(&mce_state_lock);
12356
12357 return -EBUSY;
12358 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12359
12360 if (file->f_flags & O_EXCL)
12361 open_exclu = 1;
12362 - open_count++;
12363 + local_inc(&open_count);
12364
12365 spin_unlock(&mce_state_lock);
12366
12367 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12368 {
12369 spin_lock(&mce_state_lock);
12370
12371 - open_count--;
12372 + local_dec(&open_count);
12373 open_exclu = 0;
12374
12375 spin_unlock(&mce_state_lock);
12376 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12377 static void mce_reset(void)
12378 {
12379 cpu_missing = 0;
12380 - atomic_set(&mce_fake_paniced, 0);
12381 + atomic_set_unchecked(&mce_fake_paniced, 0);
12382 atomic_set(&mce_executing, 0);
12383 atomic_set(&mce_callin, 0);
12384 atomic_set(&global_nwo, 0);
12385 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12386 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12387 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12388 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12389 static int inject_init(void)
12390 {
12391 printk(KERN_INFO "Machine check injector initialized\n");
12392 - mce_chrdev_ops.write = mce_write;
12393 + pax_open_kernel();
12394 + *(void **)&mce_chrdev_ops.write = mce_write;
12395 + pax_close_kernel();
12396 register_die_notifier(&mce_raise_nb);
12397 return 0;
12398 }
12399 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12400 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12401 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12402 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12403 return 0;
12404 }
12405
12406 -static struct mtrr_ops amd_mtrr_ops = {
12407 +static const struct mtrr_ops amd_mtrr_ops = {
12408 .vendor = X86_VENDOR_AMD,
12409 .set = amd_set_mtrr,
12410 .get = amd_get_mtrr,
12411 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12412 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12413 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12414 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12415 return 0;
12416 }
12417
12418 -static struct mtrr_ops centaur_mtrr_ops = {
12419 +static const struct mtrr_ops centaur_mtrr_ops = {
12420 .vendor = X86_VENDOR_CENTAUR,
12421 .set = centaur_set_mcr,
12422 .get = centaur_get_mcr,
12423 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12424 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12425 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12426 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12427 post_set();
12428 }
12429
12430 -static struct mtrr_ops cyrix_mtrr_ops = {
12431 +static const struct mtrr_ops cyrix_mtrr_ops = {
12432 .vendor = X86_VENDOR_CYRIX,
12433 .set_all = cyrix_set_all,
12434 .set = cyrix_set_arr,
12435 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12436 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12437 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12438 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12439 /*
12440 * Generic structure...
12441 */
12442 -struct mtrr_ops generic_mtrr_ops = {
12443 +const struct mtrr_ops generic_mtrr_ops = {
12444 .use_intel_if = 1,
12445 .set_all = generic_set_all,
12446 .get = generic_get_mtrr,
12447 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12448 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12449 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12450 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12451 u64 size_or_mask, size_and_mask;
12452 static bool mtrr_aps_delayed_init;
12453
12454 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12455 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12456
12457 -struct mtrr_ops *mtrr_if;
12458 +const struct mtrr_ops *mtrr_if;
12459
12460 static void set_mtrr(unsigned int reg, unsigned long base,
12461 unsigned long size, mtrr_type type);
12462
12463 -void set_mtrr_ops(struct mtrr_ops *ops)
12464 +void set_mtrr_ops(const struct mtrr_ops *ops)
12465 {
12466 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12467 mtrr_ops[ops->vendor] = ops;
12468 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12469 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12470 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12471 @@ -12,19 +12,19 @@
12472 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12473
12474 struct mtrr_ops {
12475 - u32 vendor;
12476 - u32 use_intel_if;
12477 - void (*set)(unsigned int reg, unsigned long base,
12478 + const u32 vendor;
12479 + const u32 use_intel_if;
12480 + void (* const set)(unsigned int reg, unsigned long base,
12481 unsigned long size, mtrr_type type);
12482 - void (*set_all)(void);
12483 + void (* const set_all)(void);
12484
12485 - void (*get)(unsigned int reg, unsigned long *base,
12486 + void (* const get)(unsigned int reg, unsigned long *base,
12487 unsigned long *size, mtrr_type *type);
12488 - int (*get_free_region)(unsigned long base, unsigned long size,
12489 + int (* const get_free_region)(unsigned long base, unsigned long size,
12490 int replace_reg);
12491 - int (*validate_add_page)(unsigned long base, unsigned long size,
12492 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12493 unsigned int type);
12494 - int (*have_wrcomb)(void);
12495 + int (* const have_wrcomb)(void);
12496 };
12497
12498 extern int generic_get_free_region(unsigned long base, unsigned long size,
12499 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12500 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12501 unsigned int type);
12502
12503 -extern struct mtrr_ops generic_mtrr_ops;
12504 +extern const struct mtrr_ops generic_mtrr_ops;
12505
12506 extern int positive_have_wrcomb(void);
12507
12508 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12509 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12510 void get_mtrr_state(void);
12511
12512 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12513 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12514
12515 extern u64 size_or_mask, size_and_mask;
12516 -extern struct mtrr_ops *mtrr_if;
12517 +extern const struct mtrr_ops *mtrr_if;
12518
12519 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12520 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12521 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12522 --- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12523 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12524 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12525
12526 /* Interface defining a CPU specific perfctr watchdog */
12527 struct wd_ops {
12528 - int (*reserve)(void);
12529 - void (*unreserve)(void);
12530 - int (*setup)(unsigned nmi_hz);
12531 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12532 - void (*stop)(void);
12533 + int (* const reserve)(void);
12534 + void (* const unreserve)(void);
12535 + int (* const setup)(unsigned nmi_hz);
12536 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12537 + void (* const stop)(void);
12538 unsigned perfctr;
12539 unsigned evntsel;
12540 u64 checkbit;
12541 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12542 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12543 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12544
12545 +/* cannot be const */
12546 static struct wd_ops intel_arch_wd_ops;
12547
12548 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12549 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12550 return 1;
12551 }
12552
12553 +/* cannot be const */
12554 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12555 .reserve = single_msr_reserve,
12556 .unreserve = single_msr_unreserve,
12557 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12558 --- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12559 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12560 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12561 * count to the generic event atomically:
12562 */
12563 again:
12564 - prev_raw_count = atomic64_read(&hwc->prev_count);
12565 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12566 rdmsrl(hwc->event_base + idx, new_raw_count);
12567
12568 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12569 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12570 new_raw_count) != prev_raw_count)
12571 goto again;
12572
12573 @@ -741,7 +741,7 @@ again:
12574 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12575 delta >>= shift;
12576
12577 - atomic64_add(delta, &event->count);
12578 + atomic64_add_unchecked(delta, &event->count);
12579 atomic64_sub(delta, &hwc->period_left);
12580
12581 return new_raw_count;
12582 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12583 * The hw event starts counting from this event offset,
12584 * mark it to be able to extra future deltas:
12585 */
12586 - atomic64_set(&hwc->prev_count, (u64)-left);
12587 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12588
12589 err = checking_wrmsrl(hwc->event_base + idx,
12590 (u64)(-left) & x86_pmu.event_mask);
12591 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12592 break;
12593
12594 callchain_store(entry, frame.return_address);
12595 - fp = frame.next_frame;
12596 + fp = (__force const void __user *)frame.next_frame;
12597 }
12598 }
12599
12600 diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12601 --- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12602 +++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12603 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12604 regs = args->regs;
12605
12606 #ifdef CONFIG_X86_32
12607 - if (!user_mode_vm(regs)) {
12608 + if (!user_mode(regs)) {
12609 crash_fixup_ss_esp(&fixed_regs, regs);
12610 regs = &fixed_regs;
12611 }
12612 diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12613 --- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12614 +++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12615 @@ -11,7 +11,7 @@
12616
12617 #define DOUBLEFAULT_STACKSIZE (1024)
12618 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12619 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12620 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12621
12622 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12623
12624 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12625 unsigned long gdt, tss;
12626
12627 store_gdt(&gdt_desc);
12628 - gdt = gdt_desc.address;
12629 + gdt = (unsigned long)gdt_desc.address;
12630
12631 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12632
12633 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12634 /* 0x2 bit is always set */
12635 .flags = X86_EFLAGS_SF | 0x2,
12636 .sp = STACK_START,
12637 - .es = __USER_DS,
12638 + .es = __KERNEL_DS,
12639 .cs = __KERNEL_CS,
12640 .ss = __KERNEL_DS,
12641 - .ds = __USER_DS,
12642 + .ds = __KERNEL_DS,
12643 .fs = __KERNEL_PERCPU,
12644
12645 .__cr3 = __pa_nodebug(swapper_pg_dir),
12646 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12647 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12648 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12649 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12650 #endif
12651
12652 for (;;) {
12653 - struct thread_info *context;
12654 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12655 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12656
12657 - context = (struct thread_info *)
12658 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12659 - bp = print_context_stack(context, stack, bp, ops,
12660 - data, NULL, &graph);
12661 -
12662 - stack = (unsigned long *)context->previous_esp;
12663 - if (!stack)
12664 + if (stack_start == task_stack_page(task))
12665 break;
12666 + stack = *(unsigned long **)stack_start;
12667 if (ops->stack(data, "IRQ") < 0)
12668 break;
12669 touch_nmi_watchdog();
12670 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12671 * When in-kernel, we also print out the stack and code at the
12672 * time of the fault..
12673 */
12674 - if (!user_mode_vm(regs)) {
12675 + if (!user_mode(regs)) {
12676 unsigned int code_prologue = code_bytes * 43 / 64;
12677 unsigned int code_len = code_bytes;
12678 unsigned char c;
12679 u8 *ip;
12680 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12681
12682 printk(KERN_EMERG "Stack:\n");
12683 show_stack_log_lvl(NULL, regs, &regs->sp,
12684 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12685
12686 printk(KERN_EMERG "Code: ");
12687
12688 - ip = (u8 *)regs->ip - code_prologue;
12689 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12690 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12691 /* try starting at IP */
12692 - ip = (u8 *)regs->ip;
12693 + ip = (u8 *)regs->ip + cs_base;
12694 code_len = code_len - code_prologue + 1;
12695 }
12696 for (i = 0; i < code_len; i++, ip++) {
12697 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12698 printk(" Bad EIP value.");
12699 break;
12700 }
12701 - if (ip == (u8 *)regs->ip)
12702 + if (ip == (u8 *)regs->ip + cs_base)
12703 printk("<%02x> ", c);
12704 else
12705 printk("%02x ", c);
12706 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12707 {
12708 unsigned short ud2;
12709
12710 + ip = ktla_ktva(ip);
12711 if (ip < PAGE_OFFSET)
12712 return 0;
12713 if (probe_kernel_address((unsigned short *)ip, ud2))
12714 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12715 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12716 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12717 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12718 unsigned long *irq_stack_end =
12719 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12720 unsigned used = 0;
12721 - struct thread_info *tinfo;
12722 int graph = 0;
12723 + void *stack_start;
12724
12725 if (!task)
12726 task = current;
12727 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12728 * current stack address. If the stacks consist of nested
12729 * exceptions
12730 */
12731 - tinfo = task_thread_info(task);
12732 for (;;) {
12733 char *id;
12734 unsigned long *estack_end;
12735 +
12736 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12737 &used, &id);
12738
12739 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12740 if (ops->stack(data, id) < 0)
12741 break;
12742
12743 - bp = print_context_stack(tinfo, stack, bp, ops,
12744 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12745 data, estack_end, &graph);
12746 ops->stack(data, "<EOE>");
12747 /*
12748 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12749 if (stack >= irq_stack && stack < irq_stack_end) {
12750 if (ops->stack(data, "IRQ") < 0)
12751 break;
12752 - bp = print_context_stack(tinfo, stack, bp,
12753 + bp = print_context_stack(task, irq_stack, stack, bp,
12754 ops, data, irq_stack_end, &graph);
12755 /*
12756 * We link to the next stack (which would be
12757 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12758 /*
12759 * This handles the process stack:
12760 */
12761 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12762 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12763 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12764 put_cpu();
12765 }
12766 EXPORT_SYMBOL(dump_trace);
12767 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12768 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12769 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12770 @@ -2,6 +2,9 @@
12771 * Copyright (C) 1991, 1992 Linus Torvalds
12772 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12773 */
12774 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12775 +#define __INCLUDED_BY_HIDESYM 1
12776 +#endif
12777 #include <linux/kallsyms.h>
12778 #include <linux/kprobes.h>
12779 #include <linux/uaccess.h>
12780 @@ -28,7 +31,7 @@ static int die_counter;
12781
12782 void printk_address(unsigned long address, int reliable)
12783 {
12784 - printk(" [<%p>] %s%pS\n", (void *) address,
12785 + printk(" [<%p>] %s%pA\n", (void *) address,
12786 reliable ? "" : "? ", (void *) address);
12787 }
12788
12789 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12790 static void
12791 print_ftrace_graph_addr(unsigned long addr, void *data,
12792 const struct stacktrace_ops *ops,
12793 - struct thread_info *tinfo, int *graph)
12794 + struct task_struct *task, int *graph)
12795 {
12796 - struct task_struct *task = tinfo->task;
12797 unsigned long ret_addr;
12798 int index = task->curr_ret_stack;
12799
12800 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12801 static inline void
12802 print_ftrace_graph_addr(unsigned long addr, void *data,
12803 const struct stacktrace_ops *ops,
12804 - struct thread_info *tinfo, int *graph)
12805 + struct task_struct *task, int *graph)
12806 { }
12807 #endif
12808
12809 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12810 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12811 */
12812
12813 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12814 - void *p, unsigned int size, void *end)
12815 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12816 {
12817 - void *t = tinfo;
12818 if (end) {
12819 if (p < end && p >= (end-THREAD_SIZE))
12820 return 1;
12821 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12822 }
12823
12824 unsigned long
12825 -print_context_stack(struct thread_info *tinfo,
12826 +print_context_stack(struct task_struct *task, void *stack_start,
12827 unsigned long *stack, unsigned long bp,
12828 const struct stacktrace_ops *ops, void *data,
12829 unsigned long *end, int *graph)
12830 {
12831 struct stack_frame *frame = (struct stack_frame *)bp;
12832
12833 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12834 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12835 unsigned long addr;
12836
12837 addr = *stack;
12838 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12839 } else {
12840 ops->address(data, addr, 0);
12841 }
12842 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12843 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12844 }
12845 stack++;
12846 }
12847 @@ -180,7 +180,7 @@ void dump_stack(void)
12848 #endif
12849
12850 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12851 - current->pid, current->comm, print_tainted(),
12852 + task_pid_nr(current), current->comm, print_tainted(),
12853 init_utsname()->release,
12854 (int)strcspn(init_utsname()->version, " "),
12855 init_utsname()->version);
12856 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12857 return flags;
12858 }
12859
12860 +extern void gr_handle_kernel_exploit(void);
12861 +
12862 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12863 {
12864 if (regs && kexec_should_crash(current))
12865 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12866 panic("Fatal exception in interrupt");
12867 if (panic_on_oops)
12868 panic("Fatal exception");
12869 - do_exit(signr);
12870 +
12871 + gr_handle_kernel_exploit();
12872 +
12873 + do_group_exit(signr);
12874 }
12875
12876 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12877 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12878 unsigned long flags = oops_begin();
12879 int sig = SIGSEGV;
12880
12881 - if (!user_mode_vm(regs))
12882 + if (!user_mode(regs))
12883 report_bug(regs->ip, regs);
12884
12885 if (__die(str, regs, err))
12886 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
12887 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12888 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12889 @@ -15,7 +15,7 @@
12890 #endif
12891
12892 extern unsigned long
12893 -print_context_stack(struct thread_info *tinfo,
12894 +print_context_stack(struct task_struct *task, void *stack_start,
12895 unsigned long *stack, unsigned long bp,
12896 const struct stacktrace_ops *ops, void *data,
12897 unsigned long *end, int *graph);
12898 diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
12899 --- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12900 +++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12901 @@ -733,7 +733,7 @@ struct early_res {
12902 };
12903 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12904 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12905 - {}
12906 + { 0, 0, {0}, 0 }
12907 };
12908
12909 static int __init find_overlapped_early(u64 start, u64 end)
12910 diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
12911 --- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12912 +++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12913 @@ -7,6 +7,7 @@
12914 #include <linux/pci_regs.h>
12915 #include <linux/pci_ids.h>
12916 #include <linux/errno.h>
12917 +#include <linux/sched.h>
12918 #include <asm/io.h>
12919 #include <asm/processor.h>
12920 #include <asm/fcntl.h>
12921 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12922 int n;
12923 va_list ap;
12924
12925 + pax_track_stack();
12926 +
12927 va_start(ap, fmt);
12928 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12929 early_console->write(early_console, buf, n);
12930 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
12931 --- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12932 +++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12933 @@ -38,70 +38,38 @@
12934 */
12935
12936 static unsigned long efi_rt_eflags;
12937 -static pgd_t efi_bak_pg_dir_pointer[2];
12938 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12939
12940 -void efi_call_phys_prelog(void)
12941 +void __init efi_call_phys_prelog(void)
12942 {
12943 - unsigned long cr4;
12944 - unsigned long temp;
12945 struct desc_ptr gdt_descr;
12946
12947 local_irq_save(efi_rt_eflags);
12948
12949 - /*
12950 - * If I don't have PAE, I should just duplicate two entries in page
12951 - * directory. If I have PAE, I just need to duplicate one entry in
12952 - * page directory.
12953 - */
12954 - cr4 = read_cr4_safe();
12955
12956 - if (cr4 & X86_CR4_PAE) {
12957 - efi_bak_pg_dir_pointer[0].pgd =
12958 - swapper_pg_dir[pgd_index(0)].pgd;
12959 - swapper_pg_dir[0].pgd =
12960 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12961 - } else {
12962 - efi_bak_pg_dir_pointer[0].pgd =
12963 - swapper_pg_dir[pgd_index(0)].pgd;
12964 - efi_bak_pg_dir_pointer[1].pgd =
12965 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12966 - swapper_pg_dir[pgd_index(0)].pgd =
12967 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12968 - temp = PAGE_OFFSET + 0x400000;
12969 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12970 - swapper_pg_dir[pgd_index(temp)].pgd;
12971 - }
12972 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12973 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12974 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12975
12976 /*
12977 * After the lock is released, the original page table is restored.
12978 */
12979 __flush_tlb_all();
12980
12981 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12982 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12983 gdt_descr.size = GDT_SIZE - 1;
12984 load_gdt(&gdt_descr);
12985 }
12986
12987 -void efi_call_phys_epilog(void)
12988 +void __init efi_call_phys_epilog(void)
12989 {
12990 - unsigned long cr4;
12991 struct desc_ptr gdt_descr;
12992
12993 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12994 + gdt_descr.address = get_cpu_gdt_table(0);
12995 gdt_descr.size = GDT_SIZE - 1;
12996 load_gdt(&gdt_descr);
12997
12998 - cr4 = read_cr4_safe();
12999 -
13000 - if (cr4 & X86_CR4_PAE) {
13001 - swapper_pg_dir[pgd_index(0)].pgd =
13002 - efi_bak_pg_dir_pointer[0].pgd;
13003 - } else {
13004 - swapper_pg_dir[pgd_index(0)].pgd =
13005 - efi_bak_pg_dir_pointer[0].pgd;
13006 - swapper_pg_dir[pgd_index(0x400000)].pgd =
13007 - efi_bak_pg_dir_pointer[1].pgd;
13008 - }
13009 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13010
13011 /*
13012 * After the lock is released, the original page table is restored.
13013 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13014 --- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13015 +++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13016 @@ -6,6 +6,7 @@
13017 */
13018
13019 #include <linux/linkage.h>
13020 +#include <linux/init.h>
13021 #include <asm/page_types.h>
13022
13023 /*
13024 @@ -20,7 +21,7 @@
13025 * service functions will comply with gcc calling convention, too.
13026 */
13027
13028 -.text
13029 +__INIT
13030 ENTRY(efi_call_phys)
13031 /*
13032 * 0. The function can only be called in Linux kernel. So CS has been
13033 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13034 * The mapping of lower virtual memory has been created in prelog and
13035 * epilog.
13036 */
13037 - movl $1f, %edx
13038 - subl $__PAGE_OFFSET, %edx
13039 - jmp *%edx
13040 + jmp 1f-__PAGE_OFFSET
13041 1:
13042
13043 /*
13044 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13045 * parameter 2, ..., param n. To make things easy, we save the return
13046 * address of efi_call_phys in a global variable.
13047 */
13048 - popl %edx
13049 - movl %edx, saved_return_addr
13050 - /* get the function pointer into ECX*/
13051 - popl %ecx
13052 - movl %ecx, efi_rt_function_ptr
13053 - movl $2f, %edx
13054 - subl $__PAGE_OFFSET, %edx
13055 - pushl %edx
13056 + popl (saved_return_addr)
13057 + popl (efi_rt_function_ptr)
13058
13059 /*
13060 * 3. Clear PG bit in %CR0.
13061 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13062 /*
13063 * 5. Call the physical function.
13064 */
13065 - jmp *%ecx
13066 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
13067
13068 -2:
13069 /*
13070 * 6. After EFI runtime service returns, control will return to
13071 * following instruction. We'd better readjust stack pointer first.
13072 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13073 movl %cr0, %edx
13074 orl $0x80000000, %edx
13075 movl %edx, %cr0
13076 - jmp 1f
13077 -1:
13078 +
13079 /*
13080 * 8. Now restore the virtual mode from flat mode by
13081 * adding EIP with PAGE_OFFSET.
13082 */
13083 - movl $1f, %edx
13084 - jmp *%edx
13085 + jmp 1f+__PAGE_OFFSET
13086 1:
13087
13088 /*
13089 * 9. Balance the stack. And because EAX contain the return value,
13090 * we'd better not clobber it.
13091 */
13092 - leal efi_rt_function_ptr, %edx
13093 - movl (%edx), %ecx
13094 - pushl %ecx
13095 + pushl (efi_rt_function_ptr)
13096
13097 /*
13098 - * 10. Push the saved return address onto the stack and return.
13099 + * 10. Return to the saved return address.
13100 */
13101 - leal saved_return_addr, %edx
13102 - movl (%edx), %ecx
13103 - pushl %ecx
13104 - ret
13105 + jmpl *(saved_return_addr)
13106 ENDPROC(efi_call_phys)
13107 .previous
13108
13109 -.data
13110 +__INITDATA
13111 saved_return_addr:
13112 .long 0
13113 efi_rt_function_ptr:
13114 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13115 --- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13116 +++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13117 @@ -185,13 +185,146 @@
13118 /*CFI_REL_OFFSET gs, PT_GS*/
13119 .endm
13120 .macro SET_KERNEL_GS reg
13121 +
13122 +#ifdef CONFIG_CC_STACKPROTECTOR
13123 movl $(__KERNEL_STACK_CANARY), \reg
13124 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13125 + movl $(__USER_DS), \reg
13126 +#else
13127 + xorl \reg, \reg
13128 +#endif
13129 +
13130 movl \reg, %gs
13131 .endm
13132
13133 #endif /* CONFIG_X86_32_LAZY_GS */
13134
13135 -.macro SAVE_ALL
13136 +.macro pax_enter_kernel
13137 +#ifdef CONFIG_PAX_KERNEXEC
13138 + call pax_enter_kernel
13139 +#endif
13140 +.endm
13141 +
13142 +.macro pax_exit_kernel
13143 +#ifdef CONFIG_PAX_KERNEXEC
13144 + call pax_exit_kernel
13145 +#endif
13146 +.endm
13147 +
13148 +#ifdef CONFIG_PAX_KERNEXEC
13149 +ENTRY(pax_enter_kernel)
13150 +#ifdef CONFIG_PARAVIRT
13151 + pushl %eax
13152 + pushl %ecx
13153 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13154 + mov %eax, %esi
13155 +#else
13156 + mov %cr0, %esi
13157 +#endif
13158 + bts $16, %esi
13159 + jnc 1f
13160 + mov %cs, %esi
13161 + cmp $__KERNEL_CS, %esi
13162 + jz 3f
13163 + ljmp $__KERNEL_CS, $3f
13164 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13165 +2:
13166 +#ifdef CONFIG_PARAVIRT
13167 + mov %esi, %eax
13168 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13169 +#else
13170 + mov %esi, %cr0
13171 +#endif
13172 +3:
13173 +#ifdef CONFIG_PARAVIRT
13174 + popl %ecx
13175 + popl %eax
13176 +#endif
13177 + ret
13178 +ENDPROC(pax_enter_kernel)
13179 +
13180 +ENTRY(pax_exit_kernel)
13181 +#ifdef CONFIG_PARAVIRT
13182 + pushl %eax
13183 + pushl %ecx
13184 +#endif
13185 + mov %cs, %esi
13186 + cmp $__KERNEXEC_KERNEL_CS, %esi
13187 + jnz 2f
13188 +#ifdef CONFIG_PARAVIRT
13189 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13190 + mov %eax, %esi
13191 +#else
13192 + mov %cr0, %esi
13193 +#endif
13194 + btr $16, %esi
13195 + ljmp $__KERNEL_CS, $1f
13196 +1:
13197 +#ifdef CONFIG_PARAVIRT
13198 + mov %esi, %eax
13199 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13200 +#else
13201 + mov %esi, %cr0
13202 +#endif
13203 +2:
13204 +#ifdef CONFIG_PARAVIRT
13205 + popl %ecx
13206 + popl %eax
13207 +#endif
13208 + ret
13209 +ENDPROC(pax_exit_kernel)
13210 +#endif
13211 +
13212 +.macro pax_erase_kstack
13213 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13214 + call pax_erase_kstack
13215 +#endif
13216 +.endm
13217 +
13218 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13219 +/*
13220 + * ebp: thread_info
13221 + * ecx, edx: can be clobbered
13222 + */
13223 +ENTRY(pax_erase_kstack)
13224 + pushl %edi
13225 + pushl %eax
13226 +
13227 + mov TI_lowest_stack(%ebp), %edi
13228 + mov $-0xBEEF, %eax
13229 + std
13230 +
13231 +1: mov %edi, %ecx
13232 + and $THREAD_SIZE_asm - 1, %ecx
13233 + shr $2, %ecx
13234 + repne scasl
13235 + jecxz 2f
13236 +
13237 + cmp $2*16, %ecx
13238 + jc 2f
13239 +
13240 + mov $2*16, %ecx
13241 + repe scasl
13242 + jecxz 2f
13243 + jne 1b
13244 +
13245 +2: cld
13246 + mov %esp, %ecx
13247 + sub %edi, %ecx
13248 + shr $2, %ecx
13249 + rep stosl
13250 +
13251 + mov TI_task_thread_sp0(%ebp), %edi
13252 + sub $128, %edi
13253 + mov %edi, TI_lowest_stack(%ebp)
13254 +
13255 + popl %eax
13256 + popl %edi
13257 + ret
13258 +ENDPROC(pax_erase_kstack)
13259 +#endif
13260 +
13261 +.macro __SAVE_ALL _DS
13262 cld
13263 PUSH_GS
13264 pushl %fs
13265 @@ -224,7 +357,7 @@
13266 pushl %ebx
13267 CFI_ADJUST_CFA_OFFSET 4
13268 CFI_REL_OFFSET ebx, 0
13269 - movl $(__USER_DS), %edx
13270 + movl $\_DS, %edx
13271 movl %edx, %ds
13272 movl %edx, %es
13273 movl $(__KERNEL_PERCPU), %edx
13274 @@ -232,6 +365,15 @@
13275 SET_KERNEL_GS %edx
13276 .endm
13277
13278 +.macro SAVE_ALL
13279 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13280 + __SAVE_ALL __KERNEL_DS
13281 + pax_enter_kernel
13282 +#else
13283 + __SAVE_ALL __USER_DS
13284 +#endif
13285 +.endm
13286 +
13287 .macro RESTORE_INT_REGS
13288 popl %ebx
13289 CFI_ADJUST_CFA_OFFSET -4
13290 @@ -352,7 +494,15 @@ check_userspace:
13291 movb PT_CS(%esp), %al
13292 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13293 cmpl $USER_RPL, %eax
13294 +
13295 +#ifdef CONFIG_PAX_KERNEXEC
13296 + jae resume_userspace
13297 +
13298 + PAX_EXIT_KERNEL
13299 + jmp resume_kernel
13300 +#else
13301 jb resume_kernel # not returning to v8086 or userspace
13302 +#endif
13303
13304 ENTRY(resume_userspace)
13305 LOCKDEP_SYS_EXIT
13306 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13307 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13308 # int/exception return?
13309 jne work_pending
13310 - jmp restore_all
13311 + jmp restore_all_pax
13312 END(ret_from_exception)
13313
13314 #ifdef CONFIG_PREEMPT
13315 @@ -414,25 +564,36 @@ sysenter_past_esp:
13316 /*CFI_REL_OFFSET cs, 0*/
13317 /*
13318 * Push current_thread_info()->sysenter_return to the stack.
13319 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13320 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13321 */
13322 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13323 + pushl $0
13324 CFI_ADJUST_CFA_OFFSET 4
13325 CFI_REL_OFFSET eip, 0
13326
13327 pushl %eax
13328 CFI_ADJUST_CFA_OFFSET 4
13329 SAVE_ALL
13330 + GET_THREAD_INFO(%ebp)
13331 + movl TI_sysenter_return(%ebp),%ebp
13332 + movl %ebp,PT_EIP(%esp)
13333 ENABLE_INTERRUPTS(CLBR_NONE)
13334
13335 /*
13336 * Load the potential sixth argument from user stack.
13337 * Careful about security.
13338 */
13339 + movl PT_OLDESP(%esp),%ebp
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 + mov PT_OLDSS(%esp),%ds
13343 +1: movl %ds:(%ebp),%ebp
13344 + push %ss
13345 + pop %ds
13346 +#else
13347 cmpl $__PAGE_OFFSET-3,%ebp
13348 jae syscall_fault
13349 1: movl (%ebp),%ebp
13350 +#endif
13351 +
13352 movl %ebp,PT_EBP(%esp)
13353 .section __ex_table,"a"
13354 .align 4
13355 @@ -455,12 +616,23 @@ sysenter_do_call:
13356 testl $_TIF_ALLWORK_MASK, %ecx
13357 jne sysexit_audit
13358 sysenter_exit:
13359 +
13360 +#ifdef CONFIG_PAX_RANDKSTACK
13361 + pushl_cfi %eax
13362 + call pax_randomize_kstack
13363 + popl_cfi %eax
13364 +#endif
13365 +
13366 + pax_erase_kstack
13367 +
13368 /* if something modifies registers it must also disable sysexit */
13369 movl PT_EIP(%esp), %edx
13370 movl PT_OLDESP(%esp), %ecx
13371 xorl %ebp,%ebp
13372 TRACE_IRQS_ON
13373 1: mov PT_FS(%esp), %fs
13374 +2: mov PT_DS(%esp), %ds
13375 +3: mov PT_ES(%esp), %es
13376 PTGS_TO_GS
13377 ENABLE_INTERRUPTS_SYSEXIT
13378
13379 @@ -477,6 +649,9 @@ sysenter_audit:
13380 movl %eax,%edx /* 2nd arg: syscall number */
13381 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13382 call audit_syscall_entry
13383 +
13384 + pax_erase_kstack
13385 +
13386 pushl %ebx
13387 CFI_ADJUST_CFA_OFFSET 4
13388 movl PT_EAX(%esp),%eax /* reload syscall number */
13389 @@ -504,11 +679,17 @@ sysexit_audit:
13390
13391 CFI_ENDPROC
13392 .pushsection .fixup,"ax"
13393 -2: movl $0,PT_FS(%esp)
13394 +4: movl $0,PT_FS(%esp)
13395 + jmp 1b
13396 +5: movl $0,PT_DS(%esp)
13397 + jmp 1b
13398 +6: movl $0,PT_ES(%esp)
13399 jmp 1b
13400 .section __ex_table,"a"
13401 .align 4
13402 - .long 1b,2b
13403 + .long 1b,4b
13404 + .long 2b,5b
13405 + .long 3b,6b
13406 .popsection
13407 PTGS_TO_GS_EX
13408 ENDPROC(ia32_sysenter_target)
13409 @@ -538,6 +719,14 @@ syscall_exit:
13410 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13411 jne syscall_exit_work
13412
13413 +restore_all_pax:
13414 +
13415 +#ifdef CONFIG_PAX_RANDKSTACK
13416 + call pax_randomize_kstack
13417 +#endif
13418 +
13419 + pax_erase_kstack
13420 +
13421 restore_all:
13422 TRACE_IRQS_IRET
13423 restore_all_notrace:
13424 @@ -602,7 +791,13 @@ ldt_ss:
13425 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13426 mov %dx, %ax /* eax: new kernel esp */
13427 sub %eax, %edx /* offset (low word is 0) */
13428 - PER_CPU(gdt_page, %ebx)
13429 +#ifdef CONFIG_SMP
13430 + movl PER_CPU_VAR(cpu_number), %ebx
13431 + shll $PAGE_SHIFT_asm, %ebx
13432 + addl $cpu_gdt_table, %ebx
13433 +#else
13434 + movl $cpu_gdt_table, %ebx
13435 +#endif
13436 shr $16, %edx
13437 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13438 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13439 @@ -636,31 +831,25 @@ work_resched:
13440 movl TI_flags(%ebp), %ecx
13441 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13442 # than syscall tracing?
13443 - jz restore_all
13444 + jz restore_all_pax
13445 testb $_TIF_NEED_RESCHED, %cl
13446 jnz work_resched
13447
13448 work_notifysig: # deal with pending signals and
13449 # notify-resume requests
13450 + movl %esp, %eax
13451 #ifdef CONFIG_VM86
13452 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13453 - movl %esp, %eax
13454 - jne work_notifysig_v86 # returning to kernel-space or
13455 + jz 1f # returning to kernel-space or
13456 # vm86-space
13457 - xorl %edx, %edx
13458 - call do_notify_resume
13459 - jmp resume_userspace_sig
13460
13461 - ALIGN
13462 -work_notifysig_v86:
13463 pushl %ecx # save ti_flags for do_notify_resume
13464 CFI_ADJUST_CFA_OFFSET 4
13465 call save_v86_state # %eax contains pt_regs pointer
13466 popl %ecx
13467 CFI_ADJUST_CFA_OFFSET -4
13468 movl %eax, %esp
13469 -#else
13470 - movl %esp, %eax
13471 +1:
13472 #endif
13473 xorl %edx, %edx
13474 call do_notify_resume
13475 @@ -673,6 +862,9 @@ syscall_trace_entry:
13476 movl $-ENOSYS,PT_EAX(%esp)
13477 movl %esp, %eax
13478 call syscall_trace_enter
13479 +
13480 + pax_erase_kstack
13481 +
13482 /* What it returned is what we'll actually use. */
13483 cmpl $(nr_syscalls), %eax
13484 jnae syscall_call
13485 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13486
13487 RING0_INT_FRAME # can't unwind into user space anyway
13488 syscall_fault:
13489 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13490 + push %ss
13491 + pop %ds
13492 +#endif
13493 GET_THREAD_INFO(%ebp)
13494 movl $-EFAULT,PT_EAX(%esp)
13495 jmp resume_userspace
13496 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13497 PTREGSCALL(vm86)
13498 PTREGSCALL(vm86old)
13499
13500 + ALIGN;
13501 +ENTRY(kernel_execve)
13502 + push %ebp
13503 + sub $PT_OLDSS+4,%esp
13504 + push %edi
13505 + push %ecx
13506 + push %eax
13507 + lea 3*4(%esp),%edi
13508 + mov $PT_OLDSS/4+1,%ecx
13509 + xorl %eax,%eax
13510 + rep stosl
13511 + pop %eax
13512 + pop %ecx
13513 + pop %edi
13514 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13515 + mov %eax,PT_EBX(%esp)
13516 + mov %edx,PT_ECX(%esp)
13517 + mov %ecx,PT_EDX(%esp)
13518 + mov %esp,%eax
13519 + call sys_execve
13520 + GET_THREAD_INFO(%ebp)
13521 + test %eax,%eax
13522 + jz syscall_exit
13523 + add $PT_OLDSS+4,%esp
13524 + pop %ebp
13525 + ret
13526 +
13527 .macro FIXUP_ESPFIX_STACK
13528 /*
13529 * Switch back for ESPFIX stack to the normal zerobased stack
13530 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13531 * normal stack and adjusts ESP with the matching offset.
13532 */
13533 /* fixup the stack */
13534 - PER_CPU(gdt_page, %ebx)
13535 +#ifdef CONFIG_SMP
13536 + movl PER_CPU_VAR(cpu_number), %ebx
13537 + shll $PAGE_SHIFT_asm, %ebx
13538 + addl $cpu_gdt_table, %ebx
13539 +#else
13540 + movl $cpu_gdt_table, %ebx
13541 +#endif
13542 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13543 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13544 shl $16, %eax
13545 @@ -1198,7 +1427,6 @@ return_to_handler:
13546 ret
13547 #endif
13548
13549 -.section .rodata,"a"
13550 #include "syscall_table_32.S"
13551
13552 syscall_table_size=(.-sys_call_table)
13553 @@ -1255,9 +1483,12 @@ error_code:
13554 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13555 REG_TO_PTGS %ecx
13556 SET_KERNEL_GS %ecx
13557 - movl $(__USER_DS), %ecx
13558 + movl $(__KERNEL_DS), %ecx
13559 movl %ecx, %ds
13560 movl %ecx, %es
13561 +
13562 + pax_enter_kernel
13563 +
13564 TRACE_IRQS_OFF
13565 movl %esp,%eax # pt_regs pointer
13566 call *%edi
13567 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13568 xorl %edx,%edx # zero error code
13569 movl %esp,%eax # pt_regs pointer
13570 call do_nmi
13571 +
13572 + pax_exit_kernel
13573 +
13574 jmp restore_all_notrace
13575 CFI_ENDPROC
13576
13577 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13578 FIXUP_ESPFIX_STACK # %eax == %esp
13579 xorl %edx,%edx # zero error code
13580 call do_nmi
13581 +
13582 + pax_exit_kernel
13583 +
13584 RESTORE_REGS
13585 lss 12+4(%esp), %esp # back to espfix stack
13586 CFI_ADJUST_CFA_OFFSET -24
13587 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13588 --- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13589 +++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13590 @@ -53,6 +53,7 @@
13591 #include <asm/paravirt.h>
13592 #include <asm/ftrace.h>
13593 #include <asm/percpu.h>
13594 +#include <asm/pgtable.h>
13595
13596 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13597 #include <linux/elf-em.h>
13598 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13599 ENDPROC(native_usergs_sysret64)
13600 #endif /* CONFIG_PARAVIRT */
13601
13602 + .macro ljmpq sel, off
13603 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13604 + .byte 0x48; ljmp *1234f(%rip)
13605 + .pushsection .rodata
13606 + .align 16
13607 + 1234: .quad \off; .word \sel
13608 + .popsection
13609 +#else
13610 + pushq $\sel
13611 + pushq $\off
13612 + lretq
13613 +#endif
13614 + .endm
13615 +
13616 + .macro pax_enter_kernel
13617 +#ifdef CONFIG_PAX_KERNEXEC
13618 + call pax_enter_kernel
13619 +#endif
13620 + .endm
13621 +
13622 + .macro pax_exit_kernel
13623 +#ifdef CONFIG_PAX_KERNEXEC
13624 + call pax_exit_kernel
13625 +#endif
13626 + .endm
13627 +
13628 +#ifdef CONFIG_PAX_KERNEXEC
13629 +ENTRY(pax_enter_kernel)
13630 + pushq %rdi
13631 +
13632 +#ifdef CONFIG_PARAVIRT
13633 + PV_SAVE_REGS(CLBR_RDI)
13634 +#endif
13635 +
13636 + GET_CR0_INTO_RDI
13637 + bts $16,%rdi
13638 + jnc 1f
13639 + mov %cs,%edi
13640 + cmp $__KERNEL_CS,%edi
13641 + jz 3f
13642 + ljmpq __KERNEL_CS,3f
13643 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13644 +2: SET_RDI_INTO_CR0
13645 +3:
13646 +
13647 +#ifdef CONFIG_PARAVIRT
13648 + PV_RESTORE_REGS(CLBR_RDI)
13649 +#endif
13650 +
13651 + popq %rdi
13652 + retq
13653 +ENDPROC(pax_enter_kernel)
13654 +
13655 +ENTRY(pax_exit_kernel)
13656 + pushq %rdi
13657 +
13658 +#ifdef CONFIG_PARAVIRT
13659 + PV_SAVE_REGS(CLBR_RDI)
13660 +#endif
13661 +
13662 + mov %cs,%rdi
13663 + cmp $__KERNEXEC_KERNEL_CS,%edi
13664 + jnz 2f
13665 + GET_CR0_INTO_RDI
13666 + btr $16,%rdi
13667 + ljmpq __KERNEL_CS,1f
13668 +1: SET_RDI_INTO_CR0
13669 +2:
13670 +
13671 +#ifdef CONFIG_PARAVIRT
13672 + PV_RESTORE_REGS(CLBR_RDI);
13673 +#endif
13674 +
13675 + popq %rdi
13676 + retq
13677 +ENDPROC(pax_exit_kernel)
13678 +#endif
13679 +
13680 + .macro pax_enter_kernel_user
13681 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13682 + call pax_enter_kernel_user
13683 +#endif
13684 + .endm
13685 +
13686 + .macro pax_exit_kernel_user
13687 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13688 + call pax_exit_kernel_user
13689 +#endif
13690 +#ifdef CONFIG_PAX_RANDKSTACK
13691 + push %rax
13692 + call pax_randomize_kstack
13693 + pop %rax
13694 +#endif
13695 + pax_erase_kstack
13696 + .endm
13697 +
13698 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13699 +ENTRY(pax_enter_kernel_user)
13700 + pushq %rdi
13701 + pushq %rbx
13702 +
13703 +#ifdef CONFIG_PARAVIRT
13704 + PV_SAVE_REGS(CLBR_RDI)
13705 +#endif
13706 +
13707 + GET_CR3_INTO_RDI
13708 + mov %rdi,%rbx
13709 + add $__START_KERNEL_map,%rbx
13710 + sub phys_base(%rip),%rbx
13711 +
13712 +#ifdef CONFIG_PARAVIRT
13713 + pushq %rdi
13714 + cmpl $0, pv_info+PARAVIRT_enabled
13715 + jz 1f
13716 + i = 0
13717 + .rept USER_PGD_PTRS
13718 + mov i*8(%rbx),%rsi
13719 + mov $0,%sil
13720 + lea i*8(%rbx),%rdi
13721 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13722 + i = i + 1
13723 + .endr
13724 + jmp 2f
13725 +1:
13726 +#endif
13727 +
13728 + i = 0
13729 + .rept USER_PGD_PTRS
13730 + movb $0,i*8(%rbx)
13731 + i = i + 1
13732 + .endr
13733 +
13734 +#ifdef CONFIG_PARAVIRT
13735 +2: popq %rdi
13736 +#endif
13737 + SET_RDI_INTO_CR3
13738 +
13739 +#ifdef CONFIG_PAX_KERNEXEC
13740 + GET_CR0_INTO_RDI
13741 + bts $16,%rdi
13742 + SET_RDI_INTO_CR0
13743 +#endif
13744 +
13745 +#ifdef CONFIG_PARAVIRT
13746 + PV_RESTORE_REGS(CLBR_RDI)
13747 +#endif
13748 +
13749 + popq %rbx
13750 + popq %rdi
13751 + retq
13752 +ENDPROC(pax_enter_kernel_user)
13753 +
13754 +ENTRY(pax_exit_kernel_user)
13755 + push %rdi
13756 +
13757 +#ifdef CONFIG_PARAVIRT
13758 + pushq %rbx
13759 + PV_SAVE_REGS(CLBR_RDI)
13760 +#endif
13761 +
13762 +#ifdef CONFIG_PAX_KERNEXEC
13763 + GET_CR0_INTO_RDI
13764 + btr $16,%rdi
13765 + SET_RDI_INTO_CR0
13766 +#endif
13767 +
13768 + GET_CR3_INTO_RDI
13769 + add $__START_KERNEL_map,%rdi
13770 + sub phys_base(%rip),%rdi
13771 +
13772 +#ifdef CONFIG_PARAVIRT
13773 + cmpl $0, pv_info+PARAVIRT_enabled
13774 + jz 1f
13775 + mov %rdi,%rbx
13776 + i = 0
13777 + .rept USER_PGD_PTRS
13778 + mov i*8(%rbx),%rsi
13779 + mov $0x67,%sil
13780 + lea i*8(%rbx),%rdi
13781 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13782 + i = i + 1
13783 + .endr
13784 + jmp 2f
13785 +1:
13786 +#endif
13787 +
13788 + i = 0
13789 + .rept USER_PGD_PTRS
13790 + movb $0x67,i*8(%rdi)
13791 + i = i + 1
13792 + .endr
13793 +
13794 +#ifdef CONFIG_PARAVIRT
13795 +2: PV_RESTORE_REGS(CLBR_RDI)
13796 + popq %rbx
13797 +#endif
13798 +
13799 + popq %rdi
13800 + retq
13801 +ENDPROC(pax_exit_kernel_user)
13802 +#endif
13803 +
13804 +.macro pax_erase_kstack
13805 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13806 + call pax_erase_kstack
13807 +#endif
13808 +.endm
13809 +
13810 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13811 +/*
13812 + * r10: thread_info
13813 + * rcx, rdx: can be clobbered
13814 + */
13815 +ENTRY(pax_erase_kstack)
13816 + pushq %rdi
13817 + pushq %rax
13818 +
13819 + GET_THREAD_INFO(%r10)
13820 + mov TI_lowest_stack(%r10), %rdi
13821 + mov $-0xBEEF, %rax
13822 + std
13823 +
13824 +1: mov %edi, %ecx
13825 + and $THREAD_SIZE_asm - 1, %ecx
13826 + shr $3, %ecx
13827 + repne scasq
13828 + jecxz 2f
13829 +
13830 + cmp $2*8, %ecx
13831 + jc 2f
13832 +
13833 + mov $2*8, %ecx
13834 + repe scasq
13835 + jecxz 2f
13836 + jne 1b
13837 +
13838 +2: cld
13839 + mov %esp, %ecx
13840 + sub %edi, %ecx
13841 + shr $3, %ecx
13842 + rep stosq
13843 +
13844 + mov TI_task_thread_sp0(%r10), %rdi
13845 + sub $256, %rdi
13846 + mov %rdi, TI_lowest_stack(%r10)
13847 +
13848 + popq %rax
13849 + popq %rdi
13850 + ret
13851 +ENDPROC(pax_erase_kstack)
13852 +#endif
13853
13854 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13855 #ifdef CONFIG_TRACE_IRQFLAGS
13856 @@ -317,7 +569,7 @@ ENTRY(save_args)
13857 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13858 movq_cfi rbp, 8 /* push %rbp */
13859 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13860 - testl $3, CS(%rdi)
13861 + testb $3, CS(%rdi)
13862 je 1f
13863 SWAPGS
13864 /*
13865 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13866
13867 RESTORE_REST
13868
13869 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13870 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13871 je int_ret_from_sys_call
13872
13873 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13874 @@ -455,7 +707,7 @@ END(ret_from_fork)
13875 ENTRY(system_call)
13876 CFI_STARTPROC simple
13877 CFI_SIGNAL_FRAME
13878 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13879 + CFI_DEF_CFA rsp,0
13880 CFI_REGISTER rip,rcx
13881 /*CFI_REGISTER rflags,r11*/
13882 SWAPGS_UNSAFE_STACK
13883 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13884
13885 movq %rsp,PER_CPU_VAR(old_rsp)
13886 movq PER_CPU_VAR(kernel_stack),%rsp
13887 + pax_enter_kernel_user
13888 /*
13889 * No need to follow this irqs off/on section - it's straight
13890 * and short:
13891 */
13892 ENABLE_INTERRUPTS(CLBR_NONE)
13893 - SAVE_ARGS 8,1
13894 + SAVE_ARGS 8*6,1
13895 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13896 movq %rcx,RIP-ARGOFFSET(%rsp)
13897 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13898 @@ -502,6 +755,7 @@ sysret_check:
13899 andl %edi,%edx
13900 jnz sysret_careful
13901 CFI_REMEMBER_STATE
13902 + pax_exit_kernel_user
13903 /*
13904 * sysretq will re-enable interrupts:
13905 */
13906 @@ -562,6 +816,9 @@ auditsys:
13907 movq %rax,%rsi /* 2nd arg: syscall number */
13908 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13909 call audit_syscall_entry
13910 +
13911 + pax_erase_kstack
13912 +
13913 LOAD_ARGS 0 /* reload call-clobbered registers */
13914 jmp system_call_fastpath
13915
13916 @@ -592,6 +849,9 @@ tracesys:
13917 FIXUP_TOP_OF_STACK %rdi
13918 movq %rsp,%rdi
13919 call syscall_trace_enter
13920 +
13921 + pax_erase_kstack
13922 +
13923 /*
13924 * Reload arg registers from stack in case ptrace changed them.
13925 * We don't reload %rax because syscall_trace_enter() returned
13926 @@ -613,7 +873,7 @@ tracesys:
13927 GLOBAL(int_ret_from_sys_call)
13928 DISABLE_INTERRUPTS(CLBR_NONE)
13929 TRACE_IRQS_OFF
13930 - testl $3,CS-ARGOFFSET(%rsp)
13931 + testb $3,CS-ARGOFFSET(%rsp)
13932 je retint_restore_args
13933 movl $_TIF_ALLWORK_MASK,%edi
13934 /* edi: mask to check */
13935 @@ -800,6 +1060,16 @@ END(interrupt)
13936 CFI_ADJUST_CFA_OFFSET 10*8
13937 call save_args
13938 PARTIAL_FRAME 0
13939 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13940 + testb $3, CS(%rdi)
13941 + jnz 1f
13942 + pax_enter_kernel
13943 + jmp 2f
13944 +1: pax_enter_kernel_user
13945 +2:
13946 +#else
13947 + pax_enter_kernel
13948 +#endif
13949 call \func
13950 .endm
13951
13952 @@ -822,7 +1092,7 @@ ret_from_intr:
13953 CFI_ADJUST_CFA_OFFSET -8
13954 exit_intr:
13955 GET_THREAD_INFO(%rcx)
13956 - testl $3,CS-ARGOFFSET(%rsp)
13957 + testb $3,CS-ARGOFFSET(%rsp)
13958 je retint_kernel
13959
13960 /* Interrupt came from user space */
13961 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13962 * The iretq could re-enable interrupts:
13963 */
13964 DISABLE_INTERRUPTS(CLBR_ANY)
13965 + pax_exit_kernel_user
13966 TRACE_IRQS_IRETQ
13967 SWAPGS
13968 jmp restore_args
13969
13970 retint_restore_args: /* return to kernel space */
13971 DISABLE_INTERRUPTS(CLBR_ANY)
13972 + pax_exit_kernel
13973 /*
13974 * The iretq could re-enable interrupts:
13975 */
13976 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13977 CFI_ADJUST_CFA_OFFSET 15*8
13978 call error_entry
13979 DEFAULT_FRAME 0
13980 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13981 + testb $3, CS(%rsp)
13982 + jnz 1f
13983 + pax_enter_kernel
13984 + jmp 2f
13985 +1: pax_enter_kernel_user
13986 +2:
13987 +#else
13988 + pax_enter_kernel
13989 +#endif
13990 movq %rsp,%rdi /* pt_regs pointer */
13991 xorl %esi,%esi /* no error code */
13992 call \do_sym
13993 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13994 subq $15*8, %rsp
13995 call save_paranoid
13996 TRACE_IRQS_OFF
13997 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13998 + testb $3, CS(%rsp)
13999 + jnz 1f
14000 + pax_enter_kernel
14001 + jmp 2f
14002 +1: pax_enter_kernel_user
14003 +2:
14004 +#else
14005 + pax_enter_kernel
14006 +#endif
14007 movq %rsp,%rdi /* pt_regs pointer */
14008 xorl %esi,%esi /* no error code */
14009 call \do_sym
14010 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
14011 subq $15*8, %rsp
14012 call save_paranoid
14013 TRACE_IRQS_OFF
14014 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14015 + testb $3, CS(%rsp)
14016 + jnz 1f
14017 + pax_enter_kernel
14018 + jmp 2f
14019 +1: pax_enter_kernel_user
14020 +2:
14021 +#else
14022 + pax_enter_kernel
14023 +#endif
14024 movq %rsp,%rdi /* pt_regs pointer */
14025 xorl %esi,%esi /* no error code */
14026 - PER_CPU(init_tss, %rbp)
14027 +#ifdef CONFIG_SMP
14028 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14029 + lea init_tss(%rbp), %rbp
14030 +#else
14031 + lea init_tss(%rip), %rbp
14032 +#endif
14033 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14034 call \do_sym
14035 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14036 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
14037 CFI_ADJUST_CFA_OFFSET 15*8
14038 call error_entry
14039 DEFAULT_FRAME 0
14040 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14041 + testb $3, CS(%rsp)
14042 + jnz 1f
14043 + pax_enter_kernel
14044 + jmp 2f
14045 +1: pax_enter_kernel_user
14046 +2:
14047 +#else
14048 + pax_enter_kernel
14049 +#endif
14050 movq %rsp,%rdi /* pt_regs pointer */
14051 movq ORIG_RAX(%rsp),%rsi /* get error code */
14052 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14053 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
14054 call save_paranoid
14055 DEFAULT_FRAME 0
14056 TRACE_IRQS_OFF
14057 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14058 + testb $3, CS(%rsp)
14059 + jnz 1f
14060 + pax_enter_kernel
14061 + jmp 2f
14062 +1: pax_enter_kernel_user
14063 +2:
14064 +#else
14065 + pax_enter_kernel
14066 +#endif
14067 movq %rsp,%rdi /* pt_regs pointer */
14068 movq ORIG_RAX(%rsp),%rsi /* get error code */
14069 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14070 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14071 TRACE_IRQS_OFF
14072 testl %ebx,%ebx /* swapgs needed? */
14073 jnz paranoid_restore
14074 - testl $3,CS(%rsp)
14075 + testb $3,CS(%rsp)
14076 jnz paranoid_userspace
14077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14078 + pax_exit_kernel
14079 + TRACE_IRQS_IRETQ 0
14080 + SWAPGS_UNSAFE_STACK
14081 + RESTORE_ALL 8
14082 + jmp irq_return
14083 +#endif
14084 paranoid_swapgs:
14085 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14086 + pax_exit_kernel_user
14087 +#else
14088 + pax_exit_kernel
14089 +#endif
14090 TRACE_IRQS_IRETQ 0
14091 SWAPGS_UNSAFE_STACK
14092 RESTORE_ALL 8
14093 jmp irq_return
14094 paranoid_restore:
14095 + pax_exit_kernel
14096 TRACE_IRQS_IRETQ 0
14097 RESTORE_ALL 8
14098 jmp irq_return
14099 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14100 movq_cfi r14, R14+8
14101 movq_cfi r15, R15+8
14102 xorl %ebx,%ebx
14103 - testl $3,CS+8(%rsp)
14104 + testb $3,CS+8(%rsp)
14105 je error_kernelspace
14106 error_swapgs:
14107 SWAPGS
14108 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
14109 CFI_ADJUST_CFA_OFFSET 15*8
14110 call save_paranoid
14111 DEFAULT_FRAME 0
14112 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14113 + testb $3, CS(%rsp)
14114 + jnz 1f
14115 + pax_enter_kernel
14116 + jmp 2f
14117 +1: pax_enter_kernel_user
14118 +2:
14119 +#else
14120 + pax_enter_kernel
14121 +#endif
14122 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14123 movq %rsp,%rdi
14124 movq $-1,%rsi
14125 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
14126 DISABLE_INTERRUPTS(CLBR_NONE)
14127 testl %ebx,%ebx /* swapgs needed? */
14128 jnz nmi_restore
14129 - testl $3,CS(%rsp)
14130 + testb $3,CS(%rsp)
14131 jnz nmi_userspace
14132 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14133 + pax_exit_kernel
14134 + SWAPGS_UNSAFE_STACK
14135 + RESTORE_ALL 8
14136 + jmp irq_return
14137 +#endif
14138 nmi_swapgs:
14139 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14140 + pax_exit_kernel_user
14141 +#else
14142 + pax_exit_kernel
14143 +#endif
14144 SWAPGS_UNSAFE_STACK
14145 + RESTORE_ALL 8
14146 + jmp irq_return
14147 nmi_restore:
14148 + pax_exit_kernel
14149 RESTORE_ALL 8
14150 jmp irq_return
14151 nmi_userspace:
14152 diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14153 --- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14154 +++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14155 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14156 static void *mod_code_newcode; /* holds the text to write to the IP */
14157
14158 static unsigned nmi_wait_count;
14159 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14160 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14161
14162 int ftrace_arch_read_dyn_info(char *buf, int size)
14163 {
14164 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14165
14166 r = snprintf(buf, size, "%u %u",
14167 nmi_wait_count,
14168 - atomic_read(&nmi_update_count));
14169 + atomic_read_unchecked(&nmi_update_count));
14170 return r;
14171 }
14172
14173 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14174 {
14175 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14176 smp_rmb();
14177 + pax_open_kernel();
14178 ftrace_mod_code();
14179 - atomic_inc(&nmi_update_count);
14180 + pax_close_kernel();
14181 + atomic_inc_unchecked(&nmi_update_count);
14182 }
14183 /* Must have previous changes seen before executions */
14184 smp_mb();
14185 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14186
14187
14188
14189 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14190 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14191
14192 static unsigned char *ftrace_nop_replace(void)
14193 {
14194 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14195 {
14196 unsigned char replaced[MCOUNT_INSN_SIZE];
14197
14198 + ip = ktla_ktva(ip);
14199 +
14200 /*
14201 * Note: Due to modules and __init, code can
14202 * disappear and change, we need to protect against faulting
14203 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14204 unsigned char old[MCOUNT_INSN_SIZE], *new;
14205 int ret;
14206
14207 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14208 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14209 new = ftrace_call_replace(ip, (unsigned long)func);
14210 ret = ftrace_modify_code(ip, old, new);
14211
14212 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14213 switch (faulted) {
14214 case 0:
14215 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14216 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14217 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14218 break;
14219 case 1:
14220 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14221 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14222 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14223 break;
14224 case 2:
14225 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14226 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14227 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14228 break;
14229 }
14230
14231 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14232 {
14233 unsigned char code[MCOUNT_INSN_SIZE];
14234
14235 + ip = ktla_ktva(ip);
14236 +
14237 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14238 return -EFAULT;
14239
14240 diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14241 --- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14242 +++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14243 @@ -16,6 +16,7 @@
14244 #include <asm/apic.h>
14245 #include <asm/io_apic.h>
14246 #include <asm/bios_ebda.h>
14247 +#include <asm/boot.h>
14248
14249 static void __init i386_default_early_setup(void)
14250 {
14251 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14252 {
14253 reserve_trampoline_memory();
14254
14255 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14256 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14257
14258 #ifdef CONFIG_BLK_DEV_INITRD
14259 /* Reserve INITRD */
14260 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14261 --- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14262 +++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14263 @@ -19,10 +19,17 @@
14264 #include <asm/setup.h>
14265 #include <asm/processor-flags.h>
14266 #include <asm/percpu.h>
14267 +#include <asm/msr-index.h>
14268
14269 /* Physical address */
14270 #define pa(X) ((X) - __PAGE_OFFSET)
14271
14272 +#ifdef CONFIG_PAX_KERNEXEC
14273 +#define ta(X) (X)
14274 +#else
14275 +#define ta(X) ((X) - __PAGE_OFFSET)
14276 +#endif
14277 +
14278 /*
14279 * References to members of the new_cpu_data structure.
14280 */
14281 @@ -52,11 +59,7 @@
14282 * and small than max_low_pfn, otherwise will waste some page table entries
14283 */
14284
14285 -#if PTRS_PER_PMD > 1
14286 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14287 -#else
14288 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14289 -#endif
14290 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14291
14292 /* Enough space to fit pagetables for the low memory linear map */
14293 MAPPING_BEYOND_END = \
14294 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14295 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14296
14297 /*
14298 + * Real beginning of normal "text" segment
14299 + */
14300 +ENTRY(stext)
14301 +ENTRY(_stext)
14302 +
14303 +/*
14304 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14305 * %esi points to the real-mode code as a 32-bit pointer.
14306 * CS and DS must be 4 GB flat segments, but we don't depend on
14307 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14308 * can.
14309 */
14310 __HEAD
14311 +
14312 +#ifdef CONFIG_PAX_KERNEXEC
14313 + jmp startup_32
14314 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14315 +.fill PAGE_SIZE-5,1,0xcc
14316 +#endif
14317 +
14318 ENTRY(startup_32)
14319 + movl pa(stack_start),%ecx
14320 +
14321 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14322 us to not reload segments */
14323 testb $(1<<6), BP_loadflags(%esi)
14324 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14325 movl %eax,%es
14326 movl %eax,%fs
14327 movl %eax,%gs
14328 + movl %eax,%ss
14329 2:
14330 + leal -__PAGE_OFFSET(%ecx),%esp
14331 +
14332 +#ifdef CONFIG_SMP
14333 + movl $pa(cpu_gdt_table),%edi
14334 + movl $__per_cpu_load,%eax
14335 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14336 + rorl $16,%eax
14337 + movb %al,__KERNEL_PERCPU + 4(%edi)
14338 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14339 + movl $__per_cpu_end - 1,%eax
14340 + subl $__per_cpu_start,%eax
14341 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14342 +#endif
14343 +
14344 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14345 + movl $NR_CPUS,%ecx
14346 + movl $pa(cpu_gdt_table),%edi
14347 +1:
14348 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14349 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14350 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14351 + addl $PAGE_SIZE_asm,%edi
14352 + loop 1b
14353 +#endif
14354 +
14355 +#ifdef CONFIG_PAX_KERNEXEC
14356 + movl $pa(boot_gdt),%edi
14357 + movl $__LOAD_PHYSICAL_ADDR,%eax
14358 + movw %ax,__BOOT_CS + 2(%edi)
14359 + rorl $16,%eax
14360 + movb %al,__BOOT_CS + 4(%edi)
14361 + movb %ah,__BOOT_CS + 7(%edi)
14362 + rorl $16,%eax
14363 +
14364 + ljmp $(__BOOT_CS),$1f
14365 +1:
14366 +
14367 + movl $NR_CPUS,%ecx
14368 + movl $pa(cpu_gdt_table),%edi
14369 + addl $__PAGE_OFFSET,%eax
14370 +1:
14371 + movw %ax,__KERNEL_CS + 2(%edi)
14372 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14373 + rorl $16,%eax
14374 + movb %al,__KERNEL_CS + 4(%edi)
14375 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14376 + movb %ah,__KERNEL_CS + 7(%edi)
14377 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14378 + rorl $16,%eax
14379 + addl $PAGE_SIZE_asm,%edi
14380 + loop 1b
14381 +#endif
14382
14383 /*
14384 * Clear BSS first so that there are no surprises...
14385 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14386 cmpl $num_subarch_entries, %eax
14387 jae bad_subarch
14388
14389 - movl pa(subarch_entries)(,%eax,4), %eax
14390 - subl $__PAGE_OFFSET, %eax
14391 - jmp *%eax
14392 + jmp *pa(subarch_entries)(,%eax,4)
14393
14394 bad_subarch:
14395 WEAK(lguest_entry)
14396 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14397 __INITDATA
14398
14399 subarch_entries:
14400 - .long default_entry /* normal x86/PC */
14401 - .long lguest_entry /* lguest hypervisor */
14402 - .long xen_entry /* Xen hypervisor */
14403 - .long default_entry /* Moorestown MID */
14404 + .long ta(default_entry) /* normal x86/PC */
14405 + .long ta(lguest_entry) /* lguest hypervisor */
14406 + .long ta(xen_entry) /* Xen hypervisor */
14407 + .long ta(default_entry) /* Moorestown MID */
14408 num_subarch_entries = (. - subarch_entries) / 4
14409 .previous
14410 #endif /* CONFIG_PARAVIRT */
14411 @@ -218,8 +287,11 @@ default_entry:
14412 movl %eax, pa(max_pfn_mapped)
14413
14414 /* Do early initialization of the fixmap area */
14415 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14416 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14417 +#ifdef CONFIG_COMPAT_VDSO
14418 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14419 +#else
14420 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14421 +#endif
14422 #else /* Not PAE */
14423
14424 page_pde_offset = (__PAGE_OFFSET >> 20);
14425 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14426 movl %eax, pa(max_pfn_mapped)
14427
14428 /* Do early initialization of the fixmap area */
14429 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14430 - movl %eax,pa(swapper_pg_dir+0xffc)
14431 +#ifdef CONFIG_COMPAT_VDSO
14432 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14433 +#else
14434 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14435 +#endif
14436 #endif
14437 jmp 3f
14438 /*
14439 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14440 movl %eax,%es
14441 movl %eax,%fs
14442 movl %eax,%gs
14443 + movl pa(stack_start),%ecx
14444 + movl %eax,%ss
14445 + leal -__PAGE_OFFSET(%ecx),%esp
14446 #endif /* CONFIG_SMP */
14447 3:
14448
14449 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14450 orl %edx,%eax
14451 movl %eax,%cr4
14452
14453 +#ifdef CONFIG_X86_PAE
14454 btl $5, %eax # check if PAE is enabled
14455 jnc 6f
14456
14457 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14458 cpuid
14459 cmpl $0x80000000, %eax
14460 jbe 6f
14461 +
14462 + /* Clear bogus XD_DISABLE bits */
14463 + call verify_cpu
14464 +
14465 mov $0x80000001, %eax
14466 cpuid
14467 /* Execute Disable bit supported? */
14468 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14469 jnc 6f
14470
14471 /* Setup EFER (Extended Feature Enable Register) */
14472 - movl $0xc0000080, %ecx
14473 + movl $MSR_EFER, %ecx
14474 rdmsr
14475
14476 btsl $11, %eax
14477 /* Make changes effective */
14478 wrmsr
14479
14480 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14481 + movl $1,pa(nx_enabled)
14482 +#endif
14483 +
14484 6:
14485
14486 /*
14487 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14488 movl %eax,%cr0 /* ..and set paging (PG) bit */
14489 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14490 1:
14491 - /* Set up the stack pointer */
14492 - lss stack_start,%esp
14493 + /* Shift the stack pointer to a virtual address */
14494 + addl $__PAGE_OFFSET, %esp
14495
14496 /*
14497 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14498 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14499
14500 #ifdef CONFIG_SMP
14501 cmpb $0, ready
14502 - jz 1f /* Initial CPU cleans BSS */
14503 - jmp checkCPUtype
14504 -1:
14505 + jnz checkCPUtype
14506 #endif /* CONFIG_SMP */
14507
14508 /*
14509 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14510 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14511 movl %eax,%ss # after changing gdt.
14512
14513 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14514 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14515 movl %eax,%ds
14516 movl %eax,%es
14517
14518 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14519 */
14520 cmpb $0,ready
14521 jne 1f
14522 - movl $per_cpu__gdt_page,%eax
14523 + movl $cpu_gdt_table,%eax
14524 movl $per_cpu__stack_canary,%ecx
14525 +#ifdef CONFIG_SMP
14526 + addl $__per_cpu_load,%ecx
14527 +#endif
14528 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14529 shrl $16, %ecx
14530 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14531 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14532 1:
14533 -#endif
14534 movl $(__KERNEL_STACK_CANARY),%eax
14535 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14536 + movl $(__USER_DS),%eax
14537 +#else
14538 + xorl %eax,%eax
14539 +#endif
14540 movl %eax,%gs
14541
14542 xorl %eax,%eax # Clear LDT
14543 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14544
14545 cld # gcc2 wants the direction flag cleared at all times
14546 pushl $0 # fake return address for unwinder
14547 -#ifdef CONFIG_SMP
14548 - movb ready, %cl
14549 movb $1, ready
14550 - cmpb $0,%cl # the first CPU calls start_kernel
14551 - je 1f
14552 - movl (stack_start), %esp
14553 -1:
14554 -#endif /* CONFIG_SMP */
14555 jmp *(initial_code)
14556
14557 /*
14558 @@ -546,22 +631,22 @@ early_page_fault:
14559 jmp early_fault
14560
14561 early_fault:
14562 - cld
14563 #ifdef CONFIG_PRINTK
14564 + cmpl $1,%ss:early_recursion_flag
14565 + je hlt_loop
14566 + incl %ss:early_recursion_flag
14567 + cld
14568 pusha
14569 movl $(__KERNEL_DS),%eax
14570 movl %eax,%ds
14571 movl %eax,%es
14572 - cmpl $2,early_recursion_flag
14573 - je hlt_loop
14574 - incl early_recursion_flag
14575 movl %cr2,%eax
14576 pushl %eax
14577 pushl %edx /* trapno */
14578 pushl $fault_msg
14579 call printk
14580 +; call dump_stack
14581 #endif
14582 - call dump_stack
14583 hlt_loop:
14584 hlt
14585 jmp hlt_loop
14586 @@ -569,8 +654,11 @@ hlt_loop:
14587 /* This is the default interrupt "handler" :-) */
14588 ALIGN
14589 ignore_int:
14590 - cld
14591 #ifdef CONFIG_PRINTK
14592 + cmpl $2,%ss:early_recursion_flag
14593 + je hlt_loop
14594 + incl %ss:early_recursion_flag
14595 + cld
14596 pushl %eax
14597 pushl %ecx
14598 pushl %edx
14599 @@ -579,9 +667,6 @@ ignore_int:
14600 movl $(__KERNEL_DS),%eax
14601 movl %eax,%ds
14602 movl %eax,%es
14603 - cmpl $2,early_recursion_flag
14604 - je hlt_loop
14605 - incl early_recursion_flag
14606 pushl 16(%esp)
14607 pushl 24(%esp)
14608 pushl 32(%esp)
14609 @@ -600,6 +685,8 @@ ignore_int:
14610 #endif
14611 iret
14612
14613 +#include "verify_cpu.S"
14614 +
14615 __REFDATA
14616 .align 4
14617 ENTRY(initial_code)
14618 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14619 /*
14620 * BSS section
14621 */
14622 -__PAGE_ALIGNED_BSS
14623 - .align PAGE_SIZE_asm
14624 #ifdef CONFIG_X86_PAE
14625 +.section .swapper_pg_pmd,"a",@progbits
14626 swapper_pg_pmd:
14627 .fill 1024*KPMDS,4,0
14628 #else
14629 +.section .swapper_pg_dir,"a",@progbits
14630 ENTRY(swapper_pg_dir)
14631 .fill 1024,4,0
14632 #endif
14633 +.section .swapper_pg_fixmap,"a",@progbits
14634 swapper_pg_fixmap:
14635 .fill 1024,4,0
14636 #ifdef CONFIG_X86_TRAMPOLINE
14637 +.section .trampoline_pg_dir,"a",@progbits
14638 ENTRY(trampoline_pg_dir)
14639 +#ifdef CONFIG_X86_PAE
14640 + .fill 4,8,0
14641 +#else
14642 .fill 1024,4,0
14643 #endif
14644 +#endif
14645 +
14646 +.section .empty_zero_page,"a",@progbits
14647 ENTRY(empty_zero_page)
14648 .fill 4096,1,0
14649
14650 /*
14651 + * The IDT has to be page-aligned to simplify the Pentium
14652 + * F0 0F bug workaround.. We have a special link segment
14653 + * for this.
14654 + */
14655 +.section .idt,"a",@progbits
14656 +ENTRY(idt_table)
14657 + .fill 256,8,0
14658 +
14659 +/*
14660 * This starts the data section.
14661 */
14662 #ifdef CONFIG_X86_PAE
14663 -__PAGE_ALIGNED_DATA
14664 - /* Page-aligned for the benefit of paravirt? */
14665 - .align PAGE_SIZE_asm
14666 +.section .swapper_pg_dir,"a",@progbits
14667 +
14668 ENTRY(swapper_pg_dir)
14669 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14670 # if KPMDS == 3
14671 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14672 # error "Kernel PMDs should be 1, 2 or 3"
14673 # endif
14674 .align PAGE_SIZE_asm /* needs to be page-sized too */
14675 +
14676 +#ifdef CONFIG_PAX_PER_CPU_PGD
14677 +ENTRY(cpu_pgd)
14678 + .rept NR_CPUS
14679 + .fill 4,8,0
14680 + .endr
14681 +#endif
14682 +
14683 #endif
14684
14685 .data
14686 +.balign 4
14687 ENTRY(stack_start)
14688 - .long init_thread_union+THREAD_SIZE
14689 - .long __BOOT_DS
14690 + .long init_thread_union+THREAD_SIZE-8
14691
14692 ready: .byte 0
14693
14694 +.section .rodata,"a",@progbits
14695 early_recursion_flag:
14696 .long 0
14697
14698 @@ -697,7 +809,7 @@ fault_msg:
14699 .word 0 # 32 bit align gdt_desc.address
14700 boot_gdt_descr:
14701 .word __BOOT_DS+7
14702 - .long boot_gdt - __PAGE_OFFSET
14703 + .long pa(boot_gdt)
14704
14705 .word 0 # 32-bit align idt_desc.address
14706 idt_descr:
14707 @@ -708,7 +820,7 @@ idt_descr:
14708 .word 0 # 32 bit align gdt_desc.address
14709 ENTRY(early_gdt_descr)
14710 .word GDT_ENTRIES*8-1
14711 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14712 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14713
14714 /*
14715 * The boot_gdt must mirror the equivalent in setup.S and is
14716 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14717 .align L1_CACHE_BYTES
14718 ENTRY(boot_gdt)
14719 .fill GDT_ENTRY_BOOT_CS,8,0
14720 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14721 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14722 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14723 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14724 +
14725 + .align PAGE_SIZE_asm
14726 +ENTRY(cpu_gdt_table)
14727 + .rept NR_CPUS
14728 + .quad 0x0000000000000000 /* NULL descriptor */
14729 + .quad 0x0000000000000000 /* 0x0b reserved */
14730 + .quad 0x0000000000000000 /* 0x13 reserved */
14731 + .quad 0x0000000000000000 /* 0x1b reserved */
14732 +
14733 +#ifdef CONFIG_PAX_KERNEXEC
14734 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14735 +#else
14736 + .quad 0x0000000000000000 /* 0x20 unused */
14737 +#endif
14738 +
14739 + .quad 0x0000000000000000 /* 0x28 unused */
14740 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14741 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14742 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14743 + .quad 0x0000000000000000 /* 0x4b reserved */
14744 + .quad 0x0000000000000000 /* 0x53 reserved */
14745 + .quad 0x0000000000000000 /* 0x5b reserved */
14746 +
14747 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14748 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14749 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14750 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14751 +
14752 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14753 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14754 +
14755 + /*
14756 + * Segments used for calling PnP BIOS have byte granularity.
14757 + * The code segments and data segments have fixed 64k limits,
14758 + * the transfer segment sizes are set at run time.
14759 + */
14760 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14761 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14762 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14763 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14764 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14765 +
14766 + /*
14767 + * The APM segments have byte granularity and their bases
14768 + * are set at run time. All have 64k limits.
14769 + */
14770 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14771 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14772 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14773 +
14774 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14775 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14776 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14777 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14778 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14779 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14780 +
14781 + /* Be sure this is zeroed to avoid false validations in Xen */
14782 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14783 + .endr
14784 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14785 --- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14786 +++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14787 @@ -19,6 +19,7 @@
14788 #include <asm/cache.h>
14789 #include <asm/processor-flags.h>
14790 #include <asm/percpu.h>
14791 +#include <asm/cpufeature.h>
14792
14793 #ifdef CONFIG_PARAVIRT
14794 #include <asm/asm-offsets.h>
14795 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14796 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14797 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14798 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14799 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14800 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14801 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14802 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14803
14804 .text
14805 __HEAD
14806 @@ -85,35 +90,22 @@ startup_64:
14807 */
14808 addq %rbp, init_level4_pgt + 0(%rip)
14809 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14810 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14811 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14812 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14813
14814 addq %rbp, level3_ident_pgt + 0(%rip)
14815 +#ifndef CONFIG_XEN
14816 + addq %rbp, level3_ident_pgt + 8(%rip)
14817 +#endif
14818
14819 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14820 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14821 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14822
14823 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14824 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14825 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14826
14827 - /* Add an Identity mapping if I am above 1G */
14828 - leaq _text(%rip), %rdi
14829 - andq $PMD_PAGE_MASK, %rdi
14830 -
14831 - movq %rdi, %rax
14832 - shrq $PUD_SHIFT, %rax
14833 - andq $(PTRS_PER_PUD - 1), %rax
14834 - jz ident_complete
14835 -
14836 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14837 - leaq level3_ident_pgt(%rip), %rbx
14838 - movq %rdx, 0(%rbx, %rax, 8)
14839 -
14840 - movq %rdi, %rax
14841 - shrq $PMD_SHIFT, %rax
14842 - andq $(PTRS_PER_PMD - 1), %rax
14843 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14844 - leaq level2_spare_pgt(%rip), %rbx
14845 - movq %rdx, 0(%rbx, %rax, 8)
14846 -ident_complete:
14847 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14848 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14849
14850 /*
14851 * Fixup the kernel text+data virtual addresses. Note that
14852 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14853 * after the boot processor executes this code.
14854 */
14855
14856 - /* Enable PAE mode and PGE */
14857 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14858 + /* Enable PAE mode and PSE/PGE */
14859 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14860 movq %rax, %cr4
14861
14862 /* Setup early boot stage 4 level pagetables. */
14863 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14864 movl $MSR_EFER, %ecx
14865 rdmsr
14866 btsl $_EFER_SCE, %eax /* Enable System Call */
14867 - btl $20,%edi /* No Execute supported? */
14868 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14869 jnc 1f
14870 btsl $_EFER_NX, %eax
14871 + leaq init_level4_pgt(%rip), %rdi
14872 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14873 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14874 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14875 1: wrmsr /* Make changes effective */
14876
14877 /* Setup cr0 */
14878 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14879 .quad x86_64_start_kernel
14880 ENTRY(initial_gs)
14881 .quad INIT_PER_CPU_VAR(irq_stack_union)
14882 - __FINITDATA
14883
14884 ENTRY(stack_start)
14885 .quad init_thread_union+THREAD_SIZE-8
14886 .word 0
14887 + __FINITDATA
14888
14889 bad_address:
14890 jmp bad_address
14891
14892 - .section ".init.text","ax"
14893 + __INIT
14894 #ifdef CONFIG_EARLY_PRINTK
14895 .globl early_idt_handlers
14896 early_idt_handlers:
14897 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14898 #endif /* EARLY_PRINTK */
14899 1: hlt
14900 jmp 1b
14901 + .previous
14902
14903 #ifdef CONFIG_EARLY_PRINTK
14904 + __INITDATA
14905 early_recursion_flag:
14906 .long 0
14907 + .previous
14908
14909 + .section .rodata,"a",@progbits
14910 early_idt_msg:
14911 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14912 early_idt_ripmsg:
14913 .asciz "RIP %s\n"
14914 -#endif /* CONFIG_EARLY_PRINTK */
14915 .previous
14916 +#endif /* CONFIG_EARLY_PRINTK */
14917
14918 + .section .rodata,"a",@progbits
14919 #define NEXT_PAGE(name) \
14920 .balign PAGE_SIZE; \
14921 ENTRY(name)
14922 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14923 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14924 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14925 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14926 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14927 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14928 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14929 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14930 .org init_level4_pgt + L4_START_KERNEL*8, 0
14931 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14932 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14933
14934 +#ifdef CONFIG_PAX_PER_CPU_PGD
14935 +NEXT_PAGE(cpu_pgd)
14936 + .rept NR_CPUS
14937 + .fill 512,8,0
14938 + .endr
14939 +#endif
14940 +
14941 NEXT_PAGE(level3_ident_pgt)
14942 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14943 +#ifdef CONFIG_XEN
14944 .fill 511,8,0
14945 +#else
14946 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14947 + .fill 510,8,0
14948 +#endif
14949 +
14950 +NEXT_PAGE(level3_vmalloc_pgt)
14951 + .fill 512,8,0
14952 +
14953 +NEXT_PAGE(level3_vmemmap_pgt)
14954 + .fill L3_VMEMMAP_START,8,0
14955 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14956
14957 NEXT_PAGE(level3_kernel_pgt)
14958 .fill L3_START_KERNEL,8,0
14959 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14960 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14961 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14962
14963 +NEXT_PAGE(level2_vmemmap_pgt)
14964 + .fill 512,8,0
14965 +
14966 NEXT_PAGE(level2_fixmap_pgt)
14967 - .fill 506,8,0
14968 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14969 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14970 - .fill 5,8,0
14971 + .fill 507,8,0
14972 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14973 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14974 + .fill 4,8,0
14975
14976 -NEXT_PAGE(level1_fixmap_pgt)
14977 +NEXT_PAGE(level1_vsyscall_pgt)
14978 .fill 512,8,0
14979
14980 -NEXT_PAGE(level2_ident_pgt)
14981 - /* Since I easily can, map the first 1G.
14982 + /* Since I easily can, map the first 2G.
14983 * Don't set NX because code runs from these pages.
14984 */
14985 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14986 +NEXT_PAGE(level2_ident_pgt)
14987 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14988
14989 NEXT_PAGE(level2_kernel_pgt)
14990 /*
14991 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14992 * If you want to increase this then increase MODULES_VADDR
14993 * too.)
14994 */
14995 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14996 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14997 -
14998 -NEXT_PAGE(level2_spare_pgt)
14999 - .fill 512, 8, 0
15000 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15001
15002 #undef PMDS
15003 #undef NEXT_PAGE
15004
15005 - .data
15006 + .align PAGE_SIZE
15007 +ENTRY(cpu_gdt_table)
15008 + .rept NR_CPUS
15009 + .quad 0x0000000000000000 /* NULL descriptor */
15010 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15011 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15012 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15013 + .quad 0x00cffb000000ffff /* __USER32_CS */
15014 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15015 + .quad 0x00affb000000ffff /* __USER_CS */
15016 +
15017 +#ifdef CONFIG_PAX_KERNEXEC
15018 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15019 +#else
15020 + .quad 0x0 /* unused */
15021 +#endif
15022 +
15023 + .quad 0,0 /* TSS */
15024 + .quad 0,0 /* LDT */
15025 + .quad 0,0,0 /* three TLS descriptors */
15026 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15027 + /* asm/segment.h:GDT_ENTRIES must match this */
15028 +
15029 + /* zero the remaining page */
15030 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15031 + .endr
15032 +
15033 .align 16
15034 .globl early_gdt_descr
15035 early_gdt_descr:
15036 .word GDT_ENTRIES*8-1
15037 early_gdt_descr_base:
15038 - .quad INIT_PER_CPU_VAR(gdt_page)
15039 + .quad cpu_gdt_table
15040
15041 ENTRY(phys_base)
15042 /* This must match the first entry in level2_kernel_pgt */
15043 .quad 0x0000000000000000
15044
15045 #include "../../x86/xen/xen-head.S"
15046 -
15047 - .section .bss, "aw", @nobits
15048 +
15049 + .section .rodata,"a",@progbits
15050 .align L1_CACHE_BYTES
15051 ENTRY(idt_table)
15052 - .skip IDT_ENTRIES * 16
15053 + .fill 512,8,0
15054
15055 __PAGE_ALIGNED_BSS
15056 .align PAGE_SIZE
15057 diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15058 --- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15059 +++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15060 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15061 EXPORT_SYMBOL(cmpxchg8b_emu);
15062 #endif
15063
15064 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15065 +
15066 /* Networking helper routines. */
15067 EXPORT_SYMBOL(csum_partial_copy_generic);
15068 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15069 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15070
15071 EXPORT_SYMBOL(__get_user_1);
15072 EXPORT_SYMBOL(__get_user_2);
15073 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15074
15075 EXPORT_SYMBOL(csum_partial);
15076 EXPORT_SYMBOL(empty_zero_page);
15077 +
15078 +#ifdef CONFIG_PAX_KERNEXEC
15079 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15080 +#endif
15081 diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15082 --- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15083 +++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15084 @@ -208,7 +208,7 @@ spurious_8259A_irq:
15085 "spurious 8259A interrupt: IRQ%d.\n", irq);
15086 spurious_irq_mask |= irqmask;
15087 }
15088 - atomic_inc(&irq_err_count);
15089 + atomic_inc_unchecked(&irq_err_count);
15090 /*
15091 * Theoretically we do not have to handle this IRQ,
15092 * but in Linux this does not cause problems and is
15093 diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15094 --- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15095 +++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15096 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15097 * way process stacks are handled. This is done by having a special
15098 * "init_task" linker map entry..
15099 */
15100 -union thread_union init_thread_union __init_task_data =
15101 - { INIT_THREAD_INFO(init_task) };
15102 +union thread_union init_thread_union __init_task_data;
15103
15104 /*
15105 * Initial task structure.
15106 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15107 * section. Since TSS's are completely CPU-local, we want them
15108 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15109 */
15110 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15111 -
15112 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15113 +EXPORT_SYMBOL(init_tss);
15114 diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15115 --- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15116 +++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15117 @@ -6,6 +6,7 @@
15118 #include <linux/sched.h>
15119 #include <linux/kernel.h>
15120 #include <linux/capability.h>
15121 +#include <linux/security.h>
15122 #include <linux/errno.h>
15123 #include <linux/types.h>
15124 #include <linux/ioport.h>
15125 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15126
15127 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15128 return -EINVAL;
15129 +#ifdef CONFIG_GRKERNSEC_IO
15130 + if (turn_on && grsec_disable_privio) {
15131 + gr_handle_ioperm();
15132 + return -EPERM;
15133 + }
15134 +#endif
15135 if (turn_on && !capable(CAP_SYS_RAWIO))
15136 return -EPERM;
15137
15138 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15139 * because the ->io_bitmap_max value must match the bitmap
15140 * contents:
15141 */
15142 - tss = &per_cpu(init_tss, get_cpu());
15143 + tss = init_tss + get_cpu();
15144
15145 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15146
15147 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15148 return -EINVAL;
15149 /* Trying to gain more privileges? */
15150 if (level > old) {
15151 +#ifdef CONFIG_GRKERNSEC_IO
15152 + if (grsec_disable_privio) {
15153 + gr_handle_iopl();
15154 + return -EPERM;
15155 + }
15156 +#endif
15157 if (!capable(CAP_SYS_RAWIO))
15158 return -EPERM;
15159 }
15160 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15161 --- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15162 +++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15163 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15164 __asm__ __volatile__("andl %%esp,%0" :
15165 "=r" (sp) : "0" (THREAD_SIZE - 1));
15166
15167 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15168 + return sp < STACK_WARN;
15169 }
15170
15171 static void print_stack_overflow(void)
15172 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15173 * per-CPU IRQ handling contexts (thread information and stack)
15174 */
15175 union irq_ctx {
15176 - struct thread_info tinfo;
15177 - u32 stack[THREAD_SIZE/sizeof(u32)];
15178 -} __attribute__((aligned(PAGE_SIZE)));
15179 + unsigned long previous_esp;
15180 + u32 stack[THREAD_SIZE/sizeof(u32)];
15181 +} __attribute__((aligned(THREAD_SIZE)));
15182
15183 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15184 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15185 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15186 static inline int
15187 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15188 {
15189 - union irq_ctx *curctx, *irqctx;
15190 + union irq_ctx *irqctx;
15191 u32 *isp, arg1, arg2;
15192
15193 - curctx = (union irq_ctx *) current_thread_info();
15194 irqctx = __get_cpu_var(hardirq_ctx);
15195
15196 /*
15197 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15198 * handler) we can't do that and just have to keep using the
15199 * current stack (which is the irq stack already after all)
15200 */
15201 - if (unlikely(curctx == irqctx))
15202 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15203 return 0;
15204
15205 /* build the stack frame on the IRQ stack */
15206 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15207 - irqctx->tinfo.task = curctx->tinfo.task;
15208 - irqctx->tinfo.previous_esp = current_stack_pointer;
15209 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15210 + irqctx->previous_esp = current_stack_pointer;
15211
15212 - /*
15213 - * Copy the softirq bits in preempt_count so that the
15214 - * softirq checks work in the hardirq context.
15215 - */
15216 - irqctx->tinfo.preempt_count =
15217 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15218 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15219 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15220 + __set_fs(MAKE_MM_SEG(0));
15221 +#endif
15222
15223 if (unlikely(overflow))
15224 call_on_stack(print_stack_overflow, isp);
15225 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15226 : "0" (irq), "1" (desc), "2" (isp),
15227 "D" (desc->handle_irq)
15228 : "memory", "cc", "ecx");
15229 +
15230 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15231 + __set_fs(current_thread_info()->addr_limit);
15232 +#endif
15233 +
15234 return 1;
15235 }
15236
15237 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15238 */
15239 void __cpuinit irq_ctx_init(int cpu)
15240 {
15241 - union irq_ctx *irqctx;
15242 -
15243 if (per_cpu(hardirq_ctx, cpu))
15244 return;
15245
15246 - irqctx = &per_cpu(hardirq_stack, cpu);
15247 - irqctx->tinfo.task = NULL;
15248 - irqctx->tinfo.exec_domain = NULL;
15249 - irqctx->tinfo.cpu = cpu;
15250 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15251 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15252 -
15253 - per_cpu(hardirq_ctx, cpu) = irqctx;
15254 -
15255 - irqctx = &per_cpu(softirq_stack, cpu);
15256 - irqctx->tinfo.task = NULL;
15257 - irqctx->tinfo.exec_domain = NULL;
15258 - irqctx->tinfo.cpu = cpu;
15259 - irqctx->tinfo.preempt_count = 0;
15260 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15261 -
15262 - per_cpu(softirq_ctx, cpu) = irqctx;
15263 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15264 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15265
15266 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15267 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15268 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15269 asmlinkage void do_softirq(void)
15270 {
15271 unsigned long flags;
15272 - struct thread_info *curctx;
15273 union irq_ctx *irqctx;
15274 u32 *isp;
15275
15276 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15277 local_irq_save(flags);
15278
15279 if (local_softirq_pending()) {
15280 - curctx = current_thread_info();
15281 irqctx = __get_cpu_var(softirq_ctx);
15282 - irqctx->tinfo.task = curctx->task;
15283 - irqctx->tinfo.previous_esp = current_stack_pointer;
15284 + irqctx->previous_esp = current_stack_pointer;
15285
15286 /* build the stack frame on the softirq stack */
15287 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15288 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15289 +
15290 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15291 + __set_fs(MAKE_MM_SEG(0));
15292 +#endif
15293
15294 call_on_stack(__do_softirq, isp);
15295 +
15296 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15297 + __set_fs(current_thread_info()->addr_limit);
15298 +#endif
15299 +
15300 /*
15301 * Shouldnt happen, we returned above if in_interrupt():
15302 */
15303 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15304 --- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15305 +++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15306 @@ -15,7 +15,7 @@
15307 #include <asm/mce.h>
15308 #include <asm/hw_irq.h>
15309
15310 -atomic_t irq_err_count;
15311 +atomic_unchecked_t irq_err_count;
15312
15313 /* Function pointer for generic interrupt vector handling */
15314 void (*generic_interrupt_extension)(void) = NULL;
15315 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15316 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15317 seq_printf(p, " Machine check polls\n");
15318 #endif
15319 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15320 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15321 #if defined(CONFIG_X86_IO_APIC)
15322 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15323 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15324 #endif
15325 return 0;
15326 }
15327 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15328
15329 u64 arch_irq_stat(void)
15330 {
15331 - u64 sum = atomic_read(&irq_err_count);
15332 + u64 sum = atomic_read_unchecked(&irq_err_count);
15333
15334 #ifdef CONFIG_X86_IO_APIC
15335 - sum += atomic_read(&irq_mis_count);
15336 + sum += atomic_read_unchecked(&irq_mis_count);
15337 #endif
15338 return sum;
15339 }
15340 diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15341 --- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15342 +++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15343 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15344
15345 /* clear the trace bit */
15346 linux_regs->flags &= ~X86_EFLAGS_TF;
15347 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15348 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15349
15350 /* set the trace bit if we're stepping */
15351 if (remcomInBuffer[0] == 's') {
15352 linux_regs->flags |= X86_EFLAGS_TF;
15353 kgdb_single_step = 1;
15354 - atomic_set(&kgdb_cpu_doing_single_step,
15355 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15356 raw_smp_processor_id());
15357 }
15358
15359 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15360 break;
15361
15362 case DIE_DEBUG:
15363 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15364 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15365 raw_smp_processor_id()) {
15366 if (user_mode(regs))
15367 return single_step_cont(regs, args);
15368 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15369 return instruction_pointer(regs);
15370 }
15371
15372 -struct kgdb_arch arch_kgdb_ops = {
15373 +const struct kgdb_arch arch_kgdb_ops = {
15374 /* Breakpoint instruction: */
15375 .gdb_bpt_instr = { 0xcc },
15376 .flags = KGDB_HW_BREAKPOINT,
15377 diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15378 --- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15379 +++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15380 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15381 char op;
15382 s32 raddr;
15383 } __attribute__((packed)) * jop;
15384 - jop = (struct __arch_jmp_op *)from;
15385 +
15386 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15387 +
15388 + pax_open_kernel();
15389 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15390 jop->op = RELATIVEJUMP_INSTRUCTION;
15391 + pax_close_kernel();
15392 }
15393
15394 /*
15395 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15396 kprobe_opcode_t opcode;
15397 kprobe_opcode_t *orig_opcodes = opcodes;
15398
15399 - if (search_exception_tables((unsigned long)opcodes))
15400 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15401 return 0; /* Page fault may occur on this address. */
15402
15403 retry:
15404 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15405 disp = (u8 *) p->addr + *((s32 *) insn) -
15406 (u8 *) p->ainsn.insn;
15407 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15408 + pax_open_kernel();
15409 *(s32 *)insn = (s32) disp;
15410 + pax_close_kernel();
15411 }
15412 }
15413 #endif
15414 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15415
15416 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15417 {
15418 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15419 + pax_open_kernel();
15420 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15421 + pax_close_kernel();
15422
15423 fix_riprel(p);
15424
15425 - if (can_boost(p->addr))
15426 + if (can_boost(ktla_ktva(p->addr)))
15427 p->ainsn.boostable = 0;
15428 else
15429 p->ainsn.boostable = -1;
15430
15431 - p->opcode = *p->addr;
15432 + p->opcode = *(ktla_ktva(p->addr));
15433 }
15434
15435 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15436 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15437 if (p->opcode == BREAKPOINT_INSTRUCTION)
15438 regs->ip = (unsigned long)p->addr;
15439 else
15440 - regs->ip = (unsigned long)p->ainsn.insn;
15441 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15442 }
15443
15444 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15445 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15446 if (p->ainsn.boostable == 1 && !p->post_handler) {
15447 /* Boost up -- we can execute copied instructions directly */
15448 reset_current_kprobe();
15449 - regs->ip = (unsigned long)p->ainsn.insn;
15450 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15451 preempt_enable_no_resched();
15452 return;
15453 }
15454 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15455 struct kprobe_ctlblk *kcb;
15456
15457 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15458 - if (*addr != BREAKPOINT_INSTRUCTION) {
15459 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15460 /*
15461 * The breakpoint instruction was removed right
15462 * after we hit it. Another cpu has removed
15463 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15464 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15465 {
15466 unsigned long *tos = stack_addr(regs);
15467 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15468 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15469 unsigned long orig_ip = (unsigned long)p->addr;
15470 kprobe_opcode_t *insn = p->ainsn.insn;
15471
15472 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15473 struct die_args *args = data;
15474 int ret = NOTIFY_DONE;
15475
15476 - if (args->regs && user_mode_vm(args->regs))
15477 + if (args->regs && user_mode(args->regs))
15478 return ret;
15479
15480 switch (val) {
15481 diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15482 --- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15483 +++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15484 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15485 if (reload) {
15486 #ifdef CONFIG_SMP
15487 preempt_disable();
15488 - load_LDT(pc);
15489 + load_LDT_nolock(pc);
15490 if (!cpumask_equal(mm_cpumask(current->mm),
15491 cpumask_of(smp_processor_id())))
15492 smp_call_function(flush_ldt, current->mm, 1);
15493 preempt_enable();
15494 #else
15495 - load_LDT(pc);
15496 + load_LDT_nolock(pc);
15497 #endif
15498 }
15499 if (oldsize) {
15500 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15501 return err;
15502
15503 for (i = 0; i < old->size; i++)
15504 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15505 + write_ldt_entry(new->ldt, i, old->ldt + i);
15506 return 0;
15507 }
15508
15509 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15510 retval = copy_ldt(&mm->context, &old_mm->context);
15511 mutex_unlock(&old_mm->context.lock);
15512 }
15513 +
15514 + if (tsk == current) {
15515 + mm->context.vdso = 0;
15516 +
15517 +#ifdef CONFIG_X86_32
15518 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15519 + mm->context.user_cs_base = 0UL;
15520 + mm->context.user_cs_limit = ~0UL;
15521 +
15522 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15523 + cpus_clear(mm->context.cpu_user_cs_mask);
15524 +#endif
15525 +
15526 +#endif
15527 +#endif
15528 +
15529 + }
15530 +
15531 return retval;
15532 }
15533
15534 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15535 }
15536 }
15537
15538 +#ifdef CONFIG_PAX_SEGMEXEC
15539 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15540 + error = -EINVAL;
15541 + goto out_unlock;
15542 + }
15543 +#endif
15544 +
15545 fill_ldt(&ldt, &ldt_info);
15546 if (oldmode)
15547 ldt.avl = 0;
15548 diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15549 --- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15550 +++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15551 @@ -26,7 +26,7 @@
15552 #include <asm/system.h>
15553 #include <asm/cacheflush.h>
15554
15555 -static void set_idt(void *newidt, __u16 limit)
15556 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15557 {
15558 struct desc_ptr curidt;
15559
15560 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15561 }
15562
15563
15564 -static void set_gdt(void *newgdt, __u16 limit)
15565 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15566 {
15567 struct desc_ptr curgdt;
15568
15569 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15570 }
15571
15572 control_page = page_address(image->control_code_page);
15573 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15574 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15575
15576 relocate_kernel_ptr = control_page;
15577 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15578 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15579 --- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15580 +++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15581 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15582 uci->mc = NULL;
15583 }
15584
15585 -static struct microcode_ops microcode_amd_ops = {
15586 +static const struct microcode_ops microcode_amd_ops = {
15587 .request_microcode_user = request_microcode_user,
15588 .request_microcode_fw = request_microcode_fw,
15589 .collect_cpu_info = collect_cpu_info_amd,
15590 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15591 .microcode_fini_cpu = microcode_fini_cpu_amd,
15592 };
15593
15594 -struct microcode_ops * __init init_amd_microcode(void)
15595 +const struct microcode_ops * __init init_amd_microcode(void)
15596 {
15597 return &microcode_amd_ops;
15598 }
15599 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15600 --- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15601 +++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15602 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15603
15604 #define MICROCODE_VERSION "2.00"
15605
15606 -static struct microcode_ops *microcode_ops;
15607 +static const struct microcode_ops *microcode_ops;
15608
15609 /*
15610 * Synchronization.
15611 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15612 --- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15613 +++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15614 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15615
15616 static int get_ucode_user(void *to, const void *from, size_t n)
15617 {
15618 - return copy_from_user(to, from, n);
15619 + return copy_from_user(to, (__force const void __user *)from, n);
15620 }
15621
15622 static enum ucode_state
15623 request_microcode_user(int cpu, const void __user *buf, size_t size)
15624 {
15625 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15626 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15627 }
15628
15629 static void microcode_fini_cpu(int cpu)
15630 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15631 uci->mc = NULL;
15632 }
15633
15634 -static struct microcode_ops microcode_intel_ops = {
15635 +static const struct microcode_ops microcode_intel_ops = {
15636 .request_microcode_user = request_microcode_user,
15637 .request_microcode_fw = request_microcode_fw,
15638 .collect_cpu_info = collect_cpu_info,
15639 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15640 .microcode_fini_cpu = microcode_fini_cpu,
15641 };
15642
15643 -struct microcode_ops * __init init_intel_microcode(void)
15644 +const struct microcode_ops * __init init_intel_microcode(void)
15645 {
15646 return &microcode_intel_ops;
15647 }
15648 diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15649 --- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15650 +++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15651 @@ -34,7 +34,7 @@
15652 #define DEBUGP(fmt...)
15653 #endif
15654
15655 -void *module_alloc(unsigned long size)
15656 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15657 {
15658 struct vm_struct *area;
15659
15660 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15661 if (!area)
15662 return NULL;
15663
15664 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15665 - PAGE_KERNEL_EXEC);
15666 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15667 +}
15668 +
15669 +void *module_alloc(unsigned long size)
15670 +{
15671 +
15672 +#ifdef CONFIG_PAX_KERNEXEC
15673 + return __module_alloc(size, PAGE_KERNEL);
15674 +#else
15675 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15676 +#endif
15677 +
15678 }
15679
15680 /* Free memory returned from module_alloc */
15681 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15682 vfree(module_region);
15683 }
15684
15685 +#ifdef CONFIG_PAX_KERNEXEC
15686 +#ifdef CONFIG_X86_32
15687 +void *module_alloc_exec(unsigned long size)
15688 +{
15689 + struct vm_struct *area;
15690 +
15691 + if (size == 0)
15692 + return NULL;
15693 +
15694 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15695 + return area ? area->addr : NULL;
15696 +}
15697 +EXPORT_SYMBOL(module_alloc_exec);
15698 +
15699 +void module_free_exec(struct module *mod, void *module_region)
15700 +{
15701 + vunmap(module_region);
15702 +}
15703 +EXPORT_SYMBOL(module_free_exec);
15704 +#else
15705 +void module_free_exec(struct module *mod, void *module_region)
15706 +{
15707 + module_free(mod, module_region);
15708 +}
15709 +EXPORT_SYMBOL(module_free_exec);
15710 +
15711 +void *module_alloc_exec(unsigned long size)
15712 +{
15713 + return __module_alloc(size, PAGE_KERNEL_RX);
15714 +}
15715 +EXPORT_SYMBOL(module_alloc_exec);
15716 +#endif
15717 +#endif
15718 +
15719 /* We don't need anything special. */
15720 int module_frob_arch_sections(Elf_Ehdr *hdr,
15721 Elf_Shdr *sechdrs,
15722 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15723 unsigned int i;
15724 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15725 Elf32_Sym *sym;
15726 - uint32_t *location;
15727 + uint32_t *plocation, location;
15728
15729 DEBUGP("Applying relocate section %u to %u\n", relsec,
15730 sechdrs[relsec].sh_info);
15731 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15732 /* This is where to make the change */
15733 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15734 - + rel[i].r_offset;
15735 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15736 + location = (uint32_t)plocation;
15737 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15738 + plocation = ktla_ktva((void *)plocation);
15739 /* This is the symbol it is referring to. Note that all
15740 undefined symbols have been resolved. */
15741 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15742 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15743 switch (ELF32_R_TYPE(rel[i].r_info)) {
15744 case R_386_32:
15745 /* We add the value into the location given */
15746 - *location += sym->st_value;
15747 + pax_open_kernel();
15748 + *plocation += sym->st_value;
15749 + pax_close_kernel();
15750 break;
15751 case R_386_PC32:
15752 /* Add the value, subtract its position */
15753 - *location += sym->st_value - (uint32_t)location;
15754 + pax_open_kernel();
15755 + *plocation += sym->st_value - location;
15756 + pax_close_kernel();
15757 break;
15758 default:
15759 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15760 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15761 case R_X86_64_NONE:
15762 break;
15763 case R_X86_64_64:
15764 + pax_open_kernel();
15765 *(u64 *)loc = val;
15766 + pax_close_kernel();
15767 break;
15768 case R_X86_64_32:
15769 + pax_open_kernel();
15770 *(u32 *)loc = val;
15771 + pax_close_kernel();
15772 if (val != *(u32 *)loc)
15773 goto overflow;
15774 break;
15775 case R_X86_64_32S:
15776 + pax_open_kernel();
15777 *(s32 *)loc = val;
15778 + pax_close_kernel();
15779 if ((s64)val != *(s32 *)loc)
15780 goto overflow;
15781 break;
15782 case R_X86_64_PC32:
15783 val -= (u64)loc;
15784 + pax_open_kernel();
15785 *(u32 *)loc = val;
15786 + pax_close_kernel();
15787 +
15788 #if 0
15789 if ((s64)val != *(s32 *)loc)
15790 goto overflow;
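
The relocation hunks above wrap every write to module text in pax_open_kernel()/pax_close_kernel(), and module_alloc() stops handing out writable+executable memory when KERNEXEC is enabled (a separate module_alloc_exec() supplies the executable region). The kernel-side open/close primitives are defined elsewhere in this patch; the following is only a rough user-space analogy of the "open, patch, close" discipline (mprotect() is not what the kernel uses):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            /* A read-only page stands in for write-protected kernel text. */
            unsigned char *p = mmap(NULL, page, PROT_READ,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* "open": make the region writable for as short a time as possible */
            mprotect(p, page, PROT_READ | PROT_WRITE);
            memcpy(p, "patched", 8);        /* apply the relocation/patch */
            /* "close": restore the protection on every exit path */
            mprotect(p, page, PROT_READ);

            printf("%s\n", (char *)p);
            return munmap(p, page);
    }

The point of the discipline is that the window in which the protected region is writable stays as narrow as possible and is re-closed around each individual write.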
15791 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15792 --- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15793 +++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15794 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15795 {
15796 return x;
15797 }
15798 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15799 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15800 +#endif
15801
15802 void __init default_banner(void)
15803 {
15804 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15805 * corresponding structure. */
15806 static void *get_call_destination(u8 type)
15807 {
15808 - struct paravirt_patch_template tmpl = {
15809 + const struct paravirt_patch_template tmpl = {
15810 .pv_init_ops = pv_init_ops,
15811 .pv_time_ops = pv_time_ops,
15812 .pv_cpu_ops = pv_cpu_ops,
15813 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15814 .pv_lock_ops = pv_lock_ops,
15815 #endif
15816 };
15817 +
15818 + pax_track_stack();
15819 return *((void **)&tmpl + type);
15820 }
15821
15822 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15823 if (opfunc == NULL)
15824 /* If there's no function, patch it with a ud2a (BUG) */
15825 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15826 - else if (opfunc == _paravirt_nop)
15827 + else if (opfunc == (void *)_paravirt_nop)
15828 /* If the operation is a nop, then nop the callsite */
15829 ret = paravirt_patch_nop();
15830
15831 /* identity functions just return their single argument */
15832 - else if (opfunc == _paravirt_ident_32)
15833 + else if (opfunc == (void *)_paravirt_ident_32)
15834 ret = paravirt_patch_ident_32(insnbuf, len);
15835 - else if (opfunc == _paravirt_ident_64)
15836 + else if (opfunc == (void *)_paravirt_ident_64)
15837 + ret = paravirt_patch_ident_64(insnbuf, len);
15838 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15839 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15840 ret = paravirt_patch_ident_64(insnbuf, len);
15841 +#endif
15842
15843 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15844 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15845 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15846 if (insn_len > len || start == NULL)
15847 insn_len = len;
15848 else
15849 - memcpy(insnbuf, start, insn_len);
15850 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15851
15852 return insn_len;
15853 }
15854 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15855 preempt_enable();
15856 }
15857
15858 -struct pv_info pv_info = {
15859 +struct pv_info pv_info __read_only = {
15860 .name = "bare hardware",
15861 .paravirt_enabled = 0,
15862 .kernel_rpl = 0,
15863 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15864 };
15865
15866 -struct pv_init_ops pv_init_ops = {
15867 +struct pv_init_ops pv_init_ops __read_only = {
15868 .patch = native_patch,
15869 };
15870
15871 -struct pv_time_ops pv_time_ops = {
15872 +struct pv_time_ops pv_time_ops __read_only = {
15873 .sched_clock = native_sched_clock,
15874 };
15875
15876 -struct pv_irq_ops pv_irq_ops = {
15877 +struct pv_irq_ops pv_irq_ops __read_only = {
15878 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15879 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15880 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15881 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15882 #endif
15883 };
15884
15885 -struct pv_cpu_ops pv_cpu_ops = {
15886 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15887 .cpuid = native_cpuid,
15888 .get_debugreg = native_get_debugreg,
15889 .set_debugreg = native_set_debugreg,
15890 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15891 .end_context_switch = paravirt_nop,
15892 };
15893
15894 -struct pv_apic_ops pv_apic_ops = {
15895 +struct pv_apic_ops pv_apic_ops __read_only = {
15896 #ifdef CONFIG_X86_LOCAL_APIC
15897 .startup_ipi_hook = paravirt_nop,
15898 #endif
15899 };
15900
15901 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15902 +#ifdef CONFIG_X86_32
15903 +#ifdef CONFIG_X86_PAE
15904 +/* 64-bit pagetable entries */
15905 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15906 +#else
15907 /* 32-bit pagetable entries */
15908 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15909 +#endif
15910 #else
15911 /* 64-bit pagetable entries */
15912 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15913 #endif
15914
15915 -struct pv_mmu_ops pv_mmu_ops = {
15916 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15917
15918 .read_cr2 = native_read_cr2,
15919 .write_cr2 = native_write_cr2,
15920 @@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15921 },
15922
15923 .set_fixmap = native_set_fixmap,
15924 +
15925 +#ifdef CONFIG_PAX_KERNEXEC
15926 + .pax_open_kernel = native_pax_open_kernel,
15927 + .pax_close_kernel = native_pax_close_kernel,
15928 +#endif
15929 +
15930 };
15931
15932 EXPORT_SYMBOL_GPL(pv_time_ops);
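
pv_info, pv_init_ops, pv_time_ops, pv_irq_ops, pv_cpu_ops, pv_apic_ops and pv_mmu_ops above gain a __read_only annotation, so the paravirt dispatch tables are write-protected after boot. A hedged sketch of the general mechanism (the attribute name, section name and the struct below are illustrative stand-ins; the patch's real __read_only definition and linker-script handling live elsewhere):

    /* Place boot-initialized, afterwards-immutable objects into a dedicated
     * section; the linker script can then group such sections onto pages
     * that get write-protected once initialization is done. All names here
     * are illustrative stand-ins, not the patch's definitions. */
    #define __example_read_only __attribute__((__section__(".data.read_only")))

    struct example_pv_ops {
            void (*flush)(void);
            void (*invalidate)(unsigned long addr);
    };

    static void native_flush(void) { }
    static void native_invalidate(unsigned long addr) { (void)addr; }

    struct example_pv_ops example_pv_ops __example_read_only = {
            .flush          = native_flush,
            .invalidate     = native_invalidate,
    };

    int main(void)
    {
            example_pv_ops.flush();
            example_pv_ops.invalidate(0);
            return 0;
    }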
15933 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
15934 --- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15935 +++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15936 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15937 __raw_spin_lock(lock);
15938 }
15939
15940 -struct pv_lock_ops pv_lock_ops = {
15941 +struct pv_lock_ops pv_lock_ops __read_only = {
15942 #ifdef CONFIG_SMP
15943 .spin_is_locked = __ticket_spin_is_locked,
15944 .spin_is_contended = __ticket_spin_is_contended,
15945 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
15946 --- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15947 +++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15948 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15949 free_pages((unsigned long)vaddr, get_order(size));
15950 }
15951
15952 -static struct dma_map_ops calgary_dma_ops = {
15953 +static const struct dma_map_ops calgary_dma_ops = {
15954 .alloc_coherent = calgary_alloc_coherent,
15955 .free_coherent = calgary_free_coherent,
15956 .map_sg = calgary_map_sg,
15957 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
15958 --- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15959 +++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15960 @@ -14,7 +14,7 @@
15961
15962 static int forbid_dac __read_mostly;
15963
15964 -struct dma_map_ops *dma_ops;
15965 +const struct dma_map_ops *dma_ops;
15966 EXPORT_SYMBOL(dma_ops);
15967
15968 static int iommu_sac_force __read_mostly;
15969 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15970
15971 int dma_supported(struct device *dev, u64 mask)
15972 {
15973 - struct dma_map_ops *ops = get_dma_ops(dev);
15974 + const struct dma_map_ops *ops = get_dma_ops(dev);
15975
15976 #ifdef CONFIG_PCI
15977 if (mask > 0xffffffff && forbid_dac > 0) {
15978 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
15979 --- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15980 +++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15981 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15982 return -1;
15983 }
15984
15985 -static struct dma_map_ops gart_dma_ops = {
15986 +static const struct dma_map_ops gart_dma_ops = {
15987 .map_sg = gart_map_sg,
15988 .unmap_sg = gart_unmap_sg,
15989 .map_page = gart_map_page,
15990 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
15991 --- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15992 +++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15993 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15994 flush_write_buffers();
15995 }
15996
15997 -struct dma_map_ops nommu_dma_ops = {
15998 +const struct dma_map_ops nommu_dma_ops = {
15999 .alloc_coherent = dma_generic_alloc_coherent,
16000 .free_coherent = nommu_free_coherent,
16001 .map_sg = nommu_map_sg,
16002 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16003 --- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16004 +++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16005 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16006 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16007 }
16008
16009 -static struct dma_map_ops swiotlb_dma_ops = {
16010 +static const struct dma_map_ops swiotlb_dma_ops = {
16011 .mapping_error = swiotlb_dma_mapping_error,
16012 .alloc_coherent = x86_swiotlb_alloc_coherent,
16013 .free_coherent = swiotlb_free_coherent,
16014 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16015 --- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16016 +++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16017 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16018 unsigned long thread_saved_pc(struct task_struct *tsk)
16019 {
16020 return ((unsigned long *)tsk->thread.sp)[3];
16021 +//XXX return tsk->thread.eip;
16022 }
16023
16024 #ifndef CONFIG_SMP
16025 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16026 unsigned short ss, gs;
16027 const char *board;
16028
16029 - if (user_mode_vm(regs)) {
16030 + if (user_mode(regs)) {
16031 sp = regs->sp;
16032 ss = regs->ss & 0xffff;
16033 - gs = get_user_gs(regs);
16034 } else {
16035 sp = (unsigned long) (&regs->sp);
16036 savesegment(ss, ss);
16037 - savesegment(gs, gs);
16038 }
16039 + gs = get_user_gs(regs);
16040
16041 printk("\n");
16042
16043 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16044 regs.bx = (unsigned long) fn;
16045 regs.dx = (unsigned long) arg;
16046
16047 - regs.ds = __USER_DS;
16048 - regs.es = __USER_DS;
16049 + regs.ds = __KERNEL_DS;
16050 + regs.es = __KERNEL_DS;
16051 regs.fs = __KERNEL_PERCPU;
16052 - regs.gs = __KERNEL_STACK_CANARY;
16053 + savesegment(gs, regs.gs);
16054 regs.orig_ax = -1;
16055 regs.ip = (unsigned long) kernel_thread_helper;
16056 regs.cs = __KERNEL_CS | get_kernel_rpl();
16057 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16058 struct task_struct *tsk;
16059 int err;
16060
16061 - childregs = task_pt_regs(p);
16062 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16063 *childregs = *regs;
16064 childregs->ax = 0;
16065 childregs->sp = sp;
16066
16067 p->thread.sp = (unsigned long) childregs;
16068 p->thread.sp0 = (unsigned long) (childregs+1);
16069 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16070
16071 p->thread.ip = (unsigned long) ret_from_fork;
16072
16073 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16074 struct thread_struct *prev = &prev_p->thread,
16075 *next = &next_p->thread;
16076 int cpu = smp_processor_id();
16077 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16078 + struct tss_struct *tss = init_tss + cpu;
16079 bool preload_fpu;
16080
16081 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16082 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16083 */
16084 lazy_save_gs(prev->gs);
16085
16086 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16087 + __set_fs(task_thread_info(next_p)->addr_limit);
16088 +#endif
16089 +
16090 /*
16091 * Load the per-thread Thread-Local Storage descriptor.
16092 */
16093 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16094 */
16095 arch_end_context_switch(next_p);
16096
16097 + percpu_write(current_task, next_p);
16098 + percpu_write(current_tinfo, &next_p->tinfo);
16099 +
16100 if (preload_fpu)
16101 __math_state_restore();
16102
16103 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16104 if (prev->gs | next->gs)
16105 lazy_load_gs(next->gs);
16106
16107 - percpu_write(current_task, next_p);
16108 -
16109 return prev_p;
16110 }
16111
16112 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16113 } while (count++ < 16);
16114 return 0;
16115 }
16116 -
16117 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16118 --- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16119 +++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16120 @@ -91,7 +91,7 @@ static void __exit_idle(void)
16121 void exit_idle(void)
16122 {
16123 /* idle loop has pid 0 */
16124 - if (current->pid)
16125 + if (task_pid_nr(current))
16126 return;
16127 __exit_idle();
16128 }
16129 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16130 if (!board)
16131 board = "";
16132 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16133 - current->pid, current->comm, print_tainted(),
16134 + task_pid_nr(current), current->comm, print_tainted(),
16135 init_utsname()->release,
16136 (int)strcspn(init_utsname()->version, " "),
16137 init_utsname()->version, board);
16138 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16139 struct pt_regs *childregs;
16140 struct task_struct *me = current;
16141
16142 - childregs = ((struct pt_regs *)
16143 - (THREAD_SIZE + task_stack_page(p))) - 1;
16144 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16145 *childregs = *regs;
16146
16147 childregs->ax = 0;
16148 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16149 p->thread.sp = (unsigned long) childregs;
16150 p->thread.sp0 = (unsigned long) (childregs+1);
16151 p->thread.usersp = me->thread.usersp;
16152 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16153
16154 set_tsk_thread_flag(p, TIF_FORK);
16155
16156 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16157 struct thread_struct *prev = &prev_p->thread;
16158 struct thread_struct *next = &next_p->thread;
16159 int cpu = smp_processor_id();
16160 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16161 + struct tss_struct *tss = init_tss + cpu;
16162 unsigned fsindex, gsindex;
16163 bool preload_fpu;
16164
16165 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16166 prev->usersp = percpu_read(old_rsp);
16167 percpu_write(old_rsp, next->usersp);
16168 percpu_write(current_task, next_p);
16169 + percpu_write(current_tinfo, &next_p->tinfo);
16170
16171 - percpu_write(kernel_stack,
16172 - (unsigned long)task_stack_page(next_p) +
16173 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16174 + percpu_write(kernel_stack, next->sp0);
16175
16176 /*
16177 * Now maybe reload the debug registers and handle I/O bitmaps
16178 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16179 if (!p || p == current || p->state == TASK_RUNNING)
16180 return 0;
16181 stack = (unsigned long)task_stack_page(p);
16182 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16183 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16184 return 0;
16185 fp = *(u64 *)(p->thread.sp);
16186 do {
16187 - if (fp < (unsigned long)stack ||
16188 - fp >= (unsigned long)stack+THREAD_SIZE)
16189 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16190 return 0;
16191 ip = *(u64 *)(fp+8);
16192 if (!in_sched_functions(ip))
16193 diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16194 --- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16195 +++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16196 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16197
16198 void free_thread_info(struct thread_info *ti)
16199 {
16200 - free_thread_xstate(ti->task);
16201 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16202 }
16203
16204 +static struct kmem_cache *task_struct_cachep;
16205 +
16206 void arch_task_cache_init(void)
16207 {
16208 - task_xstate_cachep =
16209 - kmem_cache_create("task_xstate", xstate_size,
16210 + /* create a slab on which task_structs can be allocated */
16211 + task_struct_cachep =
16212 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16213 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16214 +
16215 + task_xstate_cachep =
16216 + kmem_cache_create("task_xstate", xstate_size,
16217 __alignof__(union thread_xstate),
16218 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16219 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16220 +}
16221 +
16222 +struct task_struct *alloc_task_struct(void)
16223 +{
16224 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16225 +}
16226 +
16227 +void free_task_struct(struct task_struct *task)
16228 +{
16229 + free_thread_xstate(task);
16230 + kmem_cache_free(task_struct_cachep, task);
16231 }
16232
16233 /*
16234 @@ -73,7 +90,7 @@ void exit_thread(void)
16235 unsigned long *bp = t->io_bitmap_ptr;
16236
16237 if (bp) {
16238 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16239 + struct tss_struct *tss = init_tss + get_cpu();
16240
16241 t->io_bitmap_ptr = NULL;
16242 clear_thread_flag(TIF_IO_BITMAP);
16243 @@ -93,6 +110,9 @@ void flush_thread(void)
16244
16245 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16246
16247 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16248 + loadsegment(gs, 0);
16249 +#endif
16250 tsk->thread.debugreg0 = 0;
16251 tsk->thread.debugreg1 = 0;
16252 tsk->thread.debugreg2 = 0;
16253 @@ -307,7 +327,7 @@ void default_idle(void)
16254 EXPORT_SYMBOL(default_idle);
16255 #endif
16256
16257 -void stop_this_cpu(void *dummy)
16258 +__noreturn void stop_this_cpu(void *dummy)
16259 {
16260 local_irq_disable();
16261 /*
16262 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16263 }
16264 early_param("idle", idle_setup);
16265
16266 -unsigned long arch_align_stack(unsigned long sp)
16267 +#ifdef CONFIG_PAX_RANDKSTACK
16268 +asmlinkage void pax_randomize_kstack(void)
16269 {
16270 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16271 - sp -= get_random_int() % 8192;
16272 - return sp & ~0xf;
16273 -}
16274 + struct thread_struct *thread = &current->thread;
16275 + unsigned long time;
16276
16277 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16278 -{
16279 - unsigned long range_end = mm->brk + 0x02000000;
16280 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16281 + if (!randomize_va_space)
16282 + return;
16283 +
16284 + rdtscl(time);
16285 +
16286 + /* P4 seems to return a 0 LSB, ignore it */
16287 +#ifdef CONFIG_MPENTIUM4
16288 + time &= 0x3EUL;
16289 + time <<= 2;
16290 +#elif defined(CONFIG_X86_64)
16291 + time &= 0xFUL;
16292 + time <<= 4;
16293 +#else
16294 + time &= 0x1FUL;
16295 + time <<= 3;
16296 +#endif
16297 +
16298 + thread->sp0 ^= time;
16299 + load_sp0(init_tss + smp_processor_id(), thread);
16300 +
16301 +#ifdef CONFIG_X86_64
16302 + percpu_write(kernel_stack, thread->sp0);
16303 +#endif
16304 }
16305 +#endif
16306
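pax_randomize_kstack() above perturbs thread->sp0 with a few low TSC bits, masked and shifted so the offset stays small and keeps the kernel stack suitably aligned (the MPENTIUM4 case skips bit 0, which the hunk's comment notes that CPU returns as 0). A standalone worked example of the x86_64 variant of that arithmetic (values only, not kernel code):

    #include <stdio.h>

    /* x86_64 case from the hunk above: keep 4 low TSC bits, shift left by 4,
     * giving a 16-byte-aligned offset in the range 0..240. */
    static unsigned long kstack_offset_64(unsigned long tsc)
    {
            tsc &= 0xFUL;
            tsc <<= 4;
            return tsc;
    }

    int main(void)
    {
            unsigned long samples[] = { 0x0UL, 0x7UL, 0xFUL, 0x1234567FUL };
            unsigned long i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("tsc=%#lx -> offset=%lu\n",
                           samples[i], kstack_offset_64(samples[i]));
            return 0;
    }

So the kernel entry stack moves by at most 240 bytes per randomization, always in 16-byte steps.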
16307 diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16308 --- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16309 +++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16310 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16311 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16312 {
16313 int ret;
16314 - unsigned long __user *datap = (unsigned long __user *)data;
16315 + unsigned long __user *datap = (__force unsigned long __user *)data;
16316
16317 switch (request) {
16318 /* read the word at location addr in the USER area. */
16319 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16320 if (addr < 0)
16321 return -EIO;
16322 ret = do_get_thread_area(child, addr,
16323 - (struct user_desc __user *) data);
16324 + (__force struct user_desc __user *) data);
16325 break;
16326
16327 case PTRACE_SET_THREAD_AREA:
16328 if (addr < 0)
16329 return -EIO;
16330 ret = do_set_thread_area(child, addr,
16331 - (struct user_desc __user *) data, 0);
16332 + (__force struct user_desc __user *) data, 0);
16333 break;
16334 #endif
16335
16336 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16337 #ifdef CONFIG_X86_PTRACE_BTS
16338 case PTRACE_BTS_CONFIG:
16339 ret = ptrace_bts_config
16340 - (child, data, (struct ptrace_bts_config __user *)addr);
16341 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16342 break;
16343
16344 case PTRACE_BTS_STATUS:
16345 ret = ptrace_bts_status
16346 - (child, data, (struct ptrace_bts_config __user *)addr);
16347 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16348 break;
16349
16350 case PTRACE_BTS_SIZE:
16351 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16352
16353 case PTRACE_BTS_GET:
16354 ret = ptrace_bts_read_record
16355 - (child, data, (struct bts_struct __user *) addr);
16356 + (child, data, (__force struct bts_struct __user *) addr);
16357 break;
16358
16359 case PTRACE_BTS_CLEAR:
16360 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16361
16362 case PTRACE_BTS_DRAIN:
16363 ret = ptrace_bts_drain
16364 - (child, data, (struct bts_struct __user *) addr);
16365 + (child, data, (__force struct bts_struct __user *) addr);
16366 break;
16367 #endif /* CONFIG_X86_PTRACE_BTS */
16368
16369 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16370 info.si_code = si_code;
16371
16372 /* User-mode ip? */
16373 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16374 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16375
16376 /* Send us the fake SIGTRAP */
16377 force_sig_info(SIGTRAP, &info, tsk);
16378 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16379 * We must return the syscall number to actually look up in the table.
16380 * This can be -1L to skip running any syscall at all.
16381 */
16382 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16383 +long syscall_trace_enter(struct pt_regs *regs)
16384 {
16385 long ret = 0;
16386
16387 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16388 return ret ?: regs->orig_ax;
16389 }
16390
16391 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16392 +void syscall_trace_leave(struct pt_regs *regs)
16393 {
16394 if (unlikely(current->audit_context))
16395 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
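
The ptrace casts above add __force where an integer argument (addr/data) is deliberately reinterpreted as a __user pointer. These annotations exist only for the sparse static checker and compile away otherwise; a hedged sketch of how such address-space annotations are conventionally defined (illustrative names, not the kernel's compiler-header definitions):

    /* Under sparse (__CHECKER__), __user-style annotations tag a pointer as
     * belonging to the user address space and __force suppresses the warning
     * when a cast across address spaces is intentional. With a normal
     * compiler both expand to nothing, so run-time behaviour is unchanged. */
    #ifdef __CHECKER__
    # define __example_user  __attribute__((noderef, address_space(1)))
    # define __example_force __attribute__((force))
    #else
    # define __example_user
    # define __example_force
    #endif

    static long copy_word_to_user(int __example_user *dst, int val)
    {
            (void)dst;                      /* stand-in for put_user() */
            (void)val;
            return 0;
    }

    int main(void)
    {
            long data = 0x1000;             /* e.g. a ptrace 'data' argument */

            /* Without __force, sparse would flag this cast into __user space. */
            return (int)copy_word_to_user((__example_force int __example_user *)data, 42);
    }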
16396 diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16397 --- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16398 +++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16399 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16400 EXPORT_SYMBOL(pm_power_off);
16401
16402 static const struct desc_ptr no_idt = {};
16403 -static int reboot_mode;
16404 +static unsigned short reboot_mode;
16405 enum reboot_type reboot_type = BOOT_KBD;
16406 int reboot_force;
16407
16408 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16409 controller to pulse the CPU reset line, which is more thorough, but
16410 doesn't work with at least one type of 486 motherboard. It is easy
16411 to stop this code working; hence the copious comments. */
16412 -static const unsigned long long
16413 -real_mode_gdt_entries [3] =
16414 +static struct desc_struct
16415 +real_mode_gdt_entries [3] __read_only =
16416 {
16417 - 0x0000000000000000ULL, /* Null descriptor */
16418 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16419 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16420 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16421 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16422 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16423 };
16424
16425 static const struct desc_ptr
16426 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16427 * specified by the code and length parameters.
16428 * We assume that length will always be less than 100!
16429 */
16430 -void machine_real_restart(const unsigned char *code, int length)
16431 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16432 {
16433 local_irq_disable();
16434
16435 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16436 /* Remap the kernel at virtual address zero, as well as offset zero
16437 from the kernel segment. This assumes the kernel segment starts at
16438 virtual address PAGE_OFFSET. */
16439 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16440 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16441 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16442 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16443
16444 /*
16445 * Use `swapper_pg_dir' as our page directory.
16446 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16447 boot)". This seems like a fairly standard thing that gets set by
16448 REBOOT.COM programs, and the previous reset routine did this
16449 too. */
16450 - *((unsigned short *)0x472) = reboot_mode;
16451 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16452
16453 /* For the switch to real mode, copy some code to low memory. It has
16454 to be in the first 64k because it is running in 16-bit mode, and it
16455 has to have the same physical and virtual address, because it turns
16456 off paging. Copy it near the end of the first page, out of the way
16457 of BIOS variables. */
16458 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16459 - real_mode_switch, sizeof (real_mode_switch));
16460 - memcpy((void *)(0x1000 - 100), code, length);
16461 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16462 + memcpy(__va(0x1000 - 100), code, length);
16463
16464 /* Set up the IDT for real mode. */
16465 load_idt(&real_mode_idt);
16466 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16467 __asm__ __volatile__ ("ljmp $0x0008,%0"
16468 :
16469 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16470 + do { } while (1);
16471 }
16472 #ifdef CONFIG_APM_MODULE
16473 EXPORT_SYMBOL(machine_real_restart);
16474 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16475 {
16476 }
16477
16478 -static void native_machine_emergency_restart(void)
16479 +__noreturn static void native_machine_emergency_restart(void)
16480 {
16481 int i;
16482
16483 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16484 #endif
16485 }
16486
16487 -static void __machine_emergency_restart(int emergency)
16488 +static __noreturn void __machine_emergency_restart(int emergency)
16489 {
16490 reboot_emergency = emergency;
16491 machine_ops.emergency_restart();
16492 }
16493
16494 -static void native_machine_restart(char *__unused)
16495 +static __noreturn void native_machine_restart(char *__unused)
16496 {
16497 printk("machine restart\n");
16498
16499 @@ -674,7 +674,7 @@ static void native_machine_restart(char
16500 __machine_emergency_restart(0);
16501 }
16502
16503 -static void native_machine_halt(void)
16504 +static __noreturn void native_machine_halt(void)
16505 {
16506 /* stop other cpus and apics */
16507 machine_shutdown();
16508 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
16509 stop_this_cpu(NULL);
16510 }
16511
16512 -static void native_machine_power_off(void)
16513 +__noreturn static void native_machine_power_off(void)
16514 {
16515 if (pm_power_off) {
16516 if (!reboot_force)
16517 @@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16518 }
16519 /* a fallback in case there is no PM info available */
16520 tboot_shutdown(TB_SHUTDOWN_HALT);
16521 + do { } while (1);
16522 }
16523
16524 struct machine_ops machine_ops = {
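
The reboot hunk swaps the raw 64-bit real_mode_gdt_entries constants for GDT_ENTRY_INIT(...) descriptors built from access byte, base and limit. The sketch below re-derives the two original constants from those fields to show the descriptor layout (pack_example() is an illustrative helper, not the kernel macro, whose argument encoding differs slightly):

    #include <stdio.h>
    #include <stdint.h>

    /* Pack a segment descriptor: limit[15:0] | base[23:0]<<16 | access<<40 |
     * limit[19:16]<<48 | flags<<52 | base[31:24]<<56. */
    static uint64_t pack_example(uint8_t access, uint32_t base, uint32_t limit,
                                 uint8_t flags)
    {
            return  (uint64_t)(limit & 0xffff)              |
                    (uint64_t)(base  & 0xffffff)     << 16  |
                    (uint64_t)access                 << 40  |
                    (uint64_t)((limit >> 16) & 0xf)  << 48  |
                    (uint64_t)(flags & 0xf)          << 52  |
                    (uint64_t)((base >> 24) & 0xff)  << 56;
    }

    int main(void)
    {
            /* 16-bit real-mode 64k code at 0x00000000 */
            printf("%#018llx\n", (unsigned long long)pack_example(0x9b, 0, 0xffff, 0));
            /* 16-bit real-mode 64k data at 0x00000100 */
            printf("%#018llx\n", (unsigned long long)pack_example(0x93, 0x100, 0xffff, 0));
            /* Expected: 0x00009b000000ffff and 0x000093000100ffff */
            return 0;
    }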
16525 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16526 --- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16527 +++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16528 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16529
16530 if (!boot_params.hdr.root_flags)
16531 root_mountflags &= ~MS_RDONLY;
16532 - init_mm.start_code = (unsigned long) _text;
16533 - init_mm.end_code = (unsigned long) _etext;
16534 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16535 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16536 init_mm.end_data = (unsigned long) _edata;
16537 init_mm.brk = _brk_end;
16538
16539 - code_resource.start = virt_to_phys(_text);
16540 - code_resource.end = virt_to_phys(_etext)-1;
16541 - data_resource.start = virt_to_phys(_etext);
16542 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16543 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16544 + data_resource.start = virt_to_phys(_sdata);
16545 data_resource.end = virt_to_phys(_edata)-1;
16546 bss_resource.start = virt_to_phys(&__bss_start);
16547 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16548 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16549 --- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16550 +++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16551 @@ -25,19 +25,17 @@
16552 # define DBG(x...)
16553 #endif
16554
16555 -DEFINE_PER_CPU(int, cpu_number);
16556 +#ifdef CONFIG_SMP
16557 +DEFINE_PER_CPU(unsigned int, cpu_number);
16558 EXPORT_PER_CPU_SYMBOL(cpu_number);
16559 +#endif
16560
16561 -#ifdef CONFIG_X86_64
16562 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16563 -#else
16564 -#define BOOT_PERCPU_OFFSET 0
16565 -#endif
16566
16567 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16568 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16569
16570 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16571 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16572 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16573 };
16574 EXPORT_SYMBOL(__per_cpu_offset);
16575 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16576 {
16577 #ifdef CONFIG_X86_32
16578 struct desc_struct gdt;
16579 + unsigned long base = per_cpu_offset(cpu);
16580
16581 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16582 - 0x2 | DESCTYPE_S, 0x8);
16583 - gdt.s = 1;
16584 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16585 + 0x83 | DESCTYPE_S, 0xC);
16586 write_gdt_entry(get_cpu_gdt_table(cpu),
16587 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16588 #endif
16589 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16590 /* alrighty, percpu areas up and running */
16591 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16592 for_each_possible_cpu(cpu) {
16593 +#ifdef CONFIG_CC_STACKPROTECTOR
16594 +#ifdef CONFIG_X86_32
16595 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16596 +#endif
16597 +#endif
16598 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16599 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16600 per_cpu(cpu_number, cpu) = cpu;
16601 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16602 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16603 #endif
16604 #endif
16605 +#ifdef CONFIG_CC_STACKPROTECTOR
16606 +#ifdef CONFIG_X86_32
16607 + if (!cpu)
16608 + per_cpu(stack_canary.canary, cpu) = canary;
16609 +#endif
16610 +#endif
16611 /*
16612 * Up to this point, the boot CPU has been using .data.init
16613 * area. Reload any changed state for the boot CPU.
16614 diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16615 --- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16616 +++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16617 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16618 * Align the stack pointer according to the i386 ABI,
16619 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16620 */
16621 - sp = ((sp + 4) & -16ul) - 4;
16622 + sp = ((sp - 12) & -16ul) - 4;
16623 #else /* !CONFIG_X86_32 */
16624 sp = round_down(sp, 16) - 8;
16625 #endif
16626 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16627 * Return an always-bogus address instead so we will die with SIGSEGV.
16628 */
16629 if (onsigstack && !likely(on_sig_stack(sp)))
16630 - return (void __user *)-1L;
16631 + return (__force void __user *)-1L;
16632
16633 /* save i387 state */
16634 if (used_math() && save_i387_xstate(*fpstate) < 0)
16635 - return (void __user *)-1L;
16636 + return (__force void __user *)-1L;
16637
16638 return (void __user *)sp;
16639 }
16640 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16641 }
16642
16643 if (current->mm->context.vdso)
16644 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16645 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16646 else
16647 - restorer = &frame->retcode;
16648 + restorer = (void __user *)&frame->retcode;
16649 if (ka->sa.sa_flags & SA_RESTORER)
16650 restorer = ka->sa.sa_restorer;
16651
16652 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16653 * reasons and because gdb uses it as a signature to notice
16654 * signal handler stack frames.
16655 */
16656 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16657 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16658
16659 if (err)
16660 return -EFAULT;
16661 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16662 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16663
16664 /* Set up to return from userspace. */
16665 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16666 + if (current->mm->context.vdso)
16667 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16668 + else
16669 + restorer = (void __user *)&frame->retcode;
16670 if (ka->sa.sa_flags & SA_RESTORER)
16671 restorer = ka->sa.sa_restorer;
16672 put_user_ex(restorer, &frame->pretcode);
16673 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16674 * reasons and because gdb uses it as a signature to notice
16675 * signal handler stack frames.
16676 */
16677 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16678 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16679 } put_user_catch(err);
16680
16681 if (err)
16682 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16683 int signr;
16684 sigset_t *oldset;
16685
16686 + pax_track_stack();
16687 +
16688 /*
16689 * We want the common case to go fast, which is why we may in certain
16690 * cases get here from kernel mode. Just return without doing anything
16691 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16692 * X86_32: vm86 regs switched out by assembly code before reaching
16693 * here, so testing against kernel CS suffices.
16694 */
16695 - if (!user_mode(regs))
16696 + if (!user_mode_novm(regs))
16697 return;
16698
16699 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
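
align_sigframe() above changes ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4. Both forms satisfy the i386 ABI rule quoted in the surrounding comment, ((sp + 4) & 15) == 0 at handler entry; the new form simply lands exactly one 16-byte slot lower. A quick standalone check of that identity:

    #include <stdio.h>

    int main(void)
    {
            unsigned long sp;
            int ok = 1;

            /* Exhaustively check one 16-byte period of stack-pointer values. */
            for (sp = 0x1000; sp < 0x1010; sp++) {
                    unsigned long old_sp = ((sp + 4) & -16ul) - 4;
                    unsigned long new_sp = ((sp - 12) & -16ul) - 4;

                    /* Both keep the i386 ABI invariant ((sp + 4) & 15) == 0 ... */
                    ok &= ((old_sp + 4) & 15) == 0;
                    ok &= ((new_sp + 4) & 15) == 0;
                    /* ... and the new form sits exactly one 16-byte slot lower. */
                    ok &= (old_sp - new_sp) == 16;
            }
            printf("invariants hold: %s\n", ok ? "yes" : "no");
            return !ok;
    }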
16700 diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16701 --- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16702 +++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16703 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16704 */
16705 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16706
16707 -void cpu_hotplug_driver_lock()
16708 +void cpu_hotplug_driver_lock(void)
16709 {
16710 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16711 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16712 }
16713
16714 -void cpu_hotplug_driver_unlock()
16715 +void cpu_hotplug_driver_unlock(void)
16716 {
16717 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16718 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16719 }
16720
16721 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16722 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16723 * target processor state.
16724 */
16725 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16726 - (unsigned long)stack_start.sp);
16727 + stack_start);
16728
16729 /*
16730 * Run STARTUP IPI loop.
16731 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16732 set_idle_for_cpu(cpu, c_idle.idle);
16733 do_rest:
16734 per_cpu(current_task, cpu) = c_idle.idle;
16735 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16736 #ifdef CONFIG_X86_32
16737 /* Stack for startup_32 can be just as for start_secondary onwards */
16738 irq_ctx_init(cpu);
16739 @@ -750,13 +751,15 @@ do_rest:
16740 #else
16741 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16742 initial_gs = per_cpu_offset(cpu);
16743 - per_cpu(kernel_stack, cpu) =
16744 - (unsigned long)task_stack_page(c_idle.idle) -
16745 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16746 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16747 #endif
16748 +
16749 + pax_open_kernel();
16750 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16751 + pax_close_kernel();
16752 +
16753 initial_code = (unsigned long)start_secondary;
16754 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16755 + stack_start = c_idle.idle->thread.sp;
16756
16757 /* start_ip had better be page-aligned! */
16758 start_ip = setup_trampoline();
16759 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16760
16761 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16762
16763 +#ifdef CONFIG_PAX_PER_CPU_PGD
16764 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16765 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16766 + KERNEL_PGD_PTRS);
16767 +#endif
16768 +
16769 err = do_boot_cpu(apicid, cpu);
16770
16771 if (err) {
16772 diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16773 --- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16774 +++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16775 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16776 struct desc_struct *desc;
16777 unsigned long base;
16778
16779 - seg &= ~7UL;
16780 + seg >>= 3;
16781
16782 mutex_lock(&child->mm->context.lock);
16783 - if (unlikely((seg >> 3) >= child->mm->context.size))
16784 + if (unlikely(seg >= child->mm->context.size))
16785 addr = -1L; /* bogus selector, access would fault */
16786 else {
16787 desc = child->mm->context.ldt + seg;
16788 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16789 addr += base;
16790 }
16791 mutex_unlock(&child->mm->context.lock);
16792 - }
16793 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16794 + addr = ktla_ktva(addr);
16795
16796 return addr;
16797 }
16798 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16799 unsigned char opcode[15];
16800 unsigned long addr = convert_ip_to_linear(child, regs);
16801
16802 + if (addr == -EINVAL)
16803 + return 0;
16804 +
16805 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16806 for (i = 0; i < copied; i++) {
16807 switch (opcode[i]) {
16808 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16809
16810 #ifdef CONFIG_X86_64
16811 case 0x40 ... 0x4f:
16812 - if (regs->cs != __USER_CS)
16813 + if ((regs->cs & 0xffff) != __USER_CS)
16814 /* 32-bit mode: register increment */
16815 return 0;
16816 /* 64-bit mode: REX prefix */
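
The step.c hunk replaces seg &= ~7UL with seg >>= 3: an x86 selector is index << 3 | TI << 2 | RPL, so masking only clears the RPL/TI bits (leaving a byte offset), while shifting yields the descriptor index that context.size, a count of LDT entries, is compared against. A small standalone decoder, using the usual 32-bit Linux user selectors purely as examples:

    #include <stdio.h>

    static void decode_selector(unsigned int sel)
    {
            unsigned int rpl   = sel & 3;           /* requested privilege level */
            unsigned int ti    = (sel >> 2) & 1;    /* 0 = GDT, 1 = LDT */
            unsigned int index = sel >> 3;          /* descriptor index */

            printf("sel=%#05x index=%u ti=%u rpl=%u\n", sel, index, ti, rpl);
    }

    int main(void)
    {
            decode_selector(0x73);  /* typical 32-bit __USER_CS: index 14, GDT, RPL 3 */
            decode_selector(0x7b);  /* typical 32-bit __USER_DS: index 15, GDT, RPL 3 */
            decode_selector(0x0f);  /* an LDT selector: index 1, TI=1, RPL 3 */
            return 0;
    }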
16817 diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
16818 --- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16819 +++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16820 @@ -1,3 +1,4 @@
16821 +.section .rodata,"a",@progbits
16822 ENTRY(sys_call_table)
16823 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16824 .long sys_exit
16825 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
16826 --- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16827 +++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16828 @@ -24,6 +24,21 @@
16829
16830 #include <asm/syscalls.h>
16831
16832 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16833 +{
16834 + unsigned long pax_task_size = TASK_SIZE;
16835 +
16836 +#ifdef CONFIG_PAX_SEGMEXEC
16837 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16838 + pax_task_size = SEGMEXEC_TASK_SIZE;
16839 +#endif
16840 +
16841 + if (len > pax_task_size || addr > pax_task_size - len)
16842 + return -EINVAL;
16843 +
16844 + return 0;
16845 +}
16846 +
16847 /*
16848 * Perform the select(nd, in, out, ex, tv) and mmap() system
16849 * calls. Linux/i386 didn't use to be able to handle more than
16850 @@ -58,6 +73,212 @@ out:
16851 return err;
16852 }
16853
16854 +unsigned long
16855 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16856 + unsigned long len, unsigned long pgoff, unsigned long flags)
16857 +{
16858 + struct mm_struct *mm = current->mm;
16859 + struct vm_area_struct *vma;
16860 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16861 +
16862 +#ifdef CONFIG_PAX_SEGMEXEC
16863 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16864 + pax_task_size = SEGMEXEC_TASK_SIZE;
16865 +#endif
16866 +
16867 + pax_task_size -= PAGE_SIZE;
16868 +
16869 + if (len > pax_task_size)
16870 + return -ENOMEM;
16871 +
16872 + if (flags & MAP_FIXED)
16873 + return addr;
16874 +
16875 +#ifdef CONFIG_PAX_RANDMMAP
16876 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16877 +#endif
16878 +
16879 + if (addr) {
16880 + addr = PAGE_ALIGN(addr);
16881 + if (pax_task_size - len >= addr) {
16882 + vma = find_vma(mm, addr);
16883 + if (check_heap_stack_gap(vma, addr, len))
16884 + return addr;
16885 + }
16886 + }
16887 + if (len > mm->cached_hole_size) {
16888 + start_addr = addr = mm->free_area_cache;
16889 + } else {
16890 + start_addr = addr = mm->mmap_base;
16891 + mm->cached_hole_size = 0;
16892 + }
16893 +
16894 +#ifdef CONFIG_PAX_PAGEEXEC
16895 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16896 + start_addr = 0x00110000UL;
16897 +
16898 +#ifdef CONFIG_PAX_RANDMMAP
16899 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16900 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16901 +#endif
16902 +
16903 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16904 + start_addr = addr = mm->mmap_base;
16905 + else
16906 + addr = start_addr;
16907 + }
16908 +#endif
16909 +
16910 +full_search:
16911 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16912 + /* At this point: (!vma || addr < vma->vm_end). */
16913 + if (pax_task_size - len < addr) {
16914 + /*
16915 + * Start a new search - just in case we missed
16916 + * some holes.
16917 + */
16918 + if (start_addr != mm->mmap_base) {
16919 + start_addr = addr = mm->mmap_base;
16920 + mm->cached_hole_size = 0;
16921 + goto full_search;
16922 + }
16923 + return -ENOMEM;
16924 + }
16925 + if (check_heap_stack_gap(vma, addr, len))
16926 + break;
16927 + if (addr + mm->cached_hole_size < vma->vm_start)
16928 + mm->cached_hole_size = vma->vm_start - addr;
16929 + addr = vma->vm_end;
16930 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16931 + start_addr = addr = mm->mmap_base;
16932 + mm->cached_hole_size = 0;
16933 + goto full_search;
16934 + }
16935 + }
16936 +
16937 + /*
16938 + * Remember the place where we stopped the search:
16939 + */
16940 + mm->free_area_cache = addr + len;
16941 + return addr;
16942 +}
16943 +
16944 +unsigned long
16945 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16946 + const unsigned long len, const unsigned long pgoff,
16947 + const unsigned long flags)
16948 +{
16949 + struct vm_area_struct *vma;
16950 + struct mm_struct *mm = current->mm;
16951 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16952 +
16953 +#ifdef CONFIG_PAX_SEGMEXEC
16954 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16955 + pax_task_size = SEGMEXEC_TASK_SIZE;
16956 +#endif
16957 +
16958 + pax_task_size -= PAGE_SIZE;
16959 +
16960 + /* requested length too big for entire address space */
16961 + if (len > pax_task_size)
16962 + return -ENOMEM;
16963 +
16964 + if (flags & MAP_FIXED)
16965 + return addr;
16966 +
16967 +#ifdef CONFIG_PAX_PAGEEXEC
16968 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16969 + goto bottomup;
16970 +#endif
16971 +
16972 +#ifdef CONFIG_PAX_RANDMMAP
16973 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16974 +#endif
16975 +
16976 + /* requesting a specific address */
16977 + if (addr) {
16978 + addr = PAGE_ALIGN(addr);
16979 + if (pax_task_size - len >= addr) {
16980 + vma = find_vma(mm, addr);
16981 + if (check_heap_stack_gap(vma, addr, len))
16982 + return addr;
16983 + }
16984 + }
16985 +
16986 + /* check if free_area_cache is useful for us */
16987 + if (len <= mm->cached_hole_size) {
16988 + mm->cached_hole_size = 0;
16989 + mm->free_area_cache = mm->mmap_base;
16990 + }
16991 +
16992 + /* either no address requested or can't fit in requested address hole */
16993 + addr = mm->free_area_cache;
16994 +
16995 + /* make sure it can fit in the remaining address space */
16996 + if (addr > len) {
16997 + vma = find_vma(mm, addr-len);
16998 + if (check_heap_stack_gap(vma, addr - len, len))
16999 + /* remember the address as a hint for next time */
17000 + return (mm->free_area_cache = addr-len);
17001 + }
17002 +
17003 + if (mm->mmap_base < len)
17004 + goto bottomup;
17005 +
17006 + addr = mm->mmap_base-len;
17007 +
17008 + do {
17009 + /*
17010 + * Lookup failure means no vma is above this address,
17011 + * else if new region fits below vma->vm_start,
17012 + * return with success:
17013 + */
17014 + vma = find_vma(mm, addr);
17015 + if (check_heap_stack_gap(vma, addr, len))
17016 + /* remember the address as a hint for next time */
17017 + return (mm->free_area_cache = addr);
17018 +
17019 + /* remember the largest hole we saw so far */
17020 + if (addr + mm->cached_hole_size < vma->vm_start)
17021 + mm->cached_hole_size = vma->vm_start - addr;
17022 +
17023 + /* try just below the current vma->vm_start */
17024 + addr = skip_heap_stack_gap(vma, len);
17025 + } while (!IS_ERR_VALUE(addr));
17026 +
17027 +bottomup:
17028 + /*
17029 + * A failed mmap() very likely causes application failure,
17030 + * so fall back to the bottom-up function here. This scenario
17031 + * can happen with large stack limits and large mmap()
17032 + * allocations.
17033 + */
17034 +
17035 +#ifdef CONFIG_PAX_SEGMEXEC
17036 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17037 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17038 + else
17039 +#endif
17040 +
17041 + mm->mmap_base = TASK_UNMAPPED_BASE;
17042 +
17043 +#ifdef CONFIG_PAX_RANDMMAP
17044 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17045 + mm->mmap_base += mm->delta_mmap;
17046 +#endif
17047 +
17048 + mm->free_area_cache = mm->mmap_base;
17049 + mm->cached_hole_size = ~0UL;
17050 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17051 + /*
17052 + * Restore the topdown base:
17053 + */
17054 + mm->mmap_base = base;
17055 + mm->free_area_cache = base;
17056 + mm->cached_hole_size = ~0UL;
17057 +
17058 + return addr;
17059 +}
17060
17061 struct sel_arg_struct {
17062 unsigned long n;
17063 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17064 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17065 case SEMTIMEDOP:
17066 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17067 - (const struct timespec __user *)fifth);
17068 + (__force const struct timespec __user *)fifth);
17069
17070 case SEMGET:
17071 return sys_semget(first, second, third);
17072 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17073 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17074 if (ret)
17075 return ret;
17076 - return put_user(raddr, (ulong __user *) third);
17077 + return put_user(raddr, (__force ulong __user *) third);
17078 }
17079 case 1: /* iBCS2 emulator entry point */
17080 if (!segment_eq(get_fs(), get_ds()))
17081 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17082
17083 return error;
17084 }
17085 -
17086 -
17087 -/*
17088 - * Do a system call from kernel instead of calling sys_execve so we
17089 - * end up with proper pt_regs.
17090 - */
17091 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17092 -{
17093 - long __res;
17094 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17095 - : "=a" (__res)
17096 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17097 - return __res;
17098 -}
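
Both arch_get_unmapped_area() variants above (and the x86_64 ones that follow) replace the plain !vma || addr + len <= vma->vm_start test with check_heap_stack_gap(), a helper added elsewhere in this patch. As a deliberately simplified sketch of the idea only (the gap size, flag and field names below are assumptions, not the patch's implementation), the check additionally keeps a guard gap between a new mapping and a stack that grows down toward it:

    #include <stdbool.h>

    #define EXAMPLE_GUARD_GAP (64UL * 1024)         /* assumed gap, e.g. 64 KiB */
    #define EXAMPLE_VM_GROWSDOWN 0x1UL

    struct example_vma {
            unsigned long vm_start;
            unsigned long vm_end;
            unsigned long vm_flags;
    };

    /* Return true when [addr, addr+len) fits below the next vma, keeping an
     * extra guard gap if that vma is a downward-growing stack. Simplified:
     * the real helper in this patch is shaped differently and its gap is
     * tunable. */
    static bool example_check_gap(const struct example_vma *next,
                                  unsigned long addr, unsigned long len)
    {
            unsigned long gap = 0;

            if (!next)
                    return true;
            if (next->vm_flags & EXAMPLE_VM_GROWSDOWN)
                    gap = EXAMPLE_GUARD_GAP;
            return addr + len + gap <= next->vm_start;
    }

    int main(void)
    {
            struct example_vma stack = { 0xbf000000UL, 0xc0000000UL,
                                         EXAMPLE_VM_GROWSDOWN };
            /* A request ending right at the stack base is rejected ... */
            bool a = example_check_gap(&stack, 0xbefff000UL, 0x1000);
            /* ... while one leaving the guard gap is accepted. */
            bool b = example_check_gap(&stack, 0xbee00000UL, 0x1000);
            return (a == false && b == true) ? 0 : 1;
    }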
17099 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17100 --- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17101 +++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17102 @@ -32,8 +32,8 @@ out:
17103 return error;
17104 }
17105
17106 -static void find_start_end(unsigned long flags, unsigned long *begin,
17107 - unsigned long *end)
17108 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17109 + unsigned long *begin, unsigned long *end)
17110 {
17111 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17112 unsigned long new_begin;
17113 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17114 *begin = new_begin;
17115 }
17116 } else {
17117 - *begin = TASK_UNMAPPED_BASE;
17118 + *begin = mm->mmap_base;
17119 *end = TASK_SIZE;
17120 }
17121 }
17122 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17123 if (flags & MAP_FIXED)
17124 return addr;
17125
17126 - find_start_end(flags, &begin, &end);
17127 + find_start_end(mm, flags, &begin, &end);
17128
17129 if (len > end)
17130 return -ENOMEM;
17131
17132 +#ifdef CONFIG_PAX_RANDMMAP
17133 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17134 +#endif
17135 +
17136 if (addr) {
17137 addr = PAGE_ALIGN(addr);
17138 vma = find_vma(mm, addr);
17139 - if (end - len >= addr &&
17140 - (!vma || addr + len <= vma->vm_start))
17141 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17142 return addr;
17143 }
17144 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17145 @@ -106,7 +109,7 @@ full_search:
17146 }
17147 return -ENOMEM;
17148 }
17149 - if (!vma || addr + len <= vma->vm_start) {
17150 + if (check_heap_stack_gap(vma, addr, len)) {
17151 /*
17152 * Remember the place where we stopped the search:
17153 */
17154 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17155 {
17156 struct vm_area_struct *vma;
17157 struct mm_struct *mm = current->mm;
17158 - unsigned long addr = addr0;
17159 + unsigned long base = mm->mmap_base, addr = addr0;
17160
17161 /* requested length too big for entire address space */
17162 if (len > TASK_SIZE)
17163 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17164 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17165 goto bottomup;
17166
17167 +#ifdef CONFIG_PAX_RANDMMAP
17168 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17169 +#endif
17170 +
17171 /* requesting a specific address */
17172 if (addr) {
17173 addr = PAGE_ALIGN(addr);
17174 - vma = find_vma(mm, addr);
17175 - if (TASK_SIZE - len >= addr &&
17176 - (!vma || addr + len <= vma->vm_start))
17177 - return addr;
17178 + if (TASK_SIZE - len >= addr) {
17179 + vma = find_vma(mm, addr);
17180 + if (check_heap_stack_gap(vma, addr, len))
17181 + return addr;
17182 + }
17183 }
17184
17185 /* check if free_area_cache is useful for us */
17186 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17187 /* make sure it can fit in the remaining address space */
17188 if (addr > len) {
17189 vma = find_vma(mm, addr-len);
17190 - if (!vma || addr <= vma->vm_start)
17191 + if (check_heap_stack_gap(vma, addr - len, len))
17192 /* remember the address as a hint for next time */
17193 return mm->free_area_cache = addr-len;
17194 }
17195 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17196 * return with success:
17197 */
17198 vma = find_vma(mm, addr);
17199 - if (!vma || addr+len <= vma->vm_start)
17200 + if (check_heap_stack_gap(vma, addr, len))
17201 /* remember the address as a hint for next time */
17202 return mm->free_area_cache = addr;
17203
17204 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17205 mm->cached_hole_size = vma->vm_start - addr;
17206
17207 /* try just below the current vma->vm_start */
17208 - addr = vma->vm_start-len;
17209 - } while (len < vma->vm_start);
17210 + addr = skip_heap_stack_gap(vma, len);
17211 + } while (!IS_ERR_VALUE(addr));
17212
17213 bottomup:
17214 /*
17215 @@ -198,13 +206,21 @@ bottomup:
17216 * can happen with large stack limits and large mmap()
17217 * allocations.
17218 */
17219 + mm->mmap_base = TASK_UNMAPPED_BASE;
17220 +
17221 +#ifdef CONFIG_PAX_RANDMMAP
17222 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17223 + mm->mmap_base += mm->delta_mmap;
17224 +#endif
17225 +
17226 + mm->free_area_cache = mm->mmap_base;
17227 mm->cached_hole_size = ~0UL;
17228 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17229 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17230 /*
17231 * Restore the topdown base:
17232 */
17233 - mm->free_area_cache = mm->mmap_base;
17234 + mm->mmap_base = base;
17235 + mm->free_area_cache = base;
17236 mm->cached_hole_size = ~0UL;
17237
17238 return addr;
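The sys_x86_64.c hunks above make find_start_end() return mm->mmap_base instead of the fixed TASK_UNMAPPED_BASE, ignore the caller-supplied address hint when PAX_RANDMMAP is enabled, and replace every open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), which also keeps a guard gap in front of an adjacent stack mapping. That helper is defined elsewhere in the patch and is not shown here; the standalone C sketch below only models the general shape of such a gap check, using hypothetical names (vm_area_model, check_gap_model, stack_guard_gap_model) and an assumed 64 KiB gap.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins; not the patch's actual definitions. */
struct vm_area_model {
    unsigned long vm_start;
    unsigned long vm_end;
    unsigned long vm_flags;
};
#define VM_GROWSDOWN_MODEL 0x0100UL
static const unsigned long stack_guard_gap_model = 64UL * 1024; /* assumed 64 KiB */

/* True if [addr, addr+len) fits below the next VMA, keeping an extra guard
 * gap when that next VMA is a downward-growing stack. */
static bool check_gap_model(const struct vm_area_model *next,
                            unsigned long addr, unsigned long len)
{
    unsigned long gap = 0;

    if (!next)
        return true;                      /* nothing above the candidate range */
    if (next->vm_flags & VM_GROWSDOWN_MODEL)
        gap = stack_guard_gap_model;      /* keep distance from a stack VMA */
    return addr + len + gap <= next->vm_start;
}

int main(void)
{
    struct vm_area_model stack = { 0x7f0000000000UL, 0x7f0000021000UL, VM_GROWSDOWN_MODEL };

    assert(check_gap_model(NULL, 0x10000UL, 0x1000UL));
    assert(!check_gap_model(&stack, stack.vm_start - 0x1000UL, 0x1000UL));
    return 0;
}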
17239 diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17240 --- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17241 +++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17242 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17243
17244 void tboot_shutdown(u32 shutdown_type)
17245 {
17246 - void (*shutdown)(void);
17247 + void (* __noreturn shutdown)(void);
17248
17249 if (!tboot_enabled())
17250 return;
17251 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17252
17253 switch_to_tboot_pt();
17254
17255 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17256 + shutdown = (void *)tboot->shutdown_entry;
17257 shutdown();
17258
17259 /* should not reach here */
17260 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17261 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17262 }
17263
17264 -static atomic_t ap_wfs_count;
17265 +static atomic_unchecked_t ap_wfs_count;
17266
17267 static int tboot_wait_for_aps(int num_aps)
17268 {
17269 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17270 {
17271 switch (action) {
17272 case CPU_DYING:
17273 - atomic_inc(&ap_wfs_count);
17274 + atomic_inc_unchecked(&ap_wfs_count);
17275 if (num_online_cpus() == 1)
17276 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17277 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17278 return NOTIFY_BAD;
17279 break;
17280 }
17281 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17282
17283 tboot_create_trampoline();
17284
17285 - atomic_set(&ap_wfs_count, 0);
17286 + atomic_set_unchecked(&ap_wfs_count, 0);
17287 register_hotcpu_notifier(&tboot_cpu_notifier);
17288 return 0;
17289 }
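In the tboot.c hunk, ap_wfs_count changes from atomic_t to atomic_unchecked_t, the PaX/grsecurity counter type whose operations are left out of the PAX_REFCOUNT overflow checks (reasonable for a plain event counter that is not a reference count), and the shutdown function pointer gains __noreturn. The type itself is defined elsewhere in the patch; purely as an illustration, the sketch below models an "unchecked" counter in userspace C with GCC's __atomic builtins, under assumed names.

#include <assert.h>
#include <stdio.h>

/* Assumed model of the "unchecked" counter flavour: a plain wrapping int
 * counter with no overflow detection, manipulated atomically. */
typedef struct { int counter; } atomic_unchecked_model_t;

static void atomic_inc_unchecked_model(atomic_unchecked_model_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static int atomic_read_unchecked_model(atomic_unchecked_model_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_model_t ap_wfs_count = { 0 };

    atomic_inc_unchecked_model(&ap_wfs_count);   /* one CPU entering wait-for-SIPI */
    atomic_inc_unchecked_model(&ap_wfs_count);
    assert(atomic_read_unchecked_model(&ap_wfs_count) == 2);
    printf("ap_wfs_count model = %d\n", atomic_read_unchecked_model(&ap_wfs_count));
    return 0;
}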
17290 diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17291 --- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17292 +++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17293 @@ -26,17 +26,13 @@
17294 int timer_ack;
17295 #endif
17296
17297 -#ifdef CONFIG_X86_64
17298 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17299 -#endif
17300 -
17301 unsigned long profile_pc(struct pt_regs *regs)
17302 {
17303 unsigned long pc = instruction_pointer(regs);
17304
17305 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17306 + if (!user_mode(regs) && in_lock_functions(pc)) {
17307 #ifdef CONFIG_FRAME_POINTER
17308 - return *(unsigned long *)(regs->bp + sizeof(long));
17309 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17310 #else
17311 unsigned long *sp =
17312 (unsigned long *)kernel_stack_pointer(regs);
17313 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17314 * or above a saved flags. Eflags has bits 22-31 zero,
17315 * kernel addresses don't.
17316 */
17317 +
17318 +#ifdef CONFIG_PAX_KERNEXEC
17319 + return ktla_ktva(sp[0]);
17320 +#else
17321 if (sp[0] >> 22)
17322 return sp[0];
17323 if (sp[1] >> 22)
17324 return sp[1];
17325 #endif
17326 +
17327 +#endif
17328 }
17329 return pc;
17330 }
17331 diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17332 --- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17333 +++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17334 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17335 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17336 return -EINVAL;
17337
17338 +#ifdef CONFIG_PAX_SEGMEXEC
17339 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17340 + return -EINVAL;
17341 +#endif
17342 +
17343 set_tls_desc(p, idx, &info, 1);
17344
17345 return 0;
17346 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17347 --- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17348 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17349 @@ -32,6 +32,12 @@
17350 #include <asm/segment.h>
17351 #include <asm/page_types.h>
17352
17353 +#ifdef CONFIG_PAX_KERNEXEC
17354 +#define ta(X) (X)
17355 +#else
17356 +#define ta(X) ((X) - __PAGE_OFFSET)
17357 +#endif
17358 +
17359 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17360 __CPUINITRODATA
17361 .code16
17362 @@ -60,7 +66,7 @@ r_base = .
17363 inc %ax # protected mode (PE) bit
17364 lmsw %ax # into protected mode
17365 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17366 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17367 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17368
17369 # These need to be in the same 64K segment as the above;
17370 # hence we don't use the boot_gdt_descr defined in head.S
17371 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17372 --- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17373 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17374 @@ -91,7 +91,7 @@ startup_32:
17375 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17376 movl %eax, %ds
17377
17378 - movl $X86_CR4_PAE, %eax
17379 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17380 movl %eax, %cr4 # Enable PAE mode
17381
17382 # Setup trampoline 4 level pagetables
17383 @@ -127,7 +127,7 @@ startup_64:
17384 no_longmode:
17385 hlt
17386 jmp no_longmode
17387 -#include "verify_cpu_64.S"
17388 +#include "verify_cpu.S"
17389
17390 # Careful these need to be in the same 64K segment as the above;
17391 tidt:
17392 @@ -138,7 +138,7 @@ tidt:
17393 # so the kernel can live anywhere
17394 .balign 4
17395 tgdt:
17396 - .short tgdt_end - tgdt # gdt limit
17397 + .short tgdt_end - tgdt - 1 # gdt limit
17398 .long tgdt - r_base
17399 .short 0
17400 .quad 0x00cf9b000000ffff # __KERNEL32_CS
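The one-line tgdt fix above follows from how x86 descriptor-table pseudo-descriptors are encoded: the 16-bit limit field holds the offset of the last valid byte, i.e. the table size minus one, so "tgdt_end - tgdt" overstated the limit by one byte. A minimal C illustration of that arithmetic, using an assumed three-entry table:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t gdt[3] = { 0 };   /* null descriptor plus two assumed 8-byte entries */

    /* LGDT expects size-in-bytes minus one, not the raw size. */
    uint16_t limit = (uint16_t)(sizeof(gdt) - 1);

    assert(limit == 3 * 8 - 1);   /* offset of the last addressable byte */
    printf("gdt limit = %u (table is %zu bytes)\n", limit, sizeof(gdt));
    return 0;
}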
17401 diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17402 --- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17403 +++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17404 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17405
17406 /* Do we ignore FPU interrupts ? */
17407 char ignore_fpu_irq;
17408 -
17409 -/*
17410 - * The IDT has to be page-aligned to simplify the Pentium
17411 - * F0 0F bug workaround.
17412 - */
17413 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17414 #endif
17415
17416 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17417 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17418 static inline void
17419 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17420 {
17421 - if (!user_mode_vm(regs))
17422 + if (!user_mode(regs))
17423 die(str, regs, err);
17424 }
17425 #endif
17426
17427 static void __kprobes
17428 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17429 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17430 long error_code, siginfo_t *info)
17431 {
17432 struct task_struct *tsk = current;
17433
17434 #ifdef CONFIG_X86_32
17435 - if (regs->flags & X86_VM_MASK) {
17436 + if (v8086_mode(regs)) {
17437 /*
17438 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17439 * On nmi (interrupt 2), do_trap should not be called.
17440 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17441 }
17442 #endif
17443
17444 - if (!user_mode(regs))
17445 + if (!user_mode_novm(regs))
17446 goto kernel_trap;
17447
17448 #ifdef CONFIG_X86_32
17449 @@ -158,7 +152,7 @@ trap_signal:
17450 printk_ratelimit()) {
17451 printk(KERN_INFO
17452 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17453 - tsk->comm, tsk->pid, str,
17454 + tsk->comm, task_pid_nr(tsk), str,
17455 regs->ip, regs->sp, error_code);
17456 print_vma_addr(" in ", regs->ip);
17457 printk("\n");
17458 @@ -175,8 +169,20 @@ kernel_trap:
17459 if (!fixup_exception(regs)) {
17460 tsk->thread.error_code = error_code;
17461 tsk->thread.trap_no = trapnr;
17462 +
17463 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17464 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17465 + str = "PAX: suspicious stack segment fault";
17466 +#endif
17467 +
17468 die(str, regs, error_code);
17469 }
17470 +
17471 +#ifdef CONFIG_PAX_REFCOUNT
17472 + if (trapnr == 4)
17473 + pax_report_refcount_overflow(regs);
17474 +#endif
17475 +
17476 return;
17477
17478 #ifdef CONFIG_X86_32
17479 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17480 conditional_sti(regs);
17481
17482 #ifdef CONFIG_X86_32
17483 - if (regs->flags & X86_VM_MASK)
17484 + if (v8086_mode(regs))
17485 goto gp_in_vm86;
17486 #endif
17487
17488 tsk = current;
17489 - if (!user_mode(regs))
17490 + if (!user_mode_novm(regs))
17491 goto gp_in_kernel;
17492
17493 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17494 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17495 + struct mm_struct *mm = tsk->mm;
17496 + unsigned long limit;
17497 +
17498 + down_write(&mm->mmap_sem);
17499 + limit = mm->context.user_cs_limit;
17500 + if (limit < TASK_SIZE) {
17501 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17502 + up_write(&mm->mmap_sem);
17503 + return;
17504 + }
17505 + up_write(&mm->mmap_sem);
17506 + }
17507 +#endif
17508 +
17509 tsk->thread.error_code = error_code;
17510 tsk->thread.trap_no = 13;
17511
17512 @@ -305,6 +327,13 @@ gp_in_kernel:
17513 if (notify_die(DIE_GPF, "general protection fault", regs,
17514 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17515 return;
17516 +
17517 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17518 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17519 + die("PAX: suspicious general protection fault", regs, error_code);
17520 + else
17521 +#endif
17522 +
17523 die("general protection fault", regs, error_code);
17524 }
17525
17526 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17527 dotraplinkage notrace __kprobes void
17528 do_nmi(struct pt_regs *regs, long error_code)
17529 {
17530 +
17531 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17532 + if (!user_mode(regs)) {
17533 + unsigned long cs = regs->cs & 0xFFFF;
17534 + unsigned long ip = ktva_ktla(regs->ip);
17535 +
17536 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17537 + regs->ip = ip;
17538 + }
17539 +#endif
17540 +
17541 nmi_enter();
17542
17543 inc_irq_stat(__nmi_count);
17544 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17545 }
17546
17547 #ifdef CONFIG_X86_32
17548 - if (regs->flags & X86_VM_MASK)
17549 + if (v8086_mode(regs))
17550 goto debug_vm86;
17551 #endif
17552
17553 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17554 * kernel space (but re-enable TF when returning to user mode).
17555 */
17556 if (condition & DR_STEP) {
17557 - if (!user_mode(regs))
17558 + if (!user_mode_novm(regs))
17559 goto clear_TF_reenable;
17560 }
17561
17562 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17563 * Handle strange cache flush from user space exception
17564 * in all other cases. This is undocumented behaviour.
17565 */
17566 - if (regs->flags & X86_VM_MASK) {
17567 + if (v8086_mode(regs)) {
17568 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17569 return;
17570 }
17571 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17572 void __math_state_restore(void)
17573 {
17574 struct thread_info *thread = current_thread_info();
17575 - struct task_struct *tsk = thread->task;
17576 + struct task_struct *tsk = current;
17577
17578 /*
17579 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17580 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17581 */
17582 asmlinkage void math_state_restore(void)
17583 {
17584 - struct thread_info *thread = current_thread_info();
17585 - struct task_struct *tsk = thread->task;
17586 + struct task_struct *tsk = current;
17587
17588 if (!tsk_used_math(tsk)) {
17589 local_irq_enable();
17590 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17591 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17592 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17593 @@ -1,105 +0,0 @@
17594 -/*
17595 - *
17596 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17597 - * code has been borrowed from boot/setup.S and was introduced by
17598 - * Andi Kleen.
17599 - *
17600 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17601 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17602 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17603 - *
17604 - * This source code is licensed under the GNU General Public License,
17605 - * Version 2. See the file COPYING for more details.
17606 - *
17607 - * This is a common code for verification whether CPU supports
17608 - * long mode and SSE or not. It is not called directly instead this
17609 - * file is included at various places and compiled in that context.
17610 - * Following are the current usage.
17611 - *
17612 - * This file is included by both 16bit and 32bit code.
17613 - *
17614 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17615 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17616 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17617 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17618 - *
17619 - * verify_cpu, returns the status of cpu check in register %eax.
17620 - * 0: Success 1: Failure
17621 - *
17622 - * The caller needs to check for the error code and take the action
17623 - * appropriately. Either display a message or halt.
17624 - */
17625 -
17626 -#include <asm/cpufeature.h>
17627 -
17628 -verify_cpu:
17629 - pushfl # Save caller passed flags
17630 - pushl $0 # Kill any dangerous flags
17631 - popfl
17632 -
17633 - pushfl # standard way to check for cpuid
17634 - popl %eax
17635 - movl %eax,%ebx
17636 - xorl $0x200000,%eax
17637 - pushl %eax
17638 - popfl
17639 - pushfl
17640 - popl %eax
17641 - cmpl %eax,%ebx
17642 - jz verify_cpu_no_longmode # cpu has no cpuid
17643 -
17644 - movl $0x0,%eax # See if cpuid 1 is implemented
17645 - cpuid
17646 - cmpl $0x1,%eax
17647 - jb verify_cpu_no_longmode # no cpuid 1
17648 -
17649 - xor %di,%di
17650 - cmpl $0x68747541,%ebx # AuthenticAMD
17651 - jnz verify_cpu_noamd
17652 - cmpl $0x69746e65,%edx
17653 - jnz verify_cpu_noamd
17654 - cmpl $0x444d4163,%ecx
17655 - jnz verify_cpu_noamd
17656 - mov $1,%di # cpu is from AMD
17657 -
17658 -verify_cpu_noamd:
17659 - movl $0x1,%eax # Does the cpu have what it takes
17660 - cpuid
17661 - andl $REQUIRED_MASK0,%edx
17662 - xorl $REQUIRED_MASK0,%edx
17663 - jnz verify_cpu_no_longmode
17664 -
17665 - movl $0x80000000,%eax # See if extended cpuid is implemented
17666 - cpuid
17667 - cmpl $0x80000001,%eax
17668 - jb verify_cpu_no_longmode # no extended cpuid
17669 -
17670 - movl $0x80000001,%eax # Does the cpu have what it takes
17671 - cpuid
17672 - andl $REQUIRED_MASK1,%edx
17673 - xorl $REQUIRED_MASK1,%edx
17674 - jnz verify_cpu_no_longmode
17675 -
17676 -verify_cpu_sse_test:
17677 - movl $1,%eax
17678 - cpuid
17679 - andl $SSE_MASK,%edx
17680 - cmpl $SSE_MASK,%edx
17681 - je verify_cpu_sse_ok
17682 - test %di,%di
17683 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17684 - movl $0xc0010015,%ecx # HWCR
17685 - rdmsr
17686 - btr $15,%eax # enable SSE
17687 - wrmsr
17688 - xor %di,%di # don't loop
17689 - jmp verify_cpu_sse_test # try again
17690 -
17691 -verify_cpu_no_longmode:
17692 - popfl # Restore caller passed flags
17693 - movl $1,%eax
17694 - ret
17695 -verify_cpu_sse_ok:
17696 - popfl # Restore caller passed flags
17697 - xorl %eax, %eax
17698 - ret
17699 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17700 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17701 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17702 @@ -0,0 +1,140 @@
17703 +/*
17704 + *
17705 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17706 + * code has been borrowed from boot/setup.S and was introduced by
17707 + * Andi Kleen.
17708 + *
17709 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17710 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17711 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17712 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17713 + *
17714 + * This source code is licensed under the GNU General Public License,
17715 + * Version 2. See the file COPYING for more details.
17716 + *
17717 + * This is a common code for verification whether CPU supports
17718 + * long mode and SSE or not. It is not called directly instead this
17719 + * file is included at various places and compiled in that context.
17720 + * This file is expected to run in 32bit code. Currently:
17721 + *
17722 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17723 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17724 + * arch/x86/kernel/head_32.S: processor startup
17725 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17726 + *
17727 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17728 + * 0: Success 1: Failure
17729 + *
17730 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17731 + *
17732 + * The caller needs to check for the error code and take the action
17733 + * appropriately. Either display a message or halt.
17734 + */
17735 +
17736 +#include <asm/cpufeature.h>
17737 +#include <asm/msr-index.h>
17738 +
17739 +verify_cpu:
17740 + pushfl # Save caller passed flags
17741 + pushl $0 # Kill any dangerous flags
17742 + popfl
17743 +
17744 + pushfl # standard way to check for cpuid
17745 + popl %eax
17746 + movl %eax,%ebx
17747 + xorl $0x200000,%eax
17748 + pushl %eax
17749 + popfl
17750 + pushfl
17751 + popl %eax
17752 + cmpl %eax,%ebx
17753 + jz verify_cpu_no_longmode # cpu has no cpuid
17754 +
17755 + movl $0x0,%eax # See if cpuid 1 is implemented
17756 + cpuid
17757 + cmpl $0x1,%eax
17758 + jb verify_cpu_no_longmode # no cpuid 1
17759 +
17760 + xor %di,%di
17761 + cmpl $0x68747541,%ebx # AuthenticAMD
17762 + jnz verify_cpu_noamd
17763 + cmpl $0x69746e65,%edx
17764 + jnz verify_cpu_noamd
17765 + cmpl $0x444d4163,%ecx
17766 + jnz verify_cpu_noamd
17767 + mov $1,%di # cpu is from AMD
17768 + jmp verify_cpu_check
17769 +
17770 +verify_cpu_noamd:
17771 + cmpl $0x756e6547,%ebx # GenuineIntel?
17772 + jnz verify_cpu_check
17773 + cmpl $0x49656e69,%edx
17774 + jnz verify_cpu_check
17775 + cmpl $0x6c65746e,%ecx
17776 + jnz verify_cpu_check
17777 +
17778 + # only call IA32_MISC_ENABLE when:
17779 + # family > 6 || (family == 6 && model >= 0xd)
17780 + movl $0x1, %eax # check CPU family and model
17781 + cpuid
17782 + movl %eax, %ecx
17783 +
17784 + andl $0x0ff00f00, %eax # mask family and extended family
17785 + shrl $8, %eax
17786 + cmpl $6, %eax
17787 + ja verify_cpu_clear_xd # family > 6, ok
17788 + jb verify_cpu_check # family < 6, skip
17789 +
17790 + andl $0x000f00f0, %ecx # mask model and extended model
17791 + shrl $4, %ecx
17792 + cmpl $0xd, %ecx
17793 + jb verify_cpu_check # family == 6, model < 0xd, skip
17794 +
17795 +verify_cpu_clear_xd:
17796 + movl $MSR_IA32_MISC_ENABLE, %ecx
17797 + rdmsr
17798 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17799 + jnc verify_cpu_check # only write MSR if bit was changed
17800 + wrmsr
17801 +
17802 +verify_cpu_check:
17803 + movl $0x1,%eax # Does the cpu have what it takes
17804 + cpuid
17805 + andl $REQUIRED_MASK0,%edx
17806 + xorl $REQUIRED_MASK0,%edx
17807 + jnz verify_cpu_no_longmode
17808 +
17809 + movl $0x80000000,%eax # See if extended cpuid is implemented
17810 + cpuid
17811 + cmpl $0x80000001,%eax
17812 + jb verify_cpu_no_longmode # no extended cpuid
17813 +
17814 + movl $0x80000001,%eax # Does the cpu have what it takes
17815 + cpuid
17816 + andl $REQUIRED_MASK1,%edx
17817 + xorl $REQUIRED_MASK1,%edx
17818 + jnz verify_cpu_no_longmode
17819 +
17820 +verify_cpu_sse_test:
17821 + movl $1,%eax
17822 + cpuid
17823 + andl $SSE_MASK,%edx
17824 + cmpl $SSE_MASK,%edx
17825 + je verify_cpu_sse_ok
17826 + test %di,%di
17827 + jz verify_cpu_no_longmode # only try to force SSE on AMD
17828 + movl $MSR_K7_HWCR,%ecx
17829 + rdmsr
17830 + btr $15,%eax # enable SSE
17831 + wrmsr
17832 + xor %di,%di # don't loop
17833 + jmp verify_cpu_sse_test # try again
17834 +
17835 +verify_cpu_no_longmode:
17836 + popfl # Restore caller passed flags
17837 + movl $1,%eax
17838 + ret
17839 +verify_cpu_sse_ok:
17840 + popfl # Restore caller passed flags
17841 + xorl %eax, %eax
17842 + ret
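The Intel-only path added to verify_cpu.S decides whether to clear the XD-disable bit in IA32_MISC_ENABLE by masking CPUID leaf 1's EAX: it proceeds for family > 6, or family == 6 with model >= 0xd, using the masks 0x0ff00f00 (family plus extended family) and 0x000f00f0 (model plus extended model). The small C function below mirrors the same masks, shifts and comparisons; the sample EAX value in main() is only an assumed example signature.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the check in verify_cpu.S: clear the XD-disable MSR bit only
 * when family > 6, or family == 6 and model >= 0xd.  Same masks and shifts
 * as the assembly; eax is the raw CPUID.1 EAX value. */
static bool should_clear_xd_disable(uint32_t eax)
{
    uint32_t family = (eax & 0x0ff00f00u) >> 8;  /* family + extended family */
    uint32_t model  = (eax & 0x000f00f0u) >> 4;  /* model + extended model   */

    if (family > 6)
        return true;
    if (family < 6)
        return false;
    return model >= 0xd;
}

int main(void)
{
    /* Assumed sample signature: family 6, extended model 1, model 0xe. */
    uint32_t sample_eax = 0x000106e5u;

    printf("clear XD_DISABLE: %s\n",
           should_clear_xd_disable(sample_eax) ? "yes" : "no");
    return 0;
}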
17843 diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
17844 --- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17845 +++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17846 @@ -41,6 +41,7 @@
17847 #include <linux/ptrace.h>
17848 #include <linux/audit.h>
17849 #include <linux/stddef.h>
17850 +#include <linux/grsecurity.h>
17851
17852 #include <asm/uaccess.h>
17853 #include <asm/io.h>
17854 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17855 do_exit(SIGSEGV);
17856 }
17857
17858 - tss = &per_cpu(init_tss, get_cpu());
17859 + tss = init_tss + get_cpu();
17860 current->thread.sp0 = current->thread.saved_sp0;
17861 current->thread.sysenter_cs = __KERNEL_CS;
17862 load_sp0(tss, &current->thread);
17863 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17864 struct task_struct *tsk;
17865 int tmp, ret = -EPERM;
17866
17867 +#ifdef CONFIG_GRKERNSEC_VM86
17868 + if (!capable(CAP_SYS_RAWIO)) {
17869 + gr_handle_vm86();
17870 + goto out;
17871 + }
17872 +#endif
17873 +
17874 tsk = current;
17875 if (tsk->thread.saved_sp0)
17876 goto out;
17877 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17878 int tmp, ret;
17879 struct vm86plus_struct __user *v86;
17880
17881 +#ifdef CONFIG_GRKERNSEC_VM86
17882 + if (!capable(CAP_SYS_RAWIO)) {
17883 + gr_handle_vm86();
17884 + ret = -EPERM;
17885 + goto out;
17886 + }
17887 +#endif
17888 +
17889 tsk = current;
17890 switch (regs->bx) {
17891 case VM86_REQUEST_IRQ:
17892 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17893 tsk->thread.saved_fs = info->regs32->fs;
17894 tsk->thread.saved_gs = get_user_gs(info->regs32);
17895
17896 - tss = &per_cpu(init_tss, get_cpu());
17897 + tss = init_tss + get_cpu();
17898 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17899 if (cpu_has_sep)
17900 tsk->thread.sysenter_cs = 0;
17901 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17902 goto cannot_handle;
17903 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17904 goto cannot_handle;
17905 - intr_ptr = (unsigned long __user *) (i << 2);
17906 + intr_ptr = (__force unsigned long __user *) (i << 2);
17907 if (get_user(segoffs, intr_ptr))
17908 goto cannot_handle;
17909 if ((segoffs >> 16) == BIOSSEG)
17910 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
17911 --- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17912 +++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
17913 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17914 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17915
17916 #define call_vrom_func(rom,func) \
17917 - (((VROMFUNC *)(rom->func))())
17918 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17919
17920 #define call_vrom_long_func(rom,func,arg) \
17921 - (((VROMLONGFUNC *)(rom->func)) (arg))
17922 +({\
17923 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17924 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17925 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17926 + __reloc;\
17927 +})
17928
17929 -static struct vrom_header *vmi_rom;
17930 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17931 static int disable_pge;
17932 static int disable_pse;
17933 static int disable_sep;
17934 @@ -76,10 +81,10 @@ static struct {
17935 void (*set_initial_ap_state)(int, int);
17936 void (*halt)(void);
17937 void (*set_lazy_mode)(int mode);
17938 -} vmi_ops;
17939 +} __no_const vmi_ops __read_only;
17940
17941 /* Cached VMI operations */
17942 -struct vmi_timer_ops vmi_timer_ops;
17943 +struct vmi_timer_ops vmi_timer_ops __read_only;
17944
17945 /*
17946 * VMI patching routines.
17947 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17948 static inline void patch_offset(void *insnbuf,
17949 unsigned long ip, unsigned long dest)
17950 {
17951 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17952 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17953 }
17954
17955 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17956 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17957 {
17958 u64 reloc;
17959 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17960 +
17961 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17962 switch(rel->type) {
17963 case VMI_RELOCATION_CALL_REL:
17964 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17965
17966 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17967 {
17968 - const pte_t pte = { .pte = 0 };
17969 + const pte_t pte = __pte(0ULL);
17970 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17971 }
17972
17973 static void vmi_pmd_clear(pmd_t *pmd)
17974 {
17975 - const pte_t pte = { .pte = 0 };
17976 + const pte_t pte = __pte(0ULL);
17977 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17978 }
17979 #endif
17980 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17981 ap.ss = __KERNEL_DS;
17982 ap.esp = (unsigned long) start_esp;
17983
17984 - ap.ds = __USER_DS;
17985 - ap.es = __USER_DS;
17986 + ap.ds = __KERNEL_DS;
17987 + ap.es = __KERNEL_DS;
17988 ap.fs = __KERNEL_PERCPU;
17989 - ap.gs = __KERNEL_STACK_CANARY;
17990 + savesegment(gs, ap.gs);
17991
17992 ap.eflags = 0;
17993
17994 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17995 paravirt_leave_lazy_mmu();
17996 }
17997
17998 +#ifdef CONFIG_PAX_KERNEXEC
17999 +static unsigned long vmi_pax_open_kernel(void)
18000 +{
18001 + return 0;
18002 +}
18003 +
18004 +static unsigned long vmi_pax_close_kernel(void)
18005 +{
18006 + return 0;
18007 +}
18008 +#endif
18009 +
18010 static inline int __init check_vmi_rom(struct vrom_header *rom)
18011 {
18012 struct pci_header *pci;
18013 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18014 return 0;
18015 if (rom->vrom_signature != VMI_SIGNATURE)
18016 return 0;
18017 + if (rom->rom_length * 512 > sizeof(*rom)) {
18018 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18019 + return 0;
18020 + }
18021 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18022 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18023 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18024 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18025 struct vrom_header *romstart;
18026 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18027 if (check_vmi_rom(romstart)) {
18028 - vmi_rom = romstart;
18029 + vmi_rom = *romstart;
18030 return 1;
18031 }
18032 }
18033 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18034
18035 para_fill(pv_irq_ops.safe_halt, Halt);
18036
18037 +#ifdef CONFIG_PAX_KERNEXEC
18038 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18039 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18040 +#endif
18041 +
18042 /*
18043 * Alternative instruction rewriting doesn't happen soon enough
18044 * to convert VMI_IRET to a call instead of a jump; so we have
18045 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18046
18047 void __init vmi_init(void)
18048 {
18049 - if (!vmi_rom)
18050 + if (!vmi_rom.rom_signature)
18051 probe_vmi_rom();
18052 else
18053 - check_vmi_rom(vmi_rom);
18054 + check_vmi_rom(&vmi_rom);
18055
18056 /* In case probing for or validating the ROM failed, basil */
18057 - if (!vmi_rom)
18058 + if (!vmi_rom.rom_signature)
18059 return;
18060
18061 - reserve_top_address(-vmi_rom->virtual_top);
18062 + reserve_top_address(-vmi_rom.virtual_top);
18063
18064 #ifdef CONFIG_X86_IO_APIC
18065 /* This is virtual hardware; timer routing is wired correctly */
18066 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
18067 {
18068 unsigned long flags;
18069
18070 - if (!vmi_rom)
18071 + if (!vmi_rom.rom_signature)
18072 return;
18073
18074 local_irq_save(flags);
18075 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18076 --- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18077 +++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18078 @@ -26,6 +26,13 @@
18079 #include <asm/page_types.h>
18080 #include <asm/cache.h>
18081 #include <asm/boot.h>
18082 +#include <asm/segment.h>
18083 +
18084 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18085 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18086 +#else
18087 +#define __KERNEL_TEXT_OFFSET 0
18088 +#endif
18089
18090 #undef i386 /* in case the preprocessor is a 32bit one */
18091
18092 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18093 #ifdef CONFIG_X86_32
18094 OUTPUT_ARCH(i386)
18095 ENTRY(phys_startup_32)
18096 -jiffies = jiffies_64;
18097 #else
18098 OUTPUT_ARCH(i386:x86-64)
18099 ENTRY(phys_startup_64)
18100 -jiffies_64 = jiffies;
18101 #endif
18102
18103 PHDRS {
18104 text PT_LOAD FLAGS(5); /* R_E */
18105 - data PT_LOAD FLAGS(7); /* RWE */
18106 +#ifdef CONFIG_X86_32
18107 + module PT_LOAD FLAGS(5); /* R_E */
18108 +#endif
18109 +#ifdef CONFIG_XEN
18110 + rodata PT_LOAD FLAGS(5); /* R_E */
18111 +#else
18112 + rodata PT_LOAD FLAGS(4); /* R__ */
18113 +#endif
18114 + data PT_LOAD FLAGS(6); /* RW_ */
18115 #ifdef CONFIG_X86_64
18116 user PT_LOAD FLAGS(5); /* R_E */
18117 +#endif
18118 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18119 #ifdef CONFIG_SMP
18120 percpu PT_LOAD FLAGS(6); /* RW_ */
18121 #endif
18122 + text.init PT_LOAD FLAGS(5); /* R_E */
18123 + text.exit PT_LOAD FLAGS(5); /* R_E */
18124 init PT_LOAD FLAGS(7); /* RWE */
18125 -#endif
18126 note PT_NOTE FLAGS(0); /* ___ */
18127 }
18128
18129 SECTIONS
18130 {
18131 #ifdef CONFIG_X86_32
18132 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18133 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18134 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18135 #else
18136 - . = __START_KERNEL;
18137 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18138 + . = __START_KERNEL;
18139 #endif
18140
18141 /* Text and read-only data */
18142 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18143 - _text = .;
18144 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18145 /* bootstrapping code */
18146 +#ifdef CONFIG_X86_32
18147 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18148 +#else
18149 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18150 +#endif
18151 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18152 + _text = .;
18153 HEAD_TEXT
18154 #ifdef CONFIG_X86_32
18155 . = ALIGN(PAGE_SIZE);
18156 @@ -82,28 +102,71 @@ SECTIONS
18157 IRQENTRY_TEXT
18158 *(.fixup)
18159 *(.gnu.warning)
18160 - /* End of text section */
18161 - _etext = .;
18162 } :text = 0x9090
18163
18164 - NOTES :text :note
18165 + . += __KERNEL_TEXT_OFFSET;
18166 +
18167 +#ifdef CONFIG_X86_32
18168 + . = ALIGN(PAGE_SIZE);
18169 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18170 + *(.vmi.rom)
18171 + } :module
18172 +
18173 + . = ALIGN(PAGE_SIZE);
18174 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18175 +
18176 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18177 + MODULES_EXEC_VADDR = .;
18178 + BYTE(0)
18179 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18180 + . = ALIGN(HPAGE_SIZE);
18181 + MODULES_EXEC_END = . - 1;
18182 +#endif
18183 +
18184 + } :module
18185 +#endif
18186
18187 - EXCEPTION_TABLE(16) :text = 0x9090
18188 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18189 + /* End of text section */
18190 + _etext = . - __KERNEL_TEXT_OFFSET;
18191 + }
18192 +
18193 +#ifdef CONFIG_X86_32
18194 + . = ALIGN(PAGE_SIZE);
18195 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18196 + *(.idt)
18197 + . = ALIGN(PAGE_SIZE);
18198 + *(.empty_zero_page)
18199 + *(.swapper_pg_fixmap)
18200 + *(.swapper_pg_pmd)
18201 + *(.swapper_pg_dir)
18202 + *(.trampoline_pg_dir)
18203 + } :rodata
18204 +#endif
18205 +
18206 + . = ALIGN(PAGE_SIZE);
18207 + NOTES :rodata :note
18208 +
18209 + EXCEPTION_TABLE(16) :rodata
18210
18211 RO_DATA(PAGE_SIZE)
18212
18213 /* Data */
18214 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18215 +
18216 +#ifdef CONFIG_PAX_KERNEXEC
18217 + . = ALIGN(HPAGE_SIZE);
18218 +#else
18219 + . = ALIGN(PAGE_SIZE);
18220 +#endif
18221 +
18222 /* Start of data section */
18223 _sdata = .;
18224
18225 /* init_task */
18226 INIT_TASK_DATA(THREAD_SIZE)
18227
18228 -#ifdef CONFIG_X86_32
18229 - /* 32 bit has nosave before _edata */
18230 NOSAVE_DATA
18231 -#endif
18232
18233 PAGE_ALIGNED_DATA(PAGE_SIZE)
18234
18235 @@ -112,6 +175,8 @@ SECTIONS
18236 DATA_DATA
18237 CONSTRUCTORS
18238
18239 + jiffies = jiffies_64;
18240 +
18241 /* rarely changed data like cpu maps */
18242 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18243
18244 @@ -166,12 +231,6 @@ SECTIONS
18245 }
18246 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18247
18248 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18249 - .jiffies : AT(VLOAD(.jiffies)) {
18250 - *(.jiffies)
18251 - }
18252 - jiffies = VVIRT(.jiffies);
18253 -
18254 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18255 *(.vsyscall_3)
18256 }
18257 @@ -187,12 +246,19 @@ SECTIONS
18258 #endif /* CONFIG_X86_64 */
18259
18260 /* Init code and data - will be freed after init */
18261 - . = ALIGN(PAGE_SIZE);
18262 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18263 + BYTE(0)
18264 +
18265 +#ifdef CONFIG_PAX_KERNEXEC
18266 + . = ALIGN(HPAGE_SIZE);
18267 +#else
18268 + . = ALIGN(PAGE_SIZE);
18269 +#endif
18270 +
18271 __init_begin = .; /* paired with __init_end */
18272 - }
18273 + } :init.begin
18274
18275 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18276 +#ifdef CONFIG_SMP
18277 /*
18278 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18279 * output PHDR, so the next output section - .init.text - should
18280 @@ -201,12 +267,27 @@ SECTIONS
18281 PERCPU_VADDR(0, :percpu)
18282 #endif
18283
18284 - INIT_TEXT_SECTION(PAGE_SIZE)
18285 -#ifdef CONFIG_X86_64
18286 - :init
18287 -#endif
18288 + . = ALIGN(PAGE_SIZE);
18289 + init_begin = .;
18290 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18291 + VMLINUX_SYMBOL(_sinittext) = .;
18292 + INIT_TEXT
18293 + VMLINUX_SYMBOL(_einittext) = .;
18294 + . = ALIGN(PAGE_SIZE);
18295 + } :text.init
18296
18297 - INIT_DATA_SECTION(16)
18298 + /*
18299 + * .exit.text is discard at runtime, not link time, to deal with
18300 + * references from .altinstructions and .eh_frame
18301 + */
18302 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18303 + EXIT_TEXT
18304 + . = ALIGN(16);
18305 + } :text.exit
18306 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18307 +
18308 + . = ALIGN(PAGE_SIZE);
18309 + INIT_DATA_SECTION(16) :init
18310
18311 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18312 __x86_cpu_dev_start = .;
18313 @@ -232,19 +313,11 @@ SECTIONS
18314 *(.altinstr_replacement)
18315 }
18316
18317 - /*
18318 - * .exit.text is discard at runtime, not link time, to deal with
18319 - * references from .altinstructions and .eh_frame
18320 - */
18321 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18322 - EXIT_TEXT
18323 - }
18324 -
18325 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18326 EXIT_DATA
18327 }
18328
18329 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18330 +#ifndef CONFIG_SMP
18331 PERCPU(PAGE_SIZE)
18332 #endif
18333
18334 @@ -267,12 +340,6 @@ SECTIONS
18335 . = ALIGN(PAGE_SIZE);
18336 }
18337
18338 -#ifdef CONFIG_X86_64
18339 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18340 - NOSAVE_DATA
18341 - }
18342 -#endif
18343 -
18344 /* BSS */
18345 . = ALIGN(PAGE_SIZE);
18346 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18347 @@ -288,6 +355,7 @@ SECTIONS
18348 __brk_base = .;
18349 . += 64 * 1024; /* 64k alignment slop space */
18350 *(.brk_reservation) /* areas brk users have reserved */
18351 + . = ALIGN(HPAGE_SIZE);
18352 __brk_limit = .;
18353 }
18354
18355 @@ -316,13 +384,12 @@ SECTIONS
18356 * for the boot processor.
18357 */
18358 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18359 -INIT_PER_CPU(gdt_page);
18360 INIT_PER_CPU(irq_stack_union);
18361
18362 /*
18363 * Build-time check on the image size:
18364 */
18365 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18366 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18367 "kernel image bigger than KERNEL_IMAGE_SIZE");
18368
18369 #ifdef CONFIG_SMP
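The reworked PHDRS block is easier to follow once the FLAGS() values are read as ELF program-header permission bits (PF_X = 1, PF_W = 2, PF_R = 4): FLAGS(5) is read+execute, FLAGS(6) read+write, FLAGS(4) read-only, and FLAGS(7) the writable-and-executable combination that the patch confines to the init segment. A tiny decoder for those constants:

#include <stdio.h>

#define PF_X 0x1u
#define PF_W 0x2u
#define PF_R 0x4u

static void decode(const char *name, unsigned int flags)
{
    printf("%-8s FLAGS(%u) = %c%c%c\n", name, flags,
           (flags & PF_R) ? 'R' : '_',
           (flags & PF_W) ? 'W' : '_',
           (flags & PF_X) ? 'E' : '_');
}

int main(void)
{
    decode("text",   5);   /* R_E */
    decode("rodata", 4);   /* R__ */
    decode("data",   6);   /* RW_ */
    decode("init",   7);   /* RWE */
    return 0;
}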
18370 diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18371 --- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18372 +++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18373 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18374
18375 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18376 /* copy vsyscall data */
18377 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18378 vsyscall_gtod_data.clock.vread = clock->vread;
18379 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18380 vsyscall_gtod_data.clock.mask = clock->mask;
18381 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18382 We do this here because otherwise user space would do it on
18383 its own in a likely inferior way (no access to jiffies).
18384 If you don't like it pass NULL. */
18385 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18386 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18387 p = tcache->blob[1];
18388 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18389 /* Load per CPU data from RDTSCP */
18390 diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18391 --- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18392 +++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18393 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18394
18395 EXPORT_SYMBOL(copy_user_generic);
18396 EXPORT_SYMBOL(__copy_user_nocache);
18397 -EXPORT_SYMBOL(copy_from_user);
18398 -EXPORT_SYMBOL(copy_to_user);
18399 EXPORT_SYMBOL(__copy_from_user_inatomic);
18400
18401 EXPORT_SYMBOL(copy_page);
18402 diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18403 --- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18404 +++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18405 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18406 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18407 return -1;
18408
18409 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18410 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18411 fx_sw_user->extended_size -
18412 FP_XSTATE_MAGIC2_SIZE));
18413 /*
18414 @@ -196,7 +196,7 @@ fx_only:
18415 * the other extended state.
18416 */
18417 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18418 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18419 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18420 }
18421
18422 /*
18423 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18424 if (task_thread_info(tsk)->status & TS_XSAVE)
18425 err = restore_user_xstate(buf);
18426 else
18427 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18428 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18429 buf);
18430 if (unlikely(err)) {
18431 /*
18432 diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18433 --- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18434 +++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18435 @@ -81,8 +81,8 @@
18436 #define Src2CL (1<<29)
18437 #define Src2ImmByte (2<<29)
18438 #define Src2One (3<<29)
18439 -#define Src2Imm16 (4<<29)
18440 -#define Src2Mask (7<<29)
18441 +#define Src2Imm16 (4U<<29)
18442 +#define Src2Mask (7U<<29)
18443
18444 enum {
18445 Group1_80, Group1_81, Group1_82, Group1_83,
18446 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18447
18448 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18449 do { \
18450 + unsigned long _tmp; \
18451 __asm__ __volatile__ ( \
18452 _PRE_EFLAGS("0", "4", "2") \
18453 _op _suffix " %"_x"3,%1; " \
18454 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18455 /* Raw emulation: instruction has two explicit operands. */
18456 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18457 do { \
18458 - unsigned long _tmp; \
18459 - \
18460 switch ((_dst).bytes) { \
18461 case 2: \
18462 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18463 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18464
18465 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18466 do { \
18467 - unsigned long _tmp; \
18468 switch ((_dst).bytes) { \
18469 case 1: \
18470 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
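In the kvm/emulate.c hunk, Src2Imm16 and Src2Mask gain a U suffix because 4<<29 and 7<<29 are evaluated in signed int, where shifting set bits into or past the sign bit is not well defined; the unsigned forms stay plain 32-bit masks. The _tmp temporary is also moved into the inner ____emulate_2op macro so each expansion declares its own scratch variable. A quick standalone demonstration of the shift difference:

#include <stdio.h>

#define SRC2_MASK_PLAIN    (7 << 29)    /* int arithmetic: result exceeds INT_MAX */
#define SRC2_MASK_UNSIGNED (7U << 29)   /* unsigned arithmetic: well defined      */

int main(void)
{
    /* On typical two's-complement targets the plain-int form becomes a negative
     * value and sign-extends when widened; the U-suffixed form stays 0xE0000000. */
    unsigned long long widened_plain    = (unsigned long long)SRC2_MASK_PLAIN;
    unsigned long long widened_unsigned = (unsigned long long)SRC2_MASK_UNSIGNED;

    printf("plain    (7 << 29)  widens to 0x%llx\n", widened_plain);
    printf("unsigned (7U << 29) widens to 0x%llx\n", widened_unsigned);
    return 0;
}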
18471 diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18472 --- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18473 +++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18474 @@ -52,7 +52,7 @@
18475 #define APIC_BUS_CYCLE_NS 1
18476
18477 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18478 -#define apic_debug(fmt, arg...)
18479 +#define apic_debug(fmt, arg...) do {} while (0)
18480
18481 #define APIC_LVT_NUM 6
18482 /* 14 is the version for Xeon and Pentium 8.4.8*/
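The lapic.c change redefines the disabled apic_debug() as do {} while (0) rather than an empty expansion, so the macro always behaves as a single complete statement and a call site followed by a semicolon never degenerates into a bare ";". A minimal illustration with hypothetical macro names, not the kernel's:

#include <stdio.h>

/* Old style: the disabled macro expands to nothing at all. */
#define debug_empty(fmt, ...)

/* Patched style: the disabled macro expands to one complete null statement. */
#define debug_noop(fmt, ...) do {} while (0)

int main(void)
{
    int enabled = 0;

    /* With debug_empty, "if (enabled) debug_empty(...);" collapses to a bare
     * ";", which compilers flag with -Wempty-body and which is easy to
     * misread; debug_noop always behaves as exactly one statement and still
     * requires the trailing semicolon. */
    if (enabled)
        debug_noop("apic timer state %d\n", 0);
    else
        printf("apic debug output disabled\n");

    return 0;
}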
18483 diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18484 --- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18485 +++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18486 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18487 int level = PT_PAGE_TABLE_LEVEL;
18488 unsigned long mmu_seq;
18489
18490 + pax_track_stack();
18491 +
18492 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18493 kvm_mmu_audit(vcpu, "pre page fault");
18494
18495 diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18496 --- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18497 +++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18498 @@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18499 int cpu = raw_smp_processor_id();
18500
18501 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18502 +
18503 + pax_open_kernel();
18504 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18505 + pax_close_kernel();
18506 +
18507 load_TR_desc();
18508 }
18509
18510 @@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18511 return true;
18512 }
18513
18514 -static struct kvm_x86_ops svm_x86_ops = {
18515 +static const struct kvm_x86_ops svm_x86_ops = {
18516 .cpu_has_kvm_support = has_svm,
18517 .disabled_by_bios = is_disabled,
18518 .hardware_setup = svm_hardware_setup,
18519 diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18520 --- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18521 +++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18522 @@ -570,7 +570,11 @@ static void reload_tss(void)
18523
18524 kvm_get_gdt(&gdt);
18525 descs = (void *)gdt.base;
18526 +
18527 + pax_open_kernel();
18528 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18529 + pax_close_kernel();
18530 +
18531 load_TR_desc();
18532 }
18533
18534 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18535 if (!cpu_has_vmx_flexpriority())
18536 flexpriority_enabled = 0;
18537
18538 - if (!cpu_has_vmx_tpr_shadow())
18539 - kvm_x86_ops->update_cr8_intercept = NULL;
18540 + if (!cpu_has_vmx_tpr_shadow()) {
18541 + pax_open_kernel();
18542 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18543 + pax_close_kernel();
18544 + }
18545
18546 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18547 kvm_disable_largepages();
18548 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18549 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18550
18551 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18552 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18553 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18554 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18555 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18556 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18557 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18558 "jmp .Lkvm_vmx_return \n\t"
18559 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18560 ".Lkvm_vmx_return: "
18561 +
18562 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18563 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18564 + ".Lkvm_vmx_return2: "
18565 +#endif
18566 +
18567 /* Save guest registers, load host registers, keep flags */
18568 "xchg %0, (%%"R"sp) \n\t"
18569 "mov %%"R"ax, %c[rax](%0) \n\t"
18570 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18571 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18572 #endif
18573 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18574 +
18575 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18576 + ,[cs]"i"(__KERNEL_CS)
18577 +#endif
18578 +
18579 : "cc", "memory"
18580 - , R"bx", R"di", R"si"
18581 + , R"ax", R"bx", R"di", R"si"
18582 #ifdef CONFIG_X86_64
18583 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18584 #endif
18585 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18586 if (vmx->rmode.irq.pending)
18587 fixup_rmode_irq(vmx);
18588
18589 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18590 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18591 +
18592 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18593 + loadsegment(fs, __KERNEL_PERCPU);
18594 +#endif
18595 +
18596 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18597 + __set_fs(current_thread_info()->addr_limit);
18598 +#endif
18599 +
18600 vmx->launched = 1;
18601
18602 vmx_complete_interrupts(vmx);
18603 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18604 return false;
18605 }
18606
18607 -static struct kvm_x86_ops vmx_x86_ops = {
18608 +static const struct kvm_x86_ops vmx_x86_ops = {
18609 .cpu_has_kvm_support = cpu_has_kvm_support,
18610 .disabled_by_bios = vmx_disabled_by_bios,
18611 .hardware_setup = hardware_setup,
18612 diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18613 --- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18614 +++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18615 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18616 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18617 struct kvm_cpuid_entry2 __user *entries);
18618
18619 -struct kvm_x86_ops *kvm_x86_ops;
18620 +const struct kvm_x86_ops *kvm_x86_ops;
18621 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18622
18623 int ignore_msrs = 0;
18624 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18625 struct kvm_cpuid2 *cpuid,
18626 struct kvm_cpuid_entry2 __user *entries)
18627 {
18628 - int r;
18629 + int r, i;
18630
18631 r = -E2BIG;
18632 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18633 goto out;
18634 r = -EFAULT;
18635 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18636 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18637 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18638 goto out;
18639 + for (i = 0; i < cpuid->nent; ++i) {
18640 + struct kvm_cpuid_entry2 cpuid_entry;
18641 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18642 + goto out;
18643 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18644 + }
18645 vcpu->arch.cpuid_nent = cpuid->nent;
18646 kvm_apic_set_version(vcpu);
18647 return 0;
18648 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18649 struct kvm_cpuid2 *cpuid,
18650 struct kvm_cpuid_entry2 __user *entries)
18651 {
18652 - int r;
18653 + int r, i;
18654
18655 vcpu_load(vcpu);
18656 r = -E2BIG;
18657 if (cpuid->nent < vcpu->arch.cpuid_nent)
18658 goto out;
18659 r = -EFAULT;
18660 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18661 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18662 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18663 goto out;
18664 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18665 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18666 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18667 + goto out;
18668 + }
18669 return 0;
18670
18671 out:
18672 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18673 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18674 struct kvm_interrupt *irq)
18675 {
18676 - if (irq->irq < 0 || irq->irq >= 256)
18677 + if (irq->irq >= 256)
18678 return -EINVAL;
18679 if (irqchip_in_kernel(vcpu->kvm))
18680 return -ENXIO;
18681 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18682 .notifier_call = kvmclock_cpufreq_notifier
18683 };
18684
18685 -int kvm_arch_init(void *opaque)
18686 +int kvm_arch_init(const void *opaque)
18687 {
18688 int r, cpu;
18689 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18690 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18691
18692 if (kvm_x86_ops) {
18693 printk(KERN_ERR "kvm: already loaded the other module\n");
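Two details in the kvm/x86.c hunks: the irq->irq < 0 test is dropped because the field is unsigned in the KVM ABI, so the comparison could never be true; and the single bulk copy_from_user()/copy_to_user() of the whole cpuid array is replaced by an access_ok() check plus a bounded, per-entry __copy_from_user()/__copy_to_user() loop. The userspace sketch below only models that chunked-copy shape, with memcpy standing in for the user-copy primitives and invented names throughout:

#include <stdio.h>
#include <string.h>

struct cpuid_entry_model { unsigned int function, eax, ebx, ecx, edx; };

#define MAX_ENTRIES 8

/* Userspace stand-in: treat "src" as an untrusted buffer and copy it one
 * validated element at a time into a fixed-size destination array. */
static int copy_entries_model(struct cpuid_entry_model *dst,
                              const struct cpuid_entry_model *src,
                              unsigned int nent)
{
    unsigned int i;

    if (nent > MAX_ENTRIES)            /* mirrors the -E2BIG bound check */
        return -1;

    for (i = 0; i < nent; i++) {
        struct cpuid_entry_model tmp;

        memcpy(&tmp, &src[i], sizeof(tmp));   /* one element per "copy" */
        dst[i] = tmp;
    }
    return 0;
}

int main(void)
{
    struct cpuid_entry_model user_buf[2] = { { 0, 1, 2, 3, 4 }, { 1, 5, 6, 7, 8 } };
    struct cpuid_entry_model kernel_buf[MAX_ENTRIES];

    if (copy_entries_model(kernel_buf, user_buf, 2) == 0)
        printf("copied entry 1 function = %u\n", kernel_buf[1].function);
    return 0;
}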
18694 diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18695 --- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18696 +++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18697 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18698 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18699 * Launcher to reboot us.
18700 */
18701 -static void lguest_restart(char *reason)
18702 +static __noreturn void lguest_restart(char *reason)
18703 {
18704 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18705 + BUG();
18706 }
18707
18708 /*G:050
18709 diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18710 --- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18711 +++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18712 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18713 }
18714 EXPORT_SYMBOL(atomic64_cmpxchg);
18715
18716 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18717 +{
18718 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18719 +}
18720 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18721 +
18722 /**
18723 * atomic64_xchg - xchg atomic64 variable
18724 * @ptr: pointer to type atomic64_t
18725 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18726 EXPORT_SYMBOL(atomic64_xchg);
18727
18728 /**
18729 + * atomic64_xchg_unchecked - xchg atomic64 variable
18730 + * @ptr: pointer to type atomic64_unchecked_t
18731 + * @new_val: value to assign
18732 + *
18733 + * Atomically xchgs the value of @ptr to @new_val and returns
18734 + * the old value.
18735 + */
18736 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18737 +{
18738 + /*
18739 + * Try first with a (possibly incorrect) assumption about
18740 + * what we have there. We'll do two loops most likely,
18741 + * but we'll get an ownership MESI transaction straight away
18742 + * instead of a read transaction followed by a
18743 + * flush-for-ownership transaction:
18744 + */
18745 + u64 old_val, real_val = 0;
18746 +
18747 + do {
18748 + old_val = real_val;
18749 +
18750 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18751 +
18752 + } while (real_val != old_val);
18753 +
18754 + return old_val;
18755 +}
18756 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18757 +
18758 +/**
18759 * atomic64_set - set atomic64 variable
18760 * @ptr: pointer to type atomic64_t
18761 * @new_val: value to assign
18762 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18763 EXPORT_SYMBOL(atomic64_set);
18764
18765 /**
18766 -EXPORT_SYMBOL(atomic64_read);
18767 + * atomic64_unchecked_set - set atomic64 variable
18768 + * @ptr: pointer to type atomic64_unchecked_t
18769 + * @new_val: value to assign
18770 + *
18771 + * Atomically sets the value of @ptr to @new_val.
18772 + */
18773 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18774 +{
18775 + atomic64_xchg_unchecked(ptr, new_val);
18776 +}
18777 +EXPORT_SYMBOL(atomic64_set_unchecked);
18778 +
18779 +/**
18780 * atomic64_add_return - add and return
18781 * @delta: integer value to add
18782 * @ptr: pointer to type atomic64_t
18783 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18784 }
18785 EXPORT_SYMBOL(atomic64_add_return);
18786
18787 +/**
18788 + * atomic64_add_return_unchecked - add and return
18789 + * @delta: integer value to add
18790 + * @ptr: pointer to type atomic64_unchecked_t
18791 + *
18792 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18793 + */
18794 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18795 +{
18796 + /*
18797 + * Try first with a (possibly incorrect) assumption about
18798 + * what we have there. We'll do two loops most likely,
18799 + * but we'll get an ownership MESI transaction straight away
18800 + * instead of a read transaction followed by a
18801 + * flush-for-ownership transaction:
18802 + */
18803 + u64 old_val, new_val, real_val = 0;
18804 +
18805 + do {
18806 + old_val = real_val;
18807 + new_val = old_val + delta;
18808 +
18809 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18810 +
18811 + } while (real_val != old_val);
18812 +
18813 + return new_val;
18814 +}
18815 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18816 +
18817 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18818 {
18819 return atomic64_add_return(-delta, ptr);
18820 }
18821 EXPORT_SYMBOL(atomic64_sub_return);
18822
18823 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18824 +{
18825 + return atomic64_add_return_unchecked(-delta, ptr);
18826 +}
18827 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18828 +
18829 u64 atomic64_inc_return(atomic64_t *ptr)
18830 {
18831 return atomic64_add_return(1, ptr);
18832 }
18833 EXPORT_SYMBOL(atomic64_inc_return);
18834
18835 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18836 +{
18837 + return atomic64_add_return_unchecked(1, ptr);
18838 +}
18839 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18840 +
18841 u64 atomic64_dec_return(atomic64_t *ptr)
18842 {
18843 return atomic64_sub_return(1, ptr);
18844 }
18845 EXPORT_SYMBOL(atomic64_dec_return);
18846
18847 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18848 +{
18849 + return atomic64_sub_return_unchecked(1, ptr);
18850 +}
18851 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18852 +
18853 /**
18854 * atomic64_add - add integer to atomic64 variable
18855 * @delta: integer value to add
18856 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18857 EXPORT_SYMBOL(atomic64_add);
18858
18859 /**
18860 + * atomic64_add_unchecked - add integer to atomic64 variable
18861 + * @delta: integer value to add
18862 + * @ptr: pointer to type atomic64_unchecked_t
18863 + *
18864 + * Atomically adds @delta to @ptr.
18865 + */
18866 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18867 +{
18868 + atomic64_add_return_unchecked(delta, ptr);
18869 +}
18870 +EXPORT_SYMBOL(atomic64_add_unchecked);
18871 +
18872 +/**
18873 * atomic64_sub - subtract the atomic64 variable
18874 * @delta: integer value to subtract
18875 * @ptr: pointer to type atomic64_t
18876 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18877 EXPORT_SYMBOL(atomic64_sub);
18878
18879 /**
18880 + * atomic64_sub_unchecked - subtract the atomic64 variable
18881 + * @delta: integer value to subtract
18882 + * @ptr: pointer to type atomic64_unchecked_t
18883 + *
18884 + * Atomically subtracts @delta from @ptr.
18885 + */
18886 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18887 +{
18888 + atomic64_add_unchecked(-delta, ptr);
18889 +}
18890 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18891 +
18892 +/**
18893 * atomic64_sub_and_test - subtract value from variable and test result
18894 * @delta: integer value to subtract
18895 * @ptr: pointer to type atomic64_t
18896 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18897 EXPORT_SYMBOL(atomic64_inc);
18898
18899 /**
18900 + * atomic64_inc_unchecked - increment atomic64 variable
18901 + * @ptr: pointer to type atomic64_unchecked_t
18902 + *
18903 + * Atomically increments @ptr by 1.
18904 + */
18905 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18906 +{
18907 + atomic64_add_unchecked(1, ptr);
18908 +}
18909 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18910 +
18911 +/**
18912 * atomic64_dec - decrement atomic64 variable
18913 * @ptr: pointer to type atomic64_t
18914 *
18915 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18916 EXPORT_SYMBOL(atomic64_dec);
18917
18918 /**
18919 + * atomic64_dec_unchecked - decrement atomic64 variable
18920 + * @ptr: pointer to type atomic64_unchecked_t
18921 + *
18922 + * Atomically decrements @ptr by 1.
18923 + */
18924 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18925 +{
18926 + atomic64_sub_unchecked(1, ptr);
18927 +}
18928 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18929 +
18930 +/**
18931 * atomic64_dec_and_test - decrement and test
18932 * @ptr: pointer to type atomic64_t
18933 *
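For orientation, the *_unchecked helpers added above all reduce to the same compare-and-swap retry loop: guess the current value, attempt the cmpxchg, and retry with whatever value was actually observed; starting the guess at 0 is what the MESI comment refers to, since the very first locked cmpxchg already requests the cache line for ownership. A minimal user-space C sketch of that loop follows; the function names and the use of GCC's __sync_val_compare_and_swap builtin are illustrative stand-ins, not code from the patch.

#include <stdint.h>

/* Exchange: returns the previous value, like atomic64_xchg_unchecked(). */
static uint64_t xchg64(uint64_t *ptr, uint64_t new_val)
{
        uint64_t old_val, real_val = 0;

        do {
                old_val = real_val;
                /* returns the value that really was in *ptr */
                real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
        } while (real_val != old_val);

        return old_val;
}

/* Add and return the new value, like atomic64_add_return_unchecked(). */
static uint64_t add_return64(uint64_t *ptr, uint64_t delta)
{
        uint64_t old_val, new_val, real_val = 0;

        do {
                old_val = real_val;
                new_val = old_val + delta;
                real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
        } while (real_val != old_val);

        return new_val;
}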
18934 diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
18935 --- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18936 +++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18937 @@ -28,7 +28,8 @@
18938 #include <linux/linkage.h>
18939 #include <asm/dwarf2.h>
18940 #include <asm/errno.h>
18941 -
18942 +#include <asm/segment.h>
18943 +
18944 /*
18945 * computes a partial checksum, e.g. for TCP/UDP fragments
18946 */
18947 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18948
18949 #define ARGBASE 16
18950 #define FP 12
18951 -
18952 -ENTRY(csum_partial_copy_generic)
18953 +
18954 +ENTRY(csum_partial_copy_generic_to_user)
18955 CFI_STARTPROC
18956 +
18957 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18958 + pushl %gs
18959 + CFI_ADJUST_CFA_OFFSET 4
18960 + popl %es
18961 + CFI_ADJUST_CFA_OFFSET -4
18962 + jmp csum_partial_copy_generic
18963 +#endif
18964 +
18965 +ENTRY(csum_partial_copy_generic_from_user)
18966 +
18967 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18968 + pushl %gs
18969 + CFI_ADJUST_CFA_OFFSET 4
18970 + popl %ds
18971 + CFI_ADJUST_CFA_OFFSET -4
18972 +#endif
18973 +
18974 +ENTRY(csum_partial_copy_generic)
18975 subl $4,%esp
18976 CFI_ADJUST_CFA_OFFSET 4
18977 pushl %edi
18978 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18979 jmp 4f
18980 SRC(1: movw (%esi), %bx )
18981 addl $2, %esi
18982 -DST( movw %bx, (%edi) )
18983 +DST( movw %bx, %es:(%edi) )
18984 addl $2, %edi
18985 addw %bx, %ax
18986 adcl $0, %eax
18987 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18988 SRC(1: movl (%esi), %ebx )
18989 SRC( movl 4(%esi), %edx )
18990 adcl %ebx, %eax
18991 -DST( movl %ebx, (%edi) )
18992 +DST( movl %ebx, %es:(%edi) )
18993 adcl %edx, %eax
18994 -DST( movl %edx, 4(%edi) )
18995 +DST( movl %edx, %es:4(%edi) )
18996
18997 SRC( movl 8(%esi), %ebx )
18998 SRC( movl 12(%esi), %edx )
18999 adcl %ebx, %eax
19000 -DST( movl %ebx, 8(%edi) )
19001 +DST( movl %ebx, %es:8(%edi) )
19002 adcl %edx, %eax
19003 -DST( movl %edx, 12(%edi) )
19004 +DST( movl %edx, %es:12(%edi) )
19005
19006 SRC( movl 16(%esi), %ebx )
19007 SRC( movl 20(%esi), %edx )
19008 adcl %ebx, %eax
19009 -DST( movl %ebx, 16(%edi) )
19010 +DST( movl %ebx, %es:16(%edi) )
19011 adcl %edx, %eax
19012 -DST( movl %edx, 20(%edi) )
19013 +DST( movl %edx, %es:20(%edi) )
19014
19015 SRC( movl 24(%esi), %ebx )
19016 SRC( movl 28(%esi), %edx )
19017 adcl %ebx, %eax
19018 -DST( movl %ebx, 24(%edi) )
19019 +DST( movl %ebx, %es:24(%edi) )
19020 adcl %edx, %eax
19021 -DST( movl %edx, 28(%edi) )
19022 +DST( movl %edx, %es:28(%edi) )
19023
19024 lea 32(%esi), %esi
19025 lea 32(%edi), %edi
19026 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19027 shrl $2, %edx # This clears CF
19028 SRC(3: movl (%esi), %ebx )
19029 adcl %ebx, %eax
19030 -DST( movl %ebx, (%edi) )
19031 +DST( movl %ebx, %es:(%edi) )
19032 lea 4(%esi), %esi
19033 lea 4(%edi), %edi
19034 dec %edx
19035 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19036 jb 5f
19037 SRC( movw (%esi), %cx )
19038 leal 2(%esi), %esi
19039 -DST( movw %cx, (%edi) )
19040 +DST( movw %cx, %es:(%edi) )
19041 leal 2(%edi), %edi
19042 je 6f
19043 shll $16,%ecx
19044 SRC(5: movb (%esi), %cl )
19045 -DST( movb %cl, (%edi) )
19046 +DST( movb %cl, %es:(%edi) )
19047 6: addl %ecx, %eax
19048 adcl $0, %eax
19049 7:
19050 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19051
19052 6001:
19053 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19054 - movl $-EFAULT, (%ebx)
19055 + movl $-EFAULT, %ss:(%ebx)
19056
19057 # zero the complete destination - computing the rest
19058 # is too much work
19059 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19060
19061 6002:
19062 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19063 - movl $-EFAULT,(%ebx)
19064 + movl $-EFAULT,%ss:(%ebx)
19065 jmp 5000b
19066
19067 .previous
19068
19069 + pushl %ss
19070 + CFI_ADJUST_CFA_OFFSET 4
19071 + popl %ds
19072 + CFI_ADJUST_CFA_OFFSET -4
19073 + pushl %ss
19074 + CFI_ADJUST_CFA_OFFSET 4
19075 + popl %es
19076 + CFI_ADJUST_CFA_OFFSET -4
19077 popl %ebx
19078 CFI_ADJUST_CFA_OFFSET -4
19079 CFI_RESTORE ebx
19080 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19081 CFI_ADJUST_CFA_OFFSET -4
19082 ret
19083 CFI_ENDPROC
19084 -ENDPROC(csum_partial_copy_generic)
19085 +ENDPROC(csum_partial_copy_generic_to_user)
19086
19087 #else
19088
19089 /* Version for PentiumII/PPro */
19090
19091 #define ROUND1(x) \
19092 + nop; nop; nop; \
19093 SRC(movl x(%esi), %ebx ) ; \
19094 addl %ebx, %eax ; \
19095 - DST(movl %ebx, x(%edi) ) ;
19096 + DST(movl %ebx, %es:x(%edi)) ;
19097
19098 #define ROUND(x) \
19099 + nop; nop; nop; \
19100 SRC(movl x(%esi), %ebx ) ; \
19101 adcl %ebx, %eax ; \
19102 - DST(movl %ebx, x(%edi) ) ;
19103 + DST(movl %ebx, %es:x(%edi)) ;
19104
19105 #define ARGBASE 12
19106 -
19107 -ENTRY(csum_partial_copy_generic)
19108 +
19109 +ENTRY(csum_partial_copy_generic_to_user)
19110 CFI_STARTPROC
19111 +
19112 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19113 + pushl %gs
19114 + CFI_ADJUST_CFA_OFFSET 4
19115 + popl %es
19116 + CFI_ADJUST_CFA_OFFSET -4
19117 + jmp csum_partial_copy_generic
19118 +#endif
19119 +
19120 +ENTRY(csum_partial_copy_generic_from_user)
19121 +
19122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19123 + pushl %gs
19124 + CFI_ADJUST_CFA_OFFSET 4
19125 + popl %ds
19126 + CFI_ADJUST_CFA_OFFSET -4
19127 +#endif
19128 +
19129 +ENTRY(csum_partial_copy_generic)
19130 pushl %ebx
19131 CFI_ADJUST_CFA_OFFSET 4
19132 CFI_REL_OFFSET ebx, 0
19133 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19134 subl %ebx, %edi
19135 lea -1(%esi),%edx
19136 andl $-32,%edx
19137 - lea 3f(%ebx,%ebx), %ebx
19138 + lea 3f(%ebx,%ebx,2), %ebx
19139 testl %esi, %esi
19140 jmp *%ebx
19141 1: addl $64,%esi
19142 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19143 jb 5f
19144 SRC( movw (%esi), %dx )
19145 leal 2(%esi), %esi
19146 -DST( movw %dx, (%edi) )
19147 +DST( movw %dx, %es:(%edi) )
19148 leal 2(%edi), %edi
19149 je 6f
19150 shll $16,%edx
19151 5:
19152 SRC( movb (%esi), %dl )
19153 -DST( movb %dl, (%edi) )
19154 +DST( movb %dl, %es:(%edi) )
19155 6: addl %edx, %eax
19156 adcl $0, %eax
19157 7:
19158 .section .fixup, "ax"
19159 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19160 - movl $-EFAULT, (%ebx)
19161 + movl $-EFAULT, %ss:(%ebx)
19162 # zero the complete destination (computing the rest is too much work)
19163 movl ARGBASE+8(%esp),%edi # dst
19164 movl ARGBASE+12(%esp),%ecx # len
19165 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19166 rep; stosb
19167 jmp 7b
19168 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19169 - movl $-EFAULT, (%ebx)
19170 + movl $-EFAULT, %ss:(%ebx)
19171 jmp 7b
19172 .previous
19173
19174 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19175 + pushl %ss
19176 + CFI_ADJUST_CFA_OFFSET 4
19177 + popl %ds
19178 + CFI_ADJUST_CFA_OFFSET -4
19179 + pushl %ss
19180 + CFI_ADJUST_CFA_OFFSET 4
19181 + popl %es
19182 + CFI_ADJUST_CFA_OFFSET -4
19183 +#endif
19184 +
19185 popl %esi
19186 CFI_ADJUST_CFA_OFFSET -4
19187 CFI_RESTORE esi
19188 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19189 CFI_RESTORE ebx
19190 ret
19191 CFI_ENDPROC
19192 -ENDPROC(csum_partial_copy_generic)
19193 +ENDPROC(csum_partial_copy_generic_to_user)
19194
19195 #undef ROUND
19196 #undef ROUND1
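As background, csum_partial_copy_generic accumulates a 32-bit ones'-complement partial sum while copying; the %es/%ds segment overrides added above only change where the bytes are read from or written to, not the arithmetic. A plain C sketch of the underlying checksum (simplified: no copy, big-endian word order, final fold included) might look like this; the names are illustrative, not from the patch.

#include <stddef.h>
#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries back in, twice */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

static uint16_t inet_checksum(const uint8_t *p, size_t len)
{
        uint32_t sum = 0;

        while (len > 1) {
                sum += ((uint32_t)p[0] << 8) | p[1];    /* 16-bit words */
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;             /* trailing odd byte */

        return csum_fold(sum);
}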
19197 diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19198 --- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19199 +++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19200 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
19201
19202 #include <asm/cpufeature.h>
19203
19204 - .section .altinstr_replacement,"ax"
19205 + .section .altinstr_replacement,"a"
19206 1: .byte 0xeb /* jmp <disp8> */
19207 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19208 2:
19209 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19210 --- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19211 +++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19212 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
19213
19214 #include <asm/cpufeature.h>
19215
19216 - .section .altinstr_replacement,"ax"
19217 + .section .altinstr_replacement,"a"
19218 1: .byte 0xeb /* jmp <disp8> */
19219 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19220 2:
19221 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19222 --- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19223 +++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19224 @@ -15,13 +15,14 @@
19225 #include <asm/asm-offsets.h>
19226 #include <asm/thread_info.h>
19227 #include <asm/cpufeature.h>
19228 +#include <asm/pgtable.h>
19229
19230 .macro ALTERNATIVE_JUMP feature,orig,alt
19231 0:
19232 .byte 0xe9 /* 32bit jump */
19233 .long \orig-1f /* by default jump to orig */
19234 1:
19235 - .section .altinstr_replacement,"ax"
19236 + .section .altinstr_replacement,"a"
19237 2: .byte 0xe9 /* near jump with 32bit immediate */
19238 .long \alt-1b /* offset */ /* or alternatively to alt */
19239 .previous
19240 @@ -64,49 +65,19 @@
19241 #endif
19242 .endm
19243
19244 -/* Standard copy_to_user with segment limit checking */
19245 -ENTRY(copy_to_user)
19246 - CFI_STARTPROC
19247 - GET_THREAD_INFO(%rax)
19248 - movq %rdi,%rcx
19249 - addq %rdx,%rcx
19250 - jc bad_to_user
19251 - cmpq TI_addr_limit(%rax),%rcx
19252 - ja bad_to_user
19253 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19254 - CFI_ENDPROC
19255 -ENDPROC(copy_to_user)
19256 -
19257 -/* Standard copy_from_user with segment limit checking */
19258 -ENTRY(copy_from_user)
19259 - CFI_STARTPROC
19260 - GET_THREAD_INFO(%rax)
19261 - movq %rsi,%rcx
19262 - addq %rdx,%rcx
19263 - jc bad_from_user
19264 - cmpq TI_addr_limit(%rax),%rcx
19265 - ja bad_from_user
19266 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19267 - CFI_ENDPROC
19268 -ENDPROC(copy_from_user)
19269 -
19270 ENTRY(copy_user_generic)
19271 CFI_STARTPROC
19272 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19273 CFI_ENDPROC
19274 ENDPROC(copy_user_generic)
19275
19276 -ENTRY(__copy_from_user_inatomic)
19277 - CFI_STARTPROC
19278 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19279 - CFI_ENDPROC
19280 -ENDPROC(__copy_from_user_inatomic)
19281 -
19282 .section .fixup,"ax"
19283 /* must zero dest */
19284 ENTRY(bad_from_user)
19285 bad_from_user:
19286 CFI_STARTPROC
19287 + testl %edx,%edx
19288 + js bad_to_user
19289 movl %edx,%ecx
19290 xorl %eax,%eax
19291 rep
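The copy_to_user/copy_from_user stubs deleted above performed the classic user-range check before dispatching to copy_user_generic: fail if addr + len wraps, or if the end crosses the task's addr_limit (the testl %edx,%edx / js added to bad_from_user separately guards the zeroing path against a negative length). A C sketch of that check, with illustrative names, assuming a 64-bit address space:

#include <stdbool.h>
#include <stdint.h>

static bool user_range_ok(uint64_t addr, uint64_t len, uint64_t addr_limit)
{
        uint64_t end = addr + len;

        if (end < addr)                 /* addq set CF: the range wraps */
                return false;           /* jc bad_*_user */
        return end <= addr_limit;       /* cmpq TI_addr_limit / ja bad_*_user */
}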
19292 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19293 --- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19294 +++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19295 @@ -14,6 +14,7 @@
19296 #include <asm/current.h>
19297 #include <asm/asm-offsets.h>
19298 #include <asm/thread_info.h>
19299 +#include <asm/pgtable.h>
19300
19301 .macro ALIGN_DESTINATION
19302 #ifdef FIX_ALIGNMENT
19303 @@ -50,6 +51,15 @@
19304 */
19305 ENTRY(__copy_user_nocache)
19306 CFI_STARTPROC
19307 +
19308 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19309 + mov $PAX_USER_SHADOW_BASE,%rcx
19310 + cmp %rcx,%rsi
19311 + jae 1f
19312 + add %rcx,%rsi
19313 +1:
19314 +#endif
19315 +
19316 cmpl $8,%edx
19317 jb 20f /* less then 8 bytes, go to byte copy loop */
19318 ALIGN_DESTINATION
19319 diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19320 --- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19321 +++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19322 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19323 len -= 2;
19324 }
19325 }
19326 +
19327 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19328 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19329 + src += PAX_USER_SHADOW_BASE;
19330 +#endif
19331 +
19332 isum = csum_partial_copy_generic((__force const void *)src,
19333 dst, len, isum, errp, NULL);
19334 if (unlikely(*errp))
19335 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19336 }
19337
19338 *errp = 0;
19339 +
19340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19341 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19342 + dst += PAX_USER_SHADOW_BASE;
19343 +#endif
19344 +
19345 return csum_partial_copy_generic(src, (void __force *)dst,
19346 len, isum, NULL, errp);
19347 }
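Both csum-wrapper hunks above, and the __copy_user_nocache hunk before them, use the same UDEREF idiom on amd64: a userland pointer that still lies below the shadow base is rebased into the kernel's shadow mapping of userland before it is dereferenced. A compact sketch, with SHADOW_BASE standing in for PAX_USER_SHADOW_BASE (the value shown is illustrative only):

#include <stdint.h>

#define SHADOW_BASE 0x0000100000000000UL        /* illustrative stand-in */

static const void *shadow_rebase(const void *uptr)
{
        uintptr_t p = (uintptr_t)uptr;

        if (p < SHADOW_BASE)            /* cmp/jae in the asm, if () in the C hunks */
                p += SHADOW_BASE;
        return (const void *)p;
}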
19348 diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19349 --- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19350 +++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19351 @@ -33,14 +33,35 @@
19352 #include <asm/asm-offsets.h>
19353 #include <asm/thread_info.h>
19354 #include <asm/asm.h>
19355 +#include <asm/segment.h>
19356 +#include <asm/pgtable.h>
19357 +
19358 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19359 +#define __copyuser_seg gs;
19360 +#else
19361 +#define __copyuser_seg
19362 +#endif
19363
19364 .text
19365 ENTRY(__get_user_1)
19366 CFI_STARTPROC
19367 +
19368 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19369 GET_THREAD_INFO(%_ASM_DX)
19370 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19371 jae bad_get_user
19372 -1: movzb (%_ASM_AX),%edx
19373 +
19374 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19375 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19376 + cmp %_ASM_DX,%_ASM_AX
19377 + jae 1234f
19378 + add %_ASM_DX,%_ASM_AX
19379 +1234:
19380 +#endif
19381 +
19382 +#endif
19383 +
19384 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19385 xor %eax,%eax
19386 ret
19387 CFI_ENDPROC
19388 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19389 ENTRY(__get_user_2)
19390 CFI_STARTPROC
19391 add $1,%_ASM_AX
19392 +
19393 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19394 jc bad_get_user
19395 GET_THREAD_INFO(%_ASM_DX)
19396 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19397 jae bad_get_user
19398 -2: movzwl -1(%_ASM_AX),%edx
19399 +
19400 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19401 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19402 + cmp %_ASM_DX,%_ASM_AX
19403 + jae 1234f
19404 + add %_ASM_DX,%_ASM_AX
19405 +1234:
19406 +#endif
19407 +
19408 +#endif
19409 +
19410 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19411 xor %eax,%eax
19412 ret
19413 CFI_ENDPROC
19414 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19415 ENTRY(__get_user_4)
19416 CFI_STARTPROC
19417 add $3,%_ASM_AX
19418 +
19419 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19420 jc bad_get_user
19421 GET_THREAD_INFO(%_ASM_DX)
19422 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19423 jae bad_get_user
19424 -3: mov -3(%_ASM_AX),%edx
19425 +
19426 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19427 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19428 + cmp %_ASM_DX,%_ASM_AX
19429 + jae 1234f
19430 + add %_ASM_DX,%_ASM_AX
19431 +1234:
19432 +#endif
19433 +
19434 +#endif
19435 +
19436 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19437 xor %eax,%eax
19438 ret
19439 CFI_ENDPROC
19440 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19441 GET_THREAD_INFO(%_ASM_DX)
19442 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19443 jae bad_get_user
19444 +
19445 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19446 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19447 + cmp %_ASM_DX,%_ASM_AX
19448 + jae 1234f
19449 + add %_ASM_DX,%_ASM_AX
19450 +1234:
19451 +#endif
19452 +
19453 4: movq -7(%_ASM_AX),%_ASM_DX
19454 xor %eax,%eax
19455 ret
19456 diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19457 --- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19458 +++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19459 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19460 * It is also a lot simpler. Use this when possible:
19461 */
19462
19463 - .section .altinstr_replacement, "ax"
19464 + .section .altinstr_replacement, "a"
19465 1: .byte 0xeb /* jmp <disp8> */
19466 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19467 2:
19468 diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19469 --- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19470 +++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19471 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19472
19473 #include <asm/cpufeature.h>
19474
19475 - .section .altinstr_replacement,"ax"
19476 + .section .altinstr_replacement,"a"
19477 1: .byte 0xeb /* jmp <disp8> */
19478 .byte (memset_c - memset) - (2f - 1b) /* offset */
19479 2:
19480 diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19481 --- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19482 +++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19483 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19484 {
19485 void *p;
19486 int i;
19487 + unsigned long cr0;
19488
19489 if (unlikely(in_interrupt()))
19490 return __memcpy(to, from, len);
19491 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19492 kernel_fpu_begin();
19493
19494 __asm__ __volatile__ (
19495 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19496 - " prefetch 64(%0)\n"
19497 - " prefetch 128(%0)\n"
19498 - " prefetch 192(%0)\n"
19499 - " prefetch 256(%0)\n"
19500 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19501 + " prefetch 64(%1)\n"
19502 + " prefetch 128(%1)\n"
19503 + " prefetch 192(%1)\n"
19504 + " prefetch 256(%1)\n"
19505 "2: \n"
19506 ".section .fixup, \"ax\"\n"
19507 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19508 + "3: \n"
19509 +
19510 +#ifdef CONFIG_PAX_KERNEXEC
19511 + " movl %%cr0, %0\n"
19512 + " movl %0, %%eax\n"
19513 + " andl $0xFFFEFFFF, %%eax\n"
19514 + " movl %%eax, %%cr0\n"
19515 +#endif
19516 +
19517 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19518 +
19519 +#ifdef CONFIG_PAX_KERNEXEC
19520 + " movl %0, %%cr0\n"
19521 +#endif
19522 +
19523 " jmp 2b\n"
19524 ".previous\n"
19525 _ASM_EXTABLE(1b, 3b)
19526 - : : "r" (from));
19527 + : "=&r" (cr0) : "r" (from) : "ax");
19528
19529 for ( ; i > 5; i--) {
19530 __asm__ __volatile__ (
19531 - "1: prefetch 320(%0)\n"
19532 - "2: movq (%0), %%mm0\n"
19533 - " movq 8(%0), %%mm1\n"
19534 - " movq 16(%0), %%mm2\n"
19535 - " movq 24(%0), %%mm3\n"
19536 - " movq %%mm0, (%1)\n"
19537 - " movq %%mm1, 8(%1)\n"
19538 - " movq %%mm2, 16(%1)\n"
19539 - " movq %%mm3, 24(%1)\n"
19540 - " movq 32(%0), %%mm0\n"
19541 - " movq 40(%0), %%mm1\n"
19542 - " movq 48(%0), %%mm2\n"
19543 - " movq 56(%0), %%mm3\n"
19544 - " movq %%mm0, 32(%1)\n"
19545 - " movq %%mm1, 40(%1)\n"
19546 - " movq %%mm2, 48(%1)\n"
19547 - " movq %%mm3, 56(%1)\n"
19548 + "1: prefetch 320(%1)\n"
19549 + "2: movq (%1), %%mm0\n"
19550 + " movq 8(%1), %%mm1\n"
19551 + " movq 16(%1), %%mm2\n"
19552 + " movq 24(%1), %%mm3\n"
19553 + " movq %%mm0, (%2)\n"
19554 + " movq %%mm1, 8(%2)\n"
19555 + " movq %%mm2, 16(%2)\n"
19556 + " movq %%mm3, 24(%2)\n"
19557 + " movq 32(%1), %%mm0\n"
19558 + " movq 40(%1), %%mm1\n"
19559 + " movq 48(%1), %%mm2\n"
19560 + " movq 56(%1), %%mm3\n"
19561 + " movq %%mm0, 32(%2)\n"
19562 + " movq %%mm1, 40(%2)\n"
19563 + " movq %%mm2, 48(%2)\n"
19564 + " movq %%mm3, 56(%2)\n"
19565 ".section .fixup, \"ax\"\n"
19566 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19567 + "3:\n"
19568 +
19569 +#ifdef CONFIG_PAX_KERNEXEC
19570 + " movl %%cr0, %0\n"
19571 + " movl %0, %%eax\n"
19572 + " andl $0xFFFEFFFF, %%eax\n"
19573 + " movl %%eax, %%cr0\n"
19574 +#endif
19575 +
19576 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19577 +
19578 +#ifdef CONFIG_PAX_KERNEXEC
19579 + " movl %0, %%cr0\n"
19580 +#endif
19581 +
19582 " jmp 2b\n"
19583 ".previous\n"
19584 _ASM_EXTABLE(1b, 3b)
19585 - : : "r" (from), "r" (to) : "memory");
19586 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19587
19588 from += 64;
19589 to += 64;
19590 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19591 static void fast_copy_page(void *to, void *from)
19592 {
19593 int i;
19594 + unsigned long cr0;
19595
19596 kernel_fpu_begin();
19597
19598 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19599 * but that is for later. -AV
19600 */
19601 __asm__ __volatile__(
19602 - "1: prefetch (%0)\n"
19603 - " prefetch 64(%0)\n"
19604 - " prefetch 128(%0)\n"
19605 - " prefetch 192(%0)\n"
19606 - " prefetch 256(%0)\n"
19607 + "1: prefetch (%1)\n"
19608 + " prefetch 64(%1)\n"
19609 + " prefetch 128(%1)\n"
19610 + " prefetch 192(%1)\n"
19611 + " prefetch 256(%1)\n"
19612 "2: \n"
19613 ".section .fixup, \"ax\"\n"
19614 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19615 + "3: \n"
19616 +
19617 +#ifdef CONFIG_PAX_KERNEXEC
19618 + " movl %%cr0, %0\n"
19619 + " movl %0, %%eax\n"
19620 + " andl $0xFFFEFFFF, %%eax\n"
19621 + " movl %%eax, %%cr0\n"
19622 +#endif
19623 +
19624 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19625 +
19626 +#ifdef CONFIG_PAX_KERNEXEC
19627 + " movl %0, %%cr0\n"
19628 +#endif
19629 +
19630 " jmp 2b\n"
19631 ".previous\n"
19632 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19633 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19634
19635 for (i = 0; i < (4096-320)/64; i++) {
19636 __asm__ __volatile__ (
19637 - "1: prefetch 320(%0)\n"
19638 - "2: movq (%0), %%mm0\n"
19639 - " movntq %%mm0, (%1)\n"
19640 - " movq 8(%0), %%mm1\n"
19641 - " movntq %%mm1, 8(%1)\n"
19642 - " movq 16(%0), %%mm2\n"
19643 - " movntq %%mm2, 16(%1)\n"
19644 - " movq 24(%0), %%mm3\n"
19645 - " movntq %%mm3, 24(%1)\n"
19646 - " movq 32(%0), %%mm4\n"
19647 - " movntq %%mm4, 32(%1)\n"
19648 - " movq 40(%0), %%mm5\n"
19649 - " movntq %%mm5, 40(%1)\n"
19650 - " movq 48(%0), %%mm6\n"
19651 - " movntq %%mm6, 48(%1)\n"
19652 - " movq 56(%0), %%mm7\n"
19653 - " movntq %%mm7, 56(%1)\n"
19654 + "1: prefetch 320(%1)\n"
19655 + "2: movq (%1), %%mm0\n"
19656 + " movntq %%mm0, (%2)\n"
19657 + " movq 8(%1), %%mm1\n"
19658 + " movntq %%mm1, 8(%2)\n"
19659 + " movq 16(%1), %%mm2\n"
19660 + " movntq %%mm2, 16(%2)\n"
19661 + " movq 24(%1), %%mm3\n"
19662 + " movntq %%mm3, 24(%2)\n"
19663 + " movq 32(%1), %%mm4\n"
19664 + " movntq %%mm4, 32(%2)\n"
19665 + " movq 40(%1), %%mm5\n"
19666 + " movntq %%mm5, 40(%2)\n"
19667 + " movq 48(%1), %%mm6\n"
19668 + " movntq %%mm6, 48(%2)\n"
19669 + " movq 56(%1), %%mm7\n"
19670 + " movntq %%mm7, 56(%2)\n"
19671 ".section .fixup, \"ax\"\n"
19672 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19673 + "3:\n"
19674 +
19675 +#ifdef CONFIG_PAX_KERNEXEC
19676 + " movl %%cr0, %0\n"
19677 + " movl %0, %%eax\n"
19678 + " andl $0xFFFEFFFF, %%eax\n"
19679 + " movl %%eax, %%cr0\n"
19680 +#endif
19681 +
19682 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19683 +
19684 +#ifdef CONFIG_PAX_KERNEXEC
19685 + " movl %0, %%cr0\n"
19686 +#endif
19687 +
19688 " jmp 2b\n"
19689 ".previous\n"
19690 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19691 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19692
19693 from += 64;
19694 to += 64;
19695 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19696 static void fast_copy_page(void *to, void *from)
19697 {
19698 int i;
19699 + unsigned long cr0;
19700
19701 kernel_fpu_begin();
19702
19703 __asm__ __volatile__ (
19704 - "1: prefetch (%0)\n"
19705 - " prefetch 64(%0)\n"
19706 - " prefetch 128(%0)\n"
19707 - " prefetch 192(%0)\n"
19708 - " prefetch 256(%0)\n"
19709 + "1: prefetch (%1)\n"
19710 + " prefetch 64(%1)\n"
19711 + " prefetch 128(%1)\n"
19712 + " prefetch 192(%1)\n"
19713 + " prefetch 256(%1)\n"
19714 "2: \n"
19715 ".section .fixup, \"ax\"\n"
19716 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19717 + "3: \n"
19718 +
19719 +#ifdef CONFIG_PAX_KERNEXEC
19720 + " movl %%cr0, %0\n"
19721 + " movl %0, %%eax\n"
19722 + " andl $0xFFFEFFFF, %%eax\n"
19723 + " movl %%eax, %%cr0\n"
19724 +#endif
19725 +
19726 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19727 +
19728 +#ifdef CONFIG_PAX_KERNEXEC
19729 + " movl %0, %%cr0\n"
19730 +#endif
19731 +
19732 " jmp 2b\n"
19733 ".previous\n"
19734 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19735 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19736
19737 for (i = 0; i < 4096/64; i++) {
19738 __asm__ __volatile__ (
19739 - "1: prefetch 320(%0)\n"
19740 - "2: movq (%0), %%mm0\n"
19741 - " movq 8(%0), %%mm1\n"
19742 - " movq 16(%0), %%mm2\n"
19743 - " movq 24(%0), %%mm3\n"
19744 - " movq %%mm0, (%1)\n"
19745 - " movq %%mm1, 8(%1)\n"
19746 - " movq %%mm2, 16(%1)\n"
19747 - " movq %%mm3, 24(%1)\n"
19748 - " movq 32(%0), %%mm0\n"
19749 - " movq 40(%0), %%mm1\n"
19750 - " movq 48(%0), %%mm2\n"
19751 - " movq 56(%0), %%mm3\n"
19752 - " movq %%mm0, 32(%1)\n"
19753 - " movq %%mm1, 40(%1)\n"
19754 - " movq %%mm2, 48(%1)\n"
19755 - " movq %%mm3, 56(%1)\n"
19756 + "1: prefetch 320(%1)\n"
19757 + "2: movq (%1), %%mm0\n"
19758 + " movq 8(%1), %%mm1\n"
19759 + " movq 16(%1), %%mm2\n"
19760 + " movq 24(%1), %%mm3\n"
19761 + " movq %%mm0, (%2)\n"
19762 + " movq %%mm1, 8(%2)\n"
19763 + " movq %%mm2, 16(%2)\n"
19764 + " movq %%mm3, 24(%2)\n"
19765 + " movq 32(%1), %%mm0\n"
19766 + " movq 40(%1), %%mm1\n"
19767 + " movq 48(%1), %%mm2\n"
19768 + " movq 56(%1), %%mm3\n"
19769 + " movq %%mm0, 32(%2)\n"
19770 + " movq %%mm1, 40(%2)\n"
19771 + " movq %%mm2, 48(%2)\n"
19772 + " movq %%mm3, 56(%2)\n"
19773 ".section .fixup, \"ax\"\n"
19774 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19775 + "3:\n"
19776 +
19777 +#ifdef CONFIG_PAX_KERNEXEC
19778 + " movl %%cr0, %0\n"
19779 + " movl %0, %%eax\n"
19780 + " andl $0xFFFEFFFF, %%eax\n"
19781 + " movl %%eax, %%cr0\n"
19782 +#endif
19783 +
19784 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19785 +
19786 +#ifdef CONFIG_PAX_KERNEXEC
19787 + " movl %0, %%cr0\n"
19788 +#endif
19789 +
19790 " jmp 2b\n"
19791 ".previous\n"
19792 _ASM_EXTABLE(1b, 3b)
19793 - : : "r" (from), "r" (to) : "memory");
19794 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19795
19796 from += 64;
19797 to += 64;
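Under KERNEXEC the fixup paths above must briefly lift write protection before patching the faulting prefetch into a short jmp, which is what the cr0 save / andl / restore sequence does; the 0xFFFEFFFF mask simply clears CR0.WP (bit 16). A trivial standalone check of that constant:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t x86_cr0_wp = 1u << 16;   /* CR0.WP */

        assert(0xFFFEFFFFu == (uint32_t)~x86_cr0_wp);
        return 0;
}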
19798 diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19799 --- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19800 +++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19801 @@ -15,7 +15,8 @@
19802 #include <asm/thread_info.h>
19803 #include <asm/errno.h>
19804 #include <asm/asm.h>
19805 -
19806 +#include <asm/segment.h>
19807 +#include <asm/pgtable.h>
19808
19809 /*
19810 * __put_user_X
19811 @@ -29,52 +30,119 @@
19812 * as they get called from within inline assembly.
19813 */
19814
19815 -#define ENTER CFI_STARTPROC ; \
19816 - GET_THREAD_INFO(%_ASM_BX)
19817 +#define ENTER CFI_STARTPROC
19818 #define EXIT ret ; \
19819 CFI_ENDPROC
19820
19821 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19822 +#define _DEST %_ASM_CX,%_ASM_BX
19823 +#else
19824 +#define _DEST %_ASM_CX
19825 +#endif
19826 +
19827 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19828 +#define __copyuser_seg gs;
19829 +#else
19830 +#define __copyuser_seg
19831 +#endif
19832 +
19833 .text
19834 ENTRY(__put_user_1)
19835 ENTER
19836 +
19837 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19838 + GET_THREAD_INFO(%_ASM_BX)
19839 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19840 jae bad_put_user
19841 -1: movb %al,(%_ASM_CX)
19842 +
19843 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19844 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19845 + cmp %_ASM_BX,%_ASM_CX
19846 + jb 1234f
19847 + xor %ebx,%ebx
19848 +1234:
19849 +#endif
19850 +
19851 +#endif
19852 +
19853 +1: __copyuser_seg movb %al,(_DEST)
19854 xor %eax,%eax
19855 EXIT
19856 ENDPROC(__put_user_1)
19857
19858 ENTRY(__put_user_2)
19859 ENTER
19860 +
19861 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19862 + GET_THREAD_INFO(%_ASM_BX)
19863 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19864 sub $1,%_ASM_BX
19865 cmp %_ASM_BX,%_ASM_CX
19866 jae bad_put_user
19867 -2: movw %ax,(%_ASM_CX)
19868 +
19869 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19870 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19871 + cmp %_ASM_BX,%_ASM_CX
19872 + jb 1234f
19873 + xor %ebx,%ebx
19874 +1234:
19875 +#endif
19876 +
19877 +#endif
19878 +
19879 +2: __copyuser_seg movw %ax,(_DEST)
19880 xor %eax,%eax
19881 EXIT
19882 ENDPROC(__put_user_2)
19883
19884 ENTRY(__put_user_4)
19885 ENTER
19886 +
19887 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19888 + GET_THREAD_INFO(%_ASM_BX)
19889 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19890 sub $3,%_ASM_BX
19891 cmp %_ASM_BX,%_ASM_CX
19892 jae bad_put_user
19893 -3: movl %eax,(%_ASM_CX)
19894 +
19895 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19896 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19897 + cmp %_ASM_BX,%_ASM_CX
19898 + jb 1234f
19899 + xor %ebx,%ebx
19900 +1234:
19901 +#endif
19902 +
19903 +#endif
19904 +
19905 +3: __copyuser_seg movl %eax,(_DEST)
19906 xor %eax,%eax
19907 EXIT
19908 ENDPROC(__put_user_4)
19909
19910 ENTRY(__put_user_8)
19911 ENTER
19912 +
19913 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19914 + GET_THREAD_INFO(%_ASM_BX)
19915 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19916 sub $7,%_ASM_BX
19917 cmp %_ASM_BX,%_ASM_CX
19918 jae bad_put_user
19919 -4: mov %_ASM_AX,(%_ASM_CX)
19920 +
19921 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19922 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19923 + cmp %_ASM_BX,%_ASM_CX
19924 + jb 1234f
19925 + xor %ebx,%ebx
19926 +1234:
19927 +#endif
19928 +
19929 +#endif
19930 +
19931 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19932 #ifdef CONFIG_X86_32
19933 -5: movl %edx,4(%_ASM_CX)
19934 +5: __copyuser_seg movl %edx,4(_DEST)
19935 #endif
19936 xor %eax,%eax
19937 EXIT
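The _DEST macro above expresses the same shadow rebase without a separate add: on amd64 UDEREF the store target is (%_ASM_CX,%_ASM_BX), and %_ASM_BX is preloaded with the shadow base, then zeroed again when the pointer already lies above it. In C, with an illustrative SHADOW_BASE in place of PAX_USER_SHADOW_BASE:

#include <stdint.h>

#define SHADOW_BASE 0x0000100000000000UL        /* illustrative stand-in */

static uintptr_t put_user_dest(uintptr_t user_ptr)
{
        /* cmp %_ASM_BX,%_ASM_CX / jb 1234f / xor %ebx,%ebx */
        uintptr_t bias = (user_ptr < SHADOW_BASE) ? SHADOW_BASE : 0;

        return user_ptr + bias;                 /* movb %al,(_DEST) */
}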
19938 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
19939 --- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19940 +++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19941 @@ -43,7 +43,7 @@ do { \
19942 __asm__ __volatile__( \
19943 " testl %1,%1\n" \
19944 " jz 2f\n" \
19945 - "0: lodsb\n" \
19946 + "0: "__copyuser_seg"lodsb\n" \
19947 " stosb\n" \
19948 " testb %%al,%%al\n" \
19949 " jz 1f\n" \
19950 @@ -128,10 +128,12 @@ do { \
19951 int __d0; \
19952 might_fault(); \
19953 __asm__ __volatile__( \
19954 + __COPYUSER_SET_ES \
19955 "0: rep; stosl\n" \
19956 " movl %2,%0\n" \
19957 "1: rep; stosb\n" \
19958 "2:\n" \
19959 + __COPYUSER_RESTORE_ES \
19960 ".section .fixup,\"ax\"\n" \
19961 "3: lea 0(%2,%0,4),%0\n" \
19962 " jmp 2b\n" \
19963 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19964 might_fault();
19965
19966 __asm__ __volatile__(
19967 + __COPYUSER_SET_ES
19968 " testl %0, %0\n"
19969 " jz 3f\n"
19970 " andl %0,%%ecx\n"
19971 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19972 " subl %%ecx,%0\n"
19973 " addl %0,%%eax\n"
19974 "1:\n"
19975 + __COPYUSER_RESTORE_ES
19976 ".section .fixup,\"ax\"\n"
19977 "2: xorl %%eax,%%eax\n"
19978 " jmp 1b\n"
19979 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19980
19981 #ifdef CONFIG_X86_INTEL_USERCOPY
19982 static unsigned long
19983 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19984 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19985 {
19986 int d0, d1;
19987 __asm__ __volatile__(
19988 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19989 " .align 2,0x90\n"
19990 "3: movl 0(%4), %%eax\n"
19991 "4: movl 4(%4), %%edx\n"
19992 - "5: movl %%eax, 0(%3)\n"
19993 - "6: movl %%edx, 4(%3)\n"
19994 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19995 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19996 "7: movl 8(%4), %%eax\n"
19997 "8: movl 12(%4),%%edx\n"
19998 - "9: movl %%eax, 8(%3)\n"
19999 - "10: movl %%edx, 12(%3)\n"
20000 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20001 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20002 "11: movl 16(%4), %%eax\n"
20003 "12: movl 20(%4), %%edx\n"
20004 - "13: movl %%eax, 16(%3)\n"
20005 - "14: movl %%edx, 20(%3)\n"
20006 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20007 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20008 "15: movl 24(%4), %%eax\n"
20009 "16: movl 28(%4), %%edx\n"
20010 - "17: movl %%eax, 24(%3)\n"
20011 - "18: movl %%edx, 28(%3)\n"
20012 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20013 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20014 "19: movl 32(%4), %%eax\n"
20015 "20: movl 36(%4), %%edx\n"
20016 - "21: movl %%eax, 32(%3)\n"
20017 - "22: movl %%edx, 36(%3)\n"
20018 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20019 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20020 "23: movl 40(%4), %%eax\n"
20021 "24: movl 44(%4), %%edx\n"
20022 - "25: movl %%eax, 40(%3)\n"
20023 - "26: movl %%edx, 44(%3)\n"
20024 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20025 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20026 "27: movl 48(%4), %%eax\n"
20027 "28: movl 52(%4), %%edx\n"
20028 - "29: movl %%eax, 48(%3)\n"
20029 - "30: movl %%edx, 52(%3)\n"
20030 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20031 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20032 "31: movl 56(%4), %%eax\n"
20033 "32: movl 60(%4), %%edx\n"
20034 - "33: movl %%eax, 56(%3)\n"
20035 - "34: movl %%edx, 60(%3)\n"
20036 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20037 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20038 " addl $-64, %0\n"
20039 " addl $64, %4\n"
20040 " addl $64, %3\n"
20041 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20042 " shrl $2, %0\n"
20043 " andl $3, %%eax\n"
20044 " cld\n"
20045 + __COPYUSER_SET_ES
20046 "99: rep; movsl\n"
20047 "36: movl %%eax, %0\n"
20048 "37: rep; movsb\n"
20049 "100:\n"
20050 + __COPYUSER_RESTORE_ES
20051 + ".section .fixup,\"ax\"\n"
20052 + "101: lea 0(%%eax,%0,4),%0\n"
20053 + " jmp 100b\n"
20054 + ".previous\n"
20055 + ".section __ex_table,\"a\"\n"
20056 + " .align 4\n"
20057 + " .long 1b,100b\n"
20058 + " .long 2b,100b\n"
20059 + " .long 3b,100b\n"
20060 + " .long 4b,100b\n"
20061 + " .long 5b,100b\n"
20062 + " .long 6b,100b\n"
20063 + " .long 7b,100b\n"
20064 + " .long 8b,100b\n"
20065 + " .long 9b,100b\n"
20066 + " .long 10b,100b\n"
20067 + " .long 11b,100b\n"
20068 + " .long 12b,100b\n"
20069 + " .long 13b,100b\n"
20070 + " .long 14b,100b\n"
20071 + " .long 15b,100b\n"
20072 + " .long 16b,100b\n"
20073 + " .long 17b,100b\n"
20074 + " .long 18b,100b\n"
20075 + " .long 19b,100b\n"
20076 + " .long 20b,100b\n"
20077 + " .long 21b,100b\n"
20078 + " .long 22b,100b\n"
20079 + " .long 23b,100b\n"
20080 + " .long 24b,100b\n"
20081 + " .long 25b,100b\n"
20082 + " .long 26b,100b\n"
20083 + " .long 27b,100b\n"
20084 + " .long 28b,100b\n"
20085 + " .long 29b,100b\n"
20086 + " .long 30b,100b\n"
20087 + " .long 31b,100b\n"
20088 + " .long 32b,100b\n"
20089 + " .long 33b,100b\n"
20090 + " .long 34b,100b\n"
20091 + " .long 35b,100b\n"
20092 + " .long 36b,100b\n"
20093 + " .long 37b,100b\n"
20094 + " .long 99b,101b\n"
20095 + ".previous"
20096 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20097 + : "1"(to), "2"(from), "0"(size)
20098 + : "eax", "edx", "memory");
20099 + return size;
20100 +}
20101 +
20102 +static unsigned long
20103 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20104 +{
20105 + int d0, d1;
20106 + __asm__ __volatile__(
20107 + " .align 2,0x90\n"
20108 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20109 + " cmpl $67, %0\n"
20110 + " jbe 3f\n"
20111 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20112 + " .align 2,0x90\n"
20113 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20114 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20115 + "5: movl %%eax, 0(%3)\n"
20116 + "6: movl %%edx, 4(%3)\n"
20117 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20118 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20119 + "9: movl %%eax, 8(%3)\n"
20120 + "10: movl %%edx, 12(%3)\n"
20121 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20122 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20123 + "13: movl %%eax, 16(%3)\n"
20124 + "14: movl %%edx, 20(%3)\n"
20125 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20126 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20127 + "17: movl %%eax, 24(%3)\n"
20128 + "18: movl %%edx, 28(%3)\n"
20129 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20130 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20131 + "21: movl %%eax, 32(%3)\n"
20132 + "22: movl %%edx, 36(%3)\n"
20133 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20134 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20135 + "25: movl %%eax, 40(%3)\n"
20136 + "26: movl %%edx, 44(%3)\n"
20137 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20138 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20139 + "29: movl %%eax, 48(%3)\n"
20140 + "30: movl %%edx, 52(%3)\n"
20141 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20142 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20143 + "33: movl %%eax, 56(%3)\n"
20144 + "34: movl %%edx, 60(%3)\n"
20145 + " addl $-64, %0\n"
20146 + " addl $64, %4\n"
20147 + " addl $64, %3\n"
20148 + " cmpl $63, %0\n"
20149 + " ja 1b\n"
20150 + "35: movl %0, %%eax\n"
20151 + " shrl $2, %0\n"
20152 + " andl $3, %%eax\n"
20153 + " cld\n"
20154 + "99: rep; "__copyuser_seg" movsl\n"
20155 + "36: movl %%eax, %0\n"
20156 + "37: rep; "__copyuser_seg" movsb\n"
20157 + "100:\n"
20158 ".section .fixup,\"ax\"\n"
20159 "101: lea 0(%%eax,%0,4),%0\n"
20160 " jmp 100b\n"
20161 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20162 int d0, d1;
20163 __asm__ __volatile__(
20164 " .align 2,0x90\n"
20165 - "0: movl 32(%4), %%eax\n"
20166 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20167 " cmpl $67, %0\n"
20168 " jbe 2f\n"
20169 - "1: movl 64(%4), %%eax\n"
20170 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20171 " .align 2,0x90\n"
20172 - "2: movl 0(%4), %%eax\n"
20173 - "21: movl 4(%4), %%edx\n"
20174 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20175 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20176 " movl %%eax, 0(%3)\n"
20177 " movl %%edx, 4(%3)\n"
20178 - "3: movl 8(%4), %%eax\n"
20179 - "31: movl 12(%4),%%edx\n"
20180 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20181 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20182 " movl %%eax, 8(%3)\n"
20183 " movl %%edx, 12(%3)\n"
20184 - "4: movl 16(%4), %%eax\n"
20185 - "41: movl 20(%4), %%edx\n"
20186 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20187 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20188 " movl %%eax, 16(%3)\n"
20189 " movl %%edx, 20(%3)\n"
20190 - "10: movl 24(%4), %%eax\n"
20191 - "51: movl 28(%4), %%edx\n"
20192 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20193 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20194 " movl %%eax, 24(%3)\n"
20195 " movl %%edx, 28(%3)\n"
20196 - "11: movl 32(%4), %%eax\n"
20197 - "61: movl 36(%4), %%edx\n"
20198 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20199 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20200 " movl %%eax, 32(%3)\n"
20201 " movl %%edx, 36(%3)\n"
20202 - "12: movl 40(%4), %%eax\n"
20203 - "71: movl 44(%4), %%edx\n"
20204 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20205 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20206 " movl %%eax, 40(%3)\n"
20207 " movl %%edx, 44(%3)\n"
20208 - "13: movl 48(%4), %%eax\n"
20209 - "81: movl 52(%4), %%edx\n"
20210 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20211 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20212 " movl %%eax, 48(%3)\n"
20213 " movl %%edx, 52(%3)\n"
20214 - "14: movl 56(%4), %%eax\n"
20215 - "91: movl 60(%4), %%edx\n"
20216 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20217 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20218 " movl %%eax, 56(%3)\n"
20219 " movl %%edx, 60(%3)\n"
20220 " addl $-64, %0\n"
20221 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20222 " shrl $2, %0\n"
20223 " andl $3, %%eax\n"
20224 " cld\n"
20225 - "6: rep; movsl\n"
20226 + "6: rep; "__copyuser_seg" movsl\n"
20227 " movl %%eax,%0\n"
20228 - "7: rep; movsb\n"
20229 + "7: rep; "__copyuser_seg" movsb\n"
20230 "8:\n"
20231 ".section .fixup,\"ax\"\n"
20232 "9: lea 0(%%eax,%0,4),%0\n"
20233 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20234
20235 __asm__ __volatile__(
20236 " .align 2,0x90\n"
20237 - "0: movl 32(%4), %%eax\n"
20238 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20239 " cmpl $67, %0\n"
20240 " jbe 2f\n"
20241 - "1: movl 64(%4), %%eax\n"
20242 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20243 " .align 2,0x90\n"
20244 - "2: movl 0(%4), %%eax\n"
20245 - "21: movl 4(%4), %%edx\n"
20246 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20247 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20248 " movnti %%eax, 0(%3)\n"
20249 " movnti %%edx, 4(%3)\n"
20250 - "3: movl 8(%4), %%eax\n"
20251 - "31: movl 12(%4),%%edx\n"
20252 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20253 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20254 " movnti %%eax, 8(%3)\n"
20255 " movnti %%edx, 12(%3)\n"
20256 - "4: movl 16(%4), %%eax\n"
20257 - "41: movl 20(%4), %%edx\n"
20258 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20259 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20260 " movnti %%eax, 16(%3)\n"
20261 " movnti %%edx, 20(%3)\n"
20262 - "10: movl 24(%4), %%eax\n"
20263 - "51: movl 28(%4), %%edx\n"
20264 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20265 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20266 " movnti %%eax, 24(%3)\n"
20267 " movnti %%edx, 28(%3)\n"
20268 - "11: movl 32(%4), %%eax\n"
20269 - "61: movl 36(%4), %%edx\n"
20270 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20271 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20272 " movnti %%eax, 32(%3)\n"
20273 " movnti %%edx, 36(%3)\n"
20274 - "12: movl 40(%4), %%eax\n"
20275 - "71: movl 44(%4), %%edx\n"
20276 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20277 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20278 " movnti %%eax, 40(%3)\n"
20279 " movnti %%edx, 44(%3)\n"
20280 - "13: movl 48(%4), %%eax\n"
20281 - "81: movl 52(%4), %%edx\n"
20282 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20283 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20284 " movnti %%eax, 48(%3)\n"
20285 " movnti %%edx, 52(%3)\n"
20286 - "14: movl 56(%4), %%eax\n"
20287 - "91: movl 60(%4), %%edx\n"
20288 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20289 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20290 " movnti %%eax, 56(%3)\n"
20291 " movnti %%edx, 60(%3)\n"
20292 " addl $-64, %0\n"
20293 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20294 " shrl $2, %0\n"
20295 " andl $3, %%eax\n"
20296 " cld\n"
20297 - "6: rep; movsl\n"
20298 + "6: rep; "__copyuser_seg" movsl\n"
20299 " movl %%eax,%0\n"
20300 - "7: rep; movsb\n"
20301 + "7: rep; "__copyuser_seg" movsb\n"
20302 "8:\n"
20303 ".section .fixup,\"ax\"\n"
20304 "9: lea 0(%%eax,%0,4),%0\n"
20305 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20306
20307 __asm__ __volatile__(
20308 " .align 2,0x90\n"
20309 - "0: movl 32(%4), %%eax\n"
20310 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20311 " cmpl $67, %0\n"
20312 " jbe 2f\n"
20313 - "1: movl 64(%4), %%eax\n"
20314 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20315 " .align 2,0x90\n"
20316 - "2: movl 0(%4), %%eax\n"
20317 - "21: movl 4(%4), %%edx\n"
20318 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20319 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20320 " movnti %%eax, 0(%3)\n"
20321 " movnti %%edx, 4(%3)\n"
20322 - "3: movl 8(%4), %%eax\n"
20323 - "31: movl 12(%4),%%edx\n"
20324 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20325 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20326 " movnti %%eax, 8(%3)\n"
20327 " movnti %%edx, 12(%3)\n"
20328 - "4: movl 16(%4), %%eax\n"
20329 - "41: movl 20(%4), %%edx\n"
20330 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20331 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20332 " movnti %%eax, 16(%3)\n"
20333 " movnti %%edx, 20(%3)\n"
20334 - "10: movl 24(%4), %%eax\n"
20335 - "51: movl 28(%4), %%edx\n"
20336 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20337 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20338 " movnti %%eax, 24(%3)\n"
20339 " movnti %%edx, 28(%3)\n"
20340 - "11: movl 32(%4), %%eax\n"
20341 - "61: movl 36(%4), %%edx\n"
20342 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20343 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20344 " movnti %%eax, 32(%3)\n"
20345 " movnti %%edx, 36(%3)\n"
20346 - "12: movl 40(%4), %%eax\n"
20347 - "71: movl 44(%4), %%edx\n"
20348 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20349 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20350 " movnti %%eax, 40(%3)\n"
20351 " movnti %%edx, 44(%3)\n"
20352 - "13: movl 48(%4), %%eax\n"
20353 - "81: movl 52(%4), %%edx\n"
20354 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20355 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20356 " movnti %%eax, 48(%3)\n"
20357 " movnti %%edx, 52(%3)\n"
20358 - "14: movl 56(%4), %%eax\n"
20359 - "91: movl 60(%4), %%edx\n"
20360 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20361 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20362 " movnti %%eax, 56(%3)\n"
20363 " movnti %%edx, 60(%3)\n"
20364 " addl $-64, %0\n"
20365 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20366 " shrl $2, %0\n"
20367 " andl $3, %%eax\n"
20368 " cld\n"
20369 - "6: rep; movsl\n"
20370 + "6: rep; "__copyuser_seg" movsl\n"
20371 " movl %%eax,%0\n"
20372 - "7: rep; movsb\n"
20373 + "7: rep; "__copyuser_seg" movsb\n"
20374 "8:\n"
20375 ".section .fixup,\"ax\"\n"
20376 "9: lea 0(%%eax,%0,4),%0\n"
20377 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20378 */
20379 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20380 unsigned long size);
20381 -unsigned long __copy_user_intel(void __user *to, const void *from,
20382 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20383 + unsigned long size);
20384 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20385 unsigned long size);
20386 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20387 const void __user *from, unsigned long size);
20388 #endif /* CONFIG_X86_INTEL_USERCOPY */
20389
20390 /* Generic arbitrary sized copy. */
20391 -#define __copy_user(to, from, size) \
20392 +#define __copy_user(to, from, size, prefix, set, restore) \
20393 do { \
20394 int __d0, __d1, __d2; \
20395 __asm__ __volatile__( \
20396 + set \
20397 " cmp $7,%0\n" \
20398 " jbe 1f\n" \
20399 " movl %1,%0\n" \
20400 " negl %0\n" \
20401 " andl $7,%0\n" \
20402 " subl %0,%3\n" \
20403 - "4: rep; movsb\n" \
20404 + "4: rep; "prefix"movsb\n" \
20405 " movl %3,%0\n" \
20406 " shrl $2,%0\n" \
20407 " andl $3,%3\n" \
20408 " .align 2,0x90\n" \
20409 - "0: rep; movsl\n" \
20410 + "0: rep; "prefix"movsl\n" \
20411 " movl %3,%0\n" \
20412 - "1: rep; movsb\n" \
20413 + "1: rep; "prefix"movsb\n" \
20414 "2:\n" \
20415 + restore \
20416 ".section .fixup,\"ax\"\n" \
20417 "5: addl %3,%0\n" \
20418 " jmp 2b\n" \
20419 @@ -682,14 +799,14 @@ do { \
20420 " negl %0\n" \
20421 " andl $7,%0\n" \
20422 " subl %0,%3\n" \
20423 - "4: rep; movsb\n" \
20424 + "4: rep; "__copyuser_seg"movsb\n" \
20425 " movl %3,%0\n" \
20426 " shrl $2,%0\n" \
20427 " andl $3,%3\n" \
20428 " .align 2,0x90\n" \
20429 - "0: rep; movsl\n" \
20430 + "0: rep; "__copyuser_seg"movsl\n" \
20431 " movl %3,%0\n" \
20432 - "1: rep; movsb\n" \
20433 + "1: rep; "__copyuser_seg"movsb\n" \
20434 "2:\n" \
20435 ".section .fixup,\"ax\"\n" \
20436 "5: addl %3,%0\n" \
20437 @@ -775,9 +892,9 @@ survive:
20438 }
20439 #endif
20440 if (movsl_is_ok(to, from, n))
20441 - __copy_user(to, from, n);
20442 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20443 else
20444 - n = __copy_user_intel(to, from, n);
20445 + n = __generic_copy_to_user_intel(to, from, n);
20446 return n;
20447 }
20448 EXPORT_SYMBOL(__copy_to_user_ll);
20449 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20450 unsigned long n)
20451 {
20452 if (movsl_is_ok(to, from, n))
20453 - __copy_user(to, from, n);
20454 + __copy_user(to, from, n, __copyuser_seg, "", "");
20455 else
20456 - n = __copy_user_intel((void __user *)to,
20457 - (const void *)from, n);
20458 + n = __generic_copy_from_user_intel(to, from, n);
20459 return n;
20460 }
20461 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20462 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20463 if (n > 64 && cpu_has_xmm2)
20464 n = __copy_user_intel_nocache(to, from, n);
20465 else
20466 - __copy_user(to, from, n);
20467 + __copy_user(to, from, n, __copyuser_seg, "", "");
20468 #else
20469 - __copy_user(to, from, n);
20470 + __copy_user(to, from, n, __copyuser_seg, "", "");
20471 #endif
20472 return n;
20473 }
20474 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20475
20476 -/**
20477 - * copy_to_user: - Copy a block of data into user space.
20478 - * @to: Destination address, in user space.
20479 - * @from: Source address, in kernel space.
20480 - * @n: Number of bytes to copy.
20481 - *
20482 - * Context: User context only. This function may sleep.
20483 - *
20484 - * Copy data from kernel space to user space.
20485 - *
20486 - * Returns number of bytes that could not be copied.
20487 - * On success, this will be zero.
20488 - */
20489 -unsigned long
20490 -copy_to_user(void __user *to, const void *from, unsigned long n)
20491 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20492 +void __set_fs(mm_segment_t x)
20493 {
20494 - if (access_ok(VERIFY_WRITE, to, n))
20495 - n = __copy_to_user(to, from, n);
20496 - return n;
20497 + switch (x.seg) {
20498 + case 0:
20499 + loadsegment(gs, 0);
20500 + break;
20501 + case TASK_SIZE_MAX:
20502 + loadsegment(gs, __USER_DS);
20503 + break;
20504 + case -1UL:
20505 + loadsegment(gs, __KERNEL_DS);
20506 + break;
20507 + default:
20508 + BUG();
20509 + }
20510 + return;
20511 }
20512 -EXPORT_SYMBOL(copy_to_user);
20513 +EXPORT_SYMBOL(__set_fs);
20514
20515 -/**
20516 - * copy_from_user: - Copy a block of data from user space.
20517 - * @to: Destination address, in kernel space.
20518 - * @from: Source address, in user space.
20519 - * @n: Number of bytes to copy.
20520 - *
20521 - * Context: User context only. This function may sleep.
20522 - *
20523 - * Copy data from user space to kernel space.
20524 - *
20525 - * Returns number of bytes that could not be copied.
20526 - * On success, this will be zero.
20527 - *
20528 - * If some data could not be copied, this function will pad the copied
20529 - * data to the requested size using zero bytes.
20530 - */
20531 -unsigned long
20532 -copy_from_user(void *to, const void __user *from, unsigned long n)
20533 +void set_fs(mm_segment_t x)
20534 {
20535 - if (access_ok(VERIFY_READ, from, n))
20536 - n = __copy_from_user(to, from, n);
20537 - else
20538 - memset(to, 0, n);
20539 - return n;
20540 + current_thread_info()->addr_limit = x;
20541 + __set_fs(x);
20542 }
20543 -EXPORT_SYMBOL(copy_from_user);
20544 +EXPORT_SYMBOL(set_fs);
20545 +#endif
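The new __set_fs above is the 32-bit UDEREF twist on set_fs(): besides updating addr_limit, it reloads %gs so the user segment covers either no userland at all (limit 0), exactly userland (TASK_SIZE_MAX, __USER_DS), or everything (KERNEL_DS, -1UL). A sketch of that three-way choice, with a simplified enum standing in for the real selectors:

#include <stdint.h>

enum gs_segment { GS_NONE, GS_USER_DS, GS_KERNEL_DS };

static enum gs_segment gs_for_limit(uintptr_t seg, uintptr_t task_size_max)
{
        if (seg == 0)
                return GS_NONE;         /* loadsegment(gs, 0) */
        if (seg == task_size_max)
                return GS_USER_DS;      /* loadsegment(gs, __USER_DS) */
        if (seg == (uintptr_t)-1)
                return GS_KERNEL_DS;    /* loadsegment(gs, __KERNEL_DS) */
        return GS_NONE;                 /* anything else is BUG() in the patch */
}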
20546 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20547 --- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20548 +++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20549 @@ -42,6 +42,12 @@ long
20550 __strncpy_from_user(char *dst, const char __user *src, long count)
20551 {
20552 long res;
20553 +
20554 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20555 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20556 + src += PAX_USER_SHADOW_BASE;
20557 +#endif
20558 +
20559 __do_strncpy_from_user(dst, src, count, res);
20560 return res;
20561 }
20562 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20563 {
20564 long __d0;
20565 might_fault();
20566 +
20567 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20568 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20569 + addr += PAX_USER_SHADOW_BASE;
20570 +#endif
20571 +
20572 /* no memory constraint because it doesn't change any memory gcc knows
20573 about */
20574 asm volatile(
20575 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20576
20577 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20578 {
20579 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20580 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20581 +
20582 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20583 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20584 + to += PAX_USER_SHADOW_BASE;
20585 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20586 + from += PAX_USER_SHADOW_BASE;
20587 +#endif
20588 +
20589 return copy_user_generic((__force void *)to, (__force void *)from, len);
20590 - }
20591 - return len;
20592 + }
20593 + return len;
20594 }
20595 EXPORT_SYMBOL(copy_in_user);
20596
20597 diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20598 --- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20599 +++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20600 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20601 else
20602 BITS := 64
20603 UTS_MACHINE := x86_64
20604 + biarch := $(call cc-option,-m64)
20605 CHECKFLAGS += -D__x86_64__ -m64
20606
20607 KBUILD_AFLAGS += -m64
20608 @@ -189,3 +190,12 @@ define archhelp
20609 echo ' FDARGS="..." arguments for the booted kernel'
20610 echo ' FDINITRD=file initrd for the booted kernel'
20611 endef
20612 +
20613 +define OLD_LD
20614 +
20615 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20616 +*** Please upgrade your binutils to 2.18 or newer
20617 +endef
20618 +
20619 +archprepare:
20620 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20621 diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20622 --- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20623 +++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20624 @@ -1,14 +1,71 @@
20625 #include <linux/module.h>
20626 #include <linux/spinlock.h>
20627 +#include <linux/sort.h>
20628 #include <asm/uaccess.h>
20629 +#include <asm/pgtable.h>
20630
20631 +/*
20632 + * The exception table needs to be sorted so that the binary
20633 + * search that we use to find entries in it works properly.
20634 + * This is used both for the kernel exception table and for
20635 + * the exception tables of modules that get loaded.
20636 + */
20637 +static int cmp_ex(const void *a, const void *b)
20638 +{
20639 + const struct exception_table_entry *x = a, *y = b;
20640 +
20641 + /* avoid overflow */
20642 + if (x->insn > y->insn)
20643 + return 1;
20644 + if (x->insn < y->insn)
20645 + return -1;
20646 + return 0;
20647 +}
20648 +
20649 +static void swap_ex(void *a, void *b, int size)
20650 +{
20651 + struct exception_table_entry t, *x = a, *y = b;
20652 +
20653 + t = *x;
20654 +
20655 + pax_open_kernel();
20656 + *x = *y;
20657 + *y = t;
20658 + pax_close_kernel();
20659 +}
20660 +
20661 +void sort_extable(struct exception_table_entry *start,
20662 + struct exception_table_entry *finish)
20663 +{
20664 + sort(start, finish - start, sizeof(struct exception_table_entry),
20665 + cmp_ex, swap_ex);
20666 +}
20667 +
20668 +#ifdef CONFIG_MODULES
20669 +/*
20670 + * If the exception table is sorted, any referring to the module init
20671 + * will be at the beginning or the end.
20672 + */
20673 +void trim_init_extable(struct module *m)
20674 +{
20675 + /*trim the beginning*/
20676 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20677 + m->extable++;
20678 + m->num_exentries--;
20679 + }
20680 + /*trim the end*/
20681 + while (m->num_exentries &&
20682 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20683 + m->num_exentries--;
20684 +}
20685 +#endif /* CONFIG_MODULES */
20686
20687 int fixup_exception(struct pt_regs *regs)
20688 {
20689 const struct exception_table_entry *fixup;
20690
20691 #ifdef CONFIG_PNPBIOS
20692 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20693 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20694 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20695 extern u32 pnp_bios_is_utter_crap;
20696 pnp_bios_is_utter_crap = 1;
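The comparator and swap helpers added above exist so that the exception-table lookup reached from fixup_exception() can binary-search by faulting instruction address; swap_ex() additionally brackets its writes with pax_open_kernel()/pax_close_kernel() because under KERNEXEC the table can sit in read-only memory. A small user-space model of the ordering side, with an illustrative lookup (struct layout and function names are simplified, not the kernel's exact ones):

#include <stdio.h>
#include <stdlib.h>

struct ex_entry { unsigned long insn, fixup; };

static int cmp_ex(const void *a, const void *b)
{
	const struct ex_entry *x = a, *y = b;

	/* compare instead of subtracting: a subtraction could overflow int */
	if (x->insn > y->insn) return 1;
	if (x->insn < y->insn) return -1;
	return 0;
}

static const struct ex_entry *search_ex(const struct ex_entry *tbl, size_t n,
					unsigned long ip)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == ip)
			return &tbl[mid];
		if (tbl[mid].insn < ip)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;
}

int main(void)
{
	struct ex_entry tbl[] = {
		{ 0x3000, 0x3100 }, { 0x1000, 0x1100 }, { 0x2000, 0x2100 },
	};

	qsort(tbl, 3, sizeof(tbl[0]), cmp_ex);

	const struct ex_entry *e = search_ex(tbl, 3, 0x2000);
	printf("fixup for 0x2000: %#lx\n", e ? e->fixup : 0UL);
	return 0;
}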
20697 diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20698 --- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20699 +++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20700 @@ -11,10 +11,19 @@
20701 #include <linux/kprobes.h> /* __kprobes, ... */
20702 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20703 #include <linux/perf_event.h> /* perf_sw_event */
20704 +#include <linux/unistd.h>
20705 +#include <linux/compiler.h>
20706
20707 #include <asm/traps.h> /* dotraplinkage, ... */
20708 #include <asm/pgalloc.h> /* pgd_*(), ... */
20709 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20710 +#include <asm/vsyscall.h>
20711 +#include <asm/tlbflush.h>
20712 +
20713 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20714 +#include <asm/stacktrace.h>
20715 +#include "../kernel/dumpstack.h"
20716 +#endif
20717
20718 /*
20719 * Page fault error code bits:
20720 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20721 int ret = 0;
20722
20723 /* kprobe_running() needs smp_processor_id() */
20724 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20725 + if (kprobes_built_in() && !user_mode(regs)) {
20726 preempt_disable();
20727 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20728 ret = 1;
20729 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20730 return !instr_lo || (instr_lo>>1) == 1;
20731 case 0x00:
20732 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20733 - if (probe_kernel_address(instr, opcode))
20734 + if (user_mode(regs)) {
20735 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20736 + return 0;
20737 + } else if (probe_kernel_address(instr, opcode))
20738 return 0;
20739
20740 *prefetch = (instr_lo == 0xF) &&
20741 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20742 while (instr < max_instr) {
20743 unsigned char opcode;
20744
20745 - if (probe_kernel_address(instr, opcode))
20746 + if (user_mode(regs)) {
20747 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20748 + break;
20749 + } else if (probe_kernel_address(instr, opcode))
20750 break;
20751
20752 instr++;
20753 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20754 force_sig_info(si_signo, &info, tsk);
20755 }
20756
20757 +#ifdef CONFIG_PAX_EMUTRAMP
20758 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20759 +#endif
20760 +
20761 +#ifdef CONFIG_PAX_PAGEEXEC
20762 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20763 +{
20764 + pgd_t *pgd;
20765 + pud_t *pud;
20766 + pmd_t *pmd;
20767 +
20768 + pgd = pgd_offset(mm, address);
20769 + if (!pgd_present(*pgd))
20770 + return NULL;
20771 + pud = pud_offset(pgd, address);
20772 + if (!pud_present(*pud))
20773 + return NULL;
20774 + pmd = pmd_offset(pud, address);
20775 + if (!pmd_present(*pmd))
20776 + return NULL;
20777 + return pmd;
20778 +}
20779 +#endif
20780 +
20781 DEFINE_SPINLOCK(pgd_lock);
20782 LIST_HEAD(pgd_list);
20783
20784 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20785 address += PMD_SIZE) {
20786
20787 unsigned long flags;
20788 +
20789 +#ifdef CONFIG_PAX_PER_CPU_PGD
20790 + unsigned long cpu;
20791 +#else
20792 struct page *page;
20793 +#endif
20794
20795 spin_lock_irqsave(&pgd_lock, flags);
20796 +
20797 +#ifdef CONFIG_PAX_PER_CPU_PGD
20798 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20799 + pgd_t *pgd = get_cpu_pgd(cpu);
20800 +#else
20801 list_for_each_entry(page, &pgd_list, lru) {
20802 - if (!vmalloc_sync_one(page_address(page), address))
20803 + pgd_t *pgd = page_address(page);
20804 +#endif
20805 +
20806 + if (!vmalloc_sync_one(pgd, address))
20807 break;
20808 }
20809 spin_unlock_irqrestore(&pgd_lock, flags);
20810 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20811 * an interrupt in the middle of a task switch..
20812 */
20813 pgd_paddr = read_cr3();
20814 +
20815 +#ifdef CONFIG_PAX_PER_CPU_PGD
20816 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20817 +#endif
20818 +
20819 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20820 if (!pmd_k)
20821 return -1;
20822 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20823
20824 const pgd_t *pgd_ref = pgd_offset_k(address);
20825 unsigned long flags;
20826 +
20827 +#ifdef CONFIG_PAX_PER_CPU_PGD
20828 + unsigned long cpu;
20829 +#else
20830 struct page *page;
20831 +#endif
20832
20833 if (pgd_none(*pgd_ref))
20834 continue;
20835
20836 spin_lock_irqsave(&pgd_lock, flags);
20837 +
20838 +#ifdef CONFIG_PAX_PER_CPU_PGD
20839 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20840 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20841 +#else
20842 list_for_each_entry(page, &pgd_list, lru) {
20843 pgd_t *pgd;
20844 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20845 +#endif
20846 +
20847 if (pgd_none(*pgd))
20848 set_pgd(pgd, *pgd_ref);
20849 else
20850 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20851 * happen within a race in page table update. In the later
20852 * case just flush:
20853 */
20854 +
20855 +#ifdef CONFIG_PAX_PER_CPU_PGD
20856 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20857 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20858 +#else
20859 pgd = pgd_offset(current->active_mm, address);
20860 +#endif
20861 +
20862 pgd_ref = pgd_offset_k(address);
20863 if (pgd_none(*pgd_ref))
20864 return -1;
20865 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20866 static int is_errata100(struct pt_regs *regs, unsigned long address)
20867 {
20868 #ifdef CONFIG_X86_64
20869 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20870 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20871 return 1;
20872 #endif
20873 return 0;
20874 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20875 }
20876
20877 static const char nx_warning[] = KERN_CRIT
20878 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20879 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20880
20881 static void
20882 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20883 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20884 if (!oops_may_print())
20885 return;
20886
20887 - if (error_code & PF_INSTR) {
20888 + if (nx_enabled && (error_code & PF_INSTR)) {
20889 unsigned int level;
20890
20891 pte_t *pte = lookup_address(address, &level);
20892
20893 if (pte && pte_present(*pte) && !pte_exec(*pte))
20894 - printk(nx_warning, current_uid());
20895 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20896 }
20897
20898 +#ifdef CONFIG_PAX_KERNEXEC
20899 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20900 + if (current->signal->curr_ip)
20901 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20902 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20903 + else
20904 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20905 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20906 + }
20907 +#endif
20908 +
20909 printk(KERN_ALERT "BUG: unable to handle kernel ");
20910 if (address < PAGE_SIZE)
20911 printk(KERN_CONT "NULL pointer dereference");
20912 @@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
20913 unsigned long address, int si_code)
20914 {
20915 struct task_struct *tsk = current;
20916 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20917 + struct mm_struct *mm = tsk->mm;
20918 +#endif
20919 +
20920 +#ifdef CONFIG_X86_64
20921 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20922 + if (regs->ip == (unsigned long)vgettimeofday) {
20923 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20924 + return;
20925 + } else if (regs->ip == (unsigned long)vtime) {
20926 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20927 + return;
20928 + } else if (regs->ip == (unsigned long)vgetcpu) {
20929 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20930 + return;
20931 + }
20932 + }
20933 +#endif
20934 +
20935 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20936 + if (mm && (error_code & PF_USER)) {
20937 + unsigned long ip = regs->ip;
20938 +
20939 + if (v8086_mode(regs))
20940 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20941 +
20942 + /*
20943 + * It's possible to have interrupts off here:
20944 + */
20945 + local_irq_enable();
20946 +
20947 +#ifdef CONFIG_PAX_PAGEEXEC
20948 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20949 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20950 +
20951 +#ifdef CONFIG_PAX_EMUTRAMP
20952 + switch (pax_handle_fetch_fault(regs)) {
20953 + case 2:
20954 + return;
20955 + }
20956 +#endif
20957 +
20958 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20959 + do_group_exit(SIGKILL);
20960 + }
20961 +#endif
20962 +
20963 +#ifdef CONFIG_PAX_SEGMEXEC
20964 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20965 +
20966 +#ifdef CONFIG_PAX_EMUTRAMP
20967 + switch (pax_handle_fetch_fault(regs)) {
20968 + case 2:
20969 + return;
20970 + }
20971 +#endif
20972 +
20973 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20974 + do_group_exit(SIGKILL);
20975 + }
20976 +#endif
20977 +
20978 + }
20979 +#endif
20980
20981 /* User mode accesses just cause a SIGSEGV */
20982 if (error_code & PF_USER) {
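In the PAGEEXEC branch above, an instruction-fetch attempt is recognised either from the hardware NX bit (PF_INSTR, when nx_enabled) or, on CPUs without NX, from the heuristic that a read fault on a non-present page whose address equals the saved instruction pointer can only have been a fetch. A compact user-space model of that predicate (error-code bit values are the standard x86 ones; the function name is illustrative and the pax_flags check is omitted):

#include <stdio.h>

/* standard x86 page-fault error code bits */
#define PF_PROT  0x01	/* fault on a present page */
#define PF_WRITE 0x02	/* write access */
#define PF_INSTR 0x10	/* instruction fetch (reported only with NX) */

/* Mirrors the PAGEEXEC condition in the hunk above: with NX the hardware
 * flags the fetch directly; without it, a non-present read fault at the
 * current instruction pointer is treated as a fetch attempt. */
static int looks_like_exec_fault(int nx_enabled, unsigned long error_code,
				 unsigned long ip, unsigned long address)
{
	if (nx_enabled && (error_code & PF_INSTR))
		return 1;
	return !(error_code & (PF_PROT | PF_WRITE)) && ip == address;
}

int main(void)
{
	/* fetch from a non-present page, no NX: ip equals the faulting address */
	printf("%d\n", looks_like_exec_fault(0, 0x04, 0x8048000UL, 0x8048000UL));
	/* ordinary data read fault elsewhere */
	printf("%d\n", looks_like_exec_fault(0, 0x04, 0x8048000UL, 0xbf000000UL));
	return 0;
}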
20983 @@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
20984 return 1;
20985 }
20986
20987 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20988 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20989 +{
20990 + pte_t *pte;
20991 + pmd_t *pmd;
20992 + spinlock_t *ptl;
20993 + unsigned char pte_mask;
20994 +
20995 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20996 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20997 + return 0;
20998 +
20999 + /* PaX: it's our fault, let's handle it if we can */
21000 +
21001 + /* PaX: take a look at read faults before acquiring any locks */
21002 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21003 + /* instruction fetch attempt from a protected page in user mode */
21004 + up_read(&mm->mmap_sem);
21005 +
21006 +#ifdef CONFIG_PAX_EMUTRAMP
21007 + switch (pax_handle_fetch_fault(regs)) {
21008 + case 2:
21009 + return 1;
21010 + }
21011 +#endif
21012 +
21013 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21014 + do_group_exit(SIGKILL);
21015 + }
21016 +
21017 + pmd = pax_get_pmd(mm, address);
21018 + if (unlikely(!pmd))
21019 + return 0;
21020 +
21021 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21022 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21023 + pte_unmap_unlock(pte, ptl);
21024 + return 0;
21025 + }
21026 +
21027 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21028 + /* write attempt to a protected page in user mode */
21029 + pte_unmap_unlock(pte, ptl);
21030 + return 0;
21031 + }
21032 +
21033 +#ifdef CONFIG_SMP
21034 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21035 +#else
21036 + if (likely(address > get_limit(regs->cs)))
21037 +#endif
21038 + {
21039 + set_pte(pte, pte_mkread(*pte));
21040 + __flush_tlb_one(address);
21041 + pte_unmap_unlock(pte, ptl);
21042 + up_read(&mm->mmap_sem);
21043 + return 1;
21044 + }
21045 +
21046 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21047 +
21048 + /*
21049 + * PaX: fill DTLB with user rights and retry
21050 + */
21051 + __asm__ __volatile__ (
21052 + "orb %2,(%1)\n"
21053 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21054 +/*
21055 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21056 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21057 + * page fault when examined during a TLB load attempt. this is true not only
21058 + * for PTEs holding a non-present entry but also present entries that will
21059 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21060 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21061 + * for our target pages since their PTEs are simply not in the TLBs at all.
21062 +
21063 + * the best thing in omitting it is that we gain around 15-20% speed in the
21064 + * fast path of the page fault handler and can get rid of tracing since we
21065 + * can no longer flush unintended entries.
21066 + */
21067 + "invlpg (%0)\n"
21068 +#endif
21069 + __copyuser_seg"testb $0,(%0)\n"
21070 + "xorb %3,(%1)\n"
21071 + :
21072 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21073 + : "memory", "cc");
21074 + pte_unmap_unlock(pte, ptl);
21075 + up_read(&mm->mmap_sem);
21076 + return 1;
21077 +}
21078 +#endif
21079 +
21080 /*
21081 * Handle a spurious fault caused by a stale TLB entry.
21082 *
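One detail of pax_handle_pageexec_fault() above that is easy to miss is the pte_mask computation: shifting the PF_WRITE bit (value 2) left by _PAGE_BIT_DIRTY - 1 turns a write fault into _PAGE_DIRTY, so a single expression sets ACCESSED, USER and, only for write faults, DIRTY before the inline asm ORs the bits into the PTE. A worked check of that arithmetic, using the standard x86 constants (stated here as assumptions, since the defining headers are outside this hunk):

#include <stdio.h>

/* standard x86 PTE bit positions and page-fault error code bit,
 * assumed here since they come from headers not shown in this hunk */
#define _PAGE_BIT_DIRTY	6
#define _PAGE_ACCESSED	(1UL << 5)			/* 0x20 */
#define _PAGE_USER	(1UL << 2)			/* 0x04 */
#define _PAGE_DIRTY	(1UL << _PAGE_BIT_DIRTY)	/* 0x40 */
#define PF_WRITE	0x02

int main(void)
{
	unsigned long error_code;

	for (error_code = 0; error_code <= PF_WRITE; error_code += PF_WRITE) {
		/* same expression as the pte_mask assignment above */
		unsigned char pte_mask = _PAGE_ACCESSED | _PAGE_USER |
			((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));

		printf("error_code=%#lx -> pte_mask=%#x (dirty %s)\n",
		       error_code, pte_mask,
		       (pte_mask & _PAGE_DIRTY) ? "set" : "clear");
	}
	return 0;
}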
21083 @@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21084 static inline int
21085 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21086 {
21087 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21088 + return 1;
21089 +
21090 if (write) {
21091 /* write, present and write, not present: */
21092 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21093 @@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21094 {
21095 struct vm_area_struct *vma;
21096 struct task_struct *tsk;
21097 - unsigned long address;
21098 struct mm_struct *mm;
21099 int write;
21100 int fault;
21101
21102 + /* Get the faulting address: */
21103 + unsigned long address = read_cr2();
21104 +
21105 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21106 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21107 + if (!search_exception_tables(regs->ip)) {
21108 + bad_area_nosemaphore(regs, error_code, address);
21109 + return;
21110 + }
21111 + if (address < PAX_USER_SHADOW_BASE) {
21112 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21113 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21114 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21115 + } else
21116 + address -= PAX_USER_SHADOW_BASE;
21117 + }
21118 +#endif
21119 +
21120 tsk = current;
21121 mm = tsk->mm;
21122
21123 - /* Get the faulting address: */
21124 - address = read_cr2();
21125 -
21126 /*
21127 * Detect and handle instructions that would cause a page fault for
21128 * both a tracked kernel page and a userspace page.
21129 @@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21130 * User-mode registers count as a user access even for any
21131 * potential system fault or CPU buglet:
21132 */
21133 - if (user_mode_vm(regs)) {
21134 + if (user_mode(regs)) {
21135 local_irq_enable();
21136 error_code |= PF_USER;
21137 } else {
21138 @@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21139 might_sleep();
21140 }
21141
21142 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21143 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21144 + return;
21145 +#endif
21146 +
21147 vma = find_vma(mm, address);
21148 if (unlikely(!vma)) {
21149 bad_area(regs, error_code, address);
21150 @@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21151 bad_area(regs, error_code, address);
21152 return;
21153 }
21154 - if (error_code & PF_USER) {
21155 - /*
21156 - * Accessing the stack below %sp is always a bug.
21157 - * The large cushion allows instructions like enter
21158 - * and pusha to work. ("enter $65535, $31" pushes
21159 - * 32 pointers and then decrements %sp by 65535.)
21160 - */
21161 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21162 - bad_area(regs, error_code, address);
21163 - return;
21164 - }
21165 + /*
21166 + * Accessing the stack below %sp is always a bug.
21167 + * The large cushion allows instructions like enter
21168 + * and pusha to work. ("enter $65535, $31" pushes
21169 + * 32 pointers and then decrements %sp by 65535.)
21170 + */
21171 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21172 + bad_area(regs, error_code, address);
21173 + return;
21174 }
21175 +
21176 +#ifdef CONFIG_PAX_SEGMEXEC
21177 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21178 + bad_area(regs, error_code, address);
21179 + return;
21180 + }
21181 +#endif
21182 +
21183 if (unlikely(expand_stack(vma, address))) {
21184 bad_area(regs, error_code, address);
21185 return;
21186 @@ -1146,3 +1418,199 @@ good_area:
21187
21188 up_read(&mm->mmap_sem);
21189 }
21190 +
21191 +#ifdef CONFIG_PAX_EMUTRAMP
21192 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21193 +{
21194 + int err;
21195 +
21196 + do { /* PaX: gcc trampoline emulation #1 */
21197 + unsigned char mov1, mov2;
21198 + unsigned short jmp;
21199 + unsigned int addr1, addr2;
21200 +
21201 +#ifdef CONFIG_X86_64
21202 + if ((regs->ip + 11) >> 32)
21203 + break;
21204 +#endif
21205 +
21206 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21207 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21208 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21209 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21210 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21211 +
21212 + if (err)
21213 + break;
21214 +
21215 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21216 + regs->cx = addr1;
21217 + regs->ax = addr2;
21218 + regs->ip = addr2;
21219 + return 2;
21220 + }
21221 + } while (0);
21222 +
21223 + do { /* PaX: gcc trampoline emulation #2 */
21224 + unsigned char mov, jmp;
21225 + unsigned int addr1, addr2;
21226 +
21227 +#ifdef CONFIG_X86_64
21228 + if ((regs->ip + 9) >> 32)
21229 + break;
21230 +#endif
21231 +
21232 + err = get_user(mov, (unsigned char __user *)regs->ip);
21233 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21234 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21235 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21236 +
21237 + if (err)
21238 + break;
21239 +
21240 + if (mov == 0xB9 && jmp == 0xE9) {
21241 + regs->cx = addr1;
21242 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21243 + return 2;
21244 + }
21245 + } while (0);
21246 +
21247 + return 1; /* PaX in action */
21248 +}
21249 +
21250 +#ifdef CONFIG_X86_64
21251 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21252 +{
21253 + int err;
21254 +
21255 + do { /* PaX: gcc trampoline emulation #1 */
21256 + unsigned short mov1, mov2, jmp1;
21257 + unsigned char jmp2;
21258 + unsigned int addr1;
21259 + unsigned long addr2;
21260 +
21261 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21262 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21263 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21264 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21265 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21266 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21267 +
21268 + if (err)
21269 + break;
21270 +
21271 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21272 + regs->r11 = addr1;
21273 + regs->r10 = addr2;
21274 + regs->ip = addr1;
21275 + return 2;
21276 + }
21277 + } while (0);
21278 +
21279 + do { /* PaX: gcc trampoline emulation #2 */
21280 + unsigned short mov1, mov2, jmp1;
21281 + unsigned char jmp2;
21282 + unsigned long addr1, addr2;
21283 +
21284 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21285 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21286 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21287 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21288 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21289 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21290 +
21291 + if (err)
21292 + break;
21293 +
21294 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21295 + regs->r11 = addr1;
21296 + regs->r10 = addr2;
21297 + regs->ip = addr1;
21298 + return 2;
21299 + }
21300 + } while (0);
21301 +
21302 + return 1; /* PaX in action */
21303 +}
21304 +#endif
21305 +
21306 +/*
21307 + * PaX: decide what to do with offenders (regs->ip = fault address)
21308 + *
21309 + * returns 1 when task should be killed
21310 + * 2 when gcc trampoline was detected
21311 + */
21312 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21313 +{
21314 + if (v8086_mode(regs))
21315 + return 1;
21316 +
21317 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21318 + return 1;
21319 +
21320 +#ifdef CONFIG_X86_32
21321 + return pax_handle_fetch_fault_32(regs);
21322 +#else
21323 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21324 + return pax_handle_fetch_fault_32(regs);
21325 + else
21326 + return pax_handle_fetch_fault_64(regs);
21327 +#endif
21328 +}
21329 +#endif
21330 +
21331 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21332 +void pax_report_insns(void *pc, void *sp)
21333 +{
21334 + long i;
21335 +
21336 + printk(KERN_ERR "PAX: bytes at PC: ");
21337 + for (i = 0; i < 20; i++) {
21338 + unsigned char c;
21339 + if (get_user(c, (__force unsigned char __user *)pc+i))
21340 + printk(KERN_CONT "?? ");
21341 + else
21342 + printk(KERN_CONT "%02x ", c);
21343 + }
21344 + printk("\n");
21345 +
21346 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21347 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21348 + unsigned long c;
21349 + if (get_user(c, (__force unsigned long __user *)sp+i))
21350 +#ifdef CONFIG_X86_32
21351 + printk(KERN_CONT "???????? ");
21352 +#else
21353 + printk(KERN_CONT "???????????????? ");
21354 +#endif
21355 + else
21356 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21357 + }
21358 + printk("\n");
21359 +}
21360 +#endif
21361 +
21362 +/**
21363 + * probe_kernel_write(): safely attempt to write to a location
21364 + * @dst: address to write to
21365 + * @src: pointer to the data that shall be written
21366 + * @size: size of the data chunk
21367 + *
21368 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21369 + * happens, handle that and return -EFAULT.
21370 + */
21371 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21372 +{
21373 + long ret;
21374 + mm_segment_t old_fs = get_fs();
21375 +
21376 + set_fs(KERNEL_DS);
21377 + pagefault_disable();
21378 + pax_open_kernel();
21379 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21380 + pax_close_kernel();
21381 + pagefault_enable();
21382 + set_fs(old_fs);
21383 +
21384 + return ret ? -EFAULT : 0;
21385 +}
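pax_handle_fetch_fault_32() above recognises the instruction sequences gcc emits for nested-function trampolines and emulates them in registers instead of letting them execute from a non-executable stack. Pattern #1, for example, is mov $chain,%ecx; mov $fn,%eax; jmp *%eax, i.e. the byte string B9 <imm32> B8 <imm32> FF E0. A small user-space matcher for that first pattern (the buffer contents and addresses are made up for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Recognise gcc's i386 trampoline pattern #1 from the handler above:
 *   B9 <imm32>   mov $chain, %ecx
 *   B8 <imm32>   mov $fn,    %eax
 *   FF E0        jmp *%eax
 * On a match, return 1 and hand back the two immediates that the fault
 * handler would load into %ecx/%eax before pointing %eip at the target. */
static int match_trampoline1(const unsigned char *p, uint32_t *chain,
			     uint32_t *fn)
{
	if (p[0] != 0xB9 || p[5] != 0xB8 || p[10] != 0xFF || p[11] != 0xE0)
		return 0;
	memcpy(chain, p + 1, 4);	/* unaligned loads; little-endian host assumed */
	memcpy(fn, p + 6, 4);
	return 1;
}

int main(void)
{
	/* made-up trampoline: chain = 0xbffff000, fn = 0x080484d0 */
	const unsigned char tramp[12] = {
		0xB9, 0x00, 0xF0, 0xFF, 0xBF,
		0xB8, 0xD0, 0x84, 0x04, 0x08,
		0xFF, 0xE0,
	};
	uint32_t chain, fn;

	if (match_trampoline1(tramp, &chain, &fn))
		printf("chain=%#x fn=%#x\n", chain, fn);
	return 0;
}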
21386 diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21387 --- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21388 +++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21389 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21390 addr = start;
21391 len = (unsigned long) nr_pages << PAGE_SHIFT;
21392 end = start + len;
21393 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21394 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21395 (void __user *)start, len)))
21396 return 0;
21397
21398 diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21399 --- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21400 +++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21401 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21402 idx = type + KM_TYPE_NR*smp_processor_id();
21403 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21404 BUG_ON(!pte_none(*(kmap_pte-idx)));
21405 +
21406 + pax_open_kernel();
21407 set_pte(kmap_pte-idx, mk_pte(page, prot));
21408 + pax_close_kernel();
21409
21410 return (void *)vaddr;
21411 }
21412 diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21413 --- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21414 +++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21415 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21416 struct hstate *h = hstate_file(file);
21417 struct mm_struct *mm = current->mm;
21418 struct vm_area_struct *vma;
21419 - unsigned long start_addr;
21420 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21421 +
21422 +#ifdef CONFIG_PAX_SEGMEXEC
21423 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21424 + pax_task_size = SEGMEXEC_TASK_SIZE;
21425 +#endif
21426 +
21427 + pax_task_size -= PAGE_SIZE;
21428
21429 if (len > mm->cached_hole_size) {
21430 - start_addr = mm->free_area_cache;
21431 + start_addr = mm->free_area_cache;
21432 } else {
21433 - start_addr = TASK_UNMAPPED_BASE;
21434 - mm->cached_hole_size = 0;
21435 + start_addr = mm->mmap_base;
21436 + mm->cached_hole_size = 0;
21437 }
21438
21439 full_search:
21440 @@ -281,26 +288,27 @@ full_search:
21441
21442 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21443 /* At this point: (!vma || addr < vma->vm_end). */
21444 - if (TASK_SIZE - len < addr) {
21445 + if (pax_task_size - len < addr) {
21446 /*
21447 * Start a new search - just in case we missed
21448 * some holes.
21449 */
21450 - if (start_addr != TASK_UNMAPPED_BASE) {
21451 - start_addr = TASK_UNMAPPED_BASE;
21452 + if (start_addr != mm->mmap_base) {
21453 + start_addr = mm->mmap_base;
21454 mm->cached_hole_size = 0;
21455 goto full_search;
21456 }
21457 return -ENOMEM;
21458 }
21459 - if (!vma || addr + len <= vma->vm_start) {
21460 - mm->free_area_cache = addr + len;
21461 - return addr;
21462 - }
21463 + if (check_heap_stack_gap(vma, addr, len))
21464 + break;
21465 if (addr + mm->cached_hole_size < vma->vm_start)
21466 mm->cached_hole_size = vma->vm_start - addr;
21467 addr = ALIGN(vma->vm_end, huge_page_size(h));
21468 }
21469 +
21470 + mm->free_area_cache = addr + len;
21471 + return addr;
21472 }
21473
21474 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21475 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21476 {
21477 struct hstate *h = hstate_file(file);
21478 struct mm_struct *mm = current->mm;
21479 - struct vm_area_struct *vma, *prev_vma;
21480 - unsigned long base = mm->mmap_base, addr = addr0;
21481 + struct vm_area_struct *vma;
21482 + unsigned long base = mm->mmap_base, addr;
21483 unsigned long largest_hole = mm->cached_hole_size;
21484 - int first_time = 1;
21485
21486 /* don't allow allocations above current base */
21487 if (mm->free_area_cache > base)
21488 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21489 largest_hole = 0;
21490 mm->free_area_cache = base;
21491 }
21492 -try_again:
21493 +
21494 /* make sure it can fit in the remaining address space */
21495 if (mm->free_area_cache < len)
21496 goto fail;
21497
21498 /* either no address requested or cant fit in requested address hole */
21499 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21500 + addr = (mm->free_area_cache - len);
21501 do {
21502 + addr &= huge_page_mask(h);
21503 + vma = find_vma(mm, addr);
21504 /*
21505 * Lookup failure means no vma is above this address,
21506 * i.e. return with success:
21507 - */
21508 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21509 - return addr;
21510 -
21511 - /*
21512 * new region fits between prev_vma->vm_end and
21513 * vma->vm_start, use it:
21514 */
21515 - if (addr + len <= vma->vm_start &&
21516 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21517 + if (check_heap_stack_gap(vma, addr, len)) {
21518 /* remember the address as a hint for next time */
21519 - mm->cached_hole_size = largest_hole;
21520 - return (mm->free_area_cache = addr);
21521 - } else {
21522 - /* pull free_area_cache down to the first hole */
21523 - if (mm->free_area_cache == vma->vm_end) {
21524 - mm->free_area_cache = vma->vm_start;
21525 - mm->cached_hole_size = largest_hole;
21526 - }
21527 + mm->cached_hole_size = largest_hole;
21528 + return (mm->free_area_cache = addr);
21529 + }
21530 + /* pull free_area_cache down to the first hole */
21531 + if (mm->free_area_cache == vma->vm_end) {
21532 + mm->free_area_cache = vma->vm_start;
21533 + mm->cached_hole_size = largest_hole;
21534 }
21535
21536 /* remember the largest hole we saw so far */
21537 if (addr + largest_hole < vma->vm_start)
21538 - largest_hole = vma->vm_start - addr;
21539 + largest_hole = vma->vm_start - addr;
21540
21541 /* try just below the current vma->vm_start */
21542 - addr = (vma->vm_start - len) & huge_page_mask(h);
21543 - } while (len <= vma->vm_start);
21544 + addr = skip_heap_stack_gap(vma, len);
21545 + } while (!IS_ERR_VALUE(addr));
21546
21547 fail:
21548 /*
21549 - * if hint left us with no space for the requested
21550 - * mapping then try again:
21551 - */
21552 - if (first_time) {
21553 - mm->free_area_cache = base;
21554 - largest_hole = 0;
21555 - first_time = 0;
21556 - goto try_again;
21557 - }
21558 - /*
21559 * A failed mmap() very likely causes application failure,
21560 * so fall back to the bottom-up function here. This scenario
21561 * can happen with large stack limits and large mmap()
21562 * allocations.
21563 */
21564 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21565 +
21566 +#ifdef CONFIG_PAX_SEGMEXEC
21567 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21568 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21569 + else
21570 +#endif
21571 +
21572 + mm->mmap_base = TASK_UNMAPPED_BASE;
21573 +
21574 +#ifdef CONFIG_PAX_RANDMMAP
21575 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21576 + mm->mmap_base += mm->delta_mmap;
21577 +#endif
21578 +
21579 + mm->free_area_cache = mm->mmap_base;
21580 mm->cached_hole_size = ~0UL;
21581 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21582 len, pgoff, flags);
21583 @@ -387,6 +393,7 @@ fail:
21584 /*
21585 * Restore the topdown base:
21586 */
21587 + mm->mmap_base = base;
21588 mm->free_area_cache = base;
21589 mm->cached_hole_size = ~0UL;
21590
21591 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21592 struct hstate *h = hstate_file(file);
21593 struct mm_struct *mm = current->mm;
21594 struct vm_area_struct *vma;
21595 + unsigned long pax_task_size = TASK_SIZE;
21596
21597 if (len & ~huge_page_mask(h))
21598 return -EINVAL;
21599 - if (len > TASK_SIZE)
21600 +
21601 +#ifdef CONFIG_PAX_SEGMEXEC
21602 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21603 + pax_task_size = SEGMEXEC_TASK_SIZE;
21604 +#endif
21605 +
21606 + pax_task_size -= PAGE_SIZE;
21607 +
21608 + if (len > pax_task_size)
21609 return -ENOMEM;
21610
21611 if (flags & MAP_FIXED) {
21612 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21613 if (addr) {
21614 addr = ALIGN(addr, huge_page_size(h));
21615 vma = find_vma(mm, addr);
21616 - if (TASK_SIZE - len >= addr &&
21617 - (!vma || addr + len <= vma->vm_start))
21618 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21619 return addr;
21620 }
21621 if (mm->get_unmapped_area == arch_get_unmapped_area)
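The hunks above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), a helper added elsewhere in this patch that additionally enforces a guard band below stack-like mappings. A rough user-space sketch of that shape only; the field names and gap policy are simplified assumptions, not the patch's exact definition:

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in type: only the fields the sketch needs. */
struct vma {
	unsigned long vm_start;
	unsigned long vm_end;
	bool grows_down;	/* stack-like mapping */
};

#define GAP_PAGES	1UL	/* placeholder for the configurable gap */
#define PAGE_SHIFT	12

/* Rough shape of the check the hunks above switch to: the classic
 * "!vma || addr + len <= vma->vm_start" test, plus an extra guard band
 * below mappings that grow downwards (stacks). Not the patch's exact
 * check_heap_stack_gap(), just an illustration of the idea. */
static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
{
	unsigned long guard = 0;

	if (!vma)
		return true;
	if (vma->grows_down)
		guard = GAP_PAGES << PAGE_SHIFT;
	return addr + len + guard <= vma->vm_start;
}

int main(void)
{
	struct vma stack = { 0xbf000000UL, 0xc0000000UL, true };

	/* a mapping ending flush against the stack start is now rejected */
	printf("%d\n", gap_ok(&stack, 0xbeffe000UL, 0x2000UL));	/* 0 */
	return 0;
}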
21622 diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21623 --- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21624 +++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21625 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21626 }
21627
21628 /*
21629 - * Creates a middle page table and puts a pointer to it in the
21630 - * given global directory entry. This only returns the gd entry
21631 - * in non-PAE compilation mode, since the middle layer is folded.
21632 - */
21633 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21634 -{
21635 - pud_t *pud;
21636 - pmd_t *pmd_table;
21637 -
21638 -#ifdef CONFIG_X86_PAE
21639 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21640 - if (after_bootmem)
21641 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21642 - else
21643 - pmd_table = (pmd_t *)alloc_low_page();
21644 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21645 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21646 - pud = pud_offset(pgd, 0);
21647 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21648 -
21649 - return pmd_table;
21650 - }
21651 -#endif
21652 - pud = pud_offset(pgd, 0);
21653 - pmd_table = pmd_offset(pud, 0);
21654 -
21655 - return pmd_table;
21656 -}
21657 -
21658 -/*
21659 * Create a page table and place a pointer to it in a middle page
21660 * directory entry:
21661 */
21662 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21663 page_table = (pte_t *)alloc_low_page();
21664
21665 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21666 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21667 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21668 +#else
21669 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21670 +#endif
21671 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21672 }
21673
21674 return pte_offset_kernel(pmd, 0);
21675 }
21676
21677 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21678 +{
21679 + pud_t *pud;
21680 + pmd_t *pmd_table;
21681 +
21682 + pud = pud_offset(pgd, 0);
21683 + pmd_table = pmd_offset(pud, 0);
21684 +
21685 + return pmd_table;
21686 +}
21687 +
21688 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21689 {
21690 int pgd_idx = pgd_index(vaddr);
21691 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21692 int pgd_idx, pmd_idx;
21693 unsigned long vaddr;
21694 pgd_t *pgd;
21695 + pud_t *pud;
21696 pmd_t *pmd;
21697 pte_t *pte = NULL;
21698
21699 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21700 pgd = pgd_base + pgd_idx;
21701
21702 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21703 - pmd = one_md_table_init(pgd);
21704 - pmd = pmd + pmd_index(vaddr);
21705 + pud = pud_offset(pgd, vaddr);
21706 + pmd = pmd_offset(pud, vaddr);
21707 +
21708 +#ifdef CONFIG_X86_PAE
21709 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21710 +#endif
21711 +
21712 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21713 pmd++, pmd_idx++) {
21714 pte = page_table_kmap_check(one_page_table_init(pmd),
21715 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21716 }
21717 }
21718
21719 -static inline int is_kernel_text(unsigned long addr)
21720 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21721 {
21722 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21723 - return 1;
21724 - return 0;
21725 + if ((start > ktla_ktva((unsigned long)_etext) ||
21726 + end <= ktla_ktva((unsigned long)_stext)) &&
21727 + (start > ktla_ktva((unsigned long)_einittext) ||
21728 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21729 +
21730 +#ifdef CONFIG_ACPI_SLEEP
21731 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21732 +#endif
21733 +
21734 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21735 + return 0;
21736 + return 1;
21737 }
21738
21739 /*
21740 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21741 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21742 unsigned long start_pfn, end_pfn;
21743 pgd_t *pgd_base = swapper_pg_dir;
21744 - int pgd_idx, pmd_idx, pte_ofs;
21745 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21746 unsigned long pfn;
21747 pgd_t *pgd;
21748 + pud_t *pud;
21749 pmd_t *pmd;
21750 pte_t *pte;
21751 unsigned pages_2m, pages_4k;
21752 @@ -278,8 +279,13 @@ repeat:
21753 pfn = start_pfn;
21754 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21755 pgd = pgd_base + pgd_idx;
21756 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21757 - pmd = one_md_table_init(pgd);
21758 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21759 + pud = pud_offset(pgd, 0);
21760 + pmd = pmd_offset(pud, 0);
21761 +
21762 +#ifdef CONFIG_X86_PAE
21763 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21764 +#endif
21765
21766 if (pfn >= end_pfn)
21767 continue;
21768 @@ -291,14 +297,13 @@ repeat:
21769 #endif
21770 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21771 pmd++, pmd_idx++) {
21772 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21773 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21774
21775 /*
21776 * Map with big pages if possible, otherwise
21777 * create normal page tables:
21778 */
21779 if (use_pse) {
21780 - unsigned int addr2;
21781 pgprot_t prot = PAGE_KERNEL_LARGE;
21782 /*
21783 * first pass will use the same initial
21784 @@ -308,11 +313,7 @@ repeat:
21785 __pgprot(PTE_IDENT_ATTR |
21786 _PAGE_PSE);
21787
21788 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21789 - PAGE_OFFSET + PAGE_SIZE-1;
21790 -
21791 - if (is_kernel_text(addr) ||
21792 - is_kernel_text(addr2))
21793 + if (is_kernel_text(address, address + PMD_SIZE))
21794 prot = PAGE_KERNEL_LARGE_EXEC;
21795
21796 pages_2m++;
21797 @@ -329,7 +330,7 @@ repeat:
21798 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21799 pte += pte_ofs;
21800 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21801 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21802 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21803 pgprot_t prot = PAGE_KERNEL;
21804 /*
21805 * first pass will use the same initial
21806 @@ -337,7 +338,7 @@ repeat:
21807 */
21808 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21809
21810 - if (is_kernel_text(addr))
21811 + if (is_kernel_text(address, address + PAGE_SIZE))
21812 prot = PAGE_KERNEL_EXEC;
21813
21814 pages_4k++;
21815 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21816
21817 pud = pud_offset(pgd, va);
21818 pmd = pmd_offset(pud, va);
21819 - if (!pmd_present(*pmd))
21820 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21821 break;
21822
21823 pte = pte_offset_kernel(pmd, va);
21824 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21825
21826 static void __init pagetable_init(void)
21827 {
21828 - pgd_t *pgd_base = swapper_pg_dir;
21829 -
21830 - permanent_kmaps_init(pgd_base);
21831 + permanent_kmaps_init(swapper_pg_dir);
21832 }
21833
21834 #ifdef CONFIG_ACPI_SLEEP
21835 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21836 * ACPI suspend needs this for resume, because things like the intel-agp
21837 * driver might have split up a kernel 4MB mapping.
21838 */
21839 -char swsusp_pg_dir[PAGE_SIZE]
21840 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21841 __attribute__ ((aligned(PAGE_SIZE)));
21842
21843 static inline void save_pg_dir(void)
21844 {
21845 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21846 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21847 }
21848 #else /* !CONFIG_ACPI_SLEEP */
21849 static inline void save_pg_dir(void)
21850 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21851 flush_tlb_all();
21852 }
21853
21854 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21855 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21856 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21857
21858 /* user-defined highmem size */
21859 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21860 * Initialize the boot-time allocator (with low memory only):
21861 */
21862 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21863 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21864 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21865 PAGE_SIZE);
21866 if (bootmap == -1L)
21867 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21868 @@ -864,6 +863,12 @@ void __init mem_init(void)
21869
21870 pci_iommu_alloc();
21871
21872 +#ifdef CONFIG_PAX_PER_CPU_PGD
21873 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21874 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21875 + KERNEL_PGD_PTRS);
21876 +#endif
21877 +
21878 #ifdef CONFIG_FLATMEM
21879 BUG_ON(!mem_map);
21880 #endif
21881 @@ -881,7 +886,7 @@ void __init mem_init(void)
21882 set_highmem_pages_init();
21883
21884 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21885 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21886 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21887 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21888
21889 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21890 @@ -923,10 +928,10 @@ void __init mem_init(void)
21891 ((unsigned long)&__init_end -
21892 (unsigned long)&__init_begin) >> 10,
21893
21894 - (unsigned long)&_etext, (unsigned long)&_edata,
21895 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21896 + (unsigned long)&_sdata, (unsigned long)&_edata,
21897 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21898
21899 - (unsigned long)&_text, (unsigned long)&_etext,
21900 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21901 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21902
21903 /*
21904 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21905 if (!kernel_set_to_readonly)
21906 return;
21907
21908 + start = ktla_ktva(start);
21909 pr_debug("Set kernel text: %lx - %lx for read write\n",
21910 start, start+size);
21911
21912 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21913 if (!kernel_set_to_readonly)
21914 return;
21915
21916 + start = ktla_ktva(start);
21917 pr_debug("Set kernel text: %lx - %lx for read only\n",
21918 start, start+size);
21919
21920 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21921 unsigned long start = PFN_ALIGN(_text);
21922 unsigned long size = PFN_ALIGN(_etext) - start;
21923
21924 + start = ktla_ktva(start);
21925 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21926 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21927 size >> 10);
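The rewritten is_kernel_text() above classifies a whole mapping range instead of probing two individual addresses, which matters once 2M/4M pages are used: a large page must stay executable if any part of it overlaps the kernel or init text (or, per the added checks, the ACPI wakeup and low BIOS/video windows). The underlying test is plain half-open interval overlap; a minimal illustration with made-up addresses:

#include <stdio.h>
#include <stdbool.h>

/* Two half-open ranges [a_start, a_end) and [b_start, b_end) overlap
 * iff each one starts before the other ends. is_kernel_text() above is
 * this test, negated, applied to every range that must stay executable. */
static bool ranges_overlap(unsigned long a_start, unsigned long a_end,
			   unsigned long b_start, unsigned long b_end)
{
	return a_start < b_end && b_start < a_end;
}

int main(void)
{
	unsigned long text_start = 0xc1000000UL, text_end = 0xc1400000UL;

	/* a 4M mapping whose tail covers the first pages of the text */
	printf("%d\n", ranges_overlap(0xc0c00000UL, 0xc1001000UL,
				      text_start, text_end));	/* 1 */
	/* a mapping entirely below the text */
	printf("%d\n", ranges_overlap(0xc0800000UL, 0xc0c00000UL,
				      text_start, text_end));	/* 0 */
	return 0;
}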
21928 diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
21929 --- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21930 +++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21931 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21932 pmd = fill_pmd(pud, vaddr);
21933 pte = fill_pte(pmd, vaddr);
21934
21935 + pax_open_kernel();
21936 set_pte(pte, new_pte);
21937 + pax_close_kernel();
21938
21939 /*
21940 * It's enough to flush this one mapping.
21941 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21942 pgd = pgd_offset_k((unsigned long)__va(phys));
21943 if (pgd_none(*pgd)) {
21944 pud = (pud_t *) spp_getpage();
21945 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21946 - _PAGE_USER));
21947 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21948 }
21949 pud = pud_offset(pgd, (unsigned long)__va(phys));
21950 if (pud_none(*pud)) {
21951 pmd = (pmd_t *) spp_getpage();
21952 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21953 - _PAGE_USER));
21954 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21955 }
21956 pmd = pmd_offset(pud, phys);
21957 BUG_ON(!pmd_none(*pmd));
21958 @@ -675,6 +675,12 @@ void __init mem_init(void)
21959
21960 pci_iommu_alloc();
21961
21962 +#ifdef CONFIG_PAX_PER_CPU_PGD
21963 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21964 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21965 + KERNEL_PGD_PTRS);
21966 +#endif
21967 +
21968 /* clear_bss() already clear the empty_zero_page */
21969
21970 reservedpages = 0;
21971 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21972 static struct vm_area_struct gate_vma = {
21973 .vm_start = VSYSCALL_START,
21974 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21975 - .vm_page_prot = PAGE_READONLY_EXEC,
21976 - .vm_flags = VM_READ | VM_EXEC
21977 + .vm_page_prot = PAGE_READONLY,
21978 + .vm_flags = VM_READ
21979 };
21980
21981 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21982 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21983
21984 const char *arch_vma_name(struct vm_area_struct *vma)
21985 {
21986 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21987 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21988 return "[vdso]";
21989 if (vma == &gate_vma)
21990 return "[vsyscall]";
21991 diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
21992 --- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21993 +++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21994 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21995 * cause a hotspot and fill up ZONE_DMA. The page tables
21996 * need roughly 0.5KB per GB.
21997 */
21998 -#ifdef CONFIG_X86_32
21999 - start = 0x7000;
22000 -#else
22001 - start = 0x8000;
22002 -#endif
22003 + start = 0x100000;
22004 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22005 tables, PAGE_SIZE);
22006 if (e820_table_start == -1UL)
22007 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22008 #endif
22009
22010 set_nx();
22011 - if (nx_enabled)
22012 + if (nx_enabled && cpu_has_nx)
22013 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22014
22015 /* Enable PSE if available */
22016 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22017 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22018 * mmio resources as well as potential bios/acpi data regions.
22019 */
22020 +
22021 int devmem_is_allowed(unsigned long pagenr)
22022 {
22023 +#ifdef CONFIG_GRKERNSEC_KMEM
22024 + /* allow BDA */
22025 + if (!pagenr)
22026 + return 1;
22027 + /* allow EBDA */
22028 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22029 + return 1;
22030 + /* allow ISA/video mem */
22031 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22032 + return 1;
22033 + /* throw out everything else below 1MB */
22034 + if (pagenr <= 256)
22035 + return 0;
22036 +#else
22037 if (pagenr <= 256)
22038 return 1;
22039 +#endif
22040 +
22041 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22042 return 0;
22043 if (!page_is_ram(pagenr))
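The GRKERNSEC_KMEM branch above inverts the old /dev/mem policy for low memory: instead of allowing all of the first 1MB, it whitelists only the BIOS data area (page 0), the EBDA page, and the ISA/video hole, and refuses the rest. A user-space model of just that whitelist (ISA_START_ADDRESS and ISA_END_ADDRESS use their usual x86 values, assumed from headers not shown here):

#include <stdio.h>

#define PAGE_SHIFT		12
#define ISA_START_ADDRESS	0xa0000UL	/* usual x86 values, assumed */
#define ISA_END_ADDRESS		0x100000UL

/* Model of the GRKERNSEC_KMEM whitelist above for pages below 1MB:
 * BDA (page 0), EBDA (page 0x9f) and the ISA/video hole are allowed,
 * everything else in low memory is refused. Pages above 1MB are left
 * to the rest of devmem_is_allowed() and not modelled here. */
static int low_page_allowed(unsigned long pagenr)
{
	if (!pagenr)
		return 1;				/* BDA */
	if (pagenr == (0x9f000UL >> PAGE_SHIFT))
		return 1;				/* EBDA */
	if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
	    pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
		return 1;				/* ISA/video */
	return 0;
}

int main(void)
{
	printf("page 0x00: %d\n", low_page_allowed(0x00));	/* 1 */
	printf("page 0x10: %d\n", low_page_allowed(0x10));	/* 0 */
	printf("page 0xa0: %d\n", low_page_allowed(0xa0));	/* 1 */
	return 0;
}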
22044 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22045
22046 void free_initmem(void)
22047 {
22048 +
22049 +#ifdef CONFIG_PAX_KERNEXEC
22050 +#ifdef CONFIG_X86_32
22051 + /* PaX: limit KERNEL_CS to actual size */
22052 + unsigned long addr, limit;
22053 + struct desc_struct d;
22054 + int cpu;
22055 +
22056 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22057 + limit = (limit - 1UL) >> PAGE_SHIFT;
22058 +
22059 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22060 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22061 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22062 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22063 + }
22064 +
22065 + /* PaX: make KERNEL_CS read-only */
22066 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22067 + if (!paravirt_enabled())
22068 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22069 +/*
22070 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22071 + pgd = pgd_offset_k(addr);
22072 + pud = pud_offset(pgd, addr);
22073 + pmd = pmd_offset(pud, addr);
22074 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22075 + }
22076 +*/
22077 +#ifdef CONFIG_X86_PAE
22078 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22079 +/*
22080 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22081 + pgd = pgd_offset_k(addr);
22082 + pud = pud_offset(pgd, addr);
22083 + pmd = pmd_offset(pud, addr);
22084 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22085 + }
22086 +*/
22087 +#endif
22088 +
22089 +#ifdef CONFIG_MODULES
22090 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22091 +#endif
22092 +
22093 +#else
22094 + pgd_t *pgd;
22095 + pud_t *pud;
22096 + pmd_t *pmd;
22097 + unsigned long addr, end;
22098 +
22099 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22100 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22101 + pgd = pgd_offset_k(addr);
22102 + pud = pud_offset(pgd, addr);
22103 + pmd = pmd_offset(pud, addr);
22104 + if (!pmd_present(*pmd))
22105 + continue;
22106 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22107 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22108 + else
22109 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22110 + }
22111 +
22112 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22113 + end = addr + KERNEL_IMAGE_SIZE;
22114 + for (; addr < end; addr += PMD_SIZE) {
22115 + pgd = pgd_offset_k(addr);
22116 + pud = pud_offset(pgd, addr);
22117 + pmd = pmd_offset(pud, addr);
22118 + if (!pmd_present(*pmd))
22119 + continue;
22120 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22121 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22122 + }
22123 +#endif
22124 +
22125 + flush_tlb_all();
22126 +#endif
22127 +
22128 free_init_pages("unused kernel memory",
22129 (unsigned long)(&__init_begin),
22130 (unsigned long)(&__init_end));
22131 diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22132 --- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22133 +++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22134 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22135 debug_kmap_atomic(type);
22136 idx = type + KM_TYPE_NR * smp_processor_id();
22137 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22138 +
22139 + pax_open_kernel();
22140 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22141 + pax_close_kernel();
22142 +
22143 arch_flush_lazy_mmu_mode();
22144
22145 return (void *)vaddr;
22146 diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22147 --- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22148 +++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22149 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22150 * Second special case: Some BIOSen report the PC BIOS
22151 * area (640->1Mb) as ram even though it is not.
22152 */
22153 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22154 - pagenr < (BIOS_END >> PAGE_SHIFT))
22155 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22156 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22157 return 0;
22158
22159 for (i = 0; i < e820.nr_map; i++) {
22160 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22161 /*
22162 * Don't allow anybody to remap normal RAM that we're using..
22163 */
22164 - for (pfn = phys_addr >> PAGE_SHIFT;
22165 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22166 - pfn++) {
22167 -
22168 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22169 int is_ram = page_is_ram(pfn);
22170
22171 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22172 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22173 return NULL;
22174 WARN_ON_ONCE(is_ram);
22175 }
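The rewritten loop bound above casts the pfn to resource_size_t before shifting: on 32-bit kernels with PAE, a page frame number above 0xfffff no longer fits in unsigned long once shifted left by PAGE_SHIFT, so the old comparison silently wrapped. A small demonstration of the wrap and of widening first (uint64_t stands in for resource_size_t; build for a 32-bit target, e.g. with -m32, to see the two values differ):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* a PAE page frame just above the 4GB boundary */
	unsigned long pfn = 0x100001UL;

	/* shifting in unsigned long wraps on a 32-bit build */
	unsigned long narrow = pfn << PAGE_SHIFT;

	/* widening first, as the patched loop condition does, keeps all bits */
	uint64_t wide = (uint64_t)pfn << PAGE_SHIFT;

	printf("narrow = %#lx\n", narrow);
	printf("wide   = %#" PRIx64 "\n", wide);
	return 0;
}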
22176 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22177 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22178
22179 static __initdata int after_paging_init;
22180 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22181 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22182
22183 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22184 {
22185 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22186 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22187
22188 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22189 - memset(bm_pte, 0, sizeof(bm_pte));
22190 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22191 + pmd_populate_user(&init_mm, pmd, bm_pte);
22192
22193 /*
22194 * The boot-ioremap range spans multiple pmds, for which
22195 diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22196 --- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22197 +++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22198 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22199 * memory (e.g. tracked pages)? For now, we need this to avoid
22200 * invoking kmemcheck for PnP BIOS calls.
22201 */
22202 - if (regs->flags & X86_VM_MASK)
22203 + if (v8086_mode(regs))
22204 return false;
22205 - if (regs->cs != __KERNEL_CS)
22206 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22207 return false;
22208
22209 pte = kmemcheck_pte_lookup(address);
22210 diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22211 --- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22212 +++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22213 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22214 * Leave an at least ~128 MB hole with possible stack randomization.
22215 */
22216 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22217 -#define MAX_GAP (TASK_SIZE/6*5)
22218 +#define MAX_GAP (pax_task_size/6*5)
22219
22220 /*
22221 * True on X86_32 or when emulating IA32 on X86_64
22222 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22223 return rnd << PAGE_SHIFT;
22224 }
22225
22226 -static unsigned long mmap_base(void)
22227 +static unsigned long mmap_base(struct mm_struct *mm)
22228 {
22229 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22230 + unsigned long pax_task_size = TASK_SIZE;
22231 +
22232 +#ifdef CONFIG_PAX_SEGMEXEC
22233 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22234 + pax_task_size = SEGMEXEC_TASK_SIZE;
22235 +#endif
22236
22237 if (gap < MIN_GAP)
22238 gap = MIN_GAP;
22239 else if (gap > MAX_GAP)
22240 gap = MAX_GAP;
22241
22242 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22243 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22244 }
22245
22246 /*
22247 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22248 * does, but not when emulating X86_32
22249 */
22250 -static unsigned long mmap_legacy_base(void)
22251 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22252 {
22253 - if (mmap_is_ia32())
22254 + if (mmap_is_ia32()) {
22255 +
22256 +#ifdef CONFIG_PAX_SEGMEXEC
22257 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22258 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22259 + else
22260 +#endif
22261 +
22262 return TASK_UNMAPPED_BASE;
22263 - else
22264 + } else
22265 return TASK_UNMAPPED_BASE + mmap_rnd();
22266 }
22267
22268 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22269 void arch_pick_mmap_layout(struct mm_struct *mm)
22270 {
22271 if (mmap_is_legacy()) {
22272 - mm->mmap_base = mmap_legacy_base();
22273 + mm->mmap_base = mmap_legacy_base(mm);
22274 +
22275 +#ifdef CONFIG_PAX_RANDMMAP
22276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22277 + mm->mmap_base += mm->delta_mmap;
22278 +#endif
22279 +
22280 mm->get_unmapped_area = arch_get_unmapped_area;
22281 mm->unmap_area = arch_unmap_area;
22282 } else {
22283 - mm->mmap_base = mmap_base();
22284 + mm->mmap_base = mmap_base(mm);
22285 +
22286 +#ifdef CONFIG_PAX_RANDMMAP
22287 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22288 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22289 +#endif
22290 +
22291 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22292 mm->unmap_area = arch_unmap_area_topdown;
22293 }
22294 diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22295 --- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22296 +++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22297 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22298 break;
22299 default:
22300 {
22301 - unsigned char *ip = (unsigned char *)instptr;
22302 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22303 my_trace->opcode = MMIO_UNKNOWN_OP;
22304 my_trace->width = 0;
22305 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22306 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22307 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22308 void __iomem *addr)
22309 {
22310 - static atomic_t next_id;
22311 + static atomic_unchecked_t next_id;
22312 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22313 /* These are page-unaligned. */
22314 struct mmiotrace_map map = {
22315 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22316 .private = trace
22317 },
22318 .phys = offset,
22319 - .id = atomic_inc_return(&next_id)
22320 + .id = atomic_inc_return_unchecked(&next_id)
22321 };
22322 map.map_id = trace->id;
22323
22324 diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22325 --- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22326 +++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22327 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22328 }
22329 #endif
22330
22331 -extern unsigned long find_max_low_pfn(void);
22332 extern unsigned long highend_pfn, highstart_pfn;
22333
22334 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22335 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22336 --- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22337 +++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22338 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22339 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22340 */
22341 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22342 - pgprot_val(forbidden) |= _PAGE_NX;
22343 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22344
22345 /*
22346 * The kernel text needs to be executable for obvious reasons
22347 * Does not cover __inittext since that is gone later on. On
22348 * 64bit we do not enforce !NX on the low mapping
22349 */
22350 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22351 - pgprot_val(forbidden) |= _PAGE_NX;
22352 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22353 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22354
22355 +#ifdef CONFIG_DEBUG_RODATA
22356 /*
22357 * The .rodata section needs to be read-only. Using the pfn
22358 * catches all aliases.
22359 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22360 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22361 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22362 pgprot_val(forbidden) |= _PAGE_RW;
22363 +#endif
22364 +
22365 +#ifdef CONFIG_PAX_KERNEXEC
22366 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22367 + pgprot_val(forbidden) |= _PAGE_RW;
22368 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22369 + }
22370 +#endif
22371
22372 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22373
22374 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22375 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22376 {
22377 /* change init_mm */
22378 + pax_open_kernel();
22379 set_pte_atomic(kpte, pte);
22380 +
22381 #ifdef CONFIG_X86_32
22382 if (!SHARED_KERNEL_PMD) {
22383 +
22384 +#ifdef CONFIG_PAX_PER_CPU_PGD
22385 + unsigned long cpu;
22386 +#else
22387 struct page *page;
22388 +#endif
22389
22390 +#ifdef CONFIG_PAX_PER_CPU_PGD
22391 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22392 + pgd_t *pgd = get_cpu_pgd(cpu);
22393 +#else
22394 list_for_each_entry(page, &pgd_list, lru) {
22395 - pgd_t *pgd;
22396 + pgd_t *pgd = (pgd_t *)page_address(page);
22397 +#endif
22398 +
22399 pud_t *pud;
22400 pmd_t *pmd;
22401
22402 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22403 + pgd += pgd_index(address);
22404 pud = pud_offset(pgd, address);
22405 pmd = pmd_offset(pud, address);
22406 set_pte_atomic((pte_t *)pmd, pte);
22407 }
22408 }
22409 #endif
22410 + pax_close_kernel();
22411 }
22412
22413 static int
22414 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22415 --- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22416 +++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22417 @@ -36,7 +36,7 @@ enum {
22418
22419 static int pte_testbit(pte_t pte)
22420 {
22421 - return pte_flags(pte) & _PAGE_UNUSED1;
22422 + return pte_flags(pte) & _PAGE_CPA_TEST;
22423 }
22424
22425 struct split_state {
22426 diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22427 --- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22428 +++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22429 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22430
22431 conflict:
22432 printk(KERN_INFO "%s:%d conflicting memory types "
22433 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22434 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22435 new->end, cattr_name(new->type), cattr_name(entry->type));
22436 return -EBUSY;
22437 }
22438 @@ -559,7 +559,7 @@ unlock_ret:
22439
22440 if (err) {
22441 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22442 - current->comm, current->pid, start, end);
22443 + current->comm, task_pid_nr(current), start, end);
22444 }
22445
22446 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22447 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22448 while (cursor < to) {
22449 if (!devmem_is_allowed(pfn)) {
22450 printk(KERN_INFO
22451 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22452 - current->comm, from, to);
22453 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22454 + current->comm, from, to, cursor);
22455 return 0;
22456 }
22457 cursor += PAGE_SIZE;
22458 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22459 printk(KERN_INFO
22460 "%s:%d ioremap_change_attr failed %s "
22461 "for %Lx-%Lx\n",
22462 - current->comm, current->pid,
22463 + current->comm, task_pid_nr(current),
22464 cattr_name(flags),
22465 base, (unsigned long long)(base + size));
22466 return -EINVAL;
22467 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22468 free_memtype(paddr, paddr + size);
22469 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22470 " for %Lx-%Lx, got %s\n",
22471 - current->comm, current->pid,
22472 + current->comm, task_pid_nr(current),
22473 cattr_name(want_flags),
22474 (unsigned long long)paddr,
22475 (unsigned long long)(paddr + size),
22476 diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22477 --- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22478 +++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22479 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22480 int i;
22481 enum reason_type rv = OTHERS;
22482
22483 - p = (unsigned char *)ins_addr;
22484 + p = (unsigned char *)ktla_ktva(ins_addr);
22485 p += skip_prefix(p, &prf);
22486 p += get_opcode(p, &opcode);
22487
22488 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22489 struct prefix_bits prf;
22490 int i;
22491
22492 - p = (unsigned char *)ins_addr;
22493 + p = (unsigned char *)ktla_ktva(ins_addr);
22494 p += skip_prefix(p, &prf);
22495 p += get_opcode(p, &opcode);
22496
22497 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22498 struct prefix_bits prf;
22499 int i;
22500
22501 - p = (unsigned char *)ins_addr;
22502 + p = (unsigned char *)ktla_ktva(ins_addr);
22503 p += skip_prefix(p, &prf);
22504 p += get_opcode(p, &opcode);
22505
22506 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22507 int i;
22508 unsigned long rv;
22509
22510 - p = (unsigned char *)ins_addr;
22511 + p = (unsigned char *)ktla_ktva(ins_addr);
22512 p += skip_prefix(p, &prf);
22513 p += get_opcode(p, &opcode);
22514 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22515 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22516 int i;
22517 unsigned long rv;
22518
22519 - p = (unsigned char *)ins_addr;
22520 + p = (unsigned char *)ktla_ktva(ins_addr);
22521 p += skip_prefix(p, &prf);
22522 p += get_opcode(p, &opcode);
22523 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22524 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22525 --- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22526 +++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22527 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22528 return;
22529 }
22530 pte = pte_offset_kernel(pmd, vaddr);
22531 +
22532 + pax_open_kernel();
22533 if (pte_val(pteval))
22534 set_pte_at(&init_mm, vaddr, pte, pteval);
22535 else
22536 pte_clear(&init_mm, vaddr, pte);
22537 + pax_close_kernel();
22538
22539 /*
22540 * It's enough to flush this one mapping.
22541 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22542 --- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22543 +++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22544 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22545 list_del(&page->lru);
22546 }
22547
22548 -#define UNSHARED_PTRS_PER_PGD \
22549 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22550 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22551 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22552
22553 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22554 +{
22555 + while (count--)
22556 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22557 +}
22558 +#endif
22559 +
22560 +#ifdef CONFIG_PAX_PER_CPU_PGD
22561 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22562 +{
22563 + while (count--)
22564 +
22565 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22566 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22567 +#else
22568 + *dst++ = *src++;
22569 +#endif
22570 +
22571 +}
22572 +#endif
22573 +
22574 +#ifdef CONFIG_X86_64
22575 +#define pxd_t pud_t
22576 +#define pyd_t pgd_t
22577 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22578 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22579 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22580 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22581 +#define PYD_SIZE PGDIR_SIZE
22582 +#else
22583 +#define pxd_t pmd_t
22584 +#define pyd_t pud_t
22585 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22586 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22587 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22588 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22589 +#define PYD_SIZE PUD_SIZE
22590 +#endif
22591 +
22592 +#ifdef CONFIG_PAX_PER_CPU_PGD
22593 +static inline void pgd_ctor(pgd_t *pgd) {}
22594 +static inline void pgd_dtor(pgd_t *pgd) {}
22595 +#else
22596 static void pgd_ctor(pgd_t *pgd)
22597 {
22598 /* If the pgd points to a shared pagetable level (either the
22599 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22600 pgd_list_del(pgd);
22601 spin_unlock_irqrestore(&pgd_lock, flags);
22602 }
22603 +#endif
22604
22605 /*
22606 * List of all pgd's needed for non-PAE so it can invalidate entries
22607 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22608 * -- wli
22609 */
22610
22611 -#ifdef CONFIG_X86_PAE
22612 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22613 /*
22614 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22615 * updating the top-level pagetable entries to guarantee the
22616 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22617 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22618 * and initialize the kernel pmds here.
22619 */
22620 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22621 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22622
22623 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22624 {
22625 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22626 */
22627 flush_tlb_mm(mm);
22628 }
22629 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22630 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22631 #else /* !CONFIG_X86_PAE */
22632
22633 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22634 -#define PREALLOCATED_PMDS 0
22635 +#define PREALLOCATED_PXDS 0
22636
22637 #endif /* CONFIG_X86_PAE */
22638
22639 -static void free_pmds(pmd_t *pmds[])
22640 +static void free_pxds(pxd_t *pxds[])
22641 {
22642 int i;
22643
22644 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22645 - if (pmds[i])
22646 - free_page((unsigned long)pmds[i]);
22647 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22648 + if (pxds[i])
22649 + free_page((unsigned long)pxds[i]);
22650 }
22651
22652 -static int preallocate_pmds(pmd_t *pmds[])
22653 +static int preallocate_pxds(pxd_t *pxds[])
22654 {
22655 int i;
22656 bool failed = false;
22657
22658 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22659 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22660 - if (pmd == NULL)
22661 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22662 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22663 + if (pxd == NULL)
22664 failed = true;
22665 - pmds[i] = pmd;
22666 + pxds[i] = pxd;
22667 }
22668
22669 if (failed) {
22670 - free_pmds(pmds);
22671 + free_pxds(pxds);
22672 return -ENOMEM;
22673 }
22674
22675 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22676 * preallocate which never got a corresponding vma will need to be
22677 * freed manually.
22678 */
22679 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22680 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22681 {
22682 int i;
22683
22684 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22685 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22686 pgd_t pgd = pgdp[i];
22687
22688 if (pgd_val(pgd) != 0) {
22689 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22690 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22691
22692 - pgdp[i] = native_make_pgd(0);
22693 + set_pgd(pgdp + i, native_make_pgd(0));
22694
22695 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22696 - pmd_free(mm, pmd);
22697 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22698 + pxd_free(mm, pxd);
22699 }
22700 }
22701 }
22702
22703 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22704 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22705 {
22706 - pud_t *pud;
22707 + pyd_t *pyd;
22708 unsigned long addr;
22709 int i;
22710
22711 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22712 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22713 return;
22714
22715 - pud = pud_offset(pgd, 0);
22716 +#ifdef CONFIG_X86_64
22717 + pyd = pyd_offset(mm, 0L);
22718 +#else
22719 + pyd = pyd_offset(pgd, 0L);
22720 +#endif
22721
22722 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22723 - i++, pud++, addr += PUD_SIZE) {
22724 - pmd_t *pmd = pmds[i];
22725 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22726 + i++, pyd++, addr += PYD_SIZE) {
22727 + pxd_t *pxd = pxds[i];
22728
22729 if (i >= KERNEL_PGD_BOUNDARY)
22730 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22731 - sizeof(pmd_t) * PTRS_PER_PMD);
22732 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22733 + sizeof(pxd_t) * PTRS_PER_PMD);
22734
22735 - pud_populate(mm, pud, pmd);
22736 + pyd_populate(mm, pyd, pxd);
22737 }
22738 }
22739
22740 pgd_t *pgd_alloc(struct mm_struct *mm)
22741 {
22742 pgd_t *pgd;
22743 - pmd_t *pmds[PREALLOCATED_PMDS];
22744 + pxd_t *pxds[PREALLOCATED_PXDS];
22745 +
22746 unsigned long flags;
22747
22748 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22749 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22750
22751 mm->pgd = pgd;
22752
22753 - if (preallocate_pmds(pmds) != 0)
22754 + if (preallocate_pxds(pxds) != 0)
22755 goto out_free_pgd;
22756
22757 if (paravirt_pgd_alloc(mm) != 0)
22758 - goto out_free_pmds;
22759 + goto out_free_pxds;
22760
22761 /*
22762 * Make sure that pre-populating the pmds is atomic with
22763 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22764 spin_lock_irqsave(&pgd_lock, flags);
22765
22766 pgd_ctor(pgd);
22767 - pgd_prepopulate_pmd(mm, pgd, pmds);
22768 + pgd_prepopulate_pxd(mm, pgd, pxds);
22769
22770 spin_unlock_irqrestore(&pgd_lock, flags);
22771
22772 return pgd;
22773
22774 -out_free_pmds:
22775 - free_pmds(pmds);
22776 +out_free_pxds:
22777 + free_pxds(pxds);
22778 out_free_pgd:
22779 free_page((unsigned long)pgd);
22780 out:
22781 @@ -287,7 +338,7 @@ out:
22782
22783 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22784 {
22785 - pgd_mop_up_pmds(mm, pgd);
22786 + pgd_mop_up_pxds(mm, pgd);
22787 pgd_dtor(pgd);
22788 paravirt_pgd_free(mm, pgd);
22789 free_page((unsigned long)pgd);
22790 diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22791 --- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22792 +++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22793 @@ -4,11 +4,10 @@
22794
22795 #include <asm/pgtable.h>
22796
22797 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22798 int nx_enabled;
22799
22800 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22801 -static int disable_nx __cpuinitdata;
22802 -
22803 +#ifndef CONFIG_PAX_PAGEEXEC
22804 /*
22805 * noexec = on|off
22806 *
22807 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22808 if (!str)
22809 return -EINVAL;
22810 if (!strncmp(str, "on", 2)) {
22811 - __supported_pte_mask |= _PAGE_NX;
22812 - disable_nx = 0;
22813 + nx_enabled = 1;
22814 } else if (!strncmp(str, "off", 3)) {
22815 - disable_nx = 1;
22816 - __supported_pte_mask &= ~_PAGE_NX;
22817 + nx_enabled = 0;
22818 }
22819 return 0;
22820 }
22821 early_param("noexec", noexec_setup);
22822 #endif
22823 +#endif
22824
22825 #ifdef CONFIG_X86_PAE
22826 void __init set_nx(void)
22827 {
22828 - unsigned int v[4], l, h;
22829 + if (!nx_enabled && cpu_has_nx) {
22830 + unsigned l, h;
22831
22832 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22833 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22834 -
22835 - if ((v[3] & (1 << 20)) && !disable_nx) {
22836 - rdmsr(MSR_EFER, l, h);
22837 - l |= EFER_NX;
22838 - wrmsr(MSR_EFER, l, h);
22839 - nx_enabled = 1;
22840 - __supported_pte_mask |= _PAGE_NX;
22841 - }
22842 + __supported_pte_mask &= ~_PAGE_NX;
22843 + rdmsr(MSR_EFER, l, h);
22844 + l &= ~EFER_NX;
22845 + wrmsr(MSR_EFER, l, h);
22846 }
22847 }
22848 #else
22849 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22850 unsigned long efer;
22851
22852 rdmsrl(MSR_EFER, efer);
22853 - if (!(efer & EFER_NX) || disable_nx)
22854 + if (!(efer & EFER_NX) || !nx_enabled)
22855 __supported_pte_mask &= ~_PAGE_NX;
22856 }
22857 #endif
22858 diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
22859 --- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22860 +++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22861 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22862 BUG();
22863 cpumask_clear_cpu(cpu,
22864 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22865 +
22866 +#ifndef CONFIG_PAX_PER_CPU_PGD
22867 load_cr3(swapper_pg_dir);
22868 +#endif
22869 +
22870 }
22871 EXPORT_SYMBOL_GPL(leave_mm);
22872
22873 diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
22874 --- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22875 +++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22876 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22877 struct frame_head bufhead[2];
22878
22879 /* Also check accessibility of one struct frame_head beyond */
22880 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22881 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22882 return NULL;
22883 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22884 return NULL;
22885 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22886 {
22887 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22888
22889 - if (!user_mode_vm(regs)) {
22890 + if (!user_mode(regs)) {
22891 unsigned long stack = kernel_stack_pointer(regs);
22892 if (depth)
22893 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22894 diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
22895 --- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22896 +++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22897 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22898 #endif
22899 }
22900
22901 -static int inline addr_increment(void)
22902 +static inline int addr_increment(void)
22903 {
22904 #ifdef CONFIG_SMP
22905 return smp_num_siblings == 2 ? 2 : 1;
22906 diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
22907 --- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22908 +++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22909 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22910 int pcibios_last_bus = -1;
22911 unsigned long pirq_table_addr;
22912 struct pci_bus *pci_root_bus;
22913 -struct pci_raw_ops *raw_pci_ops;
22914 -struct pci_raw_ops *raw_pci_ext_ops;
22915 +const struct pci_raw_ops *raw_pci_ops;
22916 +const struct pci_raw_ops *raw_pci_ext_ops;
22917
22918 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22919 int reg, int len, u32 *val)
22920 diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
22921 --- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22922 +++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22923 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22924
22925 #undef PCI_CONF1_ADDRESS
22926
22927 -struct pci_raw_ops pci_direct_conf1 = {
22928 +const struct pci_raw_ops pci_direct_conf1 = {
22929 .read = pci_conf1_read,
22930 .write = pci_conf1_write,
22931 };
22932 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22933
22934 #undef PCI_CONF2_ADDRESS
22935
22936 -struct pci_raw_ops pci_direct_conf2 = {
22937 +const struct pci_raw_ops pci_direct_conf2 = {
22938 .read = pci_conf2_read,
22939 .write = pci_conf2_write,
22940 };
22941 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22942 * This should be close to trivial, but it isn't, because there are buggy
22943 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22944 */
22945 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22946 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22947 {
22948 u32 x = 0;
22949 int year, devfn;
22950 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
22951 --- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22952 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22953 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22954 return 0;
22955 }
22956
22957 -static struct pci_raw_ops pci_mmcfg = {
22958 +static const struct pci_raw_ops pci_mmcfg = {
22959 .read = pci_mmcfg_read,
22960 .write = pci_mmcfg_write,
22961 };
22962 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
22963 --- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22964 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22965 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22966 return 0;
22967 }
22968
22969 -static struct pci_raw_ops pci_mmcfg = {
22970 +static const struct pci_raw_ops pci_mmcfg = {
22971 .read = pci_mmcfg_read,
22972 .write = pci_mmcfg_write,
22973 };
22974 diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
22975 --- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22976 +++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22977 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22978
22979 #undef PCI_CONF1_MQ_ADDRESS
22980
22981 -static struct pci_raw_ops pci_direct_conf1_mq = {
22982 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22983 .read = pci_conf1_mq_read,
22984 .write = pci_conf1_mq_write
22985 };
22986 diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
22987 --- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22988 +++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22989 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22990 return 0;
22991 }
22992
22993 -static struct pci_raw_ops pci_olpc_conf = {
22994 +static const struct pci_raw_ops pci_olpc_conf = {
22995 .read = pci_olpc_read,
22996 .write = pci_olpc_write,
22997 };
22998 diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
22999 --- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23000 +++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23001 @@ -56,50 +56,93 @@ union bios32 {
23002 static struct {
23003 unsigned long address;
23004 unsigned short segment;
23005 -} bios32_indirect = { 0, __KERNEL_CS };
23006 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23007
23008 /*
23009 * Returns the entry point for the given service, NULL on error
23010 */
23011
23012 -static unsigned long bios32_service(unsigned long service)
23013 +static unsigned long __devinit bios32_service(unsigned long service)
23014 {
23015 unsigned char return_code; /* %al */
23016 unsigned long address; /* %ebx */
23017 unsigned long length; /* %ecx */
23018 unsigned long entry; /* %edx */
23019 unsigned long flags;
23020 + struct desc_struct d, *gdt;
23021
23022 local_irq_save(flags);
23023 - __asm__("lcall *(%%edi); cld"
23024 +
23025 + gdt = get_cpu_gdt_table(smp_processor_id());
23026 +
23027 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23028 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23029 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23030 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23031 +
23032 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23033 : "=a" (return_code),
23034 "=b" (address),
23035 "=c" (length),
23036 "=d" (entry)
23037 : "0" (service),
23038 "1" (0),
23039 - "D" (&bios32_indirect));
23040 + "D" (&bios32_indirect),
23041 + "r"(__PCIBIOS_DS)
23042 + : "memory");
23043 +
23044 + pax_open_kernel();
23045 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23046 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23047 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23048 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23049 + pax_close_kernel();
23050 +
23051 local_irq_restore(flags);
23052
23053 switch (return_code) {
23054 - case 0:
23055 - return address + entry;
23056 - case 0x80: /* Not present */
23057 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23058 - return 0;
23059 - default: /* Shouldn't happen */
23060 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23061 - service, return_code);
23062 + case 0: {
23063 + int cpu;
23064 + unsigned char flags;
23065 +
23066 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23067 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23068 + printk(KERN_WARNING "bios32_service: not valid\n");
23069 return 0;
23070 + }
23071 + address = address + PAGE_OFFSET;
23072 + length += 16UL; /* some BIOSs underreport this... */
23073 + flags = 4;
23074 + if (length >= 64*1024*1024) {
23075 + length >>= PAGE_SHIFT;
23076 + flags |= 8;
23077 + }
23078 +
23079 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23080 + gdt = get_cpu_gdt_table(cpu);
23081 + pack_descriptor(&d, address, length, 0x9b, flags);
23082 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23083 + pack_descriptor(&d, address, length, 0x93, flags);
23084 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23085 + }
23086 + return entry;
23087 + }
23088 + case 0x80: /* Not present */
23089 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23090 + return 0;
23091 + default: /* Shouldn't happen */
23092 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23093 + service, return_code);
23094 + return 0;
23095 }
23096 }
23097
23098 static struct {
23099 unsigned long address;
23100 unsigned short segment;
23101 -} pci_indirect = { 0, __KERNEL_CS };
23102 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23103
23104 -static int pci_bios_present;
23105 +static int pci_bios_present __read_only;
23106
23107 static int __devinit check_pcibios(void)
23108 {
23109 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23110 unsigned long flags, pcibios_entry;
23111
23112 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23113 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23114 + pci_indirect.address = pcibios_entry;
23115
23116 local_irq_save(flags);
23117 - __asm__(
23118 - "lcall *(%%edi); cld\n\t"
23119 + __asm__("movw %w6, %%ds\n\t"
23120 + "lcall *%%ss:(%%edi); cld\n\t"
23121 + "push %%ss\n\t"
23122 + "pop %%ds\n\t"
23123 "jc 1f\n\t"
23124 "xor %%ah, %%ah\n"
23125 "1:"
23126 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23127 "=b" (ebx),
23128 "=c" (ecx)
23129 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23130 - "D" (&pci_indirect)
23131 + "D" (&pci_indirect),
23132 + "r" (__PCIBIOS_DS)
23133 : "memory");
23134 local_irq_restore(flags);
23135
23136 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23137
23138 switch (len) {
23139 case 1:
23140 - __asm__("lcall *(%%esi); cld\n\t"
23141 + __asm__("movw %w6, %%ds\n\t"
23142 + "lcall *%%ss:(%%esi); cld\n\t"
23143 + "push %%ss\n\t"
23144 + "pop %%ds\n\t"
23145 "jc 1f\n\t"
23146 "xor %%ah, %%ah\n"
23147 "1:"
23148 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23149 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23150 "b" (bx),
23151 "D" ((long)reg),
23152 - "S" (&pci_indirect));
23153 + "S" (&pci_indirect),
23154 + "r" (__PCIBIOS_DS));
23155 /*
23156 * Zero-extend the result beyond 8 bits, do not trust the
23157 * BIOS having done it:
23158 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23159 *value &= 0xff;
23160 break;
23161 case 2:
23162 - __asm__("lcall *(%%esi); cld\n\t"
23163 + __asm__("movw %w6, %%ds\n\t"
23164 + "lcall *%%ss:(%%esi); cld\n\t"
23165 + "push %%ss\n\t"
23166 + "pop %%ds\n\t"
23167 "jc 1f\n\t"
23168 "xor %%ah, %%ah\n"
23169 "1:"
23170 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23171 : "1" (PCIBIOS_READ_CONFIG_WORD),
23172 "b" (bx),
23173 "D" ((long)reg),
23174 - "S" (&pci_indirect));
23175 + "S" (&pci_indirect),
23176 + "r" (__PCIBIOS_DS));
23177 /*
23178 * Zero-extend the result beyond 16 bits, do not trust the
23179 * BIOS having done it:
23180 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23181 *value &= 0xffff;
23182 break;
23183 case 4:
23184 - __asm__("lcall *(%%esi); cld\n\t"
23185 + __asm__("movw %w6, %%ds\n\t"
23186 + "lcall *%%ss:(%%esi); cld\n\t"
23187 + "push %%ss\n\t"
23188 + "pop %%ds\n\t"
23189 "jc 1f\n\t"
23190 "xor %%ah, %%ah\n"
23191 "1:"
23192 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23193 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23194 "b" (bx),
23195 "D" ((long)reg),
23196 - "S" (&pci_indirect));
23197 + "S" (&pci_indirect),
23198 + "r" (__PCIBIOS_DS));
23199 break;
23200 }
23201
23202 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23203
23204 switch (len) {
23205 case 1:
23206 - __asm__("lcall *(%%esi); cld\n\t"
23207 + __asm__("movw %w6, %%ds\n\t"
23208 + "lcall *%%ss:(%%esi); cld\n\t"
23209 + "push %%ss\n\t"
23210 + "pop %%ds\n\t"
23211 "jc 1f\n\t"
23212 "xor %%ah, %%ah\n"
23213 "1:"
23214 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23215 "c" (value),
23216 "b" (bx),
23217 "D" ((long)reg),
23218 - "S" (&pci_indirect));
23219 + "S" (&pci_indirect),
23220 + "r" (__PCIBIOS_DS));
23221 break;
23222 case 2:
23223 - __asm__("lcall *(%%esi); cld\n\t"
23224 + __asm__("movw %w6, %%ds\n\t"
23225 + "lcall *%%ss:(%%esi); cld\n\t"
23226 + "push %%ss\n\t"
23227 + "pop %%ds\n\t"
23228 "jc 1f\n\t"
23229 "xor %%ah, %%ah\n"
23230 "1:"
23231 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23232 "c" (value),
23233 "b" (bx),
23234 "D" ((long)reg),
23235 - "S" (&pci_indirect));
23236 + "S" (&pci_indirect),
23237 + "r" (__PCIBIOS_DS));
23238 break;
23239 case 4:
23240 - __asm__("lcall *(%%esi); cld\n\t"
23241 + __asm__("movw %w6, %%ds\n\t"
23242 + "lcall *%%ss:(%%esi); cld\n\t"
23243 + "push %%ss\n\t"
23244 + "pop %%ds\n\t"
23245 "jc 1f\n\t"
23246 "xor %%ah, %%ah\n"
23247 "1:"
23248 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23249 "c" (value),
23250 "b" (bx),
23251 "D" ((long)reg),
23252 - "S" (&pci_indirect));
23253 + "S" (&pci_indirect),
23254 + "r" (__PCIBIOS_DS));
23255 break;
23256 }
23257
23258 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23259 * Function table for BIOS32 access
23260 */
23261
23262 -static struct pci_raw_ops pci_bios_access = {
23263 +static const struct pci_raw_ops pci_bios_access = {
23264 .read = pci_bios_read,
23265 .write = pci_bios_write
23266 };
23267 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23268 * Try to find PCI BIOS.
23269 */
23270
23271 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23272 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23273 {
23274 union bios32 *check;
23275 unsigned char sum;
23276 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23277
23278 DBG("PCI: Fetching IRQ routing table... ");
23279 __asm__("push %%es\n\t"
23280 + "movw %w8, %%ds\n\t"
23281 "push %%ds\n\t"
23282 "pop %%es\n\t"
23283 - "lcall *(%%esi); cld\n\t"
23284 + "lcall *%%ss:(%%esi); cld\n\t"
23285 "pop %%es\n\t"
23286 + "push %%ss\n\t"
23287 + "pop %%ds\n"
23288 "jc 1f\n\t"
23289 "xor %%ah, %%ah\n"
23290 "1:"
23291 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23292 "1" (0),
23293 "D" ((long) &opt),
23294 "S" (&pci_indirect),
23295 - "m" (opt)
23296 + "m" (opt),
23297 + "r" (__PCIBIOS_DS)
23298 : "memory");
23299 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23300 if (ret & 0xff00)
23301 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23302 {
23303 int ret;
23304
23305 - __asm__("lcall *(%%esi); cld\n\t"
23306 + __asm__("movw %w5, %%ds\n\t"
23307 + "lcall *%%ss:(%%esi); cld\n\t"
23308 + "push %%ss\n\t"
23309 + "pop %%ds\n"
23310 "jc 1f\n\t"
23311 "xor %%ah, %%ah\n"
23312 "1:"
23313 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23314 : "0" (PCIBIOS_SET_PCI_HW_INT),
23315 "b" ((dev->bus->number << 8) | dev->devfn),
23316 "c" ((irq << 8) | (pin + 10)),
23317 - "S" (&pci_indirect));
23318 + "S" (&pci_indirect),
23319 + "r" (__PCIBIOS_DS));
23320 return !(ret & 0xff00);
23321 }
23322 EXPORT_SYMBOL(pcibios_set_irq_routing);
23323 diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23324 --- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23325 +++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23326 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23327 static void fix_processor_context(void)
23328 {
23329 int cpu = smp_processor_id();
23330 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23331 + struct tss_struct *t = init_tss + cpu;
23332
23333 set_tss_desc(cpu, t); /*
23334 * This just modifies memory; should not be
23335 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23336 */
23337
23338 #ifdef CONFIG_X86_64
23339 + pax_open_kernel();
23340 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23341 + pax_close_kernel();
23342
23343 syscall_init(); /* This sets MSR_*STAR and related */
23344 #endif
23345 diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23346 --- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23347 +++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23348 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23349 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23350 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23351
23352 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23353 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23354 GCOV_PROFILE := n
23355
23356 #
23357 diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23358 --- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23359 +++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23360 @@ -22,24 +22,48 @@
23361 #include <asm/hpet.h>
23362 #include <asm/unistd.h>
23363 #include <asm/io.h>
23364 +#include <asm/fixmap.h>
23365 #include "vextern.h"
23366
23367 #define gtod vdso_vsyscall_gtod_data
23368
23369 +notrace noinline long __vdso_fallback_time(long *t)
23370 +{
23371 + long secs;
23372 + asm volatile("syscall"
23373 + : "=a" (secs)
23374 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23375 + return secs;
23376 +}
23377 +
23378 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23379 {
23380 long ret;
23381 asm("syscall" : "=a" (ret) :
23382 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23383 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23384 return ret;
23385 }
23386
23387 +notrace static inline cycle_t __vdso_vread_hpet(void)
23388 +{
23389 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23390 +}
23391 +
23392 +notrace static inline cycle_t __vdso_vread_tsc(void)
23393 +{
23394 + cycle_t ret = (cycle_t)vget_cycles();
23395 +
23396 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23397 +}
23398 +
23399 notrace static inline long vgetns(void)
23400 {
23401 long v;
23402 - cycles_t (*vread)(void);
23403 - vread = gtod->clock.vread;
23404 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23405 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23406 + v = __vdso_vread_tsc();
23407 + else
23408 + v = __vdso_vread_hpet();
23409 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23410 return (v * gtod->clock.mult) >> gtod->clock.shift;
23411 }
23412
23413 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23414
23415 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23416 {
23417 - if (likely(gtod->sysctl_enabled))
23418 + if (likely(gtod->sysctl_enabled &&
23419 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23420 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23421 switch (clock) {
23422 case CLOCK_REALTIME:
23423 if (likely(gtod->clock.vread))
23424 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23425 int clock_gettime(clockid_t, struct timespec *)
23426 __attribute__((weak, alias("__vdso_clock_gettime")));
23427
23428 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23429 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23430 {
23431 long ret;
23432 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23433 + asm("syscall" : "=a" (ret) :
23434 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23435 + return ret;
23436 +}
23437 +
23438 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23439 +{
23440 + if (likely(gtod->sysctl_enabled &&
23441 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23442 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23443 + {
23444 if (likely(tv != NULL)) {
23445 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23446 offsetof(struct timespec, tv_nsec) ||
23447 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23448 }
23449 return 0;
23450 }
23451 - asm("syscall" : "=a" (ret) :
23452 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23453 - return ret;
23454 + return __vdso_fallback_gettimeofday(tv, tz);
23455 }
23456 int gettimeofday(struct timeval *, struct timezone *)
23457 __attribute__((weak, alias("__vdso_gettimeofday")));
23458 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23459 --- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23460 +++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23461 @@ -25,6 +25,7 @@
23462 #include <asm/tlbflush.h>
23463 #include <asm/vdso.h>
23464 #include <asm/proto.h>
23465 +#include <asm/mman.h>
23466
23467 enum {
23468 VDSO_DISABLED = 0,
23469 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23470 void enable_sep_cpu(void)
23471 {
23472 int cpu = get_cpu();
23473 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23474 + struct tss_struct *tss = init_tss + cpu;
23475
23476 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23477 put_cpu();
23478 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23479 gate_vma.vm_start = FIXADDR_USER_START;
23480 gate_vma.vm_end = FIXADDR_USER_END;
23481 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23482 - gate_vma.vm_page_prot = __P101;
23483 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23484 /*
23485 * Make sure the vDSO gets into every core dump.
23486 * Dumping its contents makes post-mortem fully interpretable later
23487 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23488 if (compat)
23489 addr = VDSO_HIGH_BASE;
23490 else {
23491 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23492 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23493 if (IS_ERR_VALUE(addr)) {
23494 ret = addr;
23495 goto up_fail;
23496 }
23497 }
23498
23499 - current->mm->context.vdso = (void *)addr;
23500 + current->mm->context.vdso = addr;
23501
23502 if (compat_uses_vma || !compat) {
23503 /*
23504 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23505 }
23506
23507 current_thread_info()->sysenter_return =
23508 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23509 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23510
23511 up_fail:
23512 if (ret)
23513 - current->mm->context.vdso = NULL;
23514 + current->mm->context.vdso = 0;
23515
23516 up_write(&mm->mmap_sem);
23517
23518 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23519
23520 const char *arch_vma_name(struct vm_area_struct *vma)
23521 {
23522 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23523 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23524 return "[vdso]";
23525 +
23526 +#ifdef CONFIG_PAX_SEGMEXEC
23527 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23528 + return "[vdso]";
23529 +#endif
23530 +
23531 return NULL;
23532 }
23533
23534 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23535 struct mm_struct *mm = tsk->mm;
23536
23537 /* Check to see if this task was created in compat vdso mode */
23538 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23539 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23540 return &gate_vma;
23541 return NULL;
23542 }
23543 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23544 --- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23545 +++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23546 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23547 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23548 #include "vextern.h"
23549 #undef VEXTERN
23550 +
23551 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23552 +VEXTERN(fallback_gettimeofday)
23553 +VEXTERN(fallback_time)
23554 +VEXTERN(getcpu)
23555 +#undef VEXTERN
23556 diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23557 --- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23558 +++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23559 @@ -11,6 +11,5 @@
23560 put into vextern.h and be referenced as a pointer with vdso prefix.
23561 The main kernel later fills in the values. */
23562
23563 -VEXTERN(jiffies)
23564 VEXTERN(vgetcpu_mode)
23565 VEXTERN(vsyscall_gtod_data)
23566 diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23567 --- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23568 +++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23569 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23570 if (!vbase)
23571 goto oom;
23572
23573 - if (memcmp(vbase, "\177ELF", 4)) {
23574 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
23575 printk("VDSO: I'm broken; not ELF\n");
23576 vdso_enabled = 0;
23577 }
23578 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23579 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23580 #include "vextern.h"
23581 #undef VEXTERN
23582 + vunmap(vbase);
23583 return 0;
23584
23585 oom:
23586 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23587 goto up_fail;
23588 }
23589
23590 - current->mm->context.vdso = (void *)addr;
23591 + current->mm->context.vdso = addr;
23592
23593 ret = install_special_mapping(mm, addr, vdso_size,
23594 VM_READ|VM_EXEC|
23595 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23596 VM_ALWAYSDUMP,
23597 vdso_pages);
23598 if (ret) {
23599 - current->mm->context.vdso = NULL;
23600 + current->mm->context.vdso = 0;
23601 goto up_fail;
23602 }
23603
23604 @@ -132,10 +133,3 @@ up_fail:
23605 up_write(&mm->mmap_sem);
23606 return ret;
23607 }
23608 -
23609 -static __init int vdso_setup(char *s)
23610 -{
23611 - vdso_enabled = simple_strtoul(s, NULL, 0);
23612 - return 0;
23613 -}
23614 -__setup("vdso=", vdso_setup);
23615 diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23616 --- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23617 +++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23618 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23619
23620 struct shared_info xen_dummy_shared_info;
23621
23622 -void *xen_initial_gdt;
23623 -
23624 /*
23625 * Point at some empty memory to start with. We map the real shared_info
23626 * page as soon as fixmap is up and running.
23627 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23628
23629 preempt_disable();
23630
23631 - start = __get_cpu_var(idt_desc).address;
23632 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23633 end = start + __get_cpu_var(idt_desc).size + 1;
23634
23635 xen_mc_flush();
23636 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23637 #endif
23638 };
23639
23640 -static void xen_reboot(int reason)
23641 +static __noreturn void xen_reboot(int reason)
23642 {
23643 struct sched_shutdown r = { .reason = reason };
23644
23645 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23646 BUG();
23647 }
23648
23649 -static void xen_restart(char *msg)
23650 +static __noreturn void xen_restart(char *msg)
23651 {
23652 xen_reboot(SHUTDOWN_reboot);
23653 }
23654
23655 -static void xen_emergency_restart(void)
23656 +static __noreturn void xen_emergency_restart(void)
23657 {
23658 xen_reboot(SHUTDOWN_reboot);
23659 }
23660
23661 -static void xen_machine_halt(void)
23662 +static __noreturn void xen_machine_halt(void)
23663 {
23664 xen_reboot(SHUTDOWN_poweroff);
23665 }
23666 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23667 */
23668 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23669
23670 -#ifdef CONFIG_X86_64
23671 /* Work out if we support NX */
23672 - check_efer();
23673 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23674 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23675 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23676 + unsigned l, h;
23677 +
23678 +#ifdef CONFIG_X86_PAE
23679 + nx_enabled = 1;
23680 +#endif
23681 + __supported_pte_mask |= _PAGE_NX;
23682 + rdmsr(MSR_EFER, l, h);
23683 + l |= EFER_NX;
23684 + wrmsr(MSR_EFER, l, h);
23685 + }
23686 #endif
23687
23688 xen_setup_features();
23689 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23690
23691 machine_ops = xen_machine_ops;
23692
23693 - /*
23694 - * The only reliable way to retain the initial address of the
23695 - * percpu gdt_page is to remember it here, so we can go and
23696 - * mark it RW later, when the initial percpu area is freed.
23697 - */
23698 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23699 -
23700 xen_smp_init();
23701
23702 pgd = (pgd_t *)xen_start_info->pt_base;
23703 diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23704 --- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23705 +++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23706 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23707 convert_pfn_mfn(init_level4_pgt);
23708 convert_pfn_mfn(level3_ident_pgt);
23709 convert_pfn_mfn(level3_kernel_pgt);
23710 + convert_pfn_mfn(level3_vmalloc_pgt);
23711 + convert_pfn_mfn(level3_vmemmap_pgt);
23712
23713 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23714 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23715 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23716 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23717 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23718 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23719 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23720 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23721 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23722 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23723 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23724 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23725
23726 diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23727 --- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23728 +++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23729 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23730 {
23731 BUG_ON(smp_processor_id() != 0);
23732 native_smp_prepare_boot_cpu();
23733 -
23734 - /* We've switched to the "real" per-cpu gdt, so make sure the
23735 - old memory can be recycled */
23736 - make_lowmem_page_readwrite(xen_initial_gdt);
23737 -
23738 xen_setup_vcpu_info_placement();
23739 }
23740
23741 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23742 gdt = get_cpu_gdt_table(cpu);
23743
23744 ctxt->flags = VGCF_IN_KERNEL;
23745 - ctxt->user_regs.ds = __USER_DS;
23746 - ctxt->user_regs.es = __USER_DS;
23747 + ctxt->user_regs.ds = __KERNEL_DS;
23748 + ctxt->user_regs.es = __KERNEL_DS;
23749 ctxt->user_regs.ss = __KERNEL_DS;
23750 #ifdef CONFIG_X86_32
23751 ctxt->user_regs.fs = __KERNEL_PERCPU;
23752 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23753 + savesegment(gs, ctxt->user_regs.gs);
23754 #else
23755 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23756 #endif
23757 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23758 int rc;
23759
23760 per_cpu(current_task, cpu) = idle;
23761 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23762 #ifdef CONFIG_X86_32
23763 irq_ctx_init(cpu);
23764 #else
23765 clear_tsk_thread_flag(idle, TIF_FORK);
23766 - per_cpu(kernel_stack, cpu) =
23767 - (unsigned long)task_stack_page(idle) -
23768 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23769 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23770 #endif
23771 xen_setup_runstate_info(cpu);
23772 xen_setup_timer(cpu);
23773 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
23774 --- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23775 +++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23776 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23777 ESP_OFFSET=4 # bytes pushed onto stack
23778
23779 /*
23780 - * Store vcpu_info pointer for easy access. Do it this way to
23781 - * avoid having to reload %fs
23782 + * Store vcpu_info pointer for easy access.
23783 */
23784 #ifdef CONFIG_SMP
23785 - GET_THREAD_INFO(%eax)
23786 - movl TI_cpu(%eax), %eax
23787 - movl __per_cpu_offset(,%eax,4), %eax
23788 - mov per_cpu__xen_vcpu(%eax), %eax
23789 + push %fs
23790 + mov $(__KERNEL_PERCPU), %eax
23791 + mov %eax, %fs
23792 + mov PER_CPU_VAR(xen_vcpu), %eax
23793 + pop %fs
23794 #else
23795 movl per_cpu__xen_vcpu, %eax
23796 #endif
23797 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
23798 --- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23799 +++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23800 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23801 #ifdef CONFIG_X86_32
23802 mov %esi,xen_start_info
23803 mov $init_thread_union+THREAD_SIZE,%esp
23804 +#ifdef CONFIG_SMP
23805 + movl $cpu_gdt_table,%edi
23806 + movl $__per_cpu_load,%eax
23807 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23808 + rorl $16,%eax
23809 + movb %al,__KERNEL_PERCPU + 4(%edi)
23810 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23811 + movl $__per_cpu_end - 1,%eax
23812 + subl $__per_cpu_start,%eax
23813 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23814 +#endif
23815 #else
23816 mov %rsi,xen_start_info
23817 mov $init_thread_union+THREAD_SIZE,%rsp
23818 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
23819 --- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23820 +++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23821 @@ -10,8 +10,6 @@
23822 extern const char xen_hypervisor_callback[];
23823 extern const char xen_failsafe_callback[];
23824
23825 -extern void *xen_initial_gdt;
23826 -
23827 struct trap_info;
23828 void xen_copy_trap_info(struct trap_info *traps);
23829
23830 diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
23831 --- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23832 +++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23833 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23834 NULL,
23835 };
23836
23837 -static struct sysfs_ops integrity_ops = {
23838 +static const struct sysfs_ops integrity_ops = {
23839 .show = &integrity_attr_show,
23840 .store = &integrity_attr_store,
23841 };
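
[editor's note] A large share of this patch is mechanical constification of operations tables (sysfs_ops here, and ata_port_operations, platform_suspend_ops, backlight_ops and others further down). Purely as an illustration of what that buys, here is a minimal sketch using hypothetical example_* names: once the table is declared const it lands in read-only data, so a stray or malicious runtime write to its function pointers faults instead of silently redirecting every later sysfs access.

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_attr_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return 0;                       /* would format the attribute into buf */
}

static ssize_t example_attr_store(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buf, size_t count)
{
        return count;                   /* would parse buf and apply the new value */
}

/* const: the ops table is emitted into .rodata instead of writable data. */
static const struct sysfs_ops example_sysfs_ops = {
        .show  = example_attr_show,
        .store = example_attr_store,
};
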
23842 diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
23843 --- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23844 +++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23845 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23846 }
23847 EXPORT_SYMBOL(blk_iopoll_complete);
23848
23849 -static void blk_iopoll_softirq(struct softirq_action *h)
23850 +static void blk_iopoll_softirq(void)
23851 {
23852 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23853 int rearm = 0, budget = blk_iopoll_budget;
23854 diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
23855 --- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23856 +++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23857 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23858 * direct dma. else, set up kernel bounce buffers
23859 */
23860 uaddr = (unsigned long) ubuf;
23861 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23862 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23863 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23864 else
23865 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23866 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23867 for (i = 0; i < iov_count; i++) {
23868 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23869
23870 + if (!iov[i].iov_len)
23871 + return -EINVAL;
23872 +
23873 if (uaddr & queue_dma_alignment(q)) {
23874 unaligned = 1;
23875 break;
23876 }
23877 - if (!iov[i].iov_len)
23878 - return -EINVAL;
23879 }
23880
23881 if (unaligned || (q->dma_pad_mask & len) || map_data)
23882 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23883 if (!len || !kbuf)
23884 return -EINVAL;
23885
23886 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23887 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23888 if (do_copy)
23889 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23890 else
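
[editor's note] The reordering in blk_rq_map_user_iov above matters because the old loop checked alignment first: an unaligned zero-length segment hit the break and took the bounce-buffer path instead of being rejected with -EINVAL. A hedged sketch of the resulting validation order, with hypothetical names:

#include <linux/errno.h>
#include <linux/uio.h>

static int example_check_iov(const struct iovec *iov, int count,
                             unsigned long align_mask, int *unaligned)
{
        int i;

        *unaligned = 0;
        for (i = 0; i < count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (!iov[i].iov_len)
                        return -EINVAL;         /* empty segment: hard error, never bounced */

                if (uaddr & align_mask) {
                        *unaligned = 1;         /* misaligned: fall back to copying */
                        break;
                }
        }
        return 0;
}
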
23891 diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
23892 --- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23893 +++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23894 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23895 * Softirq action handler - move entries to local list and loop over them
23896 * while passing them to the queue registered handler.
23897 */
23898 -static void blk_done_softirq(struct softirq_action *h)
23899 +static void blk_done_softirq(void)
23900 {
23901 struct list_head *cpu_list, local_list;
23902
23903 diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
23904 --- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23905 +++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23906 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23907 kmem_cache_free(blk_requestq_cachep, q);
23908 }
23909
23910 -static struct sysfs_ops queue_sysfs_ops = {
23911 +static const struct sysfs_ops queue_sysfs_ops = {
23912 .show = queue_attr_show,
23913 .store = queue_attr_store,
23914 };
23915 diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
23916 --- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23917 +++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23918 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23919 struct sg_io_v4 *hdr, struct bsg_device *bd,
23920 fmode_t has_write_perm)
23921 {
23922 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23923 + unsigned char *cmdptr;
23924 +
23925 if (hdr->request_len > BLK_MAX_CDB) {
23926 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23927 if (!rq->cmd)
23928 return -ENOMEM;
23929 - }
23930 + cmdptr = rq->cmd;
23931 + } else
23932 + cmdptr = tmpcmd;
23933
23934 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23935 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23936 hdr->request_len))
23937 return -EFAULT;
23938
23939 + if (cmdptr != rq->cmd)
23940 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23941 +
23942 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23943 if (blk_verify_command(rq->cmd, has_write_perm))
23944 return -EPERM;
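
[editor's note] The temporary buffer added here (and again in block/scsi_ioctl.c below) keeps copy_from_user() from writing straight into the short command array embedded inside the request structure: user data is staged either in the separately allocated command buffer or in a local array, then memcpy'd into place. A hedged sketch of the idiom follows, with a hypothetical example_req layout standing in for struct request:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define EXAMPLE_CMD_LEN 16

struct example_req {
        unsigned char *cmd;                     /* points at __cmd or a kmalloc'd buffer */
        unsigned char __cmd[EXAMPLE_CMD_LEN];   /* short inline command area */
};

static int example_fill_cmd(struct example_req *rq,
                            const void __user *ucmd, unsigned int len)
{
        unsigned char tmpcmd[EXAMPLE_CMD_LEN];
        unsigned char *cmdptr;

        if (len > sizeof(tmpcmd))
                return -EINVAL;

        /*
         * Copy from userspace either into the dedicated allocation or into
         * a local staging array -- never directly into the small array
         * embedded in the larger request structure.
         */
        if (rq->cmd != rq->__cmd)
                cmdptr = rq->cmd;
        else
                cmdptr = tmpcmd;

        if (copy_from_user(cmdptr, ucmd, len))
                return -EFAULT;

        if (cmdptr != rq->cmd)
                memcpy(rq->cmd, cmdptr, len);

        return 0;
}
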
23945 diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
23946 --- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23947 +++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23948 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23949 return error;
23950 }
23951
23952 -static struct sysfs_ops elv_sysfs_ops = {
23953 +static const struct sysfs_ops elv_sysfs_ops = {
23954 .show = elv_attr_show,
23955 .store = elv_attr_store,
23956 };
23957 diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
23958 --- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23959 +++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23960 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23961 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23962 struct sg_io_hdr *hdr, fmode_t mode)
23963 {
23964 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23965 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23966 + unsigned char *cmdptr;
23967 +
23968 + if (rq->cmd != rq->__cmd)
23969 + cmdptr = rq->cmd;
23970 + else
23971 + cmdptr = tmpcmd;
23972 +
23973 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23974 return -EFAULT;
23975 +
23976 + if (cmdptr != rq->cmd)
23977 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23978 +
23979 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23980 return -EPERM;
23981
23982 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23983 int err;
23984 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23985 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23986 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23987 + unsigned char *cmdptr;
23988
23989 if (!sic)
23990 return -EINVAL;
23991 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23992 */
23993 err = -EFAULT;
23994 rq->cmd_len = cmdlen;
23995 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23996 +
23997 + if (rq->cmd != rq->__cmd)
23998 + cmdptr = rq->cmd;
23999 + else
24000 + cmdptr = tmpcmd;
24001 +
24002 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24003 goto error;
24004
24005 + if (rq->cmd != cmdptr)
24006 + memcpy(rq->cmd, cmdptr, cmdlen);
24007 +
24008 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24009 goto error;
24010
24011 diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24012 --- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24013 +++ linux-2.6.32.45/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
24014 @@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
24015 struct cryptd_queue *queue;
24016
24017 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
24018 - rctx->complete = req->base.complete;
24019 + *(void **)&rctx->complete = req->base.complete;
24020 req->base.complete = complete;
24021
24022 return cryptd_enqueue_request(queue, &req->base);
24023 diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24024 --- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24025 +++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24026 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24027 for (i = 0; i < 7; ++i)
24028 gf128mul_x_lle(&p[i + 1], &p[i]);
24029
24030 - memset(r, 0, sizeof(r));
24031 + memset(r, 0, sizeof(*r));
24032 for (i = 0;;) {
24033 u8 ch = ((u8 *)b)[15 - i];
24034
24035 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24036 for (i = 0; i < 7; ++i)
24037 gf128mul_x_bbe(&p[i + 1], &p[i]);
24038
24039 - memset(r, 0, sizeof(r));
24040 + memset(r, 0, sizeof(*r));
24041 for (i = 0;;) {
24042 u8 ch = ((u8 *)b)[i];
24043
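
[editor's note] The two-character change above fixes a classic pitfall: r is a pointer to a 16-byte be128 block, so sizeof(r) is only the pointer size and most of the result stayed uninitialized. A standalone illustration (the type name is made up, not the kernel's be128):

#include <stdio.h>
#include <string.h>

struct be128_example { unsigned long long a, b; };      /* 16-byte block, like be128 */

static void clear_block_buggy(struct be128_example *r)
{
        memset(r, 0, sizeof(r));        /* BUG: clears only sizeof(pointer) bytes */
}

static void clear_block_fixed(struct be128_example *r)
{
        memset(r, 0, sizeof(*r));       /* OK: clears the whole 16-byte block */
}

int main(void)
{
        struct be128_example r;

        clear_block_buggy(&r);
        clear_block_fixed(&r);
        printf("sizeof(&r) = %zu, sizeof(r) = %zu\n", sizeof(&r), sizeof(r));
        return 0;
}
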
24044 diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24045 --- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24046 +++ linux-2.6.32.45/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
24047 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
24048 u32 r0,r1,r2,r3,r4;
24049 int i;
24050
24051 + pax_track_stack();
24052 +
24053 /* Copy key, add padding */
24054
24055 for (i = 0; i < keylen; ++i)
24056 diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24057 --- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24058 +++ linux-2.6.32.45/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
24059 @@ -1,13 +1,16 @@
24060 *.a
24061 *.aux
24062 *.bin
24063 +*.cis
24064 *.cpio
24065 *.csp
24066 +*.dbg
24067 *.dsp
24068 *.dvi
24069 *.elf
24070 *.eps
24071 *.fw
24072 +*.gcno
24073 *.gen.S
24074 *.gif
24075 *.grep
24076 @@ -38,8 +41,10 @@
24077 *.tab.h
24078 *.tex
24079 *.ver
24080 +*.vim
24081 *.xml
24082 *_MODULES
24083 +*_reg_safe.h
24084 *_vga16.c
24085 *~
24086 *.9
24087 @@ -49,11 +54,16 @@
24088 53c700_d.h
24089 CVS
24090 ChangeSet
24091 +GPATH
24092 +GRTAGS
24093 +GSYMS
24094 +GTAGS
24095 Image
24096 Kerntypes
24097 Module.markers
24098 Module.symvers
24099 PENDING
24100 +PERF*
24101 SCCS
24102 System.map*
24103 TAGS
24104 @@ -76,7 +86,11 @@ btfixupprep
24105 build
24106 bvmlinux
24107 bzImage*
24108 +capability_names.h
24109 +capflags.c
24110 classlist.h*
24111 +clut_vga16.c
24112 +common-cmds.h
24113 comp*.log
24114 compile.h*
24115 conf
24116 @@ -103,13 +117,14 @@ gen_crc32table
24117 gen_init_cpio
24118 genksyms
24119 *_gray256.c
24120 +hash
24121 ihex2fw
24122 ikconfig.h*
24123 initramfs_data.cpio
24124 +initramfs_data.cpio.bz2
24125 initramfs_data.cpio.gz
24126 initramfs_list
24127 kallsyms
24128 -kconfig
24129 keywords.c
24130 ksym.c*
24131 ksym.h*
24132 @@ -133,7 +148,9 @@ mkboot
24133 mkbugboot
24134 mkcpustr
24135 mkdep
24136 +mkpiggy
24137 mkprep
24138 +mkregtable
24139 mktables
24140 mktree
24141 modpost
24142 @@ -149,6 +166,7 @@ patches*
24143 pca200e.bin
24144 pca200e_ecd.bin2
24145 piggy.gz
24146 +piggy.S
24147 piggyback
24148 pnmtologo
24149 ppc_defs.h*
24150 @@ -157,12 +175,15 @@ qconf
24151 raid6altivec*.c
24152 raid6int*.c
24153 raid6tables.c
24154 +regdb.c
24155 relocs
24156 +rlim_names.h
24157 series
24158 setup
24159 setup.bin
24160 setup.elf
24161 sImage
24162 +slabinfo
24163 sm_tbl*
24164 split-include
24165 syscalltab.h
24166 @@ -186,14 +207,20 @@ version.h*
24167 vmlinux
24168 vmlinux-*
24169 vmlinux.aout
24170 +vmlinux.bin.all
24171 +vmlinux.bin.bz2
24172 vmlinux.lds
24173 +vmlinux.relocs
24174 +voffset.h
24175 vsyscall.lds
24176 vsyscall_32.lds
24177 wanxlfw.inc
24178 uImage
24179 unifdef
24180 +utsrelease.h
24181 wakeup.bin
24182 wakeup.elf
24183 wakeup.lds
24184 zImage*
24185 zconf.hash.c
24186 +zoffset.h
24187 diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24188 --- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24189 +++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24190 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24191 the specified number of seconds. This is to be used if
24192 your oopses keep scrolling off the screen.
24193
24194 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24195 + virtualization environments that don't cope well with the
24196 + expand down segment used by UDEREF on X86-32 or the frequent
24197 + page table updates on X86-64.
24198 +
24199 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24200 +
24201 pcbit= [HW,ISDN]
24202
24203 pcd. [PARIDE]
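
[editor's note] For reference only: boot parameters like the two documented above are normally consumed by an early __setup() handler. The sketch below just illustrates that wiring with hypothetical names; it is not the handler this patch actually installs.

#include <linux/init.h>

static int example_softmode;

/* Called early with the text that follows "pax_softmode=" on the command line. */
static int __init setup_example_softmode(char *str)
{
        if (str && *str == '1')
                example_softmode = 1;   /* "pax_softmode=1" enables soft mode */
        return 1;                       /* parameter handled */
}
__setup("pax_softmode=", setup_example_softmode);
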
24204 diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24205 --- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24206 +++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24207 @@ -30,7 +30,7 @@
24208 #include <acpi/acpi_bus.h>
24209 #include <acpi/acpi_drivers.h>
24210
24211 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24212 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24213 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24214 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24215 static DEFINE_MUTEX(isolated_cpus_lock);
24216 diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24217 --- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24218 +++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24219 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24220 }
24221
24222 static struct battery_file {
24223 - struct file_operations ops;
24224 + const struct file_operations ops;
24225 mode_t mode;
24226 const char *name;
24227 } acpi_battery_file[] = {
24228 diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24229 --- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24230 +++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24231 @@ -77,7 +77,7 @@ struct dock_dependent_device {
24232 struct list_head list;
24233 struct list_head hotplug_list;
24234 acpi_handle handle;
24235 - struct acpi_dock_ops *ops;
24236 + const struct acpi_dock_ops *ops;
24237 void *context;
24238 };
24239
24240 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24241 * the dock driver after _DCK is executed.
24242 */
24243 int
24244 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24245 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24246 void *context)
24247 {
24248 struct dock_dependent_device *dd;
24249 diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24250 --- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24251 +++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24252 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24253 void __iomem *virt_addr;
24254
24255 virt_addr = ioremap(phys_addr, width);
24256 + if (!virt_addr)
24257 + return AE_NO_MEMORY;
24258 if (!value)
24259 value = &dummy;
24260
24261 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24262 void __iomem *virt_addr;
24263
24264 virt_addr = ioremap(phys_addr, width);
24265 + if (!virt_addr)
24266 + return AE_NO_MEMORY;
24267
24268 switch (width) {
24269 case 8:
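
[editor's note] Both hunks above add the same guard: ioremap() can fail and return NULL, and the old code went straight on to use the mapping. A minimal sketch of the pattern, with a made-up function name and register width:

#include <linux/errno.h>
#include <linux/io.h>

static int example_read_reg(phys_addr_t phys, u32 *value)
{
        void __iomem *base = ioremap(phys, 4);

        if (!base)                      /* ioremap() may fail; a NULL mapping */
                return -ENOMEM;         /* must never be dereferenced below   */

        *value = readl(base);
        iounmap(base);
        return 0;
}
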
24270 diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24271 --- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24272 +++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24273 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24274 return res;
24275
24276 temp /= 1000;
24277 - if (temp < 0)
24278 - return -EINVAL;
24279
24280 mutex_lock(&resource->lock);
24281 resource->trip[attr->index - 7] = temp;
24282 diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24283 --- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24284 +++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24285 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24286 size_t count, loff_t * ppos)
24287 {
24288 struct list_head *node, *next;
24289 - char strbuf[5];
24290 - char str[5] = "";
24291 - unsigned int len = count;
24292 + char strbuf[5] = {0};
24293 struct acpi_device *found_dev = NULL;
24294
24295 - if (len > 4)
24296 - len = 4;
24297 - if (len < 0)
24298 - return -EFAULT;
24299 + if (count > 4)
24300 + count = 4;
24301
24302 - if (copy_from_user(strbuf, buffer, len))
24303 + if (copy_from_user(strbuf, buffer, count))
24304 return -EFAULT;
24305 - strbuf[len] = '\0';
24306 - sscanf(strbuf, "%s", str);
24307 + strbuf[count] = '\0';
24308
24309 mutex_lock(&acpi_device_lock);
24310 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24311 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24312 if (!dev->wakeup.flags.valid)
24313 continue;
24314
24315 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24316 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24317 dev->wakeup.state.enabled =
24318 dev->wakeup.state.enabled ? 0 : 1;
24319 found_dev = dev;
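
[editor's note] The rewrite above drops a check that could never fire (len was unsigned, so "len < 0" was dead code), clamps the copy to the 4-byte payload, and guarantees NUL termination before the buffer is handed to strncmp(). A hedged sketch of the resulting shape, with a hypothetical function name:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static ssize_t example_proc_write(const char __user *buffer, size_t count)
{
        char strbuf[5] = {0};           /* 4 payload bytes + NUL */

        if (count > 4)
                count = 4;              /* size_t is unsigned; no "< 0" check needed */

        if (copy_from_user(strbuf, buffer, count))
                return -EFAULT;
        strbuf[count] = '\0';           /* always NUL-terminated, count <= 4 */

        /* ... match strbuf against the known device ids ... */
        return count;
}
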
24320 diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24321 --- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24322 +++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24323 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24324 return 0;
24325 }
24326
24327 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24328 + BUG_ON(pr->id >= nr_cpu_ids);
24329
24330 /*
24331 * Buggy BIOS check
24332 diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24333 --- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24334 +++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24335 @@ -17,7 +17,7 @@
24336
24337 #define PREFIX "ACPI: "
24338
24339 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24340 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24341 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24342
24343 struct acpi_smb_hc {
24344 diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24345 --- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24346 +++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24347 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24348 }
24349 }
24350
24351 -static struct platform_suspend_ops acpi_suspend_ops = {
24352 +static const struct platform_suspend_ops acpi_suspend_ops = {
24353 .valid = acpi_suspend_state_valid,
24354 .begin = acpi_suspend_begin,
24355 .prepare_late = acpi_pm_prepare,
24356 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24357 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24358 * been requested.
24359 */
24360 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24361 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24362 .valid = acpi_suspend_state_valid,
24363 .begin = acpi_suspend_begin_old,
24364 .prepare_late = acpi_pm_disable_gpes,
24365 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24366 acpi_enable_all_runtime_gpes();
24367 }
24368
24369 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24370 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24371 .begin = acpi_hibernation_begin,
24372 .end = acpi_pm_end,
24373 .pre_snapshot = acpi_hibernation_pre_snapshot,
24374 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24375 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24376 * been requested.
24377 */
24378 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24379 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24380 .begin = acpi_hibernation_begin_old,
24381 .end = acpi_pm_end,
24382 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24383 diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24384 --- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24385 +++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24386 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24387 vd->brightness->levels[request_level]);
24388 }
24389
24390 -static struct backlight_ops acpi_backlight_ops = {
24391 +static const struct backlight_ops acpi_backlight_ops = {
24392 .get_brightness = acpi_video_get_brightness,
24393 .update_status = acpi_video_set_brightness,
24394 };
24395 diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24396 --- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24397 +++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24398 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24399 .sdev_attrs = ahci_sdev_attrs,
24400 };
24401
24402 -static struct ata_port_operations ahci_ops = {
24403 +static const struct ata_port_operations ahci_ops = {
24404 .inherits = &sata_pmp_port_ops,
24405
24406 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24407 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24408 .port_stop = ahci_port_stop,
24409 };
24410
24411 -static struct ata_port_operations ahci_vt8251_ops = {
24412 +static const struct ata_port_operations ahci_vt8251_ops = {
24413 .inherits = &ahci_ops,
24414 .hardreset = ahci_vt8251_hardreset,
24415 };
24416
24417 -static struct ata_port_operations ahci_p5wdh_ops = {
24418 +static const struct ata_port_operations ahci_p5wdh_ops = {
24419 .inherits = &ahci_ops,
24420 .hardreset = ahci_p5wdh_hardreset,
24421 };
24422
24423 -static struct ata_port_operations ahci_sb600_ops = {
24424 +static const struct ata_port_operations ahci_sb600_ops = {
24425 .inherits = &ahci_ops,
24426 .softreset = ahci_sb600_softreset,
24427 .pmp_softreset = ahci_sb600_softreset,
24428 diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24429 --- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24430 +++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24431 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24432 ATA_BMDMA_SHT(DRV_NAME),
24433 };
24434
24435 -static struct ata_port_operations generic_port_ops = {
24436 +static const struct ata_port_operations generic_port_ops = {
24437 .inherits = &ata_bmdma_port_ops,
24438 .cable_detect = ata_cable_unknown,
24439 .set_mode = generic_set_mode,
24440 diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24441 --- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24442 +++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24443 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24444 ATA_BMDMA_SHT(DRV_NAME),
24445 };
24446
24447 -static struct ata_port_operations piix_pata_ops = {
24448 +static const struct ata_port_operations piix_pata_ops = {
24449 .inherits = &ata_bmdma32_port_ops,
24450 .cable_detect = ata_cable_40wire,
24451 .set_piomode = piix_set_piomode,
24452 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24453 .prereset = piix_pata_prereset,
24454 };
24455
24456 -static struct ata_port_operations piix_vmw_ops = {
24457 +static const struct ata_port_operations piix_vmw_ops = {
24458 .inherits = &piix_pata_ops,
24459 .bmdma_status = piix_vmw_bmdma_status,
24460 };
24461
24462 -static struct ata_port_operations ich_pata_ops = {
24463 +static const struct ata_port_operations ich_pata_ops = {
24464 .inherits = &piix_pata_ops,
24465 .cable_detect = ich_pata_cable_detect,
24466 .set_dmamode = ich_set_dmamode,
24467 };
24468
24469 -static struct ata_port_operations piix_sata_ops = {
24470 +static const struct ata_port_operations piix_sata_ops = {
24471 .inherits = &ata_bmdma_port_ops,
24472 };
24473
24474 -static struct ata_port_operations piix_sidpr_sata_ops = {
24475 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24476 .inherits = &piix_sata_ops,
24477 .hardreset = sata_std_hardreset,
24478 .scr_read = piix_sidpr_scr_read,
24479 diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24480 --- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24481 +++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24482 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24483 ata_acpi_uevent(dev->link->ap, dev, event);
24484 }
24485
24486 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24487 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24488 .handler = ata_acpi_dev_notify_dock,
24489 .uevent = ata_acpi_dev_uevent,
24490 };
24491
24492 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24493 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24494 .handler = ata_acpi_ap_notify_dock,
24495 .uevent = ata_acpi_ap_uevent,
24496 };
24497 diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24498 --- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24499 +++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24500 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24501 struct ata_port *ap;
24502 unsigned int tag;
24503
24504 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24505 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24506 ap = qc->ap;
24507
24508 qc->flags = 0;
24509 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24510 struct ata_port *ap;
24511 struct ata_link *link;
24512
24513 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24514 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24515 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24516 ap = qc->ap;
24517 link = qc->dev->link;
24518 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24519 * LOCKING:
24520 * None.
24521 */
24522 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24523 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24524 {
24525 static DEFINE_SPINLOCK(lock);
24526 const struct ata_port_operations *cur;
24527 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24528 return;
24529
24530 spin_lock(&lock);
24531 + pax_open_kernel();
24532
24533 for (cur = ops->inherits; cur; cur = cur->inherits) {
24534 void **inherit = (void **)cur;
24535 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24536 if (IS_ERR(*pp))
24537 *pp = NULL;
24538
24539 - ops->inherits = NULL;
24540 + *(struct ata_port_operations **)&ops->inherits = NULL;
24541
24542 + pax_close_kernel();
24543 spin_unlock(&lock);
24544 }
24545
24546 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24547 */
24548 /* KILLME - the only user left is ipr */
24549 void ata_host_init(struct ata_host *host, struct device *dev,
24550 - unsigned long flags, struct ata_port_operations *ops)
24551 + unsigned long flags, const struct ata_port_operations *ops)
24552 {
24553 spin_lock_init(&host->lock);
24554 host->dev = dev;
24555 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24556 /* truly dummy */
24557 }
24558
24559 -struct ata_port_operations ata_dummy_port_ops = {
24560 +const struct ata_port_operations ata_dummy_port_ops = {
24561 .qc_prep = ata_noop_qc_prep,
24562 .qc_issue = ata_dummy_qc_issue,
24563 .error_handler = ata_dummy_error_handler,
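
[editor's note] With ata_port_operations now const, the one legitimate runtime write left in libata (clearing ->inherits after finalization) is funneled through the pax_open_kernel()/pax_close_kernel() pair this patch introduces, plus an explicit cast that makes the const violation visible at the call site. The following is only a sketch of that idiom with a hypothetical example_ops type, not libata code, and it assumes the pax_open_kernel()/pax_close_kernel() helpers provided elsewhere in this patch.

struct example_ops {
        const struct example_ops *inherits;
        void (*handler)(void);
};

static void example_finalize(const struct example_ops *ops)
{
        pax_open_kernel();                              /* briefly permit the .rodata write */
        *(struct example_ops **)&ops->inherits = NULL;  /* single, deliberate const cast    */
        pax_close_kernel();
}
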
24564 diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24565 --- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24566 +++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24567 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24568 {
24569 struct ata_link *link;
24570
24571 + pax_track_stack();
24572 +
24573 ata_for_each_link(link, ap, HOST_FIRST)
24574 ata_eh_link_report(link);
24575 }
24576 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24577 */
24578 void ata_std_error_handler(struct ata_port *ap)
24579 {
24580 - struct ata_port_operations *ops = ap->ops;
24581 + const struct ata_port_operations *ops = ap->ops;
24582 ata_reset_fn_t hardreset = ops->hardreset;
24583
24584 /* ignore built-in hardreset if SCR access is not available */
24585 diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24586 --- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24587 +++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24588 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24589 */
24590 static int sata_pmp_eh_recover(struct ata_port *ap)
24591 {
24592 - struct ata_port_operations *ops = ap->ops;
24593 + const struct ata_port_operations *ops = ap->ops;
24594 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24595 struct ata_link *pmp_link = &ap->link;
24596 struct ata_device *pmp_dev = pmp_link->device;
24597 diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24598 --- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24599 +++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24600 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24601 ATA_BMDMA_SHT(DRV_NAME),
24602 };
24603
24604 -static struct ata_port_operations pacpi_ops = {
24605 +static const struct ata_port_operations pacpi_ops = {
24606 .inherits = &ata_bmdma_port_ops,
24607 .qc_issue = pacpi_qc_issue,
24608 .cable_detect = pacpi_cable_detect,
24609 diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24610 --- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24611 +++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24612 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24613 * Port operations for PIO only ALi
24614 */
24615
24616 -static struct ata_port_operations ali_early_port_ops = {
24617 +static const struct ata_port_operations ali_early_port_ops = {
24618 .inherits = &ata_sff_port_ops,
24619 .cable_detect = ata_cable_40wire,
24620 .set_piomode = ali_set_piomode,
24621 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24622 * Port operations for DMA capable ALi without cable
24623 * detect
24624 */
24625 -static struct ata_port_operations ali_20_port_ops = {
24626 +static const struct ata_port_operations ali_20_port_ops = {
24627 .inherits = &ali_dma_base_ops,
24628 .cable_detect = ata_cable_40wire,
24629 .mode_filter = ali_20_filter,
24630 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24631 /*
24632 * Port operations for DMA capable ALi with cable detect
24633 */
24634 -static struct ata_port_operations ali_c2_port_ops = {
24635 +static const struct ata_port_operations ali_c2_port_ops = {
24636 .inherits = &ali_dma_base_ops,
24637 .check_atapi_dma = ali_check_atapi_dma,
24638 .cable_detect = ali_c2_cable_detect,
24639 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24640 /*
24641 * Port operations for DMA capable ALi with cable detect
24642 */
24643 -static struct ata_port_operations ali_c4_port_ops = {
24644 +static const struct ata_port_operations ali_c4_port_ops = {
24645 .inherits = &ali_dma_base_ops,
24646 .check_atapi_dma = ali_check_atapi_dma,
24647 .cable_detect = ali_c2_cable_detect,
24648 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24649 /*
24650 * Port operations for DMA capable ALi with cable detect and LBA48
24651 */
24652 -static struct ata_port_operations ali_c5_port_ops = {
24653 +static const struct ata_port_operations ali_c5_port_ops = {
24654 .inherits = &ali_dma_base_ops,
24655 .check_atapi_dma = ali_check_atapi_dma,
24656 .dev_config = ali_warn_atapi_dma,
24657 diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24658 --- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24659 +++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24660 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24661 .prereset = amd_pre_reset,
24662 };
24663
24664 -static struct ata_port_operations amd33_port_ops = {
24665 +static const struct ata_port_operations amd33_port_ops = {
24666 .inherits = &amd_base_port_ops,
24667 .cable_detect = ata_cable_40wire,
24668 .set_piomode = amd33_set_piomode,
24669 .set_dmamode = amd33_set_dmamode,
24670 };
24671
24672 -static struct ata_port_operations amd66_port_ops = {
24673 +static const struct ata_port_operations amd66_port_ops = {
24674 .inherits = &amd_base_port_ops,
24675 .cable_detect = ata_cable_unknown,
24676 .set_piomode = amd66_set_piomode,
24677 .set_dmamode = amd66_set_dmamode,
24678 };
24679
24680 -static struct ata_port_operations amd100_port_ops = {
24681 +static const struct ata_port_operations amd100_port_ops = {
24682 .inherits = &amd_base_port_ops,
24683 .cable_detect = ata_cable_unknown,
24684 .set_piomode = amd100_set_piomode,
24685 .set_dmamode = amd100_set_dmamode,
24686 };
24687
24688 -static struct ata_port_operations amd133_port_ops = {
24689 +static const struct ata_port_operations amd133_port_ops = {
24690 .inherits = &amd_base_port_ops,
24691 .cable_detect = amd_cable_detect,
24692 .set_piomode = amd133_set_piomode,
24693 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24694 .host_stop = nv_host_stop,
24695 };
24696
24697 -static struct ata_port_operations nv100_port_ops = {
24698 +static const struct ata_port_operations nv100_port_ops = {
24699 .inherits = &nv_base_port_ops,
24700 .set_piomode = nv100_set_piomode,
24701 .set_dmamode = nv100_set_dmamode,
24702 };
24703
24704 -static struct ata_port_operations nv133_port_ops = {
24705 +static const struct ata_port_operations nv133_port_ops = {
24706 .inherits = &nv_base_port_ops,
24707 .set_piomode = nv133_set_piomode,
24708 .set_dmamode = nv133_set_dmamode,
24709 diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24710 --- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24711 +++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24712 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24713 ATA_BMDMA_SHT(DRV_NAME),
24714 };
24715
24716 -static struct ata_port_operations artop6210_ops = {
24717 +static const struct ata_port_operations artop6210_ops = {
24718 .inherits = &ata_bmdma_port_ops,
24719 .cable_detect = ata_cable_40wire,
24720 .set_piomode = artop6210_set_piomode,
24721 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24722 .qc_defer = artop6210_qc_defer,
24723 };
24724
24725 -static struct ata_port_operations artop6260_ops = {
24726 +static const struct ata_port_operations artop6260_ops = {
24727 .inherits = &ata_bmdma_port_ops,
24728 .cable_detect = artop6260_cable_detect,
24729 .set_piomode = artop6260_set_piomode,
24730 diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24731 --- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24732 +++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24733 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24734 ATA_PIO_SHT(DRV_NAME),
24735 };
24736
24737 -static struct ata_port_operations at32_port_ops = {
24738 +static const struct ata_port_operations at32_port_ops = {
24739 .inherits = &ata_sff_port_ops,
24740 .cable_detect = ata_cable_40wire,
24741 .set_piomode = pata_at32_set_piomode,
24742 diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24743 --- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24744 +++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24745 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24746 ATA_PIO_SHT(DRV_NAME),
24747 };
24748
24749 -static struct ata_port_operations pata_at91_port_ops = {
24750 +static const struct ata_port_operations pata_at91_port_ops = {
24751 .inherits = &ata_sff_port_ops,
24752
24753 .sff_data_xfer = pata_at91_data_xfer_noirq,
24754 diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
24755 --- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24756 +++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24757 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24758 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24759 };
24760
24761 -static struct ata_port_operations atiixp_port_ops = {
24762 +static const struct ata_port_operations atiixp_port_ops = {
24763 .inherits = &ata_bmdma_port_ops,
24764
24765 .qc_prep = ata_sff_dumb_qc_prep,
24766 diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
24767 --- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24768 +++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24769 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24770 ATA_BMDMA_SHT(DRV_NAME),
24771 };
24772
24773 -static struct ata_port_operations atp867x_ops = {
24774 +static const struct ata_port_operations atp867x_ops = {
24775 .inherits = &ata_bmdma_port_ops,
24776 .cable_detect = atp867x_cable_detect,
24777 .set_piomode = atp867x_set_piomode,
24778 diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
24779 --- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24780 +++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24781 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24782 .dma_boundary = ATA_DMA_BOUNDARY,
24783 };
24784
24785 -static struct ata_port_operations bfin_pata_ops = {
24786 +static const struct ata_port_operations bfin_pata_ops = {
24787 .inherits = &ata_sff_port_ops,
24788
24789 .set_piomode = bfin_set_piomode,
24790 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
24791 --- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24792 +++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24793 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24794 ATA_BMDMA_SHT(DRV_NAME),
24795 };
24796
24797 -static struct ata_port_operations cmd640_port_ops = {
24798 +static const struct ata_port_operations cmd640_port_ops = {
24799 .inherits = &ata_bmdma_port_ops,
24800 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24801 .sff_data_xfer = ata_sff_data_xfer_noirq,
24802 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
24803 --- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24804 +++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24805 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24806 .set_dmamode = cmd64x_set_dmamode,
24807 };
24808
24809 -static struct ata_port_operations cmd64x_port_ops = {
24810 +static const struct ata_port_operations cmd64x_port_ops = {
24811 .inherits = &cmd64x_base_ops,
24812 .cable_detect = ata_cable_40wire,
24813 };
24814
24815 -static struct ata_port_operations cmd646r1_port_ops = {
24816 +static const struct ata_port_operations cmd646r1_port_ops = {
24817 .inherits = &cmd64x_base_ops,
24818 .bmdma_stop = cmd646r1_bmdma_stop,
24819 .cable_detect = ata_cable_40wire,
24820 };
24821
24822 -static struct ata_port_operations cmd648_port_ops = {
24823 +static const struct ata_port_operations cmd648_port_ops = {
24824 .inherits = &cmd64x_base_ops,
24825 .bmdma_stop = cmd648_bmdma_stop,
24826 .cable_detect = cmd648_cable_detect,
24827 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
24828 --- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24829 +++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24830 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24831 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24832 };
24833
24834 -static struct ata_port_operations cs5520_port_ops = {
24835 +static const struct ata_port_operations cs5520_port_ops = {
24836 .inherits = &ata_bmdma_port_ops,
24837 .qc_prep = ata_sff_dumb_qc_prep,
24838 .cable_detect = ata_cable_40wire,
24839 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
24840 --- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24841 +++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24842 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24843 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24844 };
24845
24846 -static struct ata_port_operations cs5530_port_ops = {
24847 +static const struct ata_port_operations cs5530_port_ops = {
24848 .inherits = &ata_bmdma_port_ops,
24849
24850 .qc_prep = ata_sff_dumb_qc_prep,
24851 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
24852 --- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24853 +++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24854 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24855 ATA_BMDMA_SHT(DRV_NAME),
24856 };
24857
24858 -static struct ata_port_operations cs5535_port_ops = {
24859 +static const struct ata_port_operations cs5535_port_ops = {
24860 .inherits = &ata_bmdma_port_ops,
24861 .cable_detect = cs5535_cable_detect,
24862 .set_piomode = cs5535_set_piomode,
24863 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
24864 --- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24865 +++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24866 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24867 ATA_BMDMA_SHT(DRV_NAME),
24868 };
24869
24870 -static struct ata_port_operations cs5536_port_ops = {
24871 +static const struct ata_port_operations cs5536_port_ops = {
24872 .inherits = &ata_bmdma_port_ops,
24873 .cable_detect = cs5536_cable_detect,
24874 .set_piomode = cs5536_set_piomode,
24875 diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
24876 --- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24877 +++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24878 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24879 ATA_BMDMA_SHT(DRV_NAME),
24880 };
24881
24882 -static struct ata_port_operations cy82c693_port_ops = {
24883 +static const struct ata_port_operations cy82c693_port_ops = {
24884 .inherits = &ata_bmdma_port_ops,
24885 .cable_detect = ata_cable_40wire,
24886 .set_piomode = cy82c693_set_piomode,
24887 diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
24888 --- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24889 +++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24890 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24891 ATA_BMDMA_SHT(DRV_NAME),
24892 };
24893
24894 -static struct ata_port_operations efar_ops = {
24895 +static const struct ata_port_operations efar_ops = {
24896 .inherits = &ata_bmdma_port_ops,
24897 .cable_detect = efar_cable_detect,
24898 .set_piomode = efar_set_piomode,
24899 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
24900 --- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24901 +++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24902 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24903 * Configuration for HPT366/68
24904 */
24905
24906 -static struct ata_port_operations hpt366_port_ops = {
24907 +static const struct ata_port_operations hpt366_port_ops = {
24908 .inherits = &ata_bmdma_port_ops,
24909 .cable_detect = hpt36x_cable_detect,
24910 .mode_filter = hpt366_filter,
24911 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
24912 --- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24913 +++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24914 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24915 * Configuration for HPT370
24916 */
24917
24918 -static struct ata_port_operations hpt370_port_ops = {
24919 +static const struct ata_port_operations hpt370_port_ops = {
24920 .inherits = &ata_bmdma_port_ops,
24921
24922 .bmdma_stop = hpt370_bmdma_stop,
24923 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24924 * Configuration for HPT370A. Close to 370 but less filters
24925 */
24926
24927 -static struct ata_port_operations hpt370a_port_ops = {
24928 +static const struct ata_port_operations hpt370a_port_ops = {
24929 .inherits = &hpt370_port_ops,
24930 .mode_filter = hpt370a_filter,
24931 };
24932 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24933 * and DMA mode setting functionality.
24934 */
24935
24936 -static struct ata_port_operations hpt372_port_ops = {
24937 +static const struct ata_port_operations hpt372_port_ops = {
24938 .inherits = &ata_bmdma_port_ops,
24939
24940 .bmdma_stop = hpt37x_bmdma_stop,
24941 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24942 * but we have a different cable detection procedure for function 1.
24943 */
24944
24945 -static struct ata_port_operations hpt374_fn1_port_ops = {
24946 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24947 .inherits = &hpt372_port_ops,
24948 .prereset = hpt374_fn1_pre_reset,
24949 };
24950 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
24951 --- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24952 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24953 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24954 * Configuration for HPT3x2n.
24955 */
24956
24957 -static struct ata_port_operations hpt3x2n_port_ops = {
24958 +static const struct ata_port_operations hpt3x2n_port_ops = {
24959 .inherits = &ata_bmdma_port_ops,
24960
24961 .bmdma_stop = hpt3x2n_bmdma_stop,
24962 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
24963 --- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24964 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24965 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24966 ATA_BMDMA_SHT(DRV_NAME),
24967 };
24968
24969 -static struct ata_port_operations hpt3x3_port_ops = {
24970 +static const struct ata_port_operations hpt3x3_port_ops = {
24971 .inherits = &ata_bmdma_port_ops,
24972 .cable_detect = ata_cable_40wire,
24973 .set_piomode = hpt3x3_set_piomode,
24974 diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
24975 --- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24976 +++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24977 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24978 }
24979 }
24980
24981 -static struct ata_port_operations pata_icside_port_ops = {
24982 +static const struct ata_port_operations pata_icside_port_ops = {
24983 .inherits = &ata_sff_port_ops,
24984 /* no need to build any PRD tables for DMA */
24985 .qc_prep = ata_noop_qc_prep,
24986 diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
24987 --- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24988 +++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24989 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24990 ATA_PIO_SHT(DRV_NAME),
24991 };
24992
24993 -static struct ata_port_operations isapnp_port_ops = {
24994 +static const struct ata_port_operations isapnp_port_ops = {
24995 .inherits = &ata_sff_port_ops,
24996 .cable_detect = ata_cable_40wire,
24997 };
24998
24999 -static struct ata_port_operations isapnp_noalt_port_ops = {
25000 +static const struct ata_port_operations isapnp_noalt_port_ops = {
25001 .inherits = &ata_sff_port_ops,
25002 .cable_detect = ata_cable_40wire,
25003 /* No altstatus so we don't want to use the lost interrupt poll */
25004 diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25005 --- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25006 +++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25007 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25008 };
25009
25010
25011 -static struct ata_port_operations it8213_ops = {
25012 +static const struct ata_port_operations it8213_ops = {
25013 .inherits = &ata_bmdma_port_ops,
25014 .cable_detect = it8213_cable_detect,
25015 .set_piomode = it8213_set_piomode,
25016 diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25017 --- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25018 +++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25019 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25020 ATA_BMDMA_SHT(DRV_NAME),
25021 };
25022
25023 -static struct ata_port_operations it821x_smart_port_ops = {
25024 +static const struct ata_port_operations it821x_smart_port_ops = {
25025 .inherits = &ata_bmdma_port_ops,
25026
25027 .check_atapi_dma= it821x_check_atapi_dma,
25028 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25029 .port_start = it821x_port_start,
25030 };
25031
25032 -static struct ata_port_operations it821x_passthru_port_ops = {
25033 +static const struct ata_port_operations it821x_passthru_port_ops = {
25034 .inherits = &ata_bmdma_port_ops,
25035
25036 .check_atapi_dma= it821x_check_atapi_dma,
25037 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25038 .port_start = it821x_port_start,
25039 };
25040
25041 -static struct ata_port_operations it821x_rdc_port_ops = {
25042 +static const struct ata_port_operations it821x_rdc_port_ops = {
25043 .inherits = &ata_bmdma_port_ops,
25044
25045 .check_atapi_dma= it821x_check_atapi_dma,
25046 diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25047 --- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25048 +++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25049 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25050 ATA_PIO_SHT(DRV_NAME),
25051 };
25052
25053 -static struct ata_port_operations ixp4xx_port_ops = {
25054 +static const struct ata_port_operations ixp4xx_port_ops = {
25055 .inherits = &ata_sff_port_ops,
25056 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25057 .cable_detect = ata_cable_40wire,
25058 diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25059 --- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25060 +++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25061 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25062 ATA_BMDMA_SHT(DRV_NAME),
25063 };
25064
25065 -static struct ata_port_operations jmicron_ops = {
25066 +static const struct ata_port_operations jmicron_ops = {
25067 .inherits = &ata_bmdma_port_ops,
25068 .prereset = jmicron_pre_reset,
25069 };
25070 diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25071 --- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25072 +++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25073 @@ -106,7 +106,7 @@ struct legacy_probe {
25074
25075 struct legacy_controller {
25076 const char *name;
25077 - struct ata_port_operations *ops;
25078 + const struct ata_port_operations *ops;
25079 unsigned int pio_mask;
25080 unsigned int flags;
25081 unsigned int pflags;
25082 @@ -223,12 +223,12 @@ static const struct ata_port_operations
25083 * pio_mask as well.
25084 */
25085
25086 -static struct ata_port_operations simple_port_ops = {
25087 +static const struct ata_port_operations simple_port_ops = {
25088 .inherits = &legacy_base_port_ops,
25089 .sff_data_xfer = ata_sff_data_xfer_noirq,
25090 };
25091
25092 -static struct ata_port_operations legacy_port_ops = {
25093 +static const struct ata_port_operations legacy_port_ops = {
25094 .inherits = &legacy_base_port_ops,
25095 .sff_data_xfer = ata_sff_data_xfer_noirq,
25096 .set_mode = legacy_set_mode,
25097 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25098 return buflen;
25099 }
25100
25101 -static struct ata_port_operations pdc20230_port_ops = {
25102 +static const struct ata_port_operations pdc20230_port_ops = {
25103 .inherits = &legacy_base_port_ops,
25104 .set_piomode = pdc20230_set_piomode,
25105 .sff_data_xfer = pdc_data_xfer_vlb,
25106 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25107 ioread8(ap->ioaddr.status_addr);
25108 }
25109
25110 -static struct ata_port_operations ht6560a_port_ops = {
25111 +static const struct ata_port_operations ht6560a_port_ops = {
25112 .inherits = &legacy_base_port_ops,
25113 .set_piomode = ht6560a_set_piomode,
25114 };
25115 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25116 ioread8(ap->ioaddr.status_addr);
25117 }
25118
25119 -static struct ata_port_operations ht6560b_port_ops = {
25120 +static const struct ata_port_operations ht6560b_port_ops = {
25121 .inherits = &legacy_base_port_ops,
25122 .set_piomode = ht6560b_set_piomode,
25123 };
25124 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25125 }
25126
25127
25128 -static struct ata_port_operations opti82c611a_port_ops = {
25129 +static const struct ata_port_operations opti82c611a_port_ops = {
25130 .inherits = &legacy_base_port_ops,
25131 .set_piomode = opti82c611a_set_piomode,
25132 };
25133 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25134 return ata_sff_qc_issue(qc);
25135 }
25136
25137 -static struct ata_port_operations opti82c46x_port_ops = {
25138 +static const struct ata_port_operations opti82c46x_port_ops = {
25139 .inherits = &legacy_base_port_ops,
25140 .set_piomode = opti82c46x_set_piomode,
25141 .qc_issue = opti82c46x_qc_issue,
25142 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25143 return 0;
25144 }
25145
25146 -static struct ata_port_operations qdi6500_port_ops = {
25147 +static const struct ata_port_operations qdi6500_port_ops = {
25148 .inherits = &legacy_base_port_ops,
25149 .set_piomode = qdi6500_set_piomode,
25150 .qc_issue = qdi_qc_issue,
25151 .sff_data_xfer = vlb32_data_xfer,
25152 };
25153
25154 -static struct ata_port_operations qdi6580_port_ops = {
25155 +static const struct ata_port_operations qdi6580_port_ops = {
25156 .inherits = &legacy_base_port_ops,
25157 .set_piomode = qdi6580_set_piomode,
25158 .sff_data_xfer = vlb32_data_xfer,
25159 };
25160
25161 -static struct ata_port_operations qdi6580dp_port_ops = {
25162 +static const struct ata_port_operations qdi6580dp_port_ops = {
25163 .inherits = &legacy_base_port_ops,
25164 .set_piomode = qdi6580dp_set_piomode,
25165 .sff_data_xfer = vlb32_data_xfer,
25166 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25167 return 0;
25168 }
25169
25170 -static struct ata_port_operations winbond_port_ops = {
25171 +static const struct ata_port_operations winbond_port_ops = {
25172 .inherits = &legacy_base_port_ops,
25173 .set_piomode = winbond_set_piomode,
25174 .sff_data_xfer = vlb32_data_xfer,
25175 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25176 int pio_modes = controller->pio_mask;
25177 unsigned long io = probe->port;
25178 u32 mask = (1 << probe->slot);
25179 - struct ata_port_operations *ops = controller->ops;
25180 + const struct ata_port_operations *ops = controller->ops;
25181 struct legacy_data *ld = &legacy_data[probe->slot];
25182 struct ata_host *host = NULL;
25183 struct ata_port *ap;
25184 diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25185 --- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25186 +++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25187 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25188 ATA_BMDMA_SHT(DRV_NAME),
25189 };
25190
25191 -static struct ata_port_operations marvell_ops = {
25192 +static const struct ata_port_operations marvell_ops = {
25193 .inherits = &ata_bmdma_port_ops,
25194 .cable_detect = marvell_cable_detect,
25195 .prereset = marvell_pre_reset,
25196 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25197 --- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25198 +++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25199 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25200 ATA_PIO_SHT(DRV_NAME),
25201 };
25202
25203 -static struct ata_port_operations mpc52xx_ata_port_ops = {
25204 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
25205 .inherits = &ata_bmdma_port_ops,
25206 .sff_dev_select = mpc52xx_ata_dev_select,
25207 .set_piomode = mpc52xx_ata_set_piomode,
25208 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25209 --- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25210 +++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25211 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25212 ATA_PIO_SHT(DRV_NAME),
25213 };
25214
25215 -static struct ata_port_operations mpiix_port_ops = {
25216 +static const struct ata_port_operations mpiix_port_ops = {
25217 .inherits = &ata_sff_port_ops,
25218 .qc_issue = mpiix_qc_issue,
25219 .cable_detect = ata_cable_40wire,
25220 diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25221 --- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25222 +++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25223 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25224 ATA_BMDMA_SHT(DRV_NAME),
25225 };
25226
25227 -static struct ata_port_operations netcell_ops = {
25228 +static const struct ata_port_operations netcell_ops = {
25229 .inherits = &ata_bmdma_port_ops,
25230 .cable_detect = ata_cable_80wire,
25231 .read_id = netcell_read_id,
25232 diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25233 --- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25234 +++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25235 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25236 ATA_BMDMA_SHT(DRV_NAME),
25237 };
25238
25239 -static struct ata_port_operations ninja32_port_ops = {
25240 +static const struct ata_port_operations ninja32_port_ops = {
25241 .inherits = &ata_bmdma_port_ops,
25242 .sff_dev_select = ninja32_dev_select,
25243 .cable_detect = ata_cable_40wire,
25244 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25245 --- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25246 +++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25247 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25248 ATA_PIO_SHT(DRV_NAME),
25249 };
25250
25251 -static struct ata_port_operations ns87410_port_ops = {
25252 +static const struct ata_port_operations ns87410_port_ops = {
25253 .inherits = &ata_sff_port_ops,
25254 .qc_issue = ns87410_qc_issue,
25255 .cable_detect = ata_cable_40wire,
25256 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25257 --- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25258 +++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25259 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25260 }
25261 #endif /* 87560 SuperIO Support */
25262
25263 -static struct ata_port_operations ns87415_pata_ops = {
25264 +static const struct ata_port_operations ns87415_pata_ops = {
25265 .inherits = &ata_bmdma_port_ops,
25266
25267 .check_atapi_dma = ns87415_check_atapi_dma,
25268 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25269 };
25270
25271 #if defined(CONFIG_SUPERIO)
25272 -static struct ata_port_operations ns87560_pata_ops = {
25273 +static const struct ata_port_operations ns87560_pata_ops = {
25274 .inherits = &ns87415_pata_ops,
25275 .sff_tf_read = ns87560_tf_read,
25276 .sff_check_status = ns87560_check_status,
25277 diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25278 --- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25279 +++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25280 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25281 return 0;
25282 }
25283
25284 +/* cannot be const */
25285 static struct ata_port_operations octeon_cf_ops = {
25286 .inherits = &ata_sff_port_ops,
25287 .check_atapi_dma = octeon_cf_check_atapi_dma,
25288 diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25289 --- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25290 +++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25291 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25292 ATA_BMDMA_SHT(DRV_NAME),
25293 };
25294
25295 -static struct ata_port_operations oldpiix_pata_ops = {
25296 +static const struct ata_port_operations oldpiix_pata_ops = {
25297 .inherits = &ata_bmdma_port_ops,
25298 .qc_issue = oldpiix_qc_issue,
25299 .cable_detect = ata_cable_40wire,
25300 diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25301 --- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25302 +++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25303 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25304 ATA_PIO_SHT(DRV_NAME),
25305 };
25306
25307 -static struct ata_port_operations opti_port_ops = {
25308 +static const struct ata_port_operations opti_port_ops = {
25309 .inherits = &ata_sff_port_ops,
25310 .cable_detect = ata_cable_40wire,
25311 .set_piomode = opti_set_piomode,
25312 diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25313 --- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25314 +++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25315 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25316 ATA_BMDMA_SHT(DRV_NAME),
25317 };
25318
25319 -static struct ata_port_operations optidma_port_ops = {
25320 +static const struct ata_port_operations optidma_port_ops = {
25321 .inherits = &ata_bmdma_port_ops,
25322 .cable_detect = ata_cable_40wire,
25323 .set_piomode = optidma_set_pio_mode,
25324 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25325 .prereset = optidma_pre_reset,
25326 };
25327
25328 -static struct ata_port_operations optiplus_port_ops = {
25329 +static const struct ata_port_operations optiplus_port_ops = {
25330 .inherits = &optidma_port_ops,
25331 .set_piomode = optiplus_set_pio_mode,
25332 .set_dmamode = optiplus_set_dma_mode,
25333 diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25334 --- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25335 +++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25336 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25337 ATA_PIO_SHT(DRV_NAME),
25338 };
25339
25340 -static struct ata_port_operations palmld_port_ops = {
25341 +static const struct ata_port_operations palmld_port_ops = {
25342 .inherits = &ata_sff_port_ops,
25343 .sff_data_xfer = ata_sff_data_xfer_noirq,
25344 .cable_detect = ata_cable_40wire,
25345 diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25346 --- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25347 +++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25348 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25349 ATA_PIO_SHT(DRV_NAME),
25350 };
25351
25352 -static struct ata_port_operations pcmcia_port_ops = {
25353 +static const struct ata_port_operations pcmcia_port_ops = {
25354 .inherits = &ata_sff_port_ops,
25355 .sff_data_xfer = ata_sff_data_xfer_noirq,
25356 .cable_detect = ata_cable_40wire,
25357 .set_mode = pcmcia_set_mode,
25358 };
25359
25360 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25361 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25362 .inherits = &ata_sff_port_ops,
25363 .sff_data_xfer = ata_data_xfer_8bit,
25364 .cable_detect = ata_cable_40wire,
25365 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25366 unsigned long io_base, ctl_base;
25367 void __iomem *io_addr, *ctl_addr;
25368 int n_ports = 1;
25369 - struct ata_port_operations *ops = &pcmcia_port_ops;
25370 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25371
25372 info = kzalloc(sizeof(*info), GFP_KERNEL);
25373 if (info == NULL)
25374 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25375 --- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25376 +++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25377 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25378 ATA_BMDMA_SHT(DRV_NAME),
25379 };
25380
25381 -static struct ata_port_operations pdc2027x_pata100_ops = {
25382 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25383 .inherits = &ata_bmdma_port_ops,
25384 .check_atapi_dma = pdc2027x_check_atapi_dma,
25385 .cable_detect = pdc2027x_cable_detect,
25386 .prereset = pdc2027x_prereset,
25387 };
25388
25389 -static struct ata_port_operations pdc2027x_pata133_ops = {
25390 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25391 .inherits = &pdc2027x_pata100_ops,
25392 .mode_filter = pdc2027x_mode_filter,
25393 .set_piomode = pdc2027x_set_piomode,
25394 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25395 --- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25396 +++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25397 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25398 ATA_BMDMA_SHT(DRV_NAME),
25399 };
25400
25401 -static struct ata_port_operations pdc2024x_port_ops = {
25402 +static const struct ata_port_operations pdc2024x_port_ops = {
25403 .inherits = &ata_bmdma_port_ops,
25404
25405 .cable_detect = ata_cable_40wire,
25406 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25407 .sff_exec_command = pdc202xx_exec_command,
25408 };
25409
25410 -static struct ata_port_operations pdc2026x_port_ops = {
25411 +static const struct ata_port_operations pdc2026x_port_ops = {
25412 .inherits = &pdc2024x_port_ops,
25413
25414 .check_atapi_dma = pdc2026x_check_atapi_dma,
25415 diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25416 --- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25417 +++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25418 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25419 ATA_PIO_SHT(DRV_NAME),
25420 };
25421
25422 -static struct ata_port_operations pata_platform_port_ops = {
25423 +static const struct ata_port_operations pata_platform_port_ops = {
25424 .inherits = &ata_sff_port_ops,
25425 .sff_data_xfer = ata_sff_data_xfer_noirq,
25426 .cable_detect = ata_cable_unknown,
25427 diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25428 --- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25429 +++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25430 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25431 ATA_PIO_SHT(DRV_NAME),
25432 };
25433
25434 -static struct ata_port_operations qdi6500_port_ops = {
25435 +static const struct ata_port_operations qdi6500_port_ops = {
25436 .inherits = &ata_sff_port_ops,
25437 .qc_issue = qdi_qc_issue,
25438 .sff_data_xfer = qdi_data_xfer,
25439 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25440 .set_piomode = qdi6500_set_piomode,
25441 };
25442
25443 -static struct ata_port_operations qdi6580_port_ops = {
25444 +static const struct ata_port_operations qdi6580_port_ops = {
25445 .inherits = &qdi6500_port_ops,
25446 .set_piomode = qdi6580_set_piomode,
25447 };
25448 diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25449 --- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25450 +++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25451 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25452 ATA_BMDMA_SHT(DRV_NAME),
25453 };
25454
25455 -static struct ata_port_operations radisys_pata_ops = {
25456 +static const struct ata_port_operations radisys_pata_ops = {
25457 .inherits = &ata_bmdma_port_ops,
25458 .qc_issue = radisys_qc_issue,
25459 .cable_detect = ata_cable_unknown,
25460 diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25461 --- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25462 +++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25463 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25464 return IRQ_HANDLED;
25465 }
25466
25467 -static struct ata_port_operations rb532_pata_port_ops = {
25468 +static const struct ata_port_operations rb532_pata_port_ops = {
25469 .inherits = &ata_sff_port_ops,
25470 .sff_data_xfer = ata_sff_data_xfer32,
25471 };
25472 diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25473 --- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25474 +++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25475 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25476 pci_write_config_byte(dev, 0x48, udma_enable);
25477 }
25478
25479 -static struct ata_port_operations rdc_pata_ops = {
25480 +static const struct ata_port_operations rdc_pata_ops = {
25481 .inherits = &ata_bmdma32_port_ops,
25482 .cable_detect = rdc_pata_cable_detect,
25483 .set_piomode = rdc_set_piomode,
25484 diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25485 --- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25486 +++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25487 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25488 ATA_PIO_SHT(DRV_NAME),
25489 };
25490
25491 -static struct ata_port_operations rz1000_port_ops = {
25492 +static const struct ata_port_operations rz1000_port_ops = {
25493 .inherits = &ata_sff_port_ops,
25494 .cable_detect = ata_cable_40wire,
25495 .set_mode = rz1000_set_mode,
25496 diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25497 --- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25498 +++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25499 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25500 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25501 };
25502
25503 -static struct ata_port_operations sc1200_port_ops = {
25504 +static const struct ata_port_operations sc1200_port_ops = {
25505 .inherits = &ata_bmdma_port_ops,
25506 .qc_prep = ata_sff_dumb_qc_prep,
25507 .qc_issue = sc1200_qc_issue,
25508 diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25509 --- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25510 +++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25511 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25512 ATA_BMDMA_SHT(DRV_NAME),
25513 };
25514
25515 -static struct ata_port_operations scc_pata_ops = {
25516 +static const struct ata_port_operations scc_pata_ops = {
25517 .inherits = &ata_bmdma_port_ops,
25518
25519 .set_piomode = scc_set_piomode,
25520 diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25521 --- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25522 +++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25523 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25524 ATA_BMDMA_SHT(DRV_NAME),
25525 };
25526
25527 -static struct ata_port_operations sch_pata_ops = {
25528 +static const struct ata_port_operations sch_pata_ops = {
25529 .inherits = &ata_bmdma_port_ops,
25530 .cable_detect = ata_cable_unknown,
25531 .set_piomode = sch_set_piomode,
25532 diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25533 --- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25534 +++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25535 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25536 ATA_BMDMA_SHT(DRV_NAME),
25537 };
25538
25539 -static struct ata_port_operations serverworks_osb4_port_ops = {
25540 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25541 .inherits = &ata_bmdma_port_ops,
25542 .cable_detect = serverworks_cable_detect,
25543 .mode_filter = serverworks_osb4_filter,
25544 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25545 .set_dmamode = serverworks_set_dmamode,
25546 };
25547
25548 -static struct ata_port_operations serverworks_csb_port_ops = {
25549 +static const struct ata_port_operations serverworks_csb_port_ops = {
25550 .inherits = &serverworks_osb4_port_ops,
25551 .mode_filter = serverworks_csb_filter,
25552 };
25553 diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25554 --- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25555 +++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25556 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25557 ATA_BMDMA_SHT(DRV_NAME),
25558 };
25559
25560 -static struct ata_port_operations sil680_port_ops = {
25561 +static const struct ata_port_operations sil680_port_ops = {
25562 .inherits = &ata_bmdma32_port_ops,
25563 .cable_detect = sil680_cable_detect,
25564 .set_piomode = sil680_set_piomode,
25565 diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25566 --- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25567 +++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25568 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25569 ATA_BMDMA_SHT(DRV_NAME),
25570 };
25571
25572 -static struct ata_port_operations sis_133_for_sata_ops = {
25573 +static const struct ata_port_operations sis_133_for_sata_ops = {
25574 .inherits = &ata_bmdma_port_ops,
25575 .set_piomode = sis_133_set_piomode,
25576 .set_dmamode = sis_133_set_dmamode,
25577 .cable_detect = sis_133_cable_detect,
25578 };
25579
25580 -static struct ata_port_operations sis_base_ops = {
25581 +static const struct ata_port_operations sis_base_ops = {
25582 .inherits = &ata_bmdma_port_ops,
25583 .prereset = sis_pre_reset,
25584 };
25585
25586 -static struct ata_port_operations sis_133_ops = {
25587 +static const struct ata_port_operations sis_133_ops = {
25588 .inherits = &sis_base_ops,
25589 .set_piomode = sis_133_set_piomode,
25590 .set_dmamode = sis_133_set_dmamode,
25591 .cable_detect = sis_133_cable_detect,
25592 };
25593
25594 -static struct ata_port_operations sis_133_early_ops = {
25595 +static const struct ata_port_operations sis_133_early_ops = {
25596 .inherits = &sis_base_ops,
25597 .set_piomode = sis_100_set_piomode,
25598 .set_dmamode = sis_133_early_set_dmamode,
25599 .cable_detect = sis_66_cable_detect,
25600 };
25601
25602 -static struct ata_port_operations sis_100_ops = {
25603 +static const struct ata_port_operations sis_100_ops = {
25604 .inherits = &sis_base_ops,
25605 .set_piomode = sis_100_set_piomode,
25606 .set_dmamode = sis_100_set_dmamode,
25607 .cable_detect = sis_66_cable_detect,
25608 };
25609
25610 -static struct ata_port_operations sis_66_ops = {
25611 +static const struct ata_port_operations sis_66_ops = {
25612 .inherits = &sis_base_ops,
25613 .set_piomode = sis_old_set_piomode,
25614 .set_dmamode = sis_66_set_dmamode,
25615 .cable_detect = sis_66_cable_detect,
25616 };
25617
25618 -static struct ata_port_operations sis_old_ops = {
25619 +static const struct ata_port_operations sis_old_ops = {
25620 .inherits = &sis_base_ops,
25621 .set_piomode = sis_old_set_piomode,
25622 .set_dmamode = sis_old_set_dmamode,
25623 diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25624 --- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25625 +++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25626 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25627 ATA_BMDMA_SHT(DRV_NAME),
25628 };
25629
25630 -static struct ata_port_operations sl82c105_port_ops = {
25631 +static const struct ata_port_operations sl82c105_port_ops = {
25632 .inherits = &ata_bmdma_port_ops,
25633 .qc_defer = sl82c105_qc_defer,
25634 .bmdma_start = sl82c105_bmdma_start,
25635 diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25636 --- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25637 +++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25638 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25639 ATA_BMDMA_SHT(DRV_NAME),
25640 };
25641
25642 -static struct ata_port_operations triflex_port_ops = {
25643 +static const struct ata_port_operations triflex_port_ops = {
25644 .inherits = &ata_bmdma_port_ops,
25645 .bmdma_start = triflex_bmdma_start,
25646 .bmdma_stop = triflex_bmdma_stop,
25647 diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25648 --- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25649 +++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25650 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25651 ATA_BMDMA_SHT(DRV_NAME),
25652 };
25653
25654 -static struct ata_port_operations via_port_ops = {
25655 +static const struct ata_port_operations via_port_ops = {
25656 .inherits = &ata_bmdma_port_ops,
25657 .cable_detect = via_cable_detect,
25658 .set_piomode = via_set_piomode,
25659 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25660 .port_start = via_port_start,
25661 };
25662
25663 -static struct ata_port_operations via_port_ops_noirq = {
25664 +static const struct ata_port_operations via_port_ops_noirq = {
25665 .inherits = &via_port_ops,
25666 .sff_data_xfer = ata_sff_data_xfer_noirq,
25667 };
25668 diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25669 --- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25670 +++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25671 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25672 ATA_PIO_SHT(DRV_NAME),
25673 };
25674
25675 -static struct ata_port_operations winbond_port_ops = {
25676 +static const struct ata_port_operations winbond_port_ops = {
25677 .inherits = &ata_sff_port_ops,
25678 .sff_data_xfer = winbond_data_xfer,
25679 .cable_detect = ata_cable_40wire,
25680 diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25681 --- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25682 +++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25683 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25684 .dma_boundary = ADMA_DMA_BOUNDARY,
25685 };
25686
25687 -static struct ata_port_operations adma_ata_ops = {
25688 +static const struct ata_port_operations adma_ata_ops = {
25689 .inherits = &ata_sff_port_ops,
25690
25691 .lost_interrupt = ATA_OP_NULL,
25692 diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25693 --- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25694 +++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25695 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25696 .dma_boundary = ATA_DMA_BOUNDARY,
25697 };
25698
25699 -static struct ata_port_operations sata_fsl_ops = {
25700 +static const struct ata_port_operations sata_fsl_ops = {
25701 .inherits = &sata_pmp_port_ops,
25702
25703 .qc_defer = ata_std_qc_defer,
25704 diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25705 --- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25706 +++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25707 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25708 return 0;
25709 }
25710
25711 -static struct ata_port_operations inic_port_ops = {
25712 +static const struct ata_port_operations inic_port_ops = {
25713 .inherits = &sata_port_ops,
25714
25715 .check_atapi_dma = inic_check_atapi_dma,
25716 diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25717 --- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25718 +++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25719 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25720 .dma_boundary = MV_DMA_BOUNDARY,
25721 };
25722
25723 -static struct ata_port_operations mv5_ops = {
25724 +static const struct ata_port_operations mv5_ops = {
25725 .inherits = &ata_sff_port_ops,
25726
25727 .lost_interrupt = ATA_OP_NULL,
25728 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25729 .port_stop = mv_port_stop,
25730 };
25731
25732 -static struct ata_port_operations mv6_ops = {
25733 +static const struct ata_port_operations mv6_ops = {
25734 .inherits = &mv5_ops,
25735 .dev_config = mv6_dev_config,
25736 .scr_read = mv_scr_read,
25737 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25738 .bmdma_status = mv_bmdma_status,
25739 };
25740
25741 -static struct ata_port_operations mv_iie_ops = {
25742 +static const struct ata_port_operations mv_iie_ops = {
25743 .inherits = &mv6_ops,
25744 .dev_config = ATA_OP_NULL,
25745 .qc_prep = mv_qc_prep_iie,
25746 diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
25747 --- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25748 +++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25749 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25750 * cases. Define nv_hardreset() which only kicks in for post-boot
25751 * probing and use it for all variants.
25752 */
25753 -static struct ata_port_operations nv_generic_ops = {
25754 +static const struct ata_port_operations nv_generic_ops = {
25755 .inherits = &ata_bmdma_port_ops,
25756 .lost_interrupt = ATA_OP_NULL,
25757 .scr_read = nv_scr_read,
25758 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25759 .hardreset = nv_hardreset,
25760 };
25761
25762 -static struct ata_port_operations nv_nf2_ops = {
25763 +static const struct ata_port_operations nv_nf2_ops = {
25764 .inherits = &nv_generic_ops,
25765 .freeze = nv_nf2_freeze,
25766 .thaw = nv_nf2_thaw,
25767 };
25768
25769 -static struct ata_port_operations nv_ck804_ops = {
25770 +static const struct ata_port_operations nv_ck804_ops = {
25771 .inherits = &nv_generic_ops,
25772 .freeze = nv_ck804_freeze,
25773 .thaw = nv_ck804_thaw,
25774 .host_stop = nv_ck804_host_stop,
25775 };
25776
25777 -static struct ata_port_operations nv_adma_ops = {
25778 +static const struct ata_port_operations nv_adma_ops = {
25779 .inherits = &nv_ck804_ops,
25780
25781 .check_atapi_dma = nv_adma_check_atapi_dma,
25782 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25783 .host_stop = nv_adma_host_stop,
25784 };
25785
25786 -static struct ata_port_operations nv_swncq_ops = {
25787 +static const struct ata_port_operations nv_swncq_ops = {
25788 .inherits = &nv_generic_ops,
25789
25790 .qc_defer = ata_std_qc_defer,
25791 diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
25792 --- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25793 +++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25794 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25795 .error_handler = pdc_error_handler,
25796 };
25797
25798 -static struct ata_port_operations pdc_sata_ops = {
25799 +static const struct ata_port_operations pdc_sata_ops = {
25800 .inherits = &pdc_common_ops,
25801 .cable_detect = pdc_sata_cable_detect,
25802 .freeze = pdc_sata_freeze,
25803 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25804
25805 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25806 and ->freeze/thaw that ignore the hotplug controls. */
25807 -static struct ata_port_operations pdc_old_sata_ops = {
25808 +static const struct ata_port_operations pdc_old_sata_ops = {
25809 .inherits = &pdc_sata_ops,
25810 .freeze = pdc_freeze,
25811 .thaw = pdc_thaw,
25812 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25813 };
25814
25815 -static struct ata_port_operations pdc_pata_ops = {
25816 +static const struct ata_port_operations pdc_pata_ops = {
25817 .inherits = &pdc_common_ops,
25818 .cable_detect = pdc_pata_cable_detect,
25819 .freeze = pdc_freeze,
25820 diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
25821 --- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25822 +++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25823 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25824 .dma_boundary = QS_DMA_BOUNDARY,
25825 };
25826
25827 -static struct ata_port_operations qs_ata_ops = {
25828 +static const struct ata_port_operations qs_ata_ops = {
25829 .inherits = &ata_sff_port_ops,
25830
25831 .check_atapi_dma = qs_check_atapi_dma,
25832 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
25833 --- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25834 +++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25835 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25836 .dma_boundary = ATA_DMA_BOUNDARY,
25837 };
25838
25839 -static struct ata_port_operations sil24_ops = {
25840 +static const struct ata_port_operations sil24_ops = {
25841 .inherits = &sata_pmp_port_ops,
25842
25843 .qc_defer = sil24_qc_defer,
25844 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
25845 --- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25846 +++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25847 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25848 .sg_tablesize = ATA_MAX_PRD
25849 };
25850
25851 -static struct ata_port_operations sil_ops = {
25852 +static const struct ata_port_operations sil_ops = {
25853 .inherits = &ata_bmdma32_port_ops,
25854 .dev_config = sil_dev_config,
25855 .set_mode = sil_set_mode,
25856 diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
25857 --- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25858 +++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25859 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25860 ATA_BMDMA_SHT(DRV_NAME),
25861 };
25862
25863 -static struct ata_port_operations sis_ops = {
25864 +static const struct ata_port_operations sis_ops = {
25865 .inherits = &ata_bmdma_port_ops,
25866 .scr_read = sis_scr_read,
25867 .scr_write = sis_scr_write,
25868 diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
25869 --- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25870 +++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25871 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25872 };
25873
25874
25875 -static struct ata_port_operations k2_sata_ops = {
25876 +static const struct ata_port_operations k2_sata_ops = {
25877 .inherits = &ata_bmdma_port_ops,
25878 .sff_tf_load = k2_sata_tf_load,
25879 .sff_tf_read = k2_sata_tf_read,
25880 diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
25881 --- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25882 +++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25883 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25884 };
25885
25886 /* TODO: inherit from base port_ops after converting to new EH */
25887 -static struct ata_port_operations pdc_20621_ops = {
25888 +static const struct ata_port_operations pdc_20621_ops = {
25889 .inherits = &ata_sff_port_ops,
25890
25891 .check_atapi_dma = pdc_check_atapi_dma,
25892 diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
25893 --- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25894 +++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25895 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25896 ATA_BMDMA_SHT(DRV_NAME),
25897 };
25898
25899 -static struct ata_port_operations uli_ops = {
25900 +static const struct ata_port_operations uli_ops = {
25901 .inherits = &ata_bmdma_port_ops,
25902 .scr_read = uli_scr_read,
25903 .scr_write = uli_scr_write,
25904 diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
25905 --- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25906 +++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25907 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25908 ATA_BMDMA_SHT(DRV_NAME),
25909 };
25910
25911 -static struct ata_port_operations svia_base_ops = {
25912 +static const struct ata_port_operations svia_base_ops = {
25913 .inherits = &ata_bmdma_port_ops,
25914 .sff_tf_load = svia_tf_load,
25915 };
25916
25917 -static struct ata_port_operations vt6420_sata_ops = {
25918 +static const struct ata_port_operations vt6420_sata_ops = {
25919 .inherits = &svia_base_ops,
25920 .freeze = svia_noop_freeze,
25921 .prereset = vt6420_prereset,
25922 .bmdma_start = vt6420_bmdma_start,
25923 };
25924
25925 -static struct ata_port_operations vt6421_pata_ops = {
25926 +static const struct ata_port_operations vt6421_pata_ops = {
25927 .inherits = &svia_base_ops,
25928 .cable_detect = vt6421_pata_cable_detect,
25929 .set_piomode = vt6421_set_pio_mode,
25930 .set_dmamode = vt6421_set_dma_mode,
25931 };
25932
25933 -static struct ata_port_operations vt6421_sata_ops = {
25934 +static const struct ata_port_operations vt6421_sata_ops = {
25935 .inherits = &svia_base_ops,
25936 .scr_read = svia_scr_read,
25937 .scr_write = svia_scr_write,
25938 };
25939
25940 -static struct ata_port_operations vt8251_ops = {
25941 +static const struct ata_port_operations vt8251_ops = {
25942 .inherits = &svia_base_ops,
25943 .hardreset = sata_std_hardreset,
25944 .scr_read = vt8251_scr_read,
25945 diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
25946 --- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25947 +++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25948 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25949 };
25950
25951
25952 -static struct ata_port_operations vsc_sata_ops = {
25953 +static const struct ata_port_operations vsc_sata_ops = {
25954 .inherits = &ata_bmdma_port_ops,
25955 /* The IRQ handling is not quite standard SFF behaviour so we
25956 cannot use the default lost interrupt handler */
25957 diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
25958 --- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25959 +++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25960 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25961 vcc->pop(vcc, skb);
25962 else
25963 dev_kfree_skb_any(skb);
25964 - atomic_inc(&vcc->stats->tx);
25965 + atomic_inc_unchecked(&vcc->stats->tx);
25966
25967 return 0;
25968 }
25969 diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
25970 --- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25971 +++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25972 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25973 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25974
25975 // VC layer stats
25976 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25977 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25978
25979 // free the descriptor
25980 kfree (tx_descr);
25981 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25982 dump_skb ("<<<", vc, skb);
25983
25984 // VC layer stats
25985 - atomic_inc(&atm_vcc->stats->rx);
25986 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25987 __net_timestamp(skb);
25988 // end of our responsability
25989 atm_vcc->push (atm_vcc, skb);
25990 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25991 } else {
25992 PRINTK (KERN_INFO, "dropped over-size frame");
25993 // should we count this?
25994 - atomic_inc(&atm_vcc->stats->rx_drop);
25995 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25996 }
25997
25998 } else {
25999 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26000 }
26001
26002 if (check_area (skb->data, skb->len)) {
26003 - atomic_inc(&atm_vcc->stats->tx_err);
26004 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26005 return -ENOMEM; // ?
26006 }
26007
26008 diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26009 --- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26010 +++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26011 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26012 if (vcc->pop) vcc->pop(vcc,skb);
26013 else dev_kfree_skb(skb);
26014 if (dev_data) return 0;
26015 - atomic_inc(&vcc->stats->tx_err);
26016 + atomic_inc_unchecked(&vcc->stats->tx_err);
26017 return -ENOLINK;
26018 }
26019 size = skb->len+sizeof(struct atmtcp_hdr);
26020 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26021 if (!new_skb) {
26022 if (vcc->pop) vcc->pop(vcc,skb);
26023 else dev_kfree_skb(skb);
26024 - atomic_inc(&vcc->stats->tx_err);
26025 + atomic_inc_unchecked(&vcc->stats->tx_err);
26026 return -ENOBUFS;
26027 }
26028 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26029 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26030 if (vcc->pop) vcc->pop(vcc,skb);
26031 else dev_kfree_skb(skb);
26032 out_vcc->push(out_vcc,new_skb);
26033 - atomic_inc(&vcc->stats->tx);
26034 - atomic_inc(&out_vcc->stats->rx);
26035 + atomic_inc_unchecked(&vcc->stats->tx);
26036 + atomic_inc_unchecked(&out_vcc->stats->rx);
26037 return 0;
26038 }
26039
26040 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26041 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26042 read_unlock(&vcc_sklist_lock);
26043 if (!out_vcc) {
26044 - atomic_inc(&vcc->stats->tx_err);
26045 + atomic_inc_unchecked(&vcc->stats->tx_err);
26046 goto done;
26047 }
26048 skb_pull(skb,sizeof(struct atmtcp_hdr));
26049 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26050 __net_timestamp(new_skb);
26051 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26052 out_vcc->push(out_vcc,new_skb);
26053 - atomic_inc(&vcc->stats->tx);
26054 - atomic_inc(&out_vcc->stats->rx);
26055 + atomic_inc_unchecked(&vcc->stats->tx);
26056 + atomic_inc_unchecked(&out_vcc->stats->rx);
26057 done:
26058 if (vcc->pop) vcc->pop(vcc,skb);
26059 else dev_kfree_skb(skb);
26060 diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26061 --- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26062 +++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26063 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26064 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26065 vcc->dev->number);
26066 length = 0;
26067 - atomic_inc(&vcc->stats->rx_err);
26068 + atomic_inc_unchecked(&vcc->stats->rx_err);
26069 }
26070 else {
26071 length = ATM_CELL_SIZE-1; /* no HEC */
26072 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26073 size);
26074 }
26075 eff = length = 0;
26076 - atomic_inc(&vcc->stats->rx_err);
26077 + atomic_inc_unchecked(&vcc->stats->rx_err);
26078 }
26079 else {
26080 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26081 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26082 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26083 vcc->dev->number,vcc->vci,length,size << 2,descr);
26084 length = eff = 0;
26085 - atomic_inc(&vcc->stats->rx_err);
26086 + atomic_inc_unchecked(&vcc->stats->rx_err);
26087 }
26088 }
26089 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26090 @@ -770,7 +770,7 @@ rx_dequeued++;
26091 vcc->push(vcc,skb);
26092 pushed++;
26093 }
26094 - atomic_inc(&vcc->stats->rx);
26095 + atomic_inc_unchecked(&vcc->stats->rx);
26096 }
26097 wake_up(&eni_dev->rx_wait);
26098 }
26099 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26100 PCI_DMA_TODEVICE);
26101 if (vcc->pop) vcc->pop(vcc,skb);
26102 else dev_kfree_skb_irq(skb);
26103 - atomic_inc(&vcc->stats->tx);
26104 + atomic_inc_unchecked(&vcc->stats->tx);
26105 wake_up(&eni_dev->tx_wait);
26106 dma_complete++;
26107 }
26108 diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26109 --- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26110 +++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26111 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26112 }
26113 }
26114
26115 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26116 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26117
26118 fs_dprintk (FS_DEBUG_TXMEM, "i");
26119 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26120 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26121 #endif
26122 skb_put (skb, qe->p1 & 0xffff);
26123 ATM_SKB(skb)->vcc = atm_vcc;
26124 - atomic_inc(&atm_vcc->stats->rx);
26125 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26126 __net_timestamp(skb);
26127 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26128 atm_vcc->push (atm_vcc, skb);
26129 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26130 kfree (pe);
26131 }
26132 if (atm_vcc)
26133 - atomic_inc(&atm_vcc->stats->rx_drop);
26134 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26135 break;
26136 case 0x1f: /* Reassembly abort: no buffers. */
26137 /* Silently increment error counter. */
26138 if (atm_vcc)
26139 - atomic_inc(&atm_vcc->stats->rx_drop);
26140 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26141 break;
26142 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26143 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26144 diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26145 --- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26146 +++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26147 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26148 #endif
26149 /* check error condition */
26150 if (*entry->status & STATUS_ERROR)
26151 - atomic_inc(&vcc->stats->tx_err);
26152 + atomic_inc_unchecked(&vcc->stats->tx_err);
26153 else
26154 - atomic_inc(&vcc->stats->tx);
26155 + atomic_inc_unchecked(&vcc->stats->tx);
26156 }
26157 }
26158
26159 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26160 if (skb == NULL) {
26161 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26162
26163 - atomic_inc(&vcc->stats->rx_drop);
26164 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26165 return -ENOMEM;
26166 }
26167
26168 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26169
26170 dev_kfree_skb_any(skb);
26171
26172 - atomic_inc(&vcc->stats->rx_drop);
26173 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26174 return -ENOMEM;
26175 }
26176
26177 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26178
26179 vcc->push(vcc, skb);
26180 - atomic_inc(&vcc->stats->rx);
26181 + atomic_inc_unchecked(&vcc->stats->rx);
26182
26183 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26184
26185 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26186 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26187 fore200e->atm_dev->number,
26188 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26189 - atomic_inc(&vcc->stats->rx_err);
26190 + atomic_inc_unchecked(&vcc->stats->rx_err);
26191 }
26192 }
26193
26194 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26195 goto retry_here;
26196 }
26197
26198 - atomic_inc(&vcc->stats->tx_err);
26199 + atomic_inc_unchecked(&vcc->stats->tx_err);
26200
26201 fore200e->tx_sat++;
26202 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26203 diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26204 --- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26205 +++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26206 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26207
26208 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26209 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26210 - atomic_inc(&vcc->stats->rx_drop);
26211 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26212 goto return_host_buffers;
26213 }
26214
26215 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26216 RBRQ_LEN_ERR(he_dev->rbrq_head)
26217 ? "LEN_ERR" : "",
26218 vcc->vpi, vcc->vci);
26219 - atomic_inc(&vcc->stats->rx_err);
26220 + atomic_inc_unchecked(&vcc->stats->rx_err);
26221 goto return_host_buffers;
26222 }
26223
26224 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26225 vcc->push(vcc, skb);
26226 spin_lock(&he_dev->global_lock);
26227
26228 - atomic_inc(&vcc->stats->rx);
26229 + atomic_inc_unchecked(&vcc->stats->rx);
26230
26231 return_host_buffers:
26232 ++pdus_assembled;
26233 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26234 tpd->vcc->pop(tpd->vcc, tpd->skb);
26235 else
26236 dev_kfree_skb_any(tpd->skb);
26237 - atomic_inc(&tpd->vcc->stats->tx_err);
26238 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26239 }
26240 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26241 return;
26242 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26243 vcc->pop(vcc, skb);
26244 else
26245 dev_kfree_skb_any(skb);
26246 - atomic_inc(&vcc->stats->tx_err);
26247 + atomic_inc_unchecked(&vcc->stats->tx_err);
26248 return -EINVAL;
26249 }
26250
26251 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26252 vcc->pop(vcc, skb);
26253 else
26254 dev_kfree_skb_any(skb);
26255 - atomic_inc(&vcc->stats->tx_err);
26256 + atomic_inc_unchecked(&vcc->stats->tx_err);
26257 return -EINVAL;
26258 }
26259 #endif
26260 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26261 vcc->pop(vcc, skb);
26262 else
26263 dev_kfree_skb_any(skb);
26264 - atomic_inc(&vcc->stats->tx_err);
26265 + atomic_inc_unchecked(&vcc->stats->tx_err);
26266 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26267 return -ENOMEM;
26268 }
26269 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26270 vcc->pop(vcc, skb);
26271 else
26272 dev_kfree_skb_any(skb);
26273 - atomic_inc(&vcc->stats->tx_err);
26274 + atomic_inc_unchecked(&vcc->stats->tx_err);
26275 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26276 return -ENOMEM;
26277 }
26278 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26279 __enqueue_tpd(he_dev, tpd, cid);
26280 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26281
26282 - atomic_inc(&vcc->stats->tx);
26283 + atomic_inc_unchecked(&vcc->stats->tx);
26284
26285 return 0;
26286 }
26287 diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26288 --- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26289 +++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26290 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26291 {
26292 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26293 // VC layer stats
26294 - atomic_inc(&vcc->stats->rx);
26295 + atomic_inc_unchecked(&vcc->stats->rx);
26296 __net_timestamp(skb);
26297 // end of our responsability
26298 vcc->push (vcc, skb);
26299 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26300 dev->tx_iovec = NULL;
26301
26302 // VC layer stats
26303 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26304 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26305
26306 // free the skb
26307 hrz_kfree_skb (skb);
26308 diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26309 --- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26310 +++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26311 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26312 else
26313 dev_kfree_skb(skb);
26314
26315 - atomic_inc(&vcc->stats->tx);
26316 + atomic_inc_unchecked(&vcc->stats->tx);
26317 }
26318
26319 atomic_dec(&scq->used);
26320 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26321 if ((sb = dev_alloc_skb(64)) == NULL) {
26322 printk("%s: Can't allocate buffers for aal0.\n",
26323 card->name);
26324 - atomic_add(i, &vcc->stats->rx_drop);
26325 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26326 break;
26327 }
26328 if (!atm_charge(vcc, sb->truesize)) {
26329 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26330 card->name);
26331 - atomic_add(i - 1, &vcc->stats->rx_drop);
26332 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26333 dev_kfree_skb(sb);
26334 break;
26335 }
26336 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26337 ATM_SKB(sb)->vcc = vcc;
26338 __net_timestamp(sb);
26339 vcc->push(vcc, sb);
26340 - atomic_inc(&vcc->stats->rx);
26341 + atomic_inc_unchecked(&vcc->stats->rx);
26342
26343 cell += ATM_CELL_PAYLOAD;
26344 }
26345 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26346 "(CDC: %08x)\n",
26347 card->name, len, rpp->len, readl(SAR_REG_CDC));
26348 recycle_rx_pool_skb(card, rpp);
26349 - atomic_inc(&vcc->stats->rx_err);
26350 + atomic_inc_unchecked(&vcc->stats->rx_err);
26351 return;
26352 }
26353 if (stat & SAR_RSQE_CRC) {
26354 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26355 recycle_rx_pool_skb(card, rpp);
26356 - atomic_inc(&vcc->stats->rx_err);
26357 + atomic_inc_unchecked(&vcc->stats->rx_err);
26358 return;
26359 }
26360 if (skb_queue_len(&rpp->queue) > 1) {
26361 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26362 RXPRINTK("%s: Can't alloc RX skb.\n",
26363 card->name);
26364 recycle_rx_pool_skb(card, rpp);
26365 - atomic_inc(&vcc->stats->rx_err);
26366 + atomic_inc_unchecked(&vcc->stats->rx_err);
26367 return;
26368 }
26369 if (!atm_charge(vcc, skb->truesize)) {
26370 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26371 __net_timestamp(skb);
26372
26373 vcc->push(vcc, skb);
26374 - atomic_inc(&vcc->stats->rx);
26375 + atomic_inc_unchecked(&vcc->stats->rx);
26376
26377 return;
26378 }
26379 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26380 __net_timestamp(skb);
26381
26382 vcc->push(vcc, skb);
26383 - atomic_inc(&vcc->stats->rx);
26384 + atomic_inc_unchecked(&vcc->stats->rx);
26385
26386 if (skb->truesize > SAR_FB_SIZE_3)
26387 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26388 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26389 if (vcc->qos.aal != ATM_AAL0) {
26390 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26391 card->name, vpi, vci);
26392 - atomic_inc(&vcc->stats->rx_drop);
26393 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26394 goto drop;
26395 }
26396
26397 if ((sb = dev_alloc_skb(64)) == NULL) {
26398 printk("%s: Can't allocate buffers for AAL0.\n",
26399 card->name);
26400 - atomic_inc(&vcc->stats->rx_err);
26401 + atomic_inc_unchecked(&vcc->stats->rx_err);
26402 goto drop;
26403 }
26404
26405 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26406 ATM_SKB(sb)->vcc = vcc;
26407 __net_timestamp(sb);
26408 vcc->push(vcc, sb);
26409 - atomic_inc(&vcc->stats->rx);
26410 + atomic_inc_unchecked(&vcc->stats->rx);
26411
26412 drop:
26413 skb_pull(queue, 64);
26414 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26415
26416 if (vc == NULL) {
26417 printk("%s: NULL connection in send().\n", card->name);
26418 - atomic_inc(&vcc->stats->tx_err);
26419 + atomic_inc_unchecked(&vcc->stats->tx_err);
26420 dev_kfree_skb(skb);
26421 return -EINVAL;
26422 }
26423 if (!test_bit(VCF_TX, &vc->flags)) {
26424 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26425 - atomic_inc(&vcc->stats->tx_err);
26426 + atomic_inc_unchecked(&vcc->stats->tx_err);
26427 dev_kfree_skb(skb);
26428 return -EINVAL;
26429 }
26430 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26431 break;
26432 default:
26433 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26434 - atomic_inc(&vcc->stats->tx_err);
26435 + atomic_inc_unchecked(&vcc->stats->tx_err);
26436 dev_kfree_skb(skb);
26437 return -EINVAL;
26438 }
26439
26440 if (skb_shinfo(skb)->nr_frags != 0) {
26441 printk("%s: No scatter-gather yet.\n", card->name);
26442 - atomic_inc(&vcc->stats->tx_err);
26443 + atomic_inc_unchecked(&vcc->stats->tx_err);
26444 dev_kfree_skb(skb);
26445 return -EINVAL;
26446 }
26447 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26448
26449 err = queue_skb(card, vc, skb, oam);
26450 if (err) {
26451 - atomic_inc(&vcc->stats->tx_err);
26452 + atomic_inc_unchecked(&vcc->stats->tx_err);
26453 dev_kfree_skb(skb);
26454 return err;
26455 }
26456 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26457 skb = dev_alloc_skb(64);
26458 if (!skb) {
26459 printk("%s: Out of memory in send_oam().\n", card->name);
26460 - atomic_inc(&vcc->stats->tx_err);
26461 + atomic_inc_unchecked(&vcc->stats->tx_err);
26462 return -ENOMEM;
26463 }
26464 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26465 diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26466 --- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26467 +++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26468 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26469 status = (u_short) (buf_desc_ptr->desc_mode);
26470 if (status & (RX_CER | RX_PTE | RX_OFL))
26471 {
26472 - atomic_inc(&vcc->stats->rx_err);
26473 + atomic_inc_unchecked(&vcc->stats->rx_err);
26474 IF_ERR(printk("IA: bad packet, dropping it");)
26475 if (status & RX_CER) {
26476 IF_ERR(printk(" cause: packet CRC error\n");)
26477 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26478 len = dma_addr - buf_addr;
26479 if (len > iadev->rx_buf_sz) {
26480 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26481 - atomic_inc(&vcc->stats->rx_err);
26482 + atomic_inc_unchecked(&vcc->stats->rx_err);
26483 goto out_free_desc;
26484 }
26485
26486 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26487 ia_vcc = INPH_IA_VCC(vcc);
26488 if (ia_vcc == NULL)
26489 {
26490 - atomic_inc(&vcc->stats->rx_err);
26491 + atomic_inc_unchecked(&vcc->stats->rx_err);
26492 dev_kfree_skb_any(skb);
26493 atm_return(vcc, atm_guess_pdu2truesize(len));
26494 goto INCR_DLE;
26495 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26496 if ((length > iadev->rx_buf_sz) || (length >
26497 (skb->len - sizeof(struct cpcs_trailer))))
26498 {
26499 - atomic_inc(&vcc->stats->rx_err);
26500 + atomic_inc_unchecked(&vcc->stats->rx_err);
26501 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26502 length, skb->len);)
26503 dev_kfree_skb_any(skb);
26504 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26505
26506 IF_RX(printk("rx_dle_intr: skb push");)
26507 vcc->push(vcc,skb);
26508 - atomic_inc(&vcc->stats->rx);
26509 + atomic_inc_unchecked(&vcc->stats->rx);
26510 iadev->rx_pkt_cnt++;
26511 }
26512 INCR_DLE:
26513 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26514 {
26515 struct k_sonet_stats *stats;
26516 stats = &PRIV(_ia_dev[board])->sonet_stats;
26517 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26518 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26519 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26520 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26521 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26522 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26523 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26524 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26525 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26526 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26527 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26528 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26529 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26530 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26531 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26532 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26533 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26534 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26535 }
26536 ia_cmds.status = 0;
26537 break;
26538 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26539 if ((desc == 0) || (desc > iadev->num_tx_desc))
26540 {
26541 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26542 - atomic_inc(&vcc->stats->tx);
26543 + atomic_inc_unchecked(&vcc->stats->tx);
26544 if (vcc->pop)
26545 vcc->pop(vcc, skb);
26546 else
26547 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26548 ATM_DESC(skb) = vcc->vci;
26549 skb_queue_tail(&iadev->tx_dma_q, skb);
26550
26551 - atomic_inc(&vcc->stats->tx);
26552 + atomic_inc_unchecked(&vcc->stats->tx);
26553 iadev->tx_pkt_cnt++;
26554 /* Increment transaction counter */
26555 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26556
26557 #if 0
26558 /* add flow control logic */
26559 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26560 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26561 if (iavcc->vc_desc_cnt > 10) {
26562 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26563 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26564 diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26565 --- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26566 +++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26567 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26568 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26569 lanai_endtx(lanai, lvcc);
26570 lanai_free_skb(lvcc->tx.atmvcc, skb);
26571 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26572 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26573 }
26574
26575 /* Try to fill the buffer - don't call unless there is backlog */
26576 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26577 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26578 __net_timestamp(skb);
26579 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26580 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26581 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26582 out:
26583 lvcc->rx.buf.ptr = end;
26584 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26585 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26586 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26587 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26588 lanai->stats.service_rxnotaal5++;
26589 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26590 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26591 return 0;
26592 }
26593 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26594 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26595 int bytes;
26596 read_unlock(&vcc_sklist_lock);
26597 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26598 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26599 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26600 lvcc->stats.x.aal5.service_trash++;
26601 bytes = (SERVICE_GET_END(s) * 16) -
26602 (((unsigned long) lvcc->rx.buf.ptr) -
26603 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26604 }
26605 if (s & SERVICE_STREAM) {
26606 read_unlock(&vcc_sklist_lock);
26607 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26608 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26609 lvcc->stats.x.aal5.service_stream++;
26610 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26611 "PDU on VCI %d!\n", lanai->number, vci);
26612 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26613 return 0;
26614 }
26615 DPRINTK("got rx crc error on vci %d\n", vci);
26616 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26617 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26618 lvcc->stats.x.aal5.service_rxcrc++;
26619 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26620 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26621 diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26622 --- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26623 +++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26624 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26625 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26626 {
26627 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26628 - atomic_inc(&vcc->stats->tx_err);
26629 + atomic_inc_unchecked(&vcc->stats->tx_err);
26630 dev_kfree_skb_any(skb);
26631 return -EINVAL;
26632 }
26633 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26634 if (!vc->tx)
26635 {
26636 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26637 - atomic_inc(&vcc->stats->tx_err);
26638 + atomic_inc_unchecked(&vcc->stats->tx_err);
26639 dev_kfree_skb_any(skb);
26640 return -EINVAL;
26641 }
26642 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26643 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26644 {
26645 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26646 - atomic_inc(&vcc->stats->tx_err);
26647 + atomic_inc_unchecked(&vcc->stats->tx_err);
26648 dev_kfree_skb_any(skb);
26649 return -EINVAL;
26650 }
26651 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26652 if (skb_shinfo(skb)->nr_frags != 0)
26653 {
26654 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26655 - atomic_inc(&vcc->stats->tx_err);
26656 + atomic_inc_unchecked(&vcc->stats->tx_err);
26657 dev_kfree_skb_any(skb);
26658 return -EINVAL;
26659 }
26660 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26661
26662 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26663 {
26664 - atomic_inc(&vcc->stats->tx_err);
26665 + atomic_inc_unchecked(&vcc->stats->tx_err);
26666 dev_kfree_skb_any(skb);
26667 return -EIO;
26668 }
26669 - atomic_inc(&vcc->stats->tx);
26670 + atomic_inc_unchecked(&vcc->stats->tx);
26671
26672 return 0;
26673 }
26674 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26675 {
26676 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26677 card->index);
26678 - atomic_add(i,&vcc->stats->rx_drop);
26679 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26680 break;
26681 }
26682 if (!atm_charge(vcc, sb->truesize))
26683 {
26684 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26685 card->index);
26686 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26687 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26688 dev_kfree_skb_any(sb);
26689 break;
26690 }
26691 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26692 ATM_SKB(sb)->vcc = vcc;
26693 __net_timestamp(sb);
26694 vcc->push(vcc, sb);
26695 - atomic_inc(&vcc->stats->rx);
26696 + atomic_inc_unchecked(&vcc->stats->rx);
26697 cell += ATM_CELL_PAYLOAD;
26698 }
26699
26700 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26701 if (iovb == NULL)
26702 {
26703 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26704 - atomic_inc(&vcc->stats->rx_drop);
26705 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26706 recycle_rx_buf(card, skb);
26707 return;
26708 }
26709 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26710 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26711 {
26712 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26713 - atomic_inc(&vcc->stats->rx_err);
26714 + atomic_inc_unchecked(&vcc->stats->rx_err);
26715 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26716 NS_SKB(iovb)->iovcnt = 0;
26717 iovb->len = 0;
26718 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26719 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26720 card->index);
26721 which_list(card, skb);
26722 - atomic_inc(&vcc->stats->rx_err);
26723 + atomic_inc_unchecked(&vcc->stats->rx_err);
26724 recycle_rx_buf(card, skb);
26725 vc->rx_iov = NULL;
26726 recycle_iov_buf(card, iovb);
26727 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26728 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26729 card->index);
26730 which_list(card, skb);
26731 - atomic_inc(&vcc->stats->rx_err);
26732 + atomic_inc_unchecked(&vcc->stats->rx_err);
26733 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26734 NS_SKB(iovb)->iovcnt);
26735 vc->rx_iov = NULL;
26736 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26737 printk(" - PDU size mismatch.\n");
26738 else
26739 printk(".\n");
26740 - atomic_inc(&vcc->stats->rx_err);
26741 + atomic_inc_unchecked(&vcc->stats->rx_err);
26742 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26743 NS_SKB(iovb)->iovcnt);
26744 vc->rx_iov = NULL;
26745 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26746 if (!atm_charge(vcc, skb->truesize))
26747 {
26748 push_rxbufs(card, skb);
26749 - atomic_inc(&vcc->stats->rx_drop);
26750 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26751 }
26752 else
26753 {
26754 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26755 ATM_SKB(skb)->vcc = vcc;
26756 __net_timestamp(skb);
26757 vcc->push(vcc, skb);
26758 - atomic_inc(&vcc->stats->rx);
26759 + atomic_inc_unchecked(&vcc->stats->rx);
26760 }
26761 }
26762 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26763 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26764 if (!atm_charge(vcc, sb->truesize))
26765 {
26766 push_rxbufs(card, sb);
26767 - atomic_inc(&vcc->stats->rx_drop);
26768 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26769 }
26770 else
26771 {
26772 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26773 ATM_SKB(sb)->vcc = vcc;
26774 __net_timestamp(sb);
26775 vcc->push(vcc, sb);
26776 - atomic_inc(&vcc->stats->rx);
26777 + atomic_inc_unchecked(&vcc->stats->rx);
26778 }
26779
26780 push_rxbufs(card, skb);
26781 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26782 if (!atm_charge(vcc, skb->truesize))
26783 {
26784 push_rxbufs(card, skb);
26785 - atomic_inc(&vcc->stats->rx_drop);
26786 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26787 }
26788 else
26789 {
26790 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26791 ATM_SKB(skb)->vcc = vcc;
26792 __net_timestamp(skb);
26793 vcc->push(vcc, skb);
26794 - atomic_inc(&vcc->stats->rx);
26795 + atomic_inc_unchecked(&vcc->stats->rx);
26796 }
26797
26798 push_rxbufs(card, sb);
26799 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26800 if (hb == NULL)
26801 {
26802 printk("nicstar%d: Out of huge buffers.\n", card->index);
26803 - atomic_inc(&vcc->stats->rx_drop);
26804 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26805 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26806 NS_SKB(iovb)->iovcnt);
26807 vc->rx_iov = NULL;
26808 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26809 }
26810 else
26811 dev_kfree_skb_any(hb);
26812 - atomic_inc(&vcc->stats->rx_drop);
26813 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26814 }
26815 else
26816 {
26817 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26818 #endif /* NS_USE_DESTRUCTORS */
26819 __net_timestamp(hb);
26820 vcc->push(vcc, hb);
26821 - atomic_inc(&vcc->stats->rx);
26822 + atomic_inc_unchecked(&vcc->stats->rx);
26823 }
26824 }
26825
26826 diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
26827 --- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26828 +++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26829 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26830 }
26831 atm_charge(vcc, skb->truesize);
26832 vcc->push(vcc, skb);
26833 - atomic_inc(&vcc->stats->rx);
26834 + atomic_inc_unchecked(&vcc->stats->rx);
26835 break;
26836
26837 case PKT_STATUS:
26838 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26839 char msg[500];
26840 char item[10];
26841
26842 + pax_track_stack();
26843 +
26844 len = buf->len;
26845 for (i = 0; i < len; i++){
26846 if(i % 8 == 0)
26847 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26848 vcc = SKB_CB(oldskb)->vcc;
26849
26850 if (vcc) {
26851 - atomic_inc(&vcc->stats->tx);
26852 + atomic_inc_unchecked(&vcc->stats->tx);
26853 solos_pop(vcc, oldskb);
26854 } else
26855 dev_kfree_skb_irq(oldskb);
26856 diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
26857 --- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26858 +++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26859 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26860
26861
26862 #define ADD_LIMITED(s,v) \
26863 - atomic_add((v),&stats->s); \
26864 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26865 + atomic_add_unchecked((v),&stats->s); \
26866 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26867
26868
26869 static void suni_hz(unsigned long from_timer)
26870 diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
26871 --- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26872 +++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26873 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26874 struct sonet_stats tmp;
26875 int error = 0;
26876
26877 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26878 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26879 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26880 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26881 if (zero && !error) {
26882 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26883
26884
26885 #define ADD_LIMITED(s,v) \
26886 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26887 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26888 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26889 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26890 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26891 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26892
26893
26894 static void stat_event(struct atm_dev *dev)
26895 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26896 if (reason & uPD98402_INT_PFM) stat_event(dev);
26897 if (reason & uPD98402_INT_PCO) {
26898 (void) GET(PCOCR); /* clear interrupt cause */
26899 - atomic_add(GET(HECCT),
26900 + atomic_add_unchecked(GET(HECCT),
26901 &PRIV(dev)->sonet_stats.uncorr_hcs);
26902 }
26903 if ((reason & uPD98402_INT_RFO) &&
26904 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26905 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26906 uPD98402_INT_LOS),PIMR); /* enable them */
26907 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26908 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26909 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26910 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26911 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26912 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26913 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26914 return 0;
26915 }
26916
26917 diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
26918 --- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26919 +++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26920 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26921 }
26922 if (!size) {
26923 dev_kfree_skb_irq(skb);
26924 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26925 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26926 continue;
26927 }
26928 if (!atm_charge(vcc,skb->truesize)) {
26929 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26930 skb->len = size;
26931 ATM_SKB(skb)->vcc = vcc;
26932 vcc->push(vcc,skb);
26933 - atomic_inc(&vcc->stats->rx);
26934 + atomic_inc_unchecked(&vcc->stats->rx);
26935 }
26936 zout(pos & 0xffff,MTA(mbx));
26937 #if 0 /* probably a stupid idea */
26938 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26939 skb_queue_head(&zatm_vcc->backlog,skb);
26940 break;
26941 }
26942 - atomic_inc(&vcc->stats->tx);
26943 + atomic_inc_unchecked(&vcc->stats->tx);
26944 wake_up(&zatm_vcc->tx_wait);
26945 }
26946
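
The ATM driver hunks above all apply one pattern: every atomic_inc()/atomic_add()/atomic_read()/atomic_set() on the vcc->stats and SONET statistics counters becomes the corresponding *_unchecked operation on an atomic_unchecked_t. These counters are free-running statistics that are allowed to wrap, while the patch's PAX_REFCOUNT feature adds signed-overflow detection to the regular atomic_t operations to catch reference-count overflows; moving the wrap-tolerant counters to the unchecked type keeps them out of that check. A minimal sketch of the split follows -- the names are illustrative, and the real definitions are arch-specific locked assembly with the overflow check only active under CONFIG_PAX_REFCOUNT.

    /* Illustrative sketch only: the real atomic_t/atomic_unchecked_t live in
     * arch headers and use locked asm; the "_sketch" names are not kernel APIs. */
    typedef struct { volatile int counter; } atomic_sketch_t;
    typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

    static inline void atomic_inc_sketch(atomic_sketch_t *v)
    {
            /* checked flavour: refcount-style counter, where overflow
             * detection (PAX_REFCOUNT) is wanted */
            v->counter++;
    }

    static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
    {
            /* unchecked flavour: plain wrapping increment for statistics such
             * as vcc->stats->rx/tx above, where overflow is harmless */
            v->counter++;
    }

Keeping the two counter kinds as distinct C types also means the compiler flags any later code that mixes a statistics counter with a real reference count.
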
26947 diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
26948 --- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26949 +++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26950 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26951 return ret;
26952 }
26953
26954 -static struct sysfs_ops driver_sysfs_ops = {
26955 +static const struct sysfs_ops driver_sysfs_ops = {
26956 .show = drv_attr_show,
26957 .store = drv_attr_store,
26958 };
26959 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26960 return ret;
26961 }
26962
26963 -static struct sysfs_ops bus_sysfs_ops = {
26964 +static const struct sysfs_ops bus_sysfs_ops = {
26965 .show = bus_attr_show,
26966 .store = bus_attr_store,
26967 };
26968 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26969 return 0;
26970 }
26971
26972 -static struct kset_uevent_ops bus_uevent_ops = {
26973 +static const struct kset_uevent_ops bus_uevent_ops = {
26974 .filter = bus_uevent_filter,
26975 };
26976
26977 diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
26978 --- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26979 +++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26980 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26981 kfree(cp);
26982 }
26983
26984 -static struct sysfs_ops class_sysfs_ops = {
26985 +static const struct sysfs_ops class_sysfs_ops = {
26986 .show = class_attr_show,
26987 .store = class_attr_store,
26988 };
26989 diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
26990 --- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26991 +++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26992 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26993 return ret;
26994 }
26995
26996 -static struct sysfs_ops dev_sysfs_ops = {
26997 +static const struct sysfs_ops dev_sysfs_ops = {
26998 .show = dev_attr_show,
26999 .store = dev_attr_store,
27000 };
27001 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27002 return retval;
27003 }
27004
27005 -static struct kset_uevent_ops device_uevent_ops = {
27006 +static const struct kset_uevent_ops device_uevent_ops = {
27007 .filter = dev_uevent_filter,
27008 .name = dev_uevent_name,
27009 .uevent = dev_uevent,
27010 diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27011 --- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27012 +++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27013 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27014 return retval;
27015 }
27016
27017 -static struct kset_uevent_ops memory_uevent_ops = {
27018 +static const struct kset_uevent_ops memory_uevent_ops = {
27019 .name = memory_uevent_name,
27020 .uevent = memory_uevent,
27021 };
27022 diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27023 --- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27024 +++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27025 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27026 return -EIO;
27027 }
27028
27029 -static struct sysfs_ops sysfs_ops = {
27030 +static const struct sysfs_ops sysfs_ops = {
27031 .show = sysdev_show,
27032 .store = sysdev_store,
27033 };
27034 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27035 return -EIO;
27036 }
27037
27038 -static struct sysfs_ops sysfs_class_ops = {
27039 +static const struct sysfs_ops sysfs_class_ops = {
27040 .show = sysdev_class_show,
27041 .store = sysdev_class_store,
27042 };
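
The drivers/base hunks above add const to the sysfs_ops and kset_uevent_ops instances. These tables hold nothing but function pointers fixed at build time, so qualifying the objects const lets the compiler place them in a read-only section and removes a set of writable kernel function pointers that an attacker with a kernel write primitive could otherwise redirect; it is one instance of the patch's broader constification of ops structures. The sketch below uses a stand-in struct, not the real sysfs_ops, to show what the added const changes.

    /* Stand-in struct for illustration; field and symbol names are invented. */
    struct ops_sketch {
            long (*show)(void);
            long (*store)(void);
    };

    static long show_sketch(void)  { return 0; }
    static long store_sketch(void) { return 0; }

    /* without const: the table lands in writable .data, so one arbitrary
     * kernel write can retarget ->show or ->store */
    static struct ops_sketch ops_writable = { show_sketch, store_sketch };

    /* with const, as in the hunks above: the table lands in .rodata and stays
     * read-only for the life of the kernel */
    static const struct ops_sketch ops_readonly = { show_sketch, store_sketch };

Callers need no change beyond accepting a const-qualified pointer, which is why these hunks touch only the definitions.
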
27043 diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27044 --- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27045 +++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27046 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27047 int err;
27048 u32 cp;
27049
27050 + memset(&arg64, 0, sizeof(arg64));
27051 +
27052 err = 0;
27053 err |=
27054 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27055 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27056 /* Wait (up to 20 seconds) for a command to complete */
27057
27058 for (i = 20 * HZ; i > 0; i--) {
27059 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27060 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27061 if (done == FIFO_EMPTY)
27062 schedule_timeout_uninterruptible(1);
27063 else
27064 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27065 resend_cmd1:
27066
27067 /* Disable interrupt on the board. */
27068 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27069 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27070
27071 /* Make sure there is room in the command FIFO */
27072 /* Actually it should be completely empty at this time */
27073 @@ -2884,13 +2886,13 @@ resend_cmd1:
27074 /* tape side of the driver. */
27075 for (i = 200000; i > 0; i--) {
27076 /* if fifo isn't full go */
27077 - if (!(h->access.fifo_full(h)))
27078 + if (!(h->access->fifo_full(h)))
27079 break;
27080 udelay(10);
27081 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27082 " waiting!\n", h->ctlr);
27083 }
27084 - h->access.submit_command(h, c); /* Send the cmd */
27085 + h->access->submit_command(h, c); /* Send the cmd */
27086 do {
27087 complete = pollcomplete(h->ctlr);
27088
27089 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27090 while (!hlist_empty(&h->reqQ)) {
27091 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27092 /* can't do anything if fifo is full */
27093 - if ((h->access.fifo_full(h))) {
27094 + if ((h->access->fifo_full(h))) {
27095 printk(KERN_WARNING "cciss: fifo full\n");
27096 break;
27097 }
27098 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27099 h->Qdepth--;
27100
27101 /* Tell the controller execute command */
27102 - h->access.submit_command(h, c);
27103 + h->access->submit_command(h, c);
27104
27105 /* Put job onto the completed Q */
27106 addQ(&h->cmpQ, c);
27107 @@ -3393,17 +3395,17 @@ startio:
27108
27109 static inline unsigned long get_next_completion(ctlr_info_t *h)
27110 {
27111 - return h->access.command_completed(h);
27112 + return h->access->command_completed(h);
27113 }
27114
27115 static inline int interrupt_pending(ctlr_info_t *h)
27116 {
27117 - return h->access.intr_pending(h);
27118 + return h->access->intr_pending(h);
27119 }
27120
27121 static inline long interrupt_not_for_us(ctlr_info_t *h)
27122 {
27123 - return (((h->access.intr_pending(h) == 0) ||
27124 + return (((h->access->intr_pending(h) == 0) ||
27125 (h->interrupts_enabled == 0)));
27126 }
27127
27128 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27129 */
27130 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27131 c->product_name = products[prod_index].product_name;
27132 - c->access = *(products[prod_index].access);
27133 + c->access = products[prod_index].access;
27134 c->nr_cmds = c->max_commands - 4;
27135 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27136 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27137 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27138 }
27139
27140 /* make sure the board interrupts are off */
27141 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27142 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27143 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27144 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27145 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27146 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27147 cciss_scsi_setup(i);
27148
27149 /* Turn the interrupts on so we can service requests */
27150 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27151 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27152
27153 /* Get the firmware version */
27154 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27155 diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27156 --- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27157 +++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27158 @@ -90,7 +90,7 @@ struct ctlr_info
27159 // information about each logical volume
27160 drive_info_struct *drv[CISS_MAX_LUN];
27161
27162 - struct access_method access;
27163 + struct access_method *access;
27164
27165 /* queue and queue Info */
27166 struct hlist_head reqQ;
27167 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27168 --- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27169 +++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27170 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27171 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27172 goto Enomem4;
27173 }
27174 - hba[i]->access.set_intr_mask(hba[i], 0);
27175 + hba[i]->access->set_intr_mask(hba[i], 0);
27176 if (request_irq(hba[i]->intr, do_ida_intr,
27177 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27178 {
27179 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27180 add_timer(&hba[i]->timer);
27181
27182 /* Enable IRQ now that spinlock and rate limit timer are set up */
27183 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27184 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27185
27186 for(j=0; j<NWD; j++) {
27187 struct gendisk *disk = ida_gendisk[i][j];
27188 @@ -695,7 +695,7 @@ DBGINFO(
27189 for(i=0; i<NR_PRODUCTS; i++) {
27190 if (board_id == products[i].board_id) {
27191 c->product_name = products[i].product_name;
27192 - c->access = *(products[i].access);
27193 + c->access = products[i].access;
27194 break;
27195 }
27196 }
27197 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27198 hba[ctlr]->intr = intr;
27199 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27200 hba[ctlr]->product_name = products[j].product_name;
27201 - hba[ctlr]->access = *(products[j].access);
27202 + hba[ctlr]->access = products[j].access;
27203 hba[ctlr]->ctlr = ctlr;
27204 hba[ctlr]->board_id = board_id;
27205 hba[ctlr]->pci_dev = NULL; /* not PCI */
27206 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27207 struct scatterlist tmp_sg[SG_MAX];
27208 int i, dir, seg;
27209
27210 + pax_track_stack();
27211 +
27212 if (blk_queue_plugged(q))
27213 goto startio;
27214
27215 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27216
27217 while((c = h->reqQ) != NULL) {
27218 /* Can't do anything if we're busy */
27219 - if (h->access.fifo_full(h) == 0)
27220 + if (h->access->fifo_full(h) == 0)
27221 return;
27222
27223 /* Get the first entry from the request Q */
27224 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27225 h->Qdepth--;
27226
27227 /* Tell the controller to do our bidding */
27228 - h->access.submit_command(h, c);
27229 + h->access->submit_command(h, c);
27230
27231 /* Get onto the completion Q */
27232 addQ(&h->cmpQ, c);
27233 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27234 unsigned long flags;
27235 __u32 a,a1;
27236
27237 - istat = h->access.intr_pending(h);
27238 + istat = h->access->intr_pending(h);
27239 /* Is this interrupt for us? */
27240 if (istat == 0)
27241 return IRQ_NONE;
27242 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27243 */
27244 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27245 if (istat & FIFO_NOT_EMPTY) {
27246 - while((a = h->access.command_completed(h))) {
27247 + while((a = h->access->command_completed(h))) {
27248 a1 = a; a &= ~3;
27249 if ((c = h->cmpQ) == NULL)
27250 {
27251 @@ -1434,11 +1436,11 @@ static int sendcmd(
27252 /*
27253 * Disable interrupt
27254 */
27255 - info_p->access.set_intr_mask(info_p, 0);
27256 + info_p->access->set_intr_mask(info_p, 0);
27257 /* Make sure there is room in the command FIFO */
27258 /* Actually it should be completely empty at this time. */
27259 for (i = 200000; i > 0; i--) {
27260 - temp = info_p->access.fifo_full(info_p);
27261 + temp = info_p->access->fifo_full(info_p);
27262 if (temp != 0) {
27263 break;
27264 }
27265 @@ -1451,7 +1453,7 @@ DBG(
27266 /*
27267 * Send the cmd
27268 */
27269 - info_p->access.submit_command(info_p, c);
27270 + info_p->access->submit_command(info_p, c);
27271 complete = pollcomplete(ctlr);
27272
27273 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27274 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27275 * we check the new geometry. Then turn interrupts back on when
27276 * we're done.
27277 */
27278 - host->access.set_intr_mask(host, 0);
27279 + host->access->set_intr_mask(host, 0);
27280 getgeometry(ctlr);
27281 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27282 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27283
27284 for(i=0; i<NWD; i++) {
27285 struct gendisk *disk = ida_gendisk[ctlr][i];
27286 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27287 /* Wait (up to 2 seconds) for a command to complete */
27288
27289 for (i = 200000; i > 0; i--) {
27290 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27291 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27292 if (done == 0) {
27293 udelay(10); /* a short fixed delay */
27294 } else
27295 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27296 --- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27297 +++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27298 @@ -99,7 +99,7 @@ struct ctlr_info {
27299 drv_info_t drv[NWD];
27300 struct proc_dir_entry *proc;
27301
27302 - struct access_method access;
27303 + struct access_method *access;
27304
27305 cmdlist_t *reqQ;
27306 cmdlist_t *cmpQ;
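
In the cciss and cpqarray hunks above, struct ctlr_info previously embedded its own copy of the product's struct access_method (c->access = *(products[prod_index].access)) and dispatched through h->access.fn(...). The member becomes a pointer, so each controller now references the shared per-product method table and the call sites become h->access->fn(...). That drops one writable copy of a function-pointer table per controller, in the same hardening direction as the constification above. The sketch below uses invented names to show the shape of the change, not the drivers' real types.

    /* Invented names for illustration; the real table is the drivers'
     * products[].access (struct access_method). */
    struct access_sketch {
            void (*submit_command)(void *h, void *c);
            unsigned long (*command_completed)(void *h);
    };

    struct ctlr_before { struct access_sketch access; };    /* embedded, writable copy    */
    struct ctlr_after  { struct access_sketch *access; };   /* points at the shared table */

    /* before: ctlr.access = *(product->access);  ctlr.access.submit_command(h, c);
     * after:  ctlr.access =   product->access;   ctlr.access->submit_command(h, c);  */
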
27307 diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27308 --- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27309 +++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27310 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27311 unsigned long flags;
27312 int Channel, TargetID;
27313
27314 + pax_track_stack();
27315 +
27316 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27317 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27318 sizeof(DAC960_SCSI_Inquiry_T) +
27319 diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27320 --- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27321 +++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27322 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27323 struct kvec iov;
27324 sigset_t blocked, oldset;
27325
27326 + pax_track_stack();
27327 +
27328 if (unlikely(!sock)) {
27329 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27330 lo->disk->disk_name, (send ? "send" : "recv"));
27331 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27332 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27333 unsigned int cmd, unsigned long arg)
27334 {
27335 + pax_track_stack();
27336 +
27337 switch (cmd) {
27338 case NBD_DISCONNECT: {
27339 struct request sreq;
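
The DAC960 and nbd hunks above add a pax_track_stack() call at the top of functions with unusually large stack frames (the DMA loaf and inquiry buffers in DAC960_V1_ReadDeviceConfiguration(), the kvec and signal sets in sock_xmit(), the on-stack request in __nbd_ioctl()). Under the patch's PAX_MEMORY_STACKLEAK feature this records how deeply the kernel stack has been used, so the used region can be cleared before returning to user space, closing off information leaks through stale kernel-stack data. The fragment below is only a conceptual sketch: the real helper is per-arch assembly and keeps the watermark per task rather than in a global.

    /* Conceptual sketch only; names are invented and the watermark really
     * lives in the task struct, not a file-scope variable. */
    static unsigned long lowest_stack_sketch;

    static inline void pax_track_stack_sketch(void)
    {
            unsigned long sp = (unsigned long)&sp;   /* address of a local ~ current stack depth */

            if (!lowest_stack_sketch || sp < lowest_stack_sketch)
                    lowest_stack_sketch = sp;        /* STACKLEAK later wipes the stack down to here */
    }
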
27340 diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27341 --- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27342 +++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27343 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27344 return len;
27345 }
27346
27347 -static struct sysfs_ops kobj_pkt_ops = {
27348 +static const struct sysfs_ops kobj_pkt_ops = {
27349 .show = kobj_pkt_show,
27350 .store = kobj_pkt_store
27351 };
27352 diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27353 --- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27354 +++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27355 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27356 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27357 return -EFAULT;
27358
27359 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27360 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27361 return -EFAULT;
27362
27363 client = agp_find_client_by_pid(reserve.pid);
27364 diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27365 --- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27366 +++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27367 @@ -10,6 +10,7 @@
27368 #include <linux/types.h>
27369 #include <linux/errno.h>
27370 #include <linux/tty.h>
27371 +#include <linux/mutex.h>
27372 #include <linux/timer.h>
27373 #include <linux/kernel.h>
27374 #include <linux/wait.h>
27375 @@ -36,6 +37,7 @@ static int vfd_is_open;
27376 static unsigned char vfd[40];
27377 static int vfd_cursor;
27378 static unsigned char ledpb, led;
27379 +static DEFINE_MUTEX(vfd_mutex);
27380
27381 static void update_vfd(void)
27382 {
27383 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27384 if (!vfd_is_open)
27385 return -EBUSY;
27386
27387 + mutex_lock(&vfd_mutex);
27388 for (;;) {
27389 char c;
27390 if (!indx)
27391 break;
27392 - if (get_user(c, buf))
27393 + if (get_user(c, buf)) {
27394 + mutex_unlock(&vfd_mutex);
27395 return -EFAULT;
27396 + }
27397 if (esc) {
27398 set_led(c);
27399 esc = 0;
27400 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27401 buf++;
27402 }
27403 update_vfd();
27404 + mutex_unlock(&vfd_mutex);
27405
27406 return len;
27407 }
27408 diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27409 --- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27410 +++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27411 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27412 switch (cmd) {
27413
27414 case RTC_PLL_GET:
27415 + memset(&pll, 0, sizeof(pll));
27416 if (get_rtc_pll(&pll))
27417 return -EINVAL;
27418 else
27419 diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27420 --- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27421 +++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27422 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27423 return 0;
27424 }
27425
27426 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27427 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27428
27429 static int
27430 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27431 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27432 }
27433
27434 static int
27435 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27436 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27437 {
27438 struct hpet_timer __iomem *timer;
27439 struct hpet __iomem *hpet;
27440 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27441 {
27442 struct hpet_info info;
27443
27444 + memset(&info, 0, sizeof(info));
27445 +
27446 if (devp->hd_ireqfreq)
27447 info.hi_ireqfreq =
27448 hpet_time_div(hpetp, devp->hd_ireqfreq);
27449 - else
27450 - info.hi_ireqfreq = 0;
27451 info.hi_flags =
27452 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27453 info.hi_hpet = hpetp->hp_which;
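
The cciss_ioctl32_passthru(), genrtc RTC_PLL_GET and hpet_ioctl_common() hunks above all add a memset() of a local structure before it is filled in and copied to user space (hpet can then drop the now-redundant "else info.hi_ireqfreq = 0;"). Without the memset, structure padding and any fields a given path never writes still hold stale kernel stack bytes, and copy_to_user() leaks them wholesale. The sketch below shows the fixed shape with a hypothetical structure; only the memset-before-fill pattern is the point.

    #include <string.h>

    /* Hypothetical structure: the padding after "freq" is exactly the kind of
     * hole the added memset()s are clearing. */
    struct info_sketch {
            unsigned short freq;
            unsigned long  flags;
    };

    static void fill_info_fixed(struct info_sketch *out, int have_freq, unsigned short freq)
    {
            memset(out, 0, sizeof(*out));   /* zero padding and any field left unset below */
            if (have_freq)
                    out->freq = freq;
            out->flags = 0;                 /* illustrative; real code reads hardware state */
            /* ...after which copy_to_user(uptr, out, sizeof(*out)) can no
             * longer leak uninitialized stack contents */
    }
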
27454 diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27455 --- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27456 +++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27457 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27458 return cnt;
27459 }
27460
27461 -static struct hv_ops hvc_beat_get_put_ops = {
27462 +static const struct hv_ops hvc_beat_get_put_ops = {
27463 .get_chars = hvc_beat_get_chars,
27464 .put_chars = hvc_beat_put_chars,
27465 };
27466 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27467 --- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27468 +++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27469 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27470 * console interfaces but can still be used as a tty device. This has to be
27471 * static because kmalloc will not work during early console init.
27472 */
27473 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27474 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27475 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27476 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27477
27478 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27479 * vty adapters do NOT get an hvc_instantiate() callback since they
27480 * appear after early console init.
27481 */
27482 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27483 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27484 {
27485 struct hvc_struct *hp;
27486
27487 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27488 };
27489
27490 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27491 - struct hv_ops *ops, int outbuf_size)
27492 + const struct hv_ops *ops, int outbuf_size)
27493 {
27494 struct hvc_struct *hp;
27495 int i;
27496 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27497 --- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27498 +++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27499 @@ -55,7 +55,7 @@ struct hvc_struct {
27500 int outbuf_size;
27501 int n_outbuf;
27502 uint32_t vtermno;
27503 - struct hv_ops *ops;
27504 + const struct hv_ops *ops;
27505 int irq_requested;
27506 int data;
27507 struct winsize ws;
27508 @@ -76,11 +76,11 @@ struct hv_ops {
27509 };
27510
27511 /* Register a vterm and a slot index for use as a console (console_init) */
27512 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27513 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27514
27515 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27516 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27517 - struct hv_ops *ops, int outbuf_size);
27518 + const struct hv_ops *ops, int outbuf_size);
27519 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27520 extern int hvc_remove(struct hvc_struct *hp);
27521
27522 diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27523 --- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27524 +++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27525 @@ -197,7 +197,7 @@ done:
27526 return sent;
27527 }
27528
27529 -static struct hv_ops hvc_get_put_ops = {
27530 +static const struct hv_ops hvc_get_put_ops = {
27531 .get_chars = get_chars,
27532 .put_chars = put_chars,
27533 .notifier_add = notifier_add_irq,
27534 diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27535 --- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27536 +++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27537 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27538
27539
27540 /* HVC operations */
27541 -static struct hv_ops hvc_iucv_ops = {
27542 +static const struct hv_ops hvc_iucv_ops = {
27543 .get_chars = hvc_iucv_get_chars,
27544 .put_chars = hvc_iucv_put_chars,
27545 .notifier_add = hvc_iucv_notifier_add,
27546 diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27547 --- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27548 +++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27549 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27550 return i;
27551 }
27552
27553 -static struct hv_ops hvc_rtas_get_put_ops = {
27554 +static const struct hv_ops hvc_rtas_get_put_ops = {
27555 .get_chars = hvc_rtas_read_console,
27556 .put_chars = hvc_rtas_write_console,
27557 };
27558 diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27559 --- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27560 +++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27561 @@ -82,6 +82,7 @@
27562 #include <asm/hvcserver.h>
27563 #include <asm/uaccess.h>
27564 #include <asm/vio.h>
27565 +#include <asm/local.h>
27566
27567 /*
27568 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27569 @@ -269,7 +270,7 @@ struct hvcs_struct {
27570 unsigned int index;
27571
27572 struct tty_struct *tty;
27573 - int open_count;
27574 + local_t open_count;
27575
27576 /*
27577 * Used to tell the driver kernel_thread what operations need to take
27578 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27579
27580 spin_lock_irqsave(&hvcsd->lock, flags);
27581
27582 - if (hvcsd->open_count > 0) {
27583 + if (local_read(&hvcsd->open_count) > 0) {
27584 spin_unlock_irqrestore(&hvcsd->lock, flags);
27585 printk(KERN_INFO "HVCS: vterm state unchanged. "
27586 "The hvcs device node is still in use.\n");
27587 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27588 if ((retval = hvcs_partner_connect(hvcsd)))
27589 goto error_release;
27590
27591 - hvcsd->open_count = 1;
27592 + local_set(&hvcsd->open_count, 1);
27593 hvcsd->tty = tty;
27594 tty->driver_data = hvcsd;
27595
27596 @@ -1169,7 +1170,7 @@ fast_open:
27597
27598 spin_lock_irqsave(&hvcsd->lock, flags);
27599 kref_get(&hvcsd->kref);
27600 - hvcsd->open_count++;
27601 + local_inc(&hvcsd->open_count);
27602 hvcsd->todo_mask |= HVCS_SCHED_READ;
27603 spin_unlock_irqrestore(&hvcsd->lock, flags);
27604
27605 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27606 hvcsd = tty->driver_data;
27607
27608 spin_lock_irqsave(&hvcsd->lock, flags);
27609 - if (--hvcsd->open_count == 0) {
27610 + if (local_dec_and_test(&hvcsd->open_count)) {
27611
27612 vio_disable_interrupts(hvcsd->vdev);
27613
27614 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27615 free_irq(irq, hvcsd);
27616 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27617 return;
27618 - } else if (hvcsd->open_count < 0) {
27619 + } else if (local_read(&hvcsd->open_count) < 0) {
27620 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27621 " is missmanaged.\n",
27622 - hvcsd->vdev->unit_address, hvcsd->open_count);
27623 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27624 }
27625
27626 spin_unlock_irqrestore(&hvcsd->lock, flags);
27627 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27628
27629 spin_lock_irqsave(&hvcsd->lock, flags);
27630 /* Preserve this so that we know how many kref refs to put */
27631 - temp_open_count = hvcsd->open_count;
27632 + temp_open_count = local_read(&hvcsd->open_count);
27633
27634 /*
27635 * Don't kref put inside the spinlock because the destruction
27636 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27637 hvcsd->tty->driver_data = NULL;
27638 hvcsd->tty = NULL;
27639
27640 - hvcsd->open_count = 0;
27641 + local_set(&hvcsd->open_count, 0);
27642
27643 /* This will drop any buffered data on the floor which is OK in a hangup
27644 * scenario. */
27645 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27646 * the middle of a write operation? This is a crummy place to do this
27647 * but we want to keep it all in the spinlock.
27648 */
27649 - if (hvcsd->open_count <= 0) {
27650 + if (local_read(&hvcsd->open_count) <= 0) {
27651 spin_unlock_irqrestore(&hvcsd->lock, flags);
27652 return -ENODEV;
27653 }
27654 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27655 {
27656 struct hvcs_struct *hvcsd = tty->driver_data;
27657
27658 - if (!hvcsd || hvcsd->open_count <= 0)
27659 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27660 return 0;
27661
27662 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
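
In the hvcs hunks above, the per-port open_count changes from a plain int to a local_t, and the direct assignments and arithmetic become local_set()/local_inc()/local_dec_and_test()/local_read(), with asm/local.h newly included. local_t operations compile down to single, interrupt-safe instructions on the local CPU (they are not cross-CPU atomic), so the apparent intent is to make the open-count updates immune to racy read-modify-write corruption without the cost of a full atomic_t; take that rationale as an editorial reading rather than something stated in the patch. A kernel-context sketch of the mapping, with an invented stand-in for struct hvcs_struct:

    #include <asm/local.h>   /* the same header the hunk adds to hvcs.c */

    struct hvcs_sketch {
            local_t open_count;                       /* was: int open_count;            */
    };

    static int last_close_sketch(struct hvcs_sketch *p)
    {
            if (local_dec_and_test(&p->open_count))   /* was: if (--p->open_count == 0)  */
                    return 1;                         /* last close: tear the port down  */
            if (local_read(&p->open_count) < 0)       /* was: if (p->open_count < 0)     */
                    return -1;                        /* mismanaged count, as the driver warns */
            return 0;
    }
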
27663 diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27664 --- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27665 +++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27666 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27667 return i;
27668 }
27669
27670 -static struct hv_ops hvc_udbg_ops = {
27671 +static const struct hv_ops hvc_udbg_ops = {
27672 .get_chars = hvc_udbg_get,
27673 .put_chars = hvc_udbg_put,
27674 };
27675 diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27676 --- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27677 +++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27678 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27679 return got;
27680 }
27681
27682 -static struct hv_ops hvc_get_put_ops = {
27683 +static const struct hv_ops hvc_get_put_ops = {
27684 .get_chars = filtered_get_chars,
27685 .put_chars = hvc_put_chars,
27686 .notifier_add = notifier_add_irq,
27687 diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27688 --- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27689 +++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27690 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27691 return recv;
27692 }
27693
27694 -static struct hv_ops hvc_ops = {
27695 +static const struct hv_ops hvc_ops = {
27696 .get_chars = read_console,
27697 .put_chars = write_console,
27698 .notifier_add = notifier_add_irq,
27699 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27700 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27701 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27702 @@ -414,7 +414,7 @@ struct ipmi_smi {
27703 struct proc_dir_entry *proc_dir;
27704 char proc_dir_name[10];
27705
27706 - atomic_t stats[IPMI_NUM_STATS];
27707 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27708
27709 /*
27710 * run_to_completion duplicate of smb_info, smi_info
27711 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27712
27713
27714 #define ipmi_inc_stat(intf, stat) \
27715 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27716 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27717 #define ipmi_get_stat(intf, stat) \
27718 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27719 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27720
27721 static int is_lan_addr(struct ipmi_addr *addr)
27722 {
27723 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27724 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27725 init_waitqueue_head(&intf->waitq);
27726 for (i = 0; i < IPMI_NUM_STATS; i++)
27727 - atomic_set(&intf->stats[i], 0);
27728 + atomic_set_unchecked(&intf->stats[i], 0);
27729
27730 intf->proc_dir = NULL;
27731
27732 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27733 struct ipmi_smi_msg smi_msg;
27734 struct ipmi_recv_msg recv_msg;
27735
27736 + pax_track_stack();
27737 +
27738 si = (struct ipmi_system_interface_addr *) &addr;
27739 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27740 si->channel = IPMI_BMC_CHANNEL;
27741 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27742 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27743 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27744 @@ -277,7 +277,7 @@ struct smi_info {
27745 unsigned char slave_addr;
27746
27747 /* Counters and things for the proc filesystem. */
27748 - atomic_t stats[SI_NUM_STATS];
27749 + atomic_unchecked_t stats[SI_NUM_STATS];
27750
27751 struct task_struct *thread;
27752
27753 @@ -285,9 +285,9 @@ struct smi_info {
27754 };
27755
27756 #define smi_inc_stat(smi, stat) \
27757 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27758 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27759 #define smi_get_stat(smi, stat) \
27760 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27761 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27762
27763 #define SI_MAX_PARMS 4
27764
27765 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27766 atomic_set(&new_smi->req_events, 0);
27767 new_smi->run_to_completion = 0;
27768 for (i = 0; i < SI_NUM_STATS; i++)
27769 - atomic_set(&new_smi->stats[i], 0);
27770 + atomic_set_unchecked(&new_smi->stats[i], 0);
27771
27772 new_smi->interrupt_disabled = 0;
27773 atomic_set(&new_smi->stop_operation, 0);
27774 diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
27775 --- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27776 +++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27777 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27778 * re-used for each stats call.
27779 */
27780 static comstats_t stli_comstats;
27781 -static combrd_t stli_brdstats;
27782 static struct asystats stli_cdkstats;
27783
27784 /*****************************************************************************/
27785 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27786 {
27787 struct stlibrd *brdp;
27788 unsigned int i;
27789 + combrd_t stli_brdstats;
27790
27791 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27792 return -EFAULT;
27793 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27794 struct stliport stli_dummyport;
27795 struct stliport *portp;
27796
27797 + pax_track_stack();
27798 +
27799 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27800 return -EFAULT;
27801 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27802 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27803 struct stlibrd stli_dummybrd;
27804 struct stlibrd *brdp;
27805
27806 + pax_track_stack();
27807 +
27808 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27809 return -EFAULT;
27810 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27811 diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
27812 --- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27813 +++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27814 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27815
27816 config DEVKMEM
27817 bool "/dev/kmem virtual device support"
27818 - default y
27819 + default n
27820 + depends on !GRKERNSEC_KMEM
27821 help
27822 Say Y here if you want to support the /dev/kmem device. The
27823 /dev/kmem device is rarely used, but can be used for certain
27824 @@ -1114,6 +1115,7 @@ config DEVPORT
27825 bool
27826 depends on !M68K
27827 depends on ISA || PCI
27828 + depends on !GRKERNSEC_KMEM
27829 default y
27830
27831 source "drivers/s390/char/Kconfig"
27832 diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
27833 --- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27834 +++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27835 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27836 kbd->kbdmode == VC_MEDIUMRAW) &&
27837 value != KVAL(K_SAK))
27838 return; /* SAK is allowed even in raw mode */
27839 +
27840 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27841 + {
27842 + void *func = fn_handler[value];
27843 + if (func == fn_show_state || func == fn_show_ptregs ||
27844 + func == fn_show_mem)
27845 + return;
27846 + }
27847 +#endif
27848 +
27849 fn_handler[value](vc);
27850 }
27851
27852 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27853 .evbit = { BIT_MASK(EV_SND) },
27854 },
27855
27856 - { }, /* Terminating entry */
27857 + { 0 }, /* Terminating entry */
27858 };
27859
27860 MODULE_DEVICE_TABLE(input, kbd_ids);
27861 diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
27862 --- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27863 +++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27864 @@ -18,6 +18,7 @@
27865 #include <linux/raw.h>
27866 #include <linux/tty.h>
27867 #include <linux/capability.h>
27868 +#include <linux/security.h>
27869 #include <linux/ptrace.h>
27870 #include <linux/device.h>
27871 #include <linux/highmem.h>
27872 @@ -35,6 +36,10 @@
27873 # include <linux/efi.h>
27874 #endif
27875
27876 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27877 +extern struct file_operations grsec_fops;
27878 +#endif
27879 +
27880 static inline unsigned long size_inside_page(unsigned long start,
27881 unsigned long size)
27882 {
27883 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27884
27885 while (cursor < to) {
27886 if (!devmem_is_allowed(pfn)) {
27887 +#ifdef CONFIG_GRKERNSEC_KMEM
27888 + gr_handle_mem_readwrite(from, to);
27889 +#else
27890 printk(KERN_INFO
27891 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27892 current->comm, from, to);
27893 +#endif
27894 return 0;
27895 }
27896 cursor += PAGE_SIZE;
27897 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27898 }
27899 return 1;
27900 }
27901 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27902 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27903 +{
27904 + return 0;
27905 +}
27906 #else
27907 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27908 {
27909 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27910 #endif
27911
27912 while (count > 0) {
27913 + char *temp;
27914 +
27915 /*
27916 * Handle first page in case it's not aligned
27917 */
27918 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27919 if (!ptr)
27920 return -EFAULT;
27921
27922 - if (copy_to_user(buf, ptr, sz)) {
27923 +#ifdef CONFIG_PAX_USERCOPY
27924 + temp = kmalloc(sz, GFP_KERNEL);
27925 + if (!temp) {
27926 + unxlate_dev_mem_ptr(p, ptr);
27927 + return -ENOMEM;
27928 + }
27929 + memcpy(temp, ptr, sz);
27930 +#else
27931 + temp = ptr;
27932 +#endif
27933 +
27934 + if (copy_to_user(buf, temp, sz)) {
27935 +
27936 +#ifdef CONFIG_PAX_USERCOPY
27937 + kfree(temp);
27938 +#endif
27939 +
27940 unxlate_dev_mem_ptr(p, ptr);
27941 return -EFAULT;
27942 }
27943
27944 +#ifdef CONFIG_PAX_USERCOPY
27945 + kfree(temp);
27946 +#endif
27947 +
27948 unxlate_dev_mem_ptr(p, ptr);
27949
27950 buf += sz;
27951 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27952 size_t count, loff_t *ppos)
27953 {
27954 unsigned long p = *ppos;
27955 - ssize_t low_count, read, sz;
27956 + ssize_t low_count, read, sz, err = 0;
27957 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27958 - int err = 0;
27959
27960 read = 0;
27961 if (p < (unsigned long) high_memory) {
27962 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27963 }
27964 #endif
27965 while (low_count > 0) {
27966 + char *temp;
27967 +
27968 sz = size_inside_page(p, low_count);
27969
27970 /*
27971 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27972 */
27973 kbuf = xlate_dev_kmem_ptr((char *)p);
27974
27975 - if (copy_to_user(buf, kbuf, sz))
27976 +#ifdef CONFIG_PAX_USERCOPY
27977 + temp = kmalloc(sz, GFP_KERNEL);
27978 + if (!temp)
27979 + return -ENOMEM;
27980 + memcpy(temp, kbuf, sz);
27981 +#else
27982 + temp = kbuf;
27983 +#endif
27984 +
27985 + err = copy_to_user(buf, temp, sz);
27986 +
27987 +#ifdef CONFIG_PAX_USERCOPY
27988 + kfree(temp);
27989 +#endif
27990 +
27991 + if (err)
27992 return -EFAULT;
27993 buf += sz;
27994 p += sz;
27995 @@ -889,6 +941,9 @@ static const struct memdev {
27996 #ifdef CONFIG_CRASH_DUMP
27997 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27998 #endif
27999 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28000 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28001 +#endif
28002 };
28003
28004 static int memory_open(struct inode *inode, struct file *filp)
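A minimal userspace sketch of the CONFIG_PAX_USERCOPY bounce-buffer pattern introduced in the read_mem()/read_kmem() hunks above: the data is staged through a freshly allocated buffer of exactly sz bytes, so the user-copy layer only ever sees a heap object whose size matches the copy length. Here malloc()/memcpy() stand in for kmalloc()/copy_to_user(), and bounce_copy() is a made-up name, not kernel API.

#include <stdlib.h>
#include <string.h>

static int bounce_copy(void *dst, const void *src, size_t sz)
{
        char *temp = malloc(sz);        /* kmalloc(sz, GFP_KERNEL) in the patch  */

        if (!temp)
                return -1;              /* -ENOMEM in the kernel version         */
        memcpy(temp, src, sz);          /* stage the data in a sized heap object */
        memcpy(dst, temp, sz);          /* copy_to_user(buf, temp, sz) upstream  */
        free(temp);                     /* kfree(temp)                           */
        return 0;
}

int main(void)
{
        char src[16] = "device contents", dst[16];

        return bounce_copy(dst, src, sizeof(src));
}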
28005 diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28006 --- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28007 +++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28008 @@ -29,6 +29,7 @@
28009 #include <linux/tty_driver.h>
28010 #include <linux/tty_flip.h>
28011 #include <linux/uaccess.h>
28012 +#include <asm/local.h>
28013
28014 #include "tty.h"
28015 #include "network.h"
28016 @@ -51,7 +52,7 @@ struct ipw_tty {
28017 int tty_type;
28018 struct ipw_network *network;
28019 struct tty_struct *linux_tty;
28020 - int open_count;
28021 + local_t open_count;
28022 unsigned int control_lines;
28023 struct mutex ipw_tty_mutex;
28024 int tx_bytes_queued;
28025 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28026 mutex_unlock(&tty->ipw_tty_mutex);
28027 return -ENODEV;
28028 }
28029 - if (tty->open_count == 0)
28030 + if (local_read(&tty->open_count) == 0)
28031 tty->tx_bytes_queued = 0;
28032
28033 - tty->open_count++;
28034 + local_inc(&tty->open_count);
28035
28036 tty->linux_tty = linux_tty;
28037 linux_tty->driver_data = tty;
28038 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28039
28040 static void do_ipw_close(struct ipw_tty *tty)
28041 {
28042 - tty->open_count--;
28043 -
28044 - if (tty->open_count == 0) {
28045 + if (local_dec_return(&tty->open_count) == 0) {
28046 struct tty_struct *linux_tty = tty->linux_tty;
28047
28048 if (linux_tty != NULL) {
28049 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28050 return;
28051
28052 mutex_lock(&tty->ipw_tty_mutex);
28053 - if (tty->open_count == 0) {
28054 + if (local_read(&tty->open_count) == 0) {
28055 mutex_unlock(&tty->ipw_tty_mutex);
28056 return;
28057 }
28058 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28059 return;
28060 }
28061
28062 - if (!tty->open_count) {
28063 + if (!local_read(&tty->open_count)) {
28064 mutex_unlock(&tty->ipw_tty_mutex);
28065 return;
28066 }
28067 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28068 return -ENODEV;
28069
28070 mutex_lock(&tty->ipw_tty_mutex);
28071 - if (!tty->open_count) {
28072 + if (!local_read(&tty->open_count)) {
28073 mutex_unlock(&tty->ipw_tty_mutex);
28074 return -EINVAL;
28075 }
28076 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28077 if (!tty)
28078 return -ENODEV;
28079
28080 - if (!tty->open_count)
28081 + if (!local_read(&tty->open_count))
28082 return -EINVAL;
28083
28084 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28085 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28086 if (!tty)
28087 return 0;
28088
28089 - if (!tty->open_count)
28090 + if (!local_read(&tty->open_count))
28091 return 0;
28092
28093 return tty->tx_bytes_queued;
28094 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28095 if (!tty)
28096 return -ENODEV;
28097
28098 - if (!tty->open_count)
28099 + if (!local_read(&tty->open_count))
28100 return -EINVAL;
28101
28102 return get_control_lines(tty);
28103 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28104 if (!tty)
28105 return -ENODEV;
28106
28107 - if (!tty->open_count)
28108 + if (!local_read(&tty->open_count))
28109 return -EINVAL;
28110
28111 return set_control_lines(tty, set, clear);
28112 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28113 if (!tty)
28114 return -ENODEV;
28115
28116 - if (!tty->open_count)
28117 + if (!local_read(&tty->open_count))
28118 return -EINVAL;
28119
28120 /* FIXME: Exactly how is the tty object locked here .. */
28121 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28122 against a parallel ioctl etc */
28123 mutex_lock(&ttyj->ipw_tty_mutex);
28124 }
28125 - while (ttyj->open_count)
28126 + while (local_read(&ttyj->open_count))
28127 do_ipw_close(ttyj);
28128 ipwireless_disassociate_network_ttys(network,
28129 ttyj->channel_idx);
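A userspace sketch of the open_count conversion in ipwireless/tty.c above, using C11 atomics in place of the kernel's local_t: the plain int counter becomes atomic, and the separate decrement-then-test in do_ipw_close() collapses into a single dec-and-test step so no update can be lost between the two. Names below are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

struct tty_like {
        atomic_int open_count;
};

static struct tty_like port;            /* zero-initialized, like the driver */

static void open_port(struct tty_like *t)
{
        if (atomic_fetch_add(&t->open_count, 1) == 0)
                puts("first open: reset tx queue");     /* tx_bytes_queued = 0 */
}

static void close_port(struct tty_like *t)
{
        /* fetch_sub returns the old value; 1 means this was the last user */
        if (atomic_fetch_sub(&t->open_count, 1) == 1)
                puts("last close: tear down");
}

int main(void)
{
        open_port(&port);
        open_port(&port);
        close_port(&port);
        close_port(&port);              /* prints "last close: tear down" */
        return 0;
}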
28130 diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28131 --- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28132 +++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28133 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28134 register_sysctl_table(pty_root_table);
28135
28136 /* Now create the /dev/ptmx special device */
28137 + pax_open_kernel();
28138 tty_default_fops(&ptmx_fops);
28139 - ptmx_fops.open = ptmx_open;
28140 + *(void **)&ptmx_fops.open = ptmx_open;
28141 + pax_close_kernel();
28142
28143 cdev_init(&ptmx_cdev, &ptmx_fops);
28144 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
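The pty.c hunk above patches a single slot of the now read-only ptmx_fops table between pax_open_kernel() and pax_close_kernel(); the same pattern recurs below for virtio_cons and the firewire dummy_driver. A rough userspace analogue, with mprotect() standing in for the PaX helpers (illustrative only, not kernel API):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
        int (*open)(void);
};

static int default_open(void) { return 0; }
static int patched_open(void) { return 42; }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct ops *fops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (fops == MAP_FAILED)
                return 1;

        fops->open = default_open;
        mprotect(fops, pagesz, PROT_READ);              /* table becomes R/O   */

        mprotect(fops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()   */
        *(void **)&fops->open = (void *)patched_open;   /* patch a single slot */
        mprotect(fops, pagesz, PROT_READ);              /* pax_close_kernel()  */

        printf("open() now returns %d\n", fops->open()); /* 42 */
        return 0;
}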
28145 diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28146 --- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28147 +++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28148 @@ -254,8 +254,13 @@
28149 /*
28150 * Configuration information
28151 */
28152 +#ifdef CONFIG_GRKERNSEC_RANDNET
28153 +#define INPUT_POOL_WORDS 512
28154 +#define OUTPUT_POOL_WORDS 128
28155 +#else
28156 #define INPUT_POOL_WORDS 128
28157 #define OUTPUT_POOL_WORDS 32
28158 +#endif
28159 #define SEC_XFER_SIZE 512
28160
28161 /*
28162 @@ -292,10 +297,17 @@ static struct poolinfo {
28163 int poolwords;
28164 int tap1, tap2, tap3, tap4, tap5;
28165 } poolinfo_table[] = {
28166 +#ifdef CONFIG_GRKERNSEC_RANDNET
28167 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28168 + { 512, 411, 308, 208, 104, 1 },
28169 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28170 + { 128, 103, 76, 51, 25, 1 },
28171 +#else
28172 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28173 { 128, 103, 76, 51, 25, 1 },
28174 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28175 { 32, 26, 20, 14, 7, 1 },
28176 +#endif
28177 #if 0
28178 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28179 { 2048, 1638, 1231, 819, 411, 1 },
28180 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28181 #include <linux/sysctl.h>
28182
28183 static int min_read_thresh = 8, min_write_thresh;
28184 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28185 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28186 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28187 static char sysctl_bootid[16];
28188
28189 diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28190 --- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28191 +++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28192 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28193 struct rocket_ports tmp;
28194 int board;
28195
28196 + pax_track_stack();
28197 +
28198 if (!retports)
28199 return -EFAULT;
28200 memset(&tmp, 0, sizeof (tmp));
28201 diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28202 --- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28203 +++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28204 @@ -55,6 +55,7 @@
28205 #include <asm/uaccess.h>
28206 #include <asm/io.h>
28207 #include <asm/system.h>
28208 +#include <asm/local.h>
28209
28210 #include <linux/sonypi.h>
28211
28212 @@ -491,7 +492,7 @@ static struct sonypi_device {
28213 spinlock_t fifo_lock;
28214 wait_queue_head_t fifo_proc_list;
28215 struct fasync_struct *fifo_async;
28216 - int open_count;
28217 + local_t open_count;
28218 int model;
28219 struct input_dev *input_jog_dev;
28220 struct input_dev *input_key_dev;
28221 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28222 static int sonypi_misc_release(struct inode *inode, struct file *file)
28223 {
28224 mutex_lock(&sonypi_device.lock);
28225 - sonypi_device.open_count--;
28226 + local_dec(&sonypi_device.open_count);
28227 mutex_unlock(&sonypi_device.lock);
28228 return 0;
28229 }
28230 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28231 lock_kernel();
28232 mutex_lock(&sonypi_device.lock);
28233 /* Flush input queue on first open */
28234 - if (!sonypi_device.open_count)
28235 + if (!local_read(&sonypi_device.open_count))
28236 kfifo_reset(sonypi_device.fifo);
28237 - sonypi_device.open_count++;
28238 + local_inc(&sonypi_device.open_count);
28239 mutex_unlock(&sonypi_device.lock);
28240 unlock_kernel();
28241 return 0;
28242 diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28243 --- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28244 +++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28245 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28246 struct stlport stl_dummyport;
28247 struct stlport *portp;
28248
28249 + pax_track_stack();
28250 +
28251 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28252 return -EFAULT;
28253 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28254 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28255 --- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28256 +++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28257 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28258 event = addr;
28259
28260 if ((event->event_type == 0 && event->event_size == 0) ||
28261 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28262 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28263 return NULL;
28264
28265 return addr;
28266 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28267 return NULL;
28268
28269 if ((event->event_type == 0 && event->event_size == 0) ||
28270 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28271 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28272 return NULL;
28273
28274 (*pos)++;
28275 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28276 int i;
28277
28278 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28279 - seq_putc(m, data[i]);
28280 + if (!seq_putc(m, data[i]))
28281 + return -EFAULT;
28282
28283 return 0;
28284 }
28285 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28286 log->bios_event_log_end = log->bios_event_log + len;
28287
28288 virt = acpi_os_map_memory(start, len);
28289 + if (!virt) {
28290 + kfree(log->bios_event_log);
28291 + log->bios_event_log = NULL;
28292 + return -EFAULT;
28293 + }
28294
28295 memcpy(log->bios_event_log, virt, len);
28296
28297 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
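Why the tpm_bios.c bounds checks above are rewritten: with unsigned arithmetic, addr + sizeof(hdr) + event_size can wrap around for a huge attacker-supplied event_size and appear to be below limit, while event_size >= limit - addr - sizeof(hdr) keeps the untrusted value alone on one side of the comparison. A self-contained demonstration with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uintptr_t base  = 0x1000;            /* start of the event buffer  */
        uintptr_t limit = 0x2000;            /* one past the end           */
        uintptr_t hdr   = 32;                /* sizeof(struct tcpa_event)  */
        uintptr_t size  = UINTPTR_MAX - 16;  /* bogus, attacker-controlled */

        /* old check: the addition wraps, so the bogus size looks in range */
        int old_ok = (base + hdr + size) < limit;

        /* new check: the subtraction cannot be influenced by `size` */
        int new_ok = size < limit - base - hdr;

        printf("old check accepts bogus size: %d\n", old_ok);  /* prints 1 */
        printf("new check accepts bogus size: %d\n", new_ok);  /* prints 0 */
        return 0;
}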
28298 --- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28299 +++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28300 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28301 chip->vendor.req_complete_val)
28302 goto out_recv;
28303
28304 - if ((status == chip->vendor.req_canceled)) {
28305 + if (status == chip->vendor.req_canceled) {
28306 dev_err(chip->dev, "Operation Canceled\n");
28307 rc = -ECANCELED;
28308 goto out;
28309 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28310
28311 struct tpm_chip *chip = dev_get_drvdata(dev);
28312
28313 + pax_track_stack();
28314 +
28315 tpm_cmd.header.in = tpm_readpubek_header;
28316 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28317 "attempting to read the PUBEK");
28318 diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28319 --- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28320 +++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28321 @@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28322 return retval;
28323 }
28324
28325 +EXPORT_SYMBOL(tty_ioctl);
28326 +
28327 #ifdef CONFIG_COMPAT
28328 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28329 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
28330 unsigned long arg)
28331 {
28332 struct inode *inode = file->f_dentry->d_inode;
28333 @@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28334
28335 return retval;
28336 }
28337 +
28338 +EXPORT_SYMBOL(tty_compat_ioctl);
28339 #endif
28340
28341 /*
28342 @@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28343
28344 void tty_default_fops(struct file_operations *fops)
28345 {
28346 - *fops = tty_fops;
28347 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28348 }
28349
28350 /*
28351 diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28352 --- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28353 +++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28354 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28355 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28356 struct tty_ldisc_ops *ldo = ld->ops;
28357
28358 - ldo->refcount--;
28359 + atomic_dec(&ldo->refcount);
28360 module_put(ldo->owner);
28361 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28362
28363 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28364 spin_lock_irqsave(&tty_ldisc_lock, flags);
28365 tty_ldiscs[disc] = new_ldisc;
28366 new_ldisc->num = disc;
28367 - new_ldisc->refcount = 0;
28368 + atomic_set(&new_ldisc->refcount, 0);
28369 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28370
28371 return ret;
28372 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28373 return -EINVAL;
28374
28375 spin_lock_irqsave(&tty_ldisc_lock, flags);
28376 - if (tty_ldiscs[disc]->refcount)
28377 + if (atomic_read(&tty_ldiscs[disc]->refcount))
28378 ret = -EBUSY;
28379 else
28380 tty_ldiscs[disc] = NULL;
28381 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28382 if (ldops) {
28383 ret = ERR_PTR(-EAGAIN);
28384 if (try_module_get(ldops->owner)) {
28385 - ldops->refcount++;
28386 + atomic_inc(&ldops->refcount);
28387 ret = ldops;
28388 }
28389 }
28390 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28391 unsigned long flags;
28392
28393 spin_lock_irqsave(&tty_ldisc_lock, flags);
28394 - ldops->refcount--;
28395 + atomic_dec(&ldops->refcount);
28396 module_put(ldops->owner);
28397 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28398 }
28399 diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28400 --- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28401 +++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28402 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28403 * virtqueue, so we let the drivers do some boutique early-output thing. */
28404 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28405 {
28406 - virtio_cons.put_chars = put_chars;
28407 + pax_open_kernel();
28408 + *(void **)&virtio_cons.put_chars = put_chars;
28409 + pax_close_kernel();
28410 return hvc_instantiate(0, 0, &virtio_cons);
28411 }
28412
28413 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28414 out_vq = vqs[1];
28415
28416 /* Start using the new console output. */
28417 - virtio_cons.get_chars = get_chars;
28418 - virtio_cons.put_chars = put_chars;
28419 - virtio_cons.notifier_add = notifier_add_vio;
28420 - virtio_cons.notifier_del = notifier_del_vio;
28421 - virtio_cons.notifier_hangup = notifier_del_vio;
28422 + pax_open_kernel();
28423 + *(void **)&virtio_cons.get_chars = get_chars;
28424 + *(void **)&virtio_cons.put_chars = put_chars;
28425 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28426 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28427 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28428 + pax_close_kernel();
28429
28430 /* The first argument of hvc_alloc() is the virtual console number, so
28431 * we use zero. The second argument is the parameter for the
28432 diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28433 --- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28434 +++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28435 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28436
28437 static void notify_write(struct vc_data *vc, unsigned int unicode)
28438 {
28439 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28440 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28441 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28442 }
28443
28444 diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28445 --- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28446 +++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28447 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28448 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28449 return -EFAULT;
28450
28451 - if (!capable(CAP_SYS_TTY_CONFIG))
28452 - perm = 0;
28453 -
28454 switch (cmd) {
28455 case KDGKBENT:
28456 key_map = key_maps[s];
28457 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28458 val = (i ? K_HOLE : K_NOSUCHMAP);
28459 return put_user(val, &user_kbe->kb_value);
28460 case KDSKBENT:
28461 + if (!capable(CAP_SYS_TTY_CONFIG))
28462 + perm = 0;
28463 +
28464 if (!perm)
28465 return -EPERM;
28466 +
28467 if (!i && v == K_NOSUCHMAP) {
28468 /* deallocate map */
28469 key_map = key_maps[s];
28470 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28471 int i, j, k;
28472 int ret;
28473
28474 - if (!capable(CAP_SYS_TTY_CONFIG))
28475 - perm = 0;
28476 -
28477 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28478 if (!kbs) {
28479 ret = -ENOMEM;
28480 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28481 kfree(kbs);
28482 return ((p && *p) ? -EOVERFLOW : 0);
28483 case KDSKBSENT:
28484 + if (!capable(CAP_SYS_TTY_CONFIG))
28485 + perm = 0;
28486 +
28487 if (!perm) {
28488 ret = -EPERM;
28489 goto reterr;
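A condensed sketch of the do_kdsk_ioctl()/do_kdgkb_ioctl() reordering above: the capability test is evaluated only on the KDSKBENT/KDSKBSENT write paths, so unprivileged callers can still read keymap entries. The has_cap flag and the simplified dispatcher below are stand-ins, not kernel interfaces.

#include <stdio.h>
#include <stdbool.h>

enum { GET_ENTRY, SET_ENTRY };

static int do_kbd_ioctl(int cmd, bool has_cap, int *map, int idx, int val)
{
        switch (cmd) {
        case GET_ENTRY:                 /* read: no capability required */
                return map[idx];
        case SET_ENTRY:                 /* write: privileged only       */
                if (!has_cap)
                        return -1;      /* -EPERM in the kernel         */
                map[idx] = val;
                return 0;
        }
        return -1;
}

int main(void)
{
        int map[4] = { 7, 8, 9, 10 };

        printf("unprivileged read:  %d\n", do_kbd_ioctl(GET_ENTRY, false, map, 1, 0));
        printf("unprivileged write: %d\n", do_kbd_ioctl(SET_ENTRY, false, map, 1, 99));
        return 0;
}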
28490 diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28491 --- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28492 +++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28493 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28494 complete(&policy->kobj_unregister);
28495 }
28496
28497 -static struct sysfs_ops sysfs_ops = {
28498 +static const struct sysfs_ops sysfs_ops = {
28499 .show = show,
28500 .store = store,
28501 };
28502 diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28503 --- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28504 +++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28505 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28506 return ret;
28507 }
28508
28509 -static struct sysfs_ops cpuidle_sysfs_ops = {
28510 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28511 .show = cpuidle_show,
28512 .store = cpuidle_store,
28513 };
28514 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28515 return ret;
28516 }
28517
28518 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28519 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28520 .show = cpuidle_state_show,
28521 };
28522
28523 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28524 .release = cpuidle_state_sysfs_release,
28525 };
28526
28527 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28528 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28529 {
28530 kobject_put(&device->kobjs[i]->kobj);
28531 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28532 diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28533 --- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28534 +++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28535 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28536 0xCA, 0x34, 0x2B, 0x2E};
28537 struct scatterlist sg;
28538
28539 + pax_track_stack();
28540 +
28541 memset(src, 0, sizeof(src));
28542 memset(ctx.key, 0, sizeof(ctx.key));
28543
28544 diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28545 --- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28546 +++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28547 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28548 struct crypto_aes_ctx gen_aes;
28549 int cpu;
28550
28551 + pax_track_stack();
28552 +
28553 if (key_len % 8) {
28554 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28555 return -EINVAL;
28556 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28557 --- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28558 +++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28559 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28560 return entry->show(&chan->common, page);
28561 }
28562
28563 -struct sysfs_ops ioat_sysfs_ops = {
28564 +const struct sysfs_ops ioat_sysfs_ops = {
28565 .show = ioat_attr_show,
28566 };
28567
28568 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28569 --- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28570 +++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28571 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28572 unsigned long *phys_complete);
28573 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28574 void ioat_kobject_del(struct ioatdma_device *device);
28575 -extern struct sysfs_ops ioat_sysfs_ops;
28576 +extern const struct sysfs_ops ioat_sysfs_ops;
28577 extern struct ioat_sysfs_entry ioat_version_attr;
28578 extern struct ioat_sysfs_entry ioat_cap_attr;
28579 #endif /* IOATDMA_H */
28580 diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28581 --- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28582 +++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28583 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28584 }
28585
28586 /* edac_dev file operations for an 'ctl_info' */
28587 -static struct sysfs_ops device_ctl_info_ops = {
28588 +static const struct sysfs_ops device_ctl_info_ops = {
28589 .show = edac_dev_ctl_info_show,
28590 .store = edac_dev_ctl_info_store
28591 };
28592 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28593 }
28594
28595 /* edac_dev file operations for an 'instance' */
28596 -static struct sysfs_ops device_instance_ops = {
28597 +static const struct sysfs_ops device_instance_ops = {
28598 .show = edac_dev_instance_show,
28599 .store = edac_dev_instance_store
28600 };
28601 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28602 }
28603
28604 /* edac_dev file operations for a 'block' */
28605 -static struct sysfs_ops device_block_ops = {
28606 +static const struct sysfs_ops device_block_ops = {
28607 .show = edac_dev_block_show,
28608 .store = edac_dev_block_store
28609 };
28610 diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28611 --- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28612 +++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28613 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28614 return -EIO;
28615 }
28616
28617 -static struct sysfs_ops csrowfs_ops = {
28618 +static const struct sysfs_ops csrowfs_ops = {
28619 .show = csrowdev_show,
28620 .store = csrowdev_store
28621 };
28622 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28623 }
28624
28625 /* Intermediate show/store table */
28626 -static struct sysfs_ops mci_ops = {
28627 +static const struct sysfs_ops mci_ops = {
28628 .show = mcidev_show,
28629 .store = mcidev_store
28630 };
28631 diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28632 --- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28633 +++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28634 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28635 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28636 static int edac_pci_poll_msec = 1000; /* one second workq period */
28637
28638 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28639 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28640 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28641 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28642
28643 static struct kobject *edac_pci_top_main_kobj;
28644 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28645 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28646 }
28647
28648 /* fs_ops table */
28649 -static struct sysfs_ops pci_instance_ops = {
28650 +static const struct sysfs_ops pci_instance_ops = {
28651 .show = edac_pci_instance_show,
28652 .store = edac_pci_instance_store
28653 };
28654 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28655 return -EIO;
28656 }
28657
28658 -static struct sysfs_ops edac_pci_sysfs_ops = {
28659 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28660 .show = edac_pci_dev_show,
28661 .store = edac_pci_dev_store
28662 };
28663 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28664 edac_printk(KERN_CRIT, EDAC_PCI,
28665 "Signaled System Error on %s\n",
28666 pci_name(dev));
28667 - atomic_inc(&pci_nonparity_count);
28668 + atomic_inc_unchecked(&pci_nonparity_count);
28669 }
28670
28671 if (status & (PCI_STATUS_PARITY)) {
28672 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28673 "Master Data Parity Error on %s\n",
28674 pci_name(dev));
28675
28676 - atomic_inc(&pci_parity_count);
28677 + atomic_inc_unchecked(&pci_parity_count);
28678 }
28679
28680 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28681 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28682 "Detected Parity Error on %s\n",
28683 pci_name(dev));
28684
28685 - atomic_inc(&pci_parity_count);
28686 + atomic_inc_unchecked(&pci_parity_count);
28687 }
28688 }
28689
28690 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28691 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28692 "Signaled System Error on %s\n",
28693 pci_name(dev));
28694 - atomic_inc(&pci_nonparity_count);
28695 + atomic_inc_unchecked(&pci_nonparity_count);
28696 }
28697
28698 if (status & (PCI_STATUS_PARITY)) {
28699 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28700 "Master Data Parity Error on "
28701 "%s\n", pci_name(dev));
28702
28703 - atomic_inc(&pci_parity_count);
28704 + atomic_inc_unchecked(&pci_parity_count);
28705 }
28706
28707 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28708 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28709 "Detected Parity Error on %s\n",
28710 pci_name(dev));
28711
28712 - atomic_inc(&pci_parity_count);
28713 + atomic_inc_unchecked(&pci_parity_count);
28714 }
28715 }
28716 }
28717 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28718 if (!check_pci_errors)
28719 return;
28720
28721 - before_count = atomic_read(&pci_parity_count);
28722 + before_count = atomic_read_unchecked(&pci_parity_count);
28723
28724 /* scan all PCI devices looking for a Parity Error on devices and
28725 * bridges.
28726 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28727 /* Only if operator has selected panic on PCI Error */
28728 if (edac_pci_get_panic_on_pe()) {
28729 /* If the count is different 'after' from 'before' */
28730 - if (before_count != atomic_read(&pci_parity_count))
28731 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28732 panic("EDAC: PCI Parity Error");
28733 }
28734 }
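The edac_pci counters above are switched to atomic_unchecked_t presumably because they are pure statistics, only ever incremented and compared, never used as reference counts, so PaX's overflow-checked atomic_t instrumentation is not needed for them. A plain C11 sketch of the same before/after comparison done in edac_pci_do_parity_check(), with made-up names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint pci_parity_count;    /* statistics only, never a refcount */

static void scan_bus(void)
{
        atomic_fetch_add(&pci_parity_count, 1);         /* parity error seen */
}

int main(void)
{
        unsigned int before = atomic_load(&pci_parity_count);

        scan_bus();
        if (before != atomic_load(&pci_parity_count))
                puts("new parity errors since the last check");
        return 0;
}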
28735 diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28736 --- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28737 +++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
28738 @@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
28739 mutex_unlock(&card_mutex);
28740
28741 /* Switch off most of the card driver interface. */
28742 - dummy_driver.free_iso_context = card->driver->free_iso_context;
28743 - dummy_driver.stop_iso = card->driver->stop_iso;
28744 + pax_open_kernel();
28745 + *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
28746 + *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
28747 + pax_close_kernel();
28748 card->driver = &dummy_driver;
28749
28750 fw_destroy_nodes(card);
28751 diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
28752 --- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28753 +++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28754 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28755 int ret;
28756
28757 if ((request->channels == 0 && request->bandwidth == 0) ||
28758 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28759 - request->bandwidth < 0)
28760 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28761 return -EINVAL;
28762
28763 r = kmalloc(sizeof(*r), GFP_KERNEL);
28764 diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
28765 --- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28766 +++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28767 @@ -36,6 +36,7 @@
28768 #include <linux/string.h>
28769 #include <linux/timer.h>
28770 #include <linux/types.h>
28771 +#include <linux/sched.h>
28772
28773 #include <asm/byteorder.h>
28774
28775 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28776 struct transaction_callback_data d;
28777 struct fw_transaction t;
28778
28779 + pax_track_stack();
28780 +
28781 init_completion(&d.done);
28782 d.payload = payload;
28783 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28784 diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
28785 --- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28786 +++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28787 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28788 }
28789 }
28790 else {
28791 - /*
28792 - * no iounmap() for that ioremap(); it would be a no-op, but
28793 - * it's so early in setup that sucker gets confused into doing
28794 - * what it shouldn't if we actually call it.
28795 - */
28796 p = dmi_ioremap(0xF0000, 0x10000);
28797 if (p == NULL)
28798 goto error;
28799 diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
28800 --- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28801 +++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28802 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28803 return ret;
28804 }
28805
28806 -static struct sysfs_ops edd_attr_ops = {
28807 +static const struct sysfs_ops edd_attr_ops = {
28808 .show = edd_attr_show,
28809 };
28810
28811 diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
28812 --- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28813 +++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28814 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28815 return ret;
28816 }
28817
28818 -static struct sysfs_ops efivar_attr_ops = {
28819 +static const struct sysfs_ops efivar_attr_ops = {
28820 .show = efivar_attr_show,
28821 .store = efivar_attr_store,
28822 };
28823 diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
28824 --- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28825 +++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28826 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28827 return ret;
28828 }
28829
28830 -static struct sysfs_ops ibft_attr_ops = {
28831 +static const struct sysfs_ops ibft_attr_ops = {
28832 .show = ibft_show_attribute,
28833 };
28834
28835 diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
28836 --- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28837 +++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28838 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28839 NULL
28840 };
28841
28842 -static struct sysfs_ops memmap_attr_ops = {
28843 +static const struct sysfs_ops memmap_attr_ops = {
28844 .show = memmap_attr_show,
28845 };
28846
28847 diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
28848 --- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28849 +++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28850 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28851 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28852 maskl, pendl, maskh, pendh);
28853
28854 - atomic_inc(&irq_err_count);
28855 + atomic_inc_unchecked(&irq_err_count);
28856
28857 return -EINVAL;
28858 }
28859 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
28860 --- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28861 +++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28862 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28863 struct drm_crtc *tmp;
28864 int crtc_mask = 1;
28865
28866 - WARN(!crtc, "checking null crtc?");
28867 + BUG_ON(!crtc);
28868
28869 dev = crtc->dev;
28870
28871 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28872
28873 adjusted_mode = drm_mode_duplicate(dev, mode);
28874
28875 + pax_track_stack();
28876 +
28877 crtc->enabled = drm_helper_crtc_in_use(crtc);
28878
28879 if (!crtc->enabled)
28880 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
28881 --- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28882 +++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28883 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28884 char *kdata = NULL;
28885
28886 atomic_inc(&dev->ioctl_count);
28887 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28888 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28889 ++file_priv->ioctl_count;
28890
28891 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28892 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
28893 --- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28894 +++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28895 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28896 }
28897
28898 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28899 - atomic_set(&dev->counts[i], 0);
28900 + atomic_set_unchecked(&dev->counts[i], 0);
28901
28902 dev->sigdata.lock = NULL;
28903
28904 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28905
28906 retcode = drm_open_helper(inode, filp, dev);
28907 if (!retcode) {
28908 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28909 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28910 spin_lock(&dev->count_lock);
28911 - if (!dev->open_count++) {
28912 + if (local_inc_return(&dev->open_count) == 1) {
28913 spin_unlock(&dev->count_lock);
28914 retcode = drm_setup(dev);
28915 goto out;
28916 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28917
28918 lock_kernel();
28919
28920 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28921 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28922
28923 if (dev->driver->preclose)
28924 dev->driver->preclose(dev, file_priv);
28925 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28926 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28927 task_pid_nr(current),
28928 (long)old_encode_dev(file_priv->minor->device),
28929 - dev->open_count);
28930 + local_read(&dev->open_count));
28931
28932 /* if the master has gone away we can't do anything with the lock */
28933 if (file_priv->minor->master)
28934 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28935 * End inline drm_release
28936 */
28937
28938 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28939 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28940 spin_lock(&dev->count_lock);
28941 - if (!--dev->open_count) {
28942 + if (local_dec_and_test(&dev->open_count)) {
28943 if (atomic_read(&dev->ioctl_count)) {
28944 DRM_ERROR("Device busy: %d\n",
28945 atomic_read(&dev->ioctl_count));
28946 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
28947 --- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28948 +++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28949 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28950 spin_lock_init(&dev->object_name_lock);
28951 idr_init(&dev->object_name_idr);
28952 atomic_set(&dev->object_count, 0);
28953 - atomic_set(&dev->object_memory, 0);
28954 + atomic_set_unchecked(&dev->object_memory, 0);
28955 atomic_set(&dev->pin_count, 0);
28956 - atomic_set(&dev->pin_memory, 0);
28957 + atomic_set_unchecked(&dev->pin_memory, 0);
28958 atomic_set(&dev->gtt_count, 0);
28959 - atomic_set(&dev->gtt_memory, 0);
28960 + atomic_set_unchecked(&dev->gtt_memory, 0);
28961
28962 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28963 if (!mm) {
28964 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28965 goto fput;
28966 }
28967 atomic_inc(&dev->object_count);
28968 - atomic_add(obj->size, &dev->object_memory);
28969 + atomic_add_unchecked(obj->size, &dev->object_memory);
28970 return obj;
28971 fput:
28972 fput(obj->filp);
28973 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28974
28975 fput(obj->filp);
28976 atomic_dec(&dev->object_count);
28977 - atomic_sub(obj->size, &dev->object_memory);
28978 + atomic_sub_unchecked(obj->size, &dev->object_memory);
28979 kfree(obj);
28980 }
28981 EXPORT_SYMBOL(drm_gem_object_free);
28982 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
28983 --- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28984 +++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28985 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28986 struct drm_local_map *map;
28987 struct drm_map_list *r_list;
28988
28989 - /* Hardcoded from _DRM_FRAME_BUFFER,
28990 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28991 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28992 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28993 + static const char * const types[] = {
28994 + [_DRM_FRAME_BUFFER] = "FB",
28995 + [_DRM_REGISTERS] = "REG",
28996 + [_DRM_SHM] = "SHM",
28997 + [_DRM_AGP] = "AGP",
28998 + [_DRM_SCATTER_GATHER] = "SG",
28999 + [_DRM_CONSISTENT] = "PCI",
29000 + [_DRM_GEM] = "GEM" };
29001 const char *type;
29002 int i;
29003
29004 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29005 map = r_list->map;
29006 if (!map)
29007 continue;
29008 - if (map->type < 0 || map->type > 5)
29009 + if (map->type >= ARRAY_SIZE(types))
29010 type = "??";
29011 else
29012 type = types[map->type];
29013 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29014 struct drm_device *dev = node->minor->dev;
29015
29016 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29017 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29018 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29019 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29020 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29021 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29022 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29023 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29024 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29025 return 0;
29026 }
29027 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29028 mutex_lock(&dev->struct_mutex);
29029 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29030 atomic_read(&dev->vma_count),
29031 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29032 + NULL, 0);
29033 +#else
29034 high_memory, (u64)virt_to_phys(high_memory));
29035 +#endif
29036
29037 list_for_each_entry(pt, &dev->vmalist, head) {
29038 vma = pt->vma;
29039 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29040 continue;
29041 seq_printf(m,
29042 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29043 - pt->pid, vma->vm_start, vma->vm_end,
29044 + pt->pid,
29045 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29046 + 0, 0,
29047 +#else
29048 + vma->vm_start, vma->vm_end,
29049 +#endif
29050 vma->vm_flags & VM_READ ? 'r' : '-',
29051 vma->vm_flags & VM_WRITE ? 'w' : '-',
29052 vma->vm_flags & VM_EXEC ? 'x' : '-',
29053 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29054 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29055 vma->vm_flags & VM_IO ? 'i' : '-',
29056 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29057 + 0);
29058 +#else
29059 vma->vm_pgoff);
29060 +#endif
29061
29062 #if defined(__i386__)
29063 pgprot = pgprot_val(vma->vm_page_prot);
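A stand-alone sketch of the drm_vm_info() change above: the map-type names become a designated-initializer table and the bounds check uses the table size instead of the hard-coded 0..5 range, so a newly covered entry (the GEM type) cannot silently fall outside the check. The enum constants below are stand-ins for the real _DRM_* values.

#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

/* stand-ins for the _DRM_* map types; GEM_MAP is the newly covered entry */
enum map_type { FB_MAP, REG_MAP, SHM_MAP, AGP_MAP, SG_MAP, PCI_MAP, GEM_MAP };

static const char * const types[] = {
        [FB_MAP]  = "FB",
        [REG_MAP] = "REG",
        [SHM_MAP] = "SHM",
        [AGP_MAP] = "AGP",
        [SG_MAP]  = "SG",
        [PCI_MAP] = "PCI",
        [GEM_MAP] = "GEM",
};

static const char *type_name(unsigned int type)
{
        if (type >= ARRAY_SIZE(types) || !types[type])
                return "??";
        return types[type];
}

int main(void)
{
        printf("%s %s\n", type_name(GEM_MAP), type_name(99));  /* GEM ?? */
        return 0;
}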
29064 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29065 --- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29066 +++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29067 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29068 stats->data[i].value =
29069 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29070 else
29071 - stats->data[i].value = atomic_read(&dev->counts[i]);
29072 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29073 stats->data[i].type = dev->types[i];
29074 }
29075
29076 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29077 --- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29078 +++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29079 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29080 if (drm_lock_take(&master->lock, lock->context)) {
29081 master->lock.file_priv = file_priv;
29082 master->lock.lock_time = jiffies;
29083 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29084 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29085 break; /* Got lock */
29086 }
29087
29088 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29089 return -EINVAL;
29090 }
29091
29092 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29093 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29094
29095 /* kernel_context_switch isn't used by any of the x86 drm
29096 * modules but is required by the Sparc driver.
29097 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29098 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29099 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29100 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29101 dma->buflist[vertex->idx],
29102 vertex->discard, vertex->used);
29103
29104 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29105 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29106 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29107 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29108 sarea_priv->last_enqueue = dev_priv->counter - 1;
29109 sarea_priv->last_dispatch = (int)hw_status[5];
29110
29111 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29112 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29113 mc->last_render);
29114
29115 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29116 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29117 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29118 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29119 sarea_priv->last_enqueue = dev_priv->counter - 1;
29120 sarea_priv->last_dispatch = (int)hw_status[5];
29121
29122 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29123 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29124 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29125 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29126 int page_flipping;
29127
29128 wait_queue_head_t irq_queue;
29129 - atomic_t irq_received;
29130 - atomic_t irq_emitted;
29131 + atomic_unchecked_t irq_received;
29132 + atomic_unchecked_t irq_emitted;
29133
29134 int front_offset;
29135 } drm_i810_private_t;
29136 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29137 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29138 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29139 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29140 int page_flipping;
29141
29142 wait_queue_head_t irq_queue;
29143 - atomic_t irq_received;
29144 - atomic_t irq_emitted;
29145 + atomic_unchecked_t irq_received;
29146 + atomic_unchecked_t irq_emitted;
29147
29148 int use_mi_batchbuffer_start;
29149
29150 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29151 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29152 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29153 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29154
29155 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29156
29157 - atomic_inc(&dev_priv->irq_received);
29158 + atomic_inc_unchecked(&dev_priv->irq_received);
29159 wake_up_interruptible(&dev_priv->irq_queue);
29160
29161 return IRQ_HANDLED;
29162 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29163
29164 DRM_DEBUG("%s\n", __func__);
29165
29166 - atomic_inc(&dev_priv->irq_emitted);
29167 + atomic_inc_unchecked(&dev_priv->irq_emitted);
29168
29169 BEGIN_LP_RING(2);
29170 OUT_RING(0);
29171 OUT_RING(GFX_OP_USER_INTERRUPT);
29172 ADVANCE_LP_RING();
29173
29174 - return atomic_read(&dev_priv->irq_emitted);
29175 + return atomic_read_unchecked(&dev_priv->irq_emitted);
29176 }
29177
29178 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29179 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29180
29181 DRM_DEBUG("%s\n", __func__);
29182
29183 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29184 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29185 return 0;
29186
29187 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29188 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29189
29190 for (;;) {
29191 __set_current_state(TASK_INTERRUPTIBLE);
29192 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29193 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29194 break;
29195 if ((signed)(end - jiffies) <= 0) {
29196 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29197 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29198 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29199 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29200 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29201 - atomic_set(&dev_priv->irq_received, 0);
29202 - atomic_set(&dev_priv->irq_emitted, 0);
29203 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29204 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29205 init_waitqueue_head(&dev_priv->irq_queue);
29206 }
29207
29208 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29209 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29210 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29211 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29212 }
29213 }
29214
29215 -struct intel_dvo_dev_ops ch7017_ops = {
29216 +const struct intel_dvo_dev_ops ch7017_ops = {
29217 .init = ch7017_init,
29218 .detect = ch7017_detect,
29219 .mode_valid = ch7017_mode_valid,
29220 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29221 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29222 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29223 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29224 }
29225 }
29226
29227 -struct intel_dvo_dev_ops ch7xxx_ops = {
29228 +const struct intel_dvo_dev_ops ch7xxx_ops = {
29229 .init = ch7xxx_init,
29230 .detect = ch7xxx_detect,
29231 .mode_valid = ch7xxx_mode_valid,
29232 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29233 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29234 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29235 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29236 *
29237 * \return singly-linked list of modes or NULL if no modes found.
29238 */
29239 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29240 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29241
29242 /**
29243 * Clean up driver-specific bits of the output
29244 */
29245 - void (*destroy) (struct intel_dvo_device *dvo);
29246 + void (* const destroy) (struct intel_dvo_device *dvo);
29247
29248 /**
29249 * Debugging hook to dump device registers to log file
29250 */
29251 - void (*dump_regs)(struct intel_dvo_device *dvo);
29252 + void (* const dump_regs)(struct intel_dvo_device *dvo);
29253 };
29254
29255 -extern struct intel_dvo_dev_ops sil164_ops;
29256 -extern struct intel_dvo_dev_ops ch7xxx_ops;
29257 -extern struct intel_dvo_dev_ops ivch_ops;
29258 -extern struct intel_dvo_dev_ops tfp410_ops;
29259 -extern struct intel_dvo_dev_ops ch7017_ops;
29260 +extern const struct intel_dvo_dev_ops sil164_ops;
29261 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
29262 +extern const struct intel_dvo_dev_ops ivch_ops;
29263 +extern const struct intel_dvo_dev_ops tfp410_ops;
29264 +extern const struct intel_dvo_dev_ops ch7017_ops;
29265
29266 #endif /* _INTEL_DVO_H */
29267 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29268 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29269 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29270 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29271 }
29272 }
29273
29274 -struct intel_dvo_dev_ops ivch_ops= {
29275 +const struct intel_dvo_dev_ops ivch_ops= {
29276 .init = ivch_init,
29277 .dpms = ivch_dpms,
29278 .save = ivch_save,
29279 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29280 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29281 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29282 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29283 }
29284 }
29285
29286 -struct intel_dvo_dev_ops sil164_ops = {
29287 +const struct intel_dvo_dev_ops sil164_ops = {
29288 .init = sil164_init,
29289 .detect = sil164_detect,
29290 .mode_valid = sil164_mode_valid,
29291 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29292 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29293 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29294 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29295 }
29296 }
29297
29298 -struct intel_dvo_dev_ops tfp410_ops = {
29299 +const struct intel_dvo_dev_ops tfp410_ops = {
29300 .init = tfp410_init,
29301 .detect = tfp410_detect,
29302 .mode_valid = tfp410_mode_valid,
29303 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29304 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29305 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29306 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29307 I915_READ(GTIMR));
29308 }
29309 seq_printf(m, "Interrupts received: %d\n",
29310 - atomic_read(&dev_priv->irq_received));
29311 + atomic_read_unchecked(&dev_priv->irq_received));
29312 if (dev_priv->hw_status_page != NULL) {
29313 seq_printf(m, "Current sequence: %d\n",
29314 i915_get_gem_seqno(dev));
29315 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29316 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29317 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29318 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29319 return i915_resume(dev);
29320 }
29321
29322 -static struct vm_operations_struct i915_gem_vm_ops = {
29323 +static const struct vm_operations_struct i915_gem_vm_ops = {
29324 .fault = i915_gem_fault,
29325 .open = drm_gem_vm_open,
29326 .close = drm_gem_vm_close,
29327 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29328 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29329 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29330 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29331 /* display clock increase/decrease */
29332 /* pll clock increase/decrease */
29333 /* clock gating init */
29334 -};
29335 +} __no_const;
29336
29337 typedef struct drm_i915_private {
29338 struct drm_device *dev;
29339 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29340 int page_flipping;
29341
29342 wait_queue_head_t irq_queue;
29343 - atomic_t irq_received;
29344 + atomic_unchecked_t irq_received;
29345 /** Protects user_irq_refcount and irq_mask_reg */
29346 spinlock_t user_irq_lock;
29347 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
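
irq_received is converted from atomic_t to atomic_unchecked_t here and in i915_irq.c below. Under PaX's reference-counter overflow protection, plain atomic_t operations are instrumented to catch overflows; counters that are pure statistics and may legitimately wrap, such as an interrupt tally, are switched to the _unchecked variants to opt out. A simplified userspace sketch of the assumed semantics (not the PaX implementation; the checked flavour here saturates rather than trapping):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int v; } demo_checked_t;
typedef struct { atomic_uint v; } demo_unchecked_t;

/* Checked flavour: refuse to wrap, saturate at INT_MAX instead. */
static void checked_inc(demo_checked_t *a)
{
	int old = atomic_load(&a->v);
	while (old != INT_MAX &&
	       !atomic_compare_exchange_weak(&a->v, &old, old + 1))
		;
}

/* Unchecked flavour: plain increment, wrapping is fine for a statistic. */
static void unchecked_inc(demo_unchecked_t *a)
{
	atomic_fetch_add(&a->v, 1);
}

int main(void)
{
	demo_checked_t refs = { INT_MAX - 1 };
	demo_unchecked_t irq_received = { 0 };

	checked_inc(&refs);	/* reaches INT_MAX ... */
	checked_inc(&refs);	/* ... and saturates instead of overflowing */
	unchecked_inc(&irq_received);

	printf("refs=%d irq_received=%u\n",
	       atomic_load(&refs.v), atomic_load(&irq_received.v));
	return 0;
}
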
29348 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29349 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29350 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29351 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29352
29353 args->aper_size = dev->gtt_total;
29354 args->aper_available_size = (args->aper_size -
29355 - atomic_read(&dev->pin_memory));
29356 + atomic_read_unchecked(&dev->pin_memory));
29357
29358 return 0;
29359 }
29360 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29361 return -EINVAL;
29362 }
29363
29364 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29365 + drm_gem_object_unreference(obj);
29366 + return -EFAULT;
29367 + }
29368 +
29369 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29370 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29371 } else {
29372 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29373 return -EINVAL;
29374 }
29375
29376 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29377 + drm_gem_object_unreference(obj);
29378 + return -EFAULT;
29379 + }
29380 +
29381 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29382 * it would end up going through the fenced access, and we'll get
29383 * different detiling behavior between reading and writing.
29384 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29385
29386 if (obj_priv->gtt_space) {
29387 atomic_dec(&dev->gtt_count);
29388 - atomic_sub(obj->size, &dev->gtt_memory);
29389 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29390
29391 drm_mm_put_block(obj_priv->gtt_space);
29392 obj_priv->gtt_space = NULL;
29393 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29394 goto search_free;
29395 }
29396 atomic_inc(&dev->gtt_count);
29397 - atomic_add(obj->size, &dev->gtt_memory);
29398 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
29399
29400 /* Assert that the object is not currently in any GPU domain. As it
29401 * wasn't in the GTT, there shouldn't be any way it could have been in
29402 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29403 "%d/%d gtt bytes\n",
29404 atomic_read(&dev->object_count),
29405 atomic_read(&dev->pin_count),
29406 - atomic_read(&dev->object_memory),
29407 - atomic_read(&dev->pin_memory),
29408 - atomic_read(&dev->gtt_memory),
29409 + atomic_read_unchecked(&dev->object_memory),
29410 + atomic_read_unchecked(&dev->pin_memory),
29411 + atomic_read_unchecked(&dev->gtt_memory),
29412 dev->gtt_total);
29413 }
29414 goto err;
29415 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29416 */
29417 if (obj_priv->pin_count == 1) {
29418 atomic_inc(&dev->pin_count);
29419 - atomic_add(obj->size, &dev->pin_memory);
29420 + atomic_add_unchecked(obj->size, &dev->pin_memory);
29421 if (!obj_priv->active &&
29422 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29423 !list_empty(&obj_priv->list))
29424 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29425 list_move_tail(&obj_priv->list,
29426 &dev_priv->mm.inactive_list);
29427 atomic_dec(&dev->pin_count);
29428 - atomic_sub(obj->size, &dev->pin_memory);
29429 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
29430 }
29431 i915_verify_inactive(dev, __FILE__, __LINE__);
29432 }
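
The two added hunks in i915_gem_pread_ioctl/i915_gem_pwrite_ioctl validate the user-supplied data_ptr/size pair with access_ok() before the copy paths run, returning -EFAULT (after dropping the object reference) on a bad range. A userspace analogue of that kind of pointer/length validation, including the length-overflow case (range_ok and demo_pread are made-up names, not kernel API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int range_ok(const void *buf, size_t off, size_t len, size_t buf_size)
{
	if (buf == NULL)
		return 0;
	if (off > buf_size || len > buf_size - off)	/* also rejects off+len wrap-around */
		return 0;
	return 1;
}

static int demo_pread(const uint8_t *obj, size_t obj_size,
		      uint8_t *dst, size_t off, size_t len)
{
	if (!range_ok(obj, off, len, obj_size))
		return -EFAULT;			/* mirrors the early -EFAULT return above */
	memcpy(dst, obj + off, len);
	return 0;
}

int main(void)
{
	uint8_t obj[16] = { 0 };
	uint8_t out[8];

	printf("ok:  %d\n", demo_pread(obj, sizeof(obj), out, 4, 8));	/* 0 */
	printf("bad: %d\n", demo_pread(obj, sizeof(obj), out, 12, 8));	/* -14 (EFAULT) */
	return 0;
}
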
29433 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29434 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29435 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29436 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29437 int irq_received;
29438 int ret = IRQ_NONE;
29439
29440 - atomic_inc(&dev_priv->irq_received);
29441 + atomic_inc_unchecked(&dev_priv->irq_received);
29442
29443 if (IS_IGDNG(dev))
29444 return igdng_irq_handler(dev);
29445 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29446 {
29447 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29448
29449 - atomic_set(&dev_priv->irq_received, 0);
29450 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29451
29452 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29453 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29454 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29455 --- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29456 +++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29457 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29458 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29459
29460 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29461 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29462 + pax_open_kernel();
29463 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29464 + pax_close_kernel();
29465
29466 /* Read the regs to test if we can talk to the device */
29467 for (i = 0; i < 0x40; i++) {
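
The intel_sdvo.c hunk can no longer assign the functionality hook directly, presumably because intel_sdvo_i2c_bit_algo is constified (made read-only) elsewhere in this patch; instead it opens a write window with pax_open_kernel(), installs the pointer through a void ** cast, and closes the window again. A rough userspace analogue of that open/patch/close pattern using mprotect() (POSIX, not the kernel mechanism; struct algo and real_functionality are illustrative):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct algo { int (*functionality)(void); };

static int real_functionality(void) { return 42; }

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct algo *a = mmap(NULL, page, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED)
		return 1;

	memset(a, 0, sizeof(*a));
	mprotect(a, page, PROT_READ);			/* normal state: read-only */

	mprotect(a, page, PROT_READ | PROT_WRITE);	/* pax_open_kernel() analogue */
	a->functionality = real_functionality;		/* install the hook */
	mprotect(a, page, PROT_READ);			/* pax_close_kernel() analogue */

	printf("%d\n", a->functionality());
	munmap(a, page);
	return 0;
}
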
29468 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29469 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29470 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29471 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29472 u32 clear_cmd;
29473 u32 maccess;
29474
29475 - atomic_t vbl_received; /**< Number of vblanks received. */
29476 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29477 wait_queue_head_t fence_queue;
29478 - atomic_t last_fence_retired;
29479 + atomic_unchecked_t last_fence_retired;
29480 u32 next_fence_to_post;
29481
29482 unsigned int fb_cpp;
29483 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29484 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29485 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29486 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29487 if (crtc != 0)
29488 return 0;
29489
29490 - return atomic_read(&dev_priv->vbl_received);
29491 + return atomic_read_unchecked(&dev_priv->vbl_received);
29492 }
29493
29494
29495 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29496 /* VBLANK interrupt */
29497 if (status & MGA_VLINEPEN) {
29498 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29499 - atomic_inc(&dev_priv->vbl_received);
29500 + atomic_inc_unchecked(&dev_priv->vbl_received);
29501 drm_handle_vblank(dev, 0);
29502 handled = 1;
29503 }
29504 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29505 MGA_WRITE(MGA_PRIMEND, prim_end);
29506 }
29507
29508 - atomic_inc(&dev_priv->last_fence_retired);
29509 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29510 DRM_WAKEUP(&dev_priv->fence_queue);
29511 handled = 1;
29512 }
29513 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29514 * using fences.
29515 */
29516 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29517 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29518 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29519 - *sequence) <= (1 << 23)));
29520
29521 *sequence = cur_fence;
29522 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29523 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29524 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29525 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29526
29527 /* GH: Simple idle check.
29528 */
29529 - atomic_set(&dev_priv->idle_count, 0);
29530 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29531
29532 /* We don't support anything other than bus-mastering ring mode,
29533 * but the ring can be in either AGP or PCI space for the ring
29534 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29535 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29536 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29537 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29538 int is_pci;
29539 unsigned long cce_buffers_offset;
29540
29541 - atomic_t idle_count;
29542 + atomic_unchecked_t idle_count;
29543
29544 int page_flipping;
29545 int current_page;
29546 u32 crtc_offset;
29547 u32 crtc_offset_cntl;
29548
29549 - atomic_t vbl_received;
29550 + atomic_unchecked_t vbl_received;
29551
29552 u32 color_fmt;
29553 unsigned int front_offset;
29554 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29555 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29556 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29557 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29558 if (crtc != 0)
29559 return 0;
29560
29561 - return atomic_read(&dev_priv->vbl_received);
29562 + return atomic_read_unchecked(&dev_priv->vbl_received);
29563 }
29564
29565 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29566 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29567 /* VBLANK interrupt */
29568 if (status & R128_CRTC_VBLANK_INT) {
29569 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29570 - atomic_inc(&dev_priv->vbl_received);
29571 + atomic_inc_unchecked(&dev_priv->vbl_received);
29572 drm_handle_vblank(dev, 0);
29573 return IRQ_HANDLED;
29574 }
29575 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29576 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29577 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29578 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29579
29580 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29581 {
29582 - if (atomic_read(&dev_priv->idle_count) == 0) {
29583 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29584 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29585 } else {
29586 - atomic_set(&dev_priv->idle_count, 0);
29587 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29588 }
29589 }
29590
29591 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29592 --- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29593 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29594 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29595 char name[512];
29596 int i;
29597
29598 + pax_track_stack();
29599 +
29600 ctx->card = card;
29601 ctx->bios = bios;
29602
29603 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29604 --- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29605 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29606 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29607 regex_t mask_rex;
29608 regmatch_t match[4];
29609 char buf[1024];
29610 - size_t end;
29611 + long end;
29612 int len;
29613 int done = 0;
29614 int r;
29615 unsigned o;
29616 struct offset *offset;
29617 char last_reg_s[10];
29618 - int last_reg;
29619 + unsigned long last_reg;
29620
29621 if (regcomp
29622 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29623 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29624 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29625 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29626 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29627 bool linkb;
29628 struct radeon_i2c_bus_rec ddc_bus;
29629
29630 + pax_track_stack();
29631 +
29632 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29633
29634 if (data_offset == 0)
29635 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29636 }
29637 }
29638
29639 -struct bios_connector {
29640 +static struct bios_connector {
29641 bool valid;
29642 uint16_t line_mux;
29643 uint16_t devices;
29644 int connector_type;
29645 struct radeon_i2c_bus_rec ddc_bus;
29646 -};
29647 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29648
29649 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29650 drm_device
29651 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29652 uint8_t dac;
29653 union atom_supported_devices *supported_devices;
29654 int i, j;
29655 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29656
29657 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29658
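
Besides the pax_track_stack() marker, this hunk moves bios_connectors out of the function and into a static file-scope array, trading a large per-call stack allocation (kernel stacks are only a few KB) for static storage. A small sketch of the same transformation (sizes and field names are illustrative); the trade-off is that a static table is not reentrant, which is tolerable for a probe path that runs once per device:

#include <stdio.h>
#include <string.h>

#define MAX_DEVICES 64

struct demo_connector { int valid; int line_mux; int connector_type; };

/* after the change: lives in .bss instead of on every caller's stack */
static struct demo_connector connectors[MAX_DEVICES];

static int parse_connectors(void)
{
	/* before: struct demo_connector connectors[MAX_DEVICES];  (on the stack) */
	memset(connectors, 0, sizeof(connectors));
	connectors[0].valid = 1;
	return 1;
}

int main(void)
{
	printf("parsed=%d, %zu bytes moved off the stack\n",
	       parse_connectors(), sizeof(connectors));
	return 0;
}
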
29659 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29660 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29661 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29662 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29663
29664 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29665 error = freq - current_freq;
29666 - error = error < 0 ? 0xffffffff : error;
29667 + error = (int32_t)error < 0 ? 0xffffffff : error;
29668 } else
29669 error = abs(current_freq - freq);
29670 vco_diff = abs(vco - best_vco);
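
In radeon_compute_pll() the error variable is unsigned, so the old "error < 0" saturation test was dead code; the hunk reinterprets the value as int32_t so that an overshoot (freq below current_freq, which wraps the subtraction) is actually pushed to the worst-case error. A standalone illustration of the difference (saturate_old/saturate_new are demo names):

#include <stdint.h>
#include <stdio.h>

static uint32_t saturate_old(uint32_t error)
{
	return error < 0 ? 0xffffffffu : error;		/* condition is never true */
}

static uint32_t saturate_new(uint32_t error)
{
	return (int32_t)error < 0 ? 0xffffffffu : error;
}

int main(void)
{
	uint32_t freq = 100, current_freq = 150;
	uint32_t error = freq - current_freq;		/* wraps to 4294967246 */

	printf("old: %u\n", saturate_old(error));	/* 4294967246: treated as a "good" error */
	printf("new: %u\n", saturate_new(error));	/* 4294967295: correctly saturated */
	return 0;
}
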
29671 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29672 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29673 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29674 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29675
29676 /* SW interrupt */
29677 wait_queue_head_t swi_queue;
29678 - atomic_t swi_emitted;
29679 + atomic_unchecked_t swi_emitted;
29680 int vblank_crtc;
29681 uint32_t irq_enable_reg;
29682 uint32_t r500_disp_irq_reg;
29683 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29684 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29685 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29686 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29687 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29688 return 0;
29689 }
29690 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29691 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29692 if (!rdev->cp.ready) {
29693 /* FIXME: cp is not running assume everythings is done right
29694 * away
29695 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29696 return r;
29697 }
29698 WREG32(rdev->fence_drv.scratch_reg, 0);
29699 - atomic_set(&rdev->fence_drv.seq, 0);
29700 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29701 INIT_LIST_HEAD(&rdev->fence_drv.created);
29702 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29703 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29704 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29705 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29706 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29707 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29708 */
29709 struct radeon_fence_driver {
29710 uint32_t scratch_reg;
29711 - atomic_t seq;
29712 + atomic_unchecked_t seq;
29713 uint32_t last_seq;
29714 unsigned long count_timeout;
29715 wait_queue_head_t queue;
29716 @@ -640,7 +640,7 @@ struct radeon_asic {
29717 uint32_t offset, uint32_t obj_size);
29718 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29719 void (*bandwidth_update)(struct radeon_device *rdev);
29720 -};
29721 +} __no_const;
29722
29723 /*
29724 * Asic structures
29725 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29726 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29727 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29728 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29729 request = compat_alloc_user_space(sizeof(*request));
29730 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29731 || __put_user(req32.param, &request->param)
29732 - || __put_user((void __user *)(unsigned long)req32.value,
29733 + || __put_user((unsigned long)req32.value,
29734 &request->value))
29735 return -EFAULT;
29736
29737 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29738 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29739 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29740 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29741 unsigned int ret;
29742 RING_LOCALS;
29743
29744 - atomic_inc(&dev_priv->swi_emitted);
29745 - ret = atomic_read(&dev_priv->swi_emitted);
29746 + atomic_inc_unchecked(&dev_priv->swi_emitted);
29747 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29748
29749 BEGIN_RING(4);
29750 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29751 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29752 drm_radeon_private_t *dev_priv =
29753 (drm_radeon_private_t *) dev->dev_private;
29754
29755 - atomic_set(&dev_priv->swi_emitted, 0);
29756 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29757 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29758
29759 dev->max_vblank_count = 0x001fffff;
29760 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
29761 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29762 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29763 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29764 {
29765 drm_radeon_private_t *dev_priv = dev->dev_private;
29766 drm_radeon_getparam_t *param = data;
29767 - int value;
29768 + int value = 0;
29769
29770 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29771
29772 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
29773 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29774 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29775 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29776 DRM_INFO("radeon: ttm finalized\n");
29777 }
29778
29779 -static struct vm_operations_struct radeon_ttm_vm_ops;
29780 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
29781 -
29782 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29783 -{
29784 - struct ttm_buffer_object *bo;
29785 - int r;
29786 -
29787 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
29788 - if (bo == NULL) {
29789 - return VM_FAULT_NOPAGE;
29790 - }
29791 - r = ttm_vm_ops->fault(vma, vmf);
29792 - return r;
29793 -}
29794 -
29795 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29796 {
29797 struct drm_file *file_priv;
29798 struct radeon_device *rdev;
29799 - int r;
29800
29801 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29802 return drm_mmap(filp, vma);
29803 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29804
29805 file_priv = (struct drm_file *)filp->private_data;
29806 rdev = file_priv->minor->dev->dev_private;
29807 - if (rdev == NULL) {
29808 + if (!rdev)
29809 return -EINVAL;
29810 - }
29811 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29812 - if (unlikely(r != 0)) {
29813 - return r;
29814 - }
29815 - if (unlikely(ttm_vm_ops == NULL)) {
29816 - ttm_vm_ops = vma->vm_ops;
29817 - radeon_ttm_vm_ops = *ttm_vm_ops;
29818 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29819 - }
29820 - vma->vm_ops = &radeon_ttm_vm_ops;
29821 - return 0;
29822 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29823 }
29824
29825
29826 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
29827 --- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29828 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29829 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29830 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29831 rdev->pm.sideport_bandwidth.full)
29832 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29833 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29834 + read_delay_latency.full = rfixed_const(800 * 1000);
29835 read_delay_latency.full = rfixed_div(read_delay_latency,
29836 rdev->pm.igp_sideport_mclk);
29837 + a.full = rfixed_const(370);
29838 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29839 } else {
29840 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29841 rdev->pm.k8_bandwidth.full)
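
The rs690 change splits the read-delay constant because 370 * 800 * 1000 no longer fits in 32 bits once converted to fixed point; the patch converts 800 * 1000, divides by the sideport clock, and only then multiplies the factor of 370 back in. A self-contained sketch of that reordering, assuming a 20.12 fixed-point format like the radeon rfixed helpers (fix_const/fix_div/fix_mul and the clock value are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define FIX_SHIFT 12	/* assumed 20.12 format */

static uint32_t fix_const(uint32_t v)           { return v << FIX_SHIFT; }
static uint32_t fix_div(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << FIX_SHIFT) / b); }
static uint32_t fix_mul(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) >> FIX_SHIFT); }

int main(void)
{
	uint32_t mclk = fix_const(400);		/* example sideport clock */
	uint32_t latency;

	/* before: fix_const(370 * 800 * 1000) -- 296000000 << 12 overflows 32 bits */
	latency = fix_div(fix_const(800 * 1000), mclk);
	latency = fix_mul(latency, fix_const(370));

	printf("latency = %u (integer part %u)\n", latency, latency >> FIX_SHIFT);
	return 0;
}
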
29842 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
29843 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29844 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29845 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29846 NULL
29847 };
29848
29849 -static struct sysfs_ops ttm_bo_global_ops = {
29850 +static const struct sysfs_ops ttm_bo_global_ops = {
29851 .show = &ttm_bo_global_show
29852 };
29853
29854 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
29855 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29856 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29857 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29858 {
29859 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29860 vma->vm_private_data;
29861 - struct ttm_bo_device *bdev = bo->bdev;
29862 + struct ttm_bo_device *bdev;
29863 unsigned long bus_base;
29864 unsigned long bus_offset;
29865 unsigned long bus_size;
29866 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29867 unsigned long address = (unsigned long)vmf->virtual_address;
29868 int retval = VM_FAULT_NOPAGE;
29869
29870 + if (!bo)
29871 + return VM_FAULT_NOPAGE;
29872 + bdev = bo->bdev;
29873 +
29874 /*
29875 * Work around locking order reversal in fault / nopfn
29876 * between mmap_sem and bo_reserve: Perform a trylock operation
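
The ttm_bo_vm_fault() change only reorders a dereference: bo->bdev used to be read in the declaration, before the new !bo early return could run; the hunk declares bdev uninitialised and assigns it after the NULL check. A trivial sketch of the before/after ordering (types and the NOPAGE value are illustrative):

#include <stddef.h>
#include <stdio.h>

#define DEMO_FAULT_NOPAGE 0x100

struct demo_bdev { int id; };
struct demo_bo   { struct demo_bdev *bdev; };

static int demo_fault(struct demo_bo *bo)
{
	struct demo_bdev *bdev;			/* no longer initialised from bo here */

	if (!bo)
		return DEMO_FAULT_NOPAGE;	/* bail out before any dereference */
	bdev = bo->bdev;

	return bdev->id;
}

int main(void)
{
	struct demo_bdev bdev = { 7 };
	struct demo_bo bo = { &bdev };

	printf("%d\n", demo_fault(&bo));	/* 7 */
	printf("%#x\n", demo_fault(NULL));	/* 0x100 */
	return 0;
}
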
29877 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
29878 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29879 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29880 @@ -36,7 +36,7 @@
29881 struct ttm_global_item {
29882 struct mutex mutex;
29883 void *object;
29884 - int refcount;
29885 + atomic_t refcount;
29886 };
29887
29888 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29889 @@ -49,7 +49,7 @@ void ttm_global_init(void)
29890 struct ttm_global_item *item = &glob[i];
29891 mutex_init(&item->mutex);
29892 item->object = NULL;
29893 - item->refcount = 0;
29894 + atomic_set(&item->refcount, 0);
29895 }
29896 }
29897
29898 @@ -59,7 +59,7 @@ void ttm_global_release(void)
29899 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29900 struct ttm_global_item *item = &glob[i];
29901 BUG_ON(item->object != NULL);
29902 - BUG_ON(item->refcount != 0);
29903 + BUG_ON(atomic_read(&item->refcount) != 0);
29904 }
29905 }
29906
29907 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29908 void *object;
29909
29910 mutex_lock(&item->mutex);
29911 - if (item->refcount == 0) {
29912 + if (atomic_read(&item->refcount) == 0) {
29913 item->object = kzalloc(ref->size, GFP_KERNEL);
29914 if (unlikely(item->object == NULL)) {
29915 ret = -ENOMEM;
29916 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29917 goto out_err;
29918
29919 }
29920 - ++item->refcount;
29921 + atomic_inc(&item->refcount);
29922 ref->object = item->object;
29923 object = item->object;
29924 mutex_unlock(&item->mutex);
29925 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29926 struct ttm_global_item *item = &glob[ref->global_type];
29927
29928 mutex_lock(&item->mutex);
29929 - BUG_ON(item->refcount == 0);
29930 + BUG_ON(atomic_read(&item->refcount) == 0);
29931 BUG_ON(ref->object != item->object);
29932 - if (--item->refcount == 0) {
29933 + if (atomic_dec_and_test(&item->refcount)) {
29934 ref->release(ref);
29935 item->object = NULL;
29936 }
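
The ttm_global hunk above turns the plain int refcount into an atomic_t, so the drop-to-zero decision in ttm_global_item_unref() happens as a single atomic_dec_and_test() rather than a separate decrement and compare. A userspace C11 sketch of the same ref/unref pattern (item_ref/item_unref are demo names; the mutex still guarding object allocation in the real code is omitted):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_item { atomic_int refcount; };

static void item_ref(struct demo_item *it)
{
	atomic_fetch_add(&it->refcount, 1);
}

/* True exactly once, for whoever dropped the last reference. */
static bool item_unref(struct demo_item *it)
{
	return atomic_fetch_sub(&it->refcount, 1) == 1;
}

int main(void)
{
	struct demo_item it = { 0 };

	item_ref(&it);
	item_ref(&it);
	printf("release? %d\n", item_unref(&it));	/* 0 */
	printf("release? %d\n", item_unref(&it));	/* 1 -> release the object here */
	return 0;
}
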
29937 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
29938 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29939 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29940 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29941 NULL
29942 };
29943
29944 -static struct sysfs_ops ttm_mem_zone_ops = {
29945 +static const struct sysfs_ops ttm_mem_zone_ops = {
29946 .show = &ttm_mem_zone_show,
29947 .store = &ttm_mem_zone_store
29948 };
29949 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
29950 --- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29951 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29952 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29953 typedef uint32_t maskarray_t[5];
29954
29955 typedef struct drm_via_irq {
29956 - atomic_t irq_received;
29957 + atomic_unchecked_t irq_received;
29958 uint32_t pending_mask;
29959 uint32_t enable_mask;
29960 wait_queue_head_t irq_queue;
29961 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29962 struct timeval last_vblank;
29963 int last_vblank_valid;
29964 unsigned usec_per_vblank;
29965 - atomic_t vbl_received;
29966 + atomic_unchecked_t vbl_received;
29967 drm_via_state_t hc_state;
29968 char pci_buf[VIA_PCI_BUF_SIZE];
29969 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29970 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
29971 --- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29972 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29973 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29974 if (crtc != 0)
29975 return 0;
29976
29977 - return atomic_read(&dev_priv->vbl_received);
29978 + return atomic_read_unchecked(&dev_priv->vbl_received);
29979 }
29980
29981 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29982 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29983
29984 status = VIA_READ(VIA_REG_INTERRUPT);
29985 if (status & VIA_IRQ_VBLANK_PENDING) {
29986 - atomic_inc(&dev_priv->vbl_received);
29987 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29988 + atomic_inc_unchecked(&dev_priv->vbl_received);
29989 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29990 do_gettimeofday(&cur_vblank);
29991 if (dev_priv->last_vblank_valid) {
29992 dev_priv->usec_per_vblank =
29993 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29994 dev_priv->last_vblank = cur_vblank;
29995 dev_priv->last_vblank_valid = 1;
29996 }
29997 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29998 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29999 DRM_DEBUG("US per vblank is: %u\n",
30000 dev_priv->usec_per_vblank);
30001 }
30002 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30003
30004 for (i = 0; i < dev_priv->num_irqs; ++i) {
30005 if (status & cur_irq->pending_mask) {
30006 - atomic_inc(&cur_irq->irq_received);
30007 + atomic_inc_unchecked(&cur_irq->irq_received);
30008 DRM_WAKEUP(&cur_irq->irq_queue);
30009 handled = 1;
30010 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30011 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30012 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30013 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30014 masks[irq][4]));
30015 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30016 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30017 } else {
30018 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30019 (((cur_irq_sequence =
30020 - atomic_read(&cur_irq->irq_received)) -
30021 + atomic_read_unchecked(&cur_irq->irq_received)) -
30022 *sequence) <= (1 << 23)));
30023 }
30024 *sequence = cur_irq_sequence;
30025 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30026 }
30027
30028 for (i = 0; i < dev_priv->num_irqs; ++i) {
30029 - atomic_set(&cur_irq->irq_received, 0);
30030 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30031 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30032 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30033 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30034 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30035 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30036 case VIA_IRQ_RELATIVE:
30037 irqwait->request.sequence +=
30038 - atomic_read(&cur_irq->irq_received);
30039 + atomic_read_unchecked(&cur_irq->irq_received);
30040 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30041 case VIA_IRQ_ABSOLUTE:
30042 break;
30043 diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30044 --- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30045 +++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30046 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30047
30048 int hid_add_device(struct hid_device *hdev)
30049 {
30050 - static atomic_t id = ATOMIC_INIT(0);
30051 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30052 int ret;
30053
30054 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30055 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30056 /* XXX hack, any other cleaner solution after the driver core
30057 * is converted to allow more than 20 bytes as the device name? */
30058 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30059 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30060 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30061
30062 ret = device_add(&hdev->dev);
30063 if (!ret)
30064 diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30065 --- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30066 +++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30067 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30068 return put_user(HID_VERSION, (int __user *)arg);
30069
30070 case HIDIOCAPPLICATION:
30071 - if (arg < 0 || arg >= hid->maxapplication)
30072 + if (arg >= hid->maxapplication)
30073 return -EINVAL;
30074
30075 for (i = 0; i < hid->maxcollection; i++)
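
The HIDIOCAPPLICATION argument is the ioctl's unsigned long arg, so the dropped "arg < 0" test could never fire; the single unsigned comparison against hid->maxapplication is the complete range check, since negative user values wrap to huge unsigned ones. A short demonstration (lookup is a made-up helper):

#include <stdio.h>

static int lookup(unsigned long arg, unsigned int maxapplication)
{
	if (arg >= maxapplication)	/* also catches "negative" inputs: they wrap to huge values */
		return -1;
	return (int)arg;
}

int main(void)
{
	printf("%d\n", lookup(3, 8));			/* 3  */
	printf("%d\n", lookup((unsigned long)-1, 8));	/* -1 */
	return 0;
}
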
30076 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30077 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30078 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30079 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30080 * the lid is closed. This leads to interrupts as soon as a little move
30081 * is done.
30082 */
30083 - atomic_inc(&lis3_dev.count);
30084 + atomic_inc_unchecked(&lis3_dev.count);
30085
30086 wake_up_interruptible(&lis3_dev.misc_wait);
30087 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30088 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30089 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30090 return -EBUSY; /* already open */
30091
30092 - atomic_set(&lis3_dev.count, 0);
30093 + atomic_set_unchecked(&lis3_dev.count, 0);
30094
30095 /*
30096 * The sensor can generate interrupts for free-fall and direction
30097 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30098 add_wait_queue(&lis3_dev.misc_wait, &wait);
30099 while (true) {
30100 set_current_state(TASK_INTERRUPTIBLE);
30101 - data = atomic_xchg(&lis3_dev.count, 0);
30102 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30103 if (data)
30104 break;
30105
30106 @@ -244,7 +244,7 @@ out:
30107 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30108 {
30109 poll_wait(file, &lis3_dev.misc_wait, wait);
30110 - if (atomic_read(&lis3_dev.count))
30111 + if (atomic_read_unchecked(&lis3_dev.count))
30112 return POLLIN | POLLRDNORM;
30113 return 0;
30114 }
30115 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30116 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30117 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30118 @@ -201,7 +201,7 @@ struct lis3lv02d {
30119
30120 struct input_polled_dev *idev; /* input device */
30121 struct platform_device *pdev; /* platform device */
30122 - atomic_t count; /* interrupt count after last read */
30123 + atomic_unchecked_t count; /* interrupt count after last read */
30124 int xcalib; /* calibrated null value for x */
30125 int ycalib; /* calibrated null value for y */
30126 int zcalib; /* calibrated null value for z */
30127 diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30128 --- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30129 +++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30130 @@ -112,7 +112,7 @@ struct sht15_data {
30131 int supply_uV;
30132 int supply_uV_valid;
30133 struct work_struct update_supply_work;
30134 - atomic_t interrupt_handled;
30135 + atomic_unchecked_t interrupt_handled;
30136 };
30137
30138 /**
30139 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30140 return ret;
30141
30142 gpio_direction_input(data->pdata->gpio_data);
30143 - atomic_set(&data->interrupt_handled, 0);
30144 + atomic_set_unchecked(&data->interrupt_handled, 0);
30145
30146 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30147 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30148 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30149 /* Only relevant if the interrupt hasn't occured. */
30150 - if (!atomic_read(&data->interrupt_handled))
30151 + if (!atomic_read_unchecked(&data->interrupt_handled))
30152 schedule_work(&data->read_work);
30153 }
30154 ret = wait_event_timeout(data->wait_queue,
30155 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30156 struct sht15_data *data = d;
30157 /* First disable the interrupt */
30158 disable_irq_nosync(irq);
30159 - atomic_inc(&data->interrupt_handled);
30160 + atomic_inc_unchecked(&data->interrupt_handled);
30161 /* Then schedule a reading work struct */
30162 if (data->flag != SHT15_READING_NOTHING)
30163 schedule_work(&data->read_work);
30164 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30165 here as could have gone low in meantime so verify
30166 it hasn't!
30167 */
30168 - atomic_set(&data->interrupt_handled, 0);
30169 + atomic_set_unchecked(&data->interrupt_handled, 0);
30170 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30171 /* If still not occured or another handler has been scheduled */
30172 if (gpio_get_value(data->pdata->gpio_data)
30173 - || atomic_read(&data->interrupt_handled))
30174 + || atomic_read_unchecked(&data->interrupt_handled))
30175 return;
30176 }
30177 /* Read the data back from the device */
30178 diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30179 --- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30180 +++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30181 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30182 struct i2c_board_info *info);
30183 static int w83791d_remove(struct i2c_client *client);
30184
30185 -static int w83791d_read(struct i2c_client *client, u8 register);
30186 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30187 +static int w83791d_read(struct i2c_client *client, u8 reg);
30188 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30189 static struct w83791d_data *w83791d_update_device(struct device *dev);
30190
30191 #ifdef DEBUG
30192 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30193 --- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30194 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30195 @@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30196 }
30197
30198 /* Fill in the new structures */
30199 - s4882_algo[0] = *(amd756_smbus.algo);
30200 - s4882_algo[0].smbus_xfer = amd756_access_virt0;
30201 + memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30202 + *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30203 s4882_adapter[0] = amd756_smbus;
30204 s4882_adapter[0].algo = s4882_algo;
30205 - s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30206 + *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30207 for (i = 1; i < 5; i++) {
30208 - s4882_algo[i] = *(amd756_smbus.algo);
30209 + memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30210 s4882_adapter[i] = amd756_smbus;
30211 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30212 "SMBus 8111 adapter (CPU%d)", i-1);
30213 s4882_adapter[i].algo = s4882_algo+i;
30214 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30215 }
30216 - s4882_algo[1].smbus_xfer = amd756_access_virt1;
30217 - s4882_algo[2].smbus_xfer = amd756_access_virt2;
30218 - s4882_algo[3].smbus_xfer = amd756_access_virt3;
30219 - s4882_algo[4].smbus_xfer = amd756_access_virt4;
30220 + *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30221 + *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30222 + *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30223 + *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30224
30225 /* Register virtual adapters */
30226 for (i = 0; i < 5; i++) {
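
The S4882 init path used to copy amd756_smbus.algo by struct assignment and then overwrite .smbus_xfer per adapter; with the i2c algorithm hooks apparently constified elsewhere in this patch, neither compiles any more, so the hunk copies the template with memcpy() and writes the hooks through void ** casts. The nforce2-s4985 hunk below applies the same workaround. A compressed sketch of the idea (algo_demo and the *_xfer functions are illustrative; the cast deliberately defeats the const qualifier exactly as the patch does and relies on the array living in writable memory):

#include <stdio.h>
#include <string.h>

struct algo_demo {
	int (* const xfer)(int);	/* const member: no ordinary assignment */
};

static int template_xfer(int x) { return x; }
static int virt0_xfer(int x)    { return x + 1; }

static const struct algo_demo template_algo = { .xfer = template_xfer };
static struct algo_demo s4882_demo[1];

int main(void)
{
	/* s4882_demo[0] = template_algo;      -- would not compile (const member) */
	memcpy(&s4882_demo[0], &template_algo, sizeof(s4882_demo[0]));

	/* s4882_demo[0].xfer = virt0_xfer;    -- would not compile either */
	*(void **)&s4882_demo[0].xfer = (void *)virt0_xfer;

	printf("%d\n", s4882_demo[0].xfer(41));	/* 42 */
	return 0;
}
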
30227 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30228 --- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30229 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30230 @@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30231 }
30232
30233 /* Fill in the new structures */
30234 - s4985_algo[0] = *(nforce2_smbus->algo);
30235 - s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30236 + memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30237 + *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30238 s4985_adapter[0] = *nforce2_smbus;
30239 s4985_adapter[0].algo = s4985_algo;
30240 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30241 for (i = 1; i < 5; i++) {
30242 - s4985_algo[i] = *(nforce2_smbus->algo);
30243 + memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30244 s4985_adapter[i] = *nforce2_smbus;
30245 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30246 "SMBus nForce2 adapter (CPU%d)", i - 1);
30247 s4985_adapter[i].algo = s4985_algo + i;
30248 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30249 }
30250 - s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30251 - s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30252 - s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30253 - s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30254 + *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30255 + *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30256 + *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30257 + *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30258
30259 /* Register virtual adapters */
30260 for (i = 0; i < 5; i++) {
30261 diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30262 --- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30263 +++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30264 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30265 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30266 if ((unsigned long)buf & alignment
30267 || blk_rq_bytes(rq) & q->dma_pad_mask
30268 - || object_is_on_stack(buf))
30269 + || object_starts_on_stack(buf))
30270 drive->dma = 0;
30271 }
30272 }
30273 diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30274 --- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30275 +++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30276 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30277 u8 pc_buf[256], header_len, desc_cnt;
30278 int i, rc = 1, blocks, length;
30279
30280 + pax_track_stack();
30281 +
30282 ide_debug_log(IDE_DBG_FUNC, "enter");
30283
30284 drive->bios_cyl = 0;
30285 diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30286 --- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30287 +++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30288 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30289 int ret, i, n_ports = dev2 ? 4 : 2;
30290 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30291
30292 + pax_track_stack();
30293 +
30294 for (i = 0; i < n_ports / 2; i++) {
30295 ret = ide_setup_pci_controller(pdev[i], d, !i);
30296 if (ret < 0)
30297 diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30298 --- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30299 +++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30300 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30301 based upon DIF section and sequence
30302 */
30303
30304 -static void inline
30305 +static inline void
30306 frame_put_packet (struct frame *f, struct packet *p)
30307 {
30308 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30309 diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30310 --- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30311 +++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30312 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30313 }
30314
30315 static struct hpsb_host_driver dummy_driver = {
30316 + .name = "dummy",
30317 .transmit_packet = dummy_transmit_packet,
30318 .devctl = dummy_devctl,
30319 .isoctl = dummy_isoctl
30320 diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30321 --- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30322 +++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30323 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30324 for (func = 0; func < 8; func++) {
30325 u32 class = read_pci_config(num,slot,func,
30326 PCI_CLASS_REVISION);
30327 - if ((class == 0xffffffff))
30328 + if (class == 0xffffffff)
30329 continue; /* No device at this func */
30330
30331 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30332 diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30333 --- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30334 +++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30335 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30336 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30337
30338 /* Module Parameters */
30339 -static int phys_dma = 1;
30340 +static int phys_dma;
30341 module_param(phys_dma, int, 0444);
30342 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30343 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30344
30345 static void dma_trm_tasklet(unsigned long data);
30346 static void dma_trm_reset(struct dma_trm_ctx *d);
30347 diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30348 --- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30349 +++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30350 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30351 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30352 MODULE_LICENSE("GPL");
30353
30354 -static int sbp2_module_init(void)
30355 +static int __init sbp2_module_init(void)
30356 {
30357 int ret;
30358
30359 diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30360 --- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30361 +++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30362 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
30363
30364 struct cm_counter_group {
30365 struct kobject obj;
30366 - atomic_long_t counter[CM_ATTR_COUNT];
30367 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30368 };
30369
30370 struct cm_counter_attribute {
30371 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30372 struct ib_mad_send_buf *msg = NULL;
30373 int ret;
30374
30375 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30376 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30377 counter[CM_REQ_COUNTER]);
30378
30379 /* Quick state check to discard duplicate REQs. */
30380 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30381 if (!cm_id_priv)
30382 return;
30383
30384 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30385 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30386 counter[CM_REP_COUNTER]);
30387 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30388 if (ret)
30389 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30390 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30391 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30392 spin_unlock_irq(&cm_id_priv->lock);
30393 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30394 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30395 counter[CM_RTU_COUNTER]);
30396 goto out;
30397 }
30398 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30399 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30400 dreq_msg->local_comm_id);
30401 if (!cm_id_priv) {
30402 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30403 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30404 counter[CM_DREQ_COUNTER]);
30405 cm_issue_drep(work->port, work->mad_recv_wc);
30406 return -EINVAL;
30407 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30408 case IB_CM_MRA_REP_RCVD:
30409 break;
30410 case IB_CM_TIMEWAIT:
30411 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30412 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30413 counter[CM_DREQ_COUNTER]);
30414 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30415 goto unlock;
30416 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30417 cm_free_msg(msg);
30418 goto deref;
30419 case IB_CM_DREQ_RCVD:
30420 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30421 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30422 counter[CM_DREQ_COUNTER]);
30423 goto unlock;
30424 default:
30425 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30426 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30427 cm_id_priv->msg, timeout)) {
30428 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30429 - atomic_long_inc(&work->port->
30430 + atomic_long_inc_unchecked(&work->port->
30431 counter_group[CM_RECV_DUPLICATES].
30432 counter[CM_MRA_COUNTER]);
30433 goto out;
30434 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30435 break;
30436 case IB_CM_MRA_REQ_RCVD:
30437 case IB_CM_MRA_REP_RCVD:
30438 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30439 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30440 counter[CM_MRA_COUNTER]);
30441 /* fall through */
30442 default:
30443 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30444 case IB_CM_LAP_IDLE:
30445 break;
30446 case IB_CM_MRA_LAP_SENT:
30447 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30448 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30449 counter[CM_LAP_COUNTER]);
30450 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30451 goto unlock;
30452 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30453 cm_free_msg(msg);
30454 goto deref;
30455 case IB_CM_LAP_RCVD:
30456 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30457 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30458 counter[CM_LAP_COUNTER]);
30459 goto unlock;
30460 default:
30461 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30462 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30463 if (cur_cm_id_priv) {
30464 spin_unlock_irq(&cm.lock);
30465 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30466 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30467 counter[CM_SIDR_REQ_COUNTER]);
30468 goto out; /* Duplicate message. */
30469 }
30470 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30471 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30472 msg->retries = 1;
30473
30474 - atomic_long_add(1 + msg->retries,
30475 + atomic_long_add_unchecked(1 + msg->retries,
30476 &port->counter_group[CM_XMIT].counter[attr_index]);
30477 if (msg->retries)
30478 - atomic_long_add(msg->retries,
30479 + atomic_long_add_unchecked(msg->retries,
30480 &port->counter_group[CM_XMIT_RETRIES].
30481 counter[attr_index]);
30482
30483 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30484 }
30485
30486 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30487 - atomic_long_inc(&port->counter_group[CM_RECV].
30488 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30489 counter[attr_id - CM_ATTR_ID_OFFSET]);
30490
30491 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30492 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30493 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30494
30495 return sprintf(buf, "%ld\n",
30496 - atomic_long_read(&group->counter[cm_attr->index]));
30497 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30498 }
30499
30500 -static struct sysfs_ops cm_counter_ops = {
30501 +static const struct sysfs_ops cm_counter_ops = {
30502 .show = cm_show_counter
30503 };
30504
30505 diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30506 --- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30507 +++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30508 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30509
30510 struct task_struct *thread;
30511
30512 - atomic_t req_ser;
30513 - atomic_t flush_ser;
30514 + atomic_unchecked_t req_ser;
30515 + atomic_unchecked_t flush_ser;
30516
30517 wait_queue_head_t force_wait;
30518 };
30519 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30520 struct ib_fmr_pool *pool = pool_ptr;
30521
30522 do {
30523 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30524 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30525 ib_fmr_batch_release(pool);
30526
30527 - atomic_inc(&pool->flush_ser);
30528 + atomic_inc_unchecked(&pool->flush_ser);
30529 wake_up_interruptible(&pool->force_wait);
30530
30531 if (pool->flush_function)
30532 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30533 }
30534
30535 set_current_state(TASK_INTERRUPTIBLE);
30536 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30537 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30538 !kthread_should_stop())
30539 schedule();
30540 __set_current_state(TASK_RUNNING);
30541 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30542 pool->dirty_watermark = params->dirty_watermark;
30543 pool->dirty_len = 0;
30544 spin_lock_init(&pool->pool_lock);
30545 - atomic_set(&pool->req_ser, 0);
30546 - atomic_set(&pool->flush_ser, 0);
30547 + atomic_set_unchecked(&pool->req_ser, 0);
30548 + atomic_set_unchecked(&pool->flush_ser, 0);
30549 init_waitqueue_head(&pool->force_wait);
30550
30551 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30552 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30553 }
30554 spin_unlock_irq(&pool->pool_lock);
30555
30556 - serial = atomic_inc_return(&pool->req_ser);
30557 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30558 wake_up_process(pool->thread);
30559
30560 if (wait_event_interruptible(pool->force_wait,
30561 - atomic_read(&pool->flush_ser) - serial >= 0))
30562 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30563 return -EINTR;
30564
30565 return 0;
30566 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30567 } else {
30568 list_add_tail(&fmr->list, &pool->dirty_list);
30569 if (++pool->dirty_len >= pool->dirty_watermark) {
30570 - atomic_inc(&pool->req_ser);
30571 + atomic_inc_unchecked(&pool->req_ser);
30572 wake_up_process(pool->thread);
30573 }
30574 }
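In ib_fmr_pool the request and flush serial numbers are only ever compared by signed difference (flush_ser - req_ser < 0), i.e. they are sequence numbers that are expected to wrap rather than reference counts, so the patch moves them to atomic_unchecked_t to keep that intentional wraparound out of the overflow checks. A minimal sketch of the comparison idiom, assuming the two's-complement conversion behaviour that the kernel's supported compilers provide:

#include <stdint.h>
#include <stdio.h>

/* seq_before(a, b): true if serial a was issued before serial b, even
 * across wraparound.  The unsigned subtraction is reduced mod 2^32 and
 * the cast to int32_t is assumed to behave as two's complement. */
static int seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    uint32_t flush_ser = 0xfffffffeu;   /* just before the wrap */
    uint32_t req_ser   = 0x00000001u;   /* just after the wrap  */

    printf("flush before req: %d\n", seq_before(flush_ser, req_ser));  /* 1 */
    printf("req before flush: %d\n", seq_before(req_ser, flush_ser));  /* 0 */
    return 0;
}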
30575 diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30576 --- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30577 +++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30578 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30579 return port_attr->show(p, port_attr, buf);
30580 }
30581
30582 -static struct sysfs_ops port_sysfs_ops = {
30583 +static const struct sysfs_ops port_sysfs_ops = {
30584 .show = port_attr_show
30585 };
30586
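Several hunks in this patch (cm_counter_ops above, port_sysfs_ops here, and the backlight and suspend ops further down) add const to structures that hold nothing but function pointers. Once const, the object is emitted into a read-only section, so a stray or attacker-controlled kernel write cannot redirect the .show/.store callbacks. A small user-space sketch of the pattern, with invented names:

#include <stdio.h>

struct demo_ops {
    void (*show)(const char *what);
};

static void demo_show(const char *what)
{
    printf("show: %s\n", what);
}

/* const => the pointer table lands in .rodata and cannot be rewritten at
 * run time; without const it would sit writable in .data. */
static const struct demo_ops demo_sysfs_like_ops = {
    .show = demo_show,
};

int main(void)
{
    demo_sysfs_like_ops.show("read-only ops table");
    /* demo_sysfs_like_ops.show = NULL;   would not compile: object is const */
    return 0;
}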
30587 diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30588 --- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30589 +++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30590 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30591 dst->grh.sgid_index = src->grh.sgid_index;
30592 dst->grh.hop_limit = src->grh.hop_limit;
30593 dst->grh.traffic_class = src->grh.traffic_class;
30594 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30595 dst->dlid = src->dlid;
30596 dst->sl = src->sl;
30597 dst->src_path_bits = src->src_path_bits;
30598 dst->static_rate = src->static_rate;
30599 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30600 dst->port_num = src->port_num;
30601 + dst->reserved = 0;
30602 }
30603 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30604
30605 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30606 struct ib_qp_attr *src)
30607 {
30608 + dst->qp_state = src->qp_state;
30609 dst->cur_qp_state = src->cur_qp_state;
30610 dst->path_mtu = src->path_mtu;
30611 dst->path_mig_state = src->path_mig_state;
30612 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30613 dst->rnr_retry = src->rnr_retry;
30614 dst->alt_port_num = src->alt_port_num;
30615 dst->alt_timeout = src->alt_timeout;
30616 + memset(dst->reserved, 0, sizeof(dst->reserved));
30617 }
30618 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30619
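ib_copy_ah_attr_to_user() and ib_copy_qp_attr_to_user() fill a userspace-visible structure field by field, so any reserved bytes that are never assigned would carry stale kernel memory out to the caller; the added memset() and "= 0" lines close that infoleak (and qp_state was simply not copied before). A hedged user-space illustration of the same habit, with a hypothetical ABI struct:

#include <stdio.h>
#include <string.h>

/* Hypothetical userspace-visible struct with explicit reserved space. */
struct user_visible {
    unsigned int value;
    unsigned char reserved[4];    /* never meaningfully written */
};

static void fill_reply(struct user_visible *dst, unsigned int v)
{
    memset(dst, 0, sizeof(*dst)); /* clear reserved bytes and padding first */
    dst->value = v;               /* then set the real fields */
}

int main(void)
{
    struct user_visible reply;

    fill_reply(&reply, 42);
    printf("value=%u reserved=%02x%02x%02x%02x\n", reply.value,
           reply.reserved[0], reply.reserved[1],
           reply.reserved[2], reply.reserved[3]);
    return 0;
}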
30620 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30621 --- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30622 +++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30623 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30624 struct infinipath_counters counters;
30625 struct ipath_devdata *dd;
30626
30627 + pax_track_stack();
30628 +
30629 dd = file->f_path.dentry->d_inode->i_private;
30630 dd->ipath_f_read_counters(dd, &counters);
30631
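atomic_counters_read() places a large struct infinipath_counters on the stack, so the patch drops in pax_track_stack(), the hook used by PaX's stack-depth tracking (the STACKLEAK-style feature) in functions with big frames, so that deeply used stack regions can be accounted for and cleared rather than left holding stale data; the same call appears in many other large-frame functions later in this patch. That instrumentation has no user-space equivalent, so the sketch below only shows the underlying hygiene idea of wiping a large, possibly sensitive stack buffer before returning, using a volatile function pointer so the compiler cannot elide the final store.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Call memset through a volatile pointer so the "dead" store that erases
 * the buffer is not optimised away. */
static void *(*volatile secure_memset)(void *, int, size_t) = memset;

static void read_counters(char *out, size_t outlen)
{
    char scratch[4096];                            /* large stack frame */

    snprintf(scratch, sizeof(scratch), "counter dump: rx=%d tx=%d", 1, 2);
    snprintf(out, outlen, "%s", scratch);

    secure_memset(scratch, 0, sizeof(scratch));    /* wipe before returning */
}

int main(void)
{
    char buf[64];

    read_counters(buf, sizeof(buf));
    printf("%s\n", buf);
    return 0;
}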
30632 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30633 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30634 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30635 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30636 LIST_HEAD(nes_adapter_list);
30637 static LIST_HEAD(nes_dev_list);
30638
30639 -atomic_t qps_destroyed;
30640 +atomic_unchecked_t qps_destroyed;
30641
30642 static unsigned int ee_flsh_adapter;
30643 static unsigned int sysfs_nonidx_addr;
30644 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30645 struct nes_adapter *nesadapter = nesdev->nesadapter;
30646 u32 qp_id;
30647
30648 - atomic_inc(&qps_destroyed);
30649 + atomic_inc_unchecked(&qps_destroyed);
30650
30651 /* Free the control structures */
30652
30653 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30654 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30655 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30656 @@ -69,11 +69,11 @@ u32 cm_packets_received;
30657 u32 cm_listens_created;
30658 u32 cm_listens_destroyed;
30659 u32 cm_backlog_drops;
30660 -atomic_t cm_loopbacks;
30661 -atomic_t cm_nodes_created;
30662 -atomic_t cm_nodes_destroyed;
30663 -atomic_t cm_accel_dropped_pkts;
30664 -atomic_t cm_resets_recvd;
30665 +atomic_unchecked_t cm_loopbacks;
30666 +atomic_unchecked_t cm_nodes_created;
30667 +atomic_unchecked_t cm_nodes_destroyed;
30668 +atomic_unchecked_t cm_accel_dropped_pkts;
30669 +atomic_unchecked_t cm_resets_recvd;
30670
30671 static inline int mini_cm_accelerated(struct nes_cm_core *,
30672 struct nes_cm_node *);
30673 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30674
30675 static struct nes_cm_core *g_cm_core;
30676
30677 -atomic_t cm_connects;
30678 -atomic_t cm_accepts;
30679 -atomic_t cm_disconnects;
30680 -atomic_t cm_closes;
30681 -atomic_t cm_connecteds;
30682 -atomic_t cm_connect_reqs;
30683 -atomic_t cm_rejects;
30684 +atomic_unchecked_t cm_connects;
30685 +atomic_unchecked_t cm_accepts;
30686 +atomic_unchecked_t cm_disconnects;
30687 +atomic_unchecked_t cm_closes;
30688 +atomic_unchecked_t cm_connecteds;
30689 +atomic_unchecked_t cm_connect_reqs;
30690 +atomic_unchecked_t cm_rejects;
30691
30692
30693 /**
30694 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30695 cm_node->rem_mac);
30696
30697 add_hte_node(cm_core, cm_node);
30698 - atomic_inc(&cm_nodes_created);
30699 + atomic_inc_unchecked(&cm_nodes_created);
30700
30701 return cm_node;
30702 }
30703 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30704 }
30705
30706 atomic_dec(&cm_core->node_cnt);
30707 - atomic_inc(&cm_nodes_destroyed);
30708 + atomic_inc_unchecked(&cm_nodes_destroyed);
30709 nesqp = cm_node->nesqp;
30710 if (nesqp) {
30711 nesqp->cm_node = NULL;
30712 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30713
30714 static void drop_packet(struct sk_buff *skb)
30715 {
30716 - atomic_inc(&cm_accel_dropped_pkts);
30717 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30718 dev_kfree_skb_any(skb);
30719 }
30720
30721 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30722
30723 int reset = 0; /* whether to send reset in case of err.. */
30724 int passive_state;
30725 - atomic_inc(&cm_resets_recvd);
30726 + atomic_inc_unchecked(&cm_resets_recvd);
30727 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30728 " refcnt=%d\n", cm_node, cm_node->state,
30729 atomic_read(&cm_node->ref_count));
30730 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30731 rem_ref_cm_node(cm_node->cm_core, cm_node);
30732 return NULL;
30733 }
30734 - atomic_inc(&cm_loopbacks);
30735 + atomic_inc_unchecked(&cm_loopbacks);
30736 loopbackremotenode->loopbackpartner = cm_node;
30737 loopbackremotenode->tcp_cntxt.rcv_wscale =
30738 NES_CM_DEFAULT_RCV_WND_SCALE;
30739 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30740 add_ref_cm_node(cm_node);
30741 } else if (cm_node->state == NES_CM_STATE_TSA) {
30742 rem_ref_cm_node(cm_core, cm_node);
30743 - atomic_inc(&cm_accel_dropped_pkts);
30744 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30745 dev_kfree_skb_any(skb);
30746 break;
30747 }
30748 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30749
30750 if ((cm_id) && (cm_id->event_handler)) {
30751 if (issue_disconn) {
30752 - atomic_inc(&cm_disconnects);
30753 + atomic_inc_unchecked(&cm_disconnects);
30754 cm_event.event = IW_CM_EVENT_DISCONNECT;
30755 cm_event.status = disconn_status;
30756 cm_event.local_addr = cm_id->local_addr;
30757 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30758 }
30759
30760 if (issue_close) {
30761 - atomic_inc(&cm_closes);
30762 + atomic_inc_unchecked(&cm_closes);
30763 nes_disconnect(nesqp, 1);
30764
30765 cm_id->provider_data = nesqp;
30766 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30767
30768 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30769 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30770 - atomic_inc(&cm_accepts);
30771 + atomic_inc_unchecked(&cm_accepts);
30772
30773 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30774 atomic_read(&nesvnic->netdev->refcnt));
30775 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30776
30777 struct nes_cm_core *cm_core;
30778
30779 - atomic_inc(&cm_rejects);
30780 + atomic_inc_unchecked(&cm_rejects);
30781 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30782 loopback = cm_node->loopbackpartner;
30783 cm_core = cm_node->cm_core;
30784 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30785 ntohl(cm_id->local_addr.sin_addr.s_addr),
30786 ntohs(cm_id->local_addr.sin_port));
30787
30788 - atomic_inc(&cm_connects);
30789 + atomic_inc_unchecked(&cm_connects);
30790 nesqp->active_conn = 1;
30791
30792 /* cache the cm_id in the qp */
30793 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30794 if (nesqp->destroyed) {
30795 return;
30796 }
30797 - atomic_inc(&cm_connecteds);
30798 + atomic_inc_unchecked(&cm_connecteds);
30799 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30800 " local port 0x%04X. jiffies = %lu.\n",
30801 nesqp->hwqp.qp_id,
30802 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30803
30804 ret = cm_id->event_handler(cm_id, &cm_event);
30805 cm_id->add_ref(cm_id);
30806 - atomic_inc(&cm_closes);
30807 + atomic_inc_unchecked(&cm_closes);
30808 cm_event.event = IW_CM_EVENT_CLOSE;
30809 cm_event.status = IW_CM_EVENT_STATUS_OK;
30810 cm_event.provider_data = cm_id->provider_data;
30811 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30812 return;
30813 cm_id = cm_node->cm_id;
30814
30815 - atomic_inc(&cm_connect_reqs);
30816 + atomic_inc_unchecked(&cm_connect_reqs);
30817 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30818 cm_node, cm_id, jiffies);
30819
30820 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30821 return;
30822 cm_id = cm_node->cm_id;
30823
30824 - atomic_inc(&cm_connect_reqs);
30825 + atomic_inc_unchecked(&cm_connect_reqs);
30826 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30827 cm_node, cm_id, jiffies);
30828
30829 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
30830 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30831 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30832 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30833 extern unsigned int wqm_quanta;
30834 extern struct list_head nes_adapter_list;
30835
30836 -extern atomic_t cm_connects;
30837 -extern atomic_t cm_accepts;
30838 -extern atomic_t cm_disconnects;
30839 -extern atomic_t cm_closes;
30840 -extern atomic_t cm_connecteds;
30841 -extern atomic_t cm_connect_reqs;
30842 -extern atomic_t cm_rejects;
30843 -extern atomic_t mod_qp_timouts;
30844 -extern atomic_t qps_created;
30845 -extern atomic_t qps_destroyed;
30846 -extern atomic_t sw_qps_destroyed;
30847 +extern atomic_unchecked_t cm_connects;
30848 +extern atomic_unchecked_t cm_accepts;
30849 +extern atomic_unchecked_t cm_disconnects;
30850 +extern atomic_unchecked_t cm_closes;
30851 +extern atomic_unchecked_t cm_connecteds;
30852 +extern atomic_unchecked_t cm_connect_reqs;
30853 +extern atomic_unchecked_t cm_rejects;
30854 +extern atomic_unchecked_t mod_qp_timouts;
30855 +extern atomic_unchecked_t qps_created;
30856 +extern atomic_unchecked_t qps_destroyed;
30857 +extern atomic_unchecked_t sw_qps_destroyed;
30858 extern u32 mh_detected;
30859 extern u32 mh_pauses_sent;
30860 extern u32 cm_packets_sent;
30861 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30862 extern u32 cm_listens_created;
30863 extern u32 cm_listens_destroyed;
30864 extern u32 cm_backlog_drops;
30865 -extern atomic_t cm_loopbacks;
30866 -extern atomic_t cm_nodes_created;
30867 -extern atomic_t cm_nodes_destroyed;
30868 -extern atomic_t cm_accel_dropped_pkts;
30869 -extern atomic_t cm_resets_recvd;
30870 +extern atomic_unchecked_t cm_loopbacks;
30871 +extern atomic_unchecked_t cm_nodes_created;
30872 +extern atomic_unchecked_t cm_nodes_destroyed;
30873 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30874 +extern atomic_unchecked_t cm_resets_recvd;
30875
30876 extern u32 int_mod_timer_init;
30877 extern u32 int_mod_cq_depth_256;
30878 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
30879 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30880 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30881 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30882 target_stat_values[++index] = mh_detected;
30883 target_stat_values[++index] = mh_pauses_sent;
30884 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30885 - target_stat_values[++index] = atomic_read(&cm_connects);
30886 - target_stat_values[++index] = atomic_read(&cm_accepts);
30887 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30888 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30889 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30890 - target_stat_values[++index] = atomic_read(&cm_rejects);
30891 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30892 - target_stat_values[++index] = atomic_read(&qps_created);
30893 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30894 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30895 - target_stat_values[++index] = atomic_read(&cm_closes);
30896 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30897 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30898 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30899 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30900 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30901 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30902 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30903 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30904 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30905 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30906 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30907 target_stat_values[++index] = cm_packets_sent;
30908 target_stat_values[++index] = cm_packets_bounced;
30909 target_stat_values[++index] = cm_packets_created;
30910 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30911 target_stat_values[++index] = cm_listens_created;
30912 target_stat_values[++index] = cm_listens_destroyed;
30913 target_stat_values[++index] = cm_backlog_drops;
30914 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30915 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30916 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30917 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30918 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30919 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30920 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30921 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30922 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30923 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30924 target_stat_values[++index] = int_mod_timer_init;
30925 target_stat_values[++index] = int_mod_cq_depth_1;
30926 target_stat_values[++index] = int_mod_cq_depth_4;
30927 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
30928 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30929 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30930 @@ -45,9 +45,9 @@
30931
30932 #include <rdma/ib_umem.h>
30933
30934 -atomic_t mod_qp_timouts;
30935 -atomic_t qps_created;
30936 -atomic_t sw_qps_destroyed;
30937 +atomic_unchecked_t mod_qp_timouts;
30938 +atomic_unchecked_t qps_created;
30939 +atomic_unchecked_t sw_qps_destroyed;
30940
30941 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30942
30943 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30944 if (init_attr->create_flags)
30945 return ERR_PTR(-EINVAL);
30946
30947 - atomic_inc(&qps_created);
30948 + atomic_inc_unchecked(&qps_created);
30949 switch (init_attr->qp_type) {
30950 case IB_QPT_RC:
30951 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30952 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30953 struct iw_cm_event cm_event;
30954 int ret;
30955
30956 - atomic_inc(&sw_qps_destroyed);
30957 + atomic_inc_unchecked(&sw_qps_destroyed);
30958 nesqp->destroyed = 1;
30959
30960 /* Blow away the connection if it exists. */
30961 diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
30962 --- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30963 +++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30964 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30965 */
30966 static void gameport_init_port(struct gameport *gameport)
30967 {
30968 - static atomic_t gameport_no = ATOMIC_INIT(0);
30969 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30970
30971 __module_get(THIS_MODULE);
30972
30973 mutex_init(&gameport->drv_mutex);
30974 device_initialize(&gameport->dev);
30975 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30976 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30977 gameport->dev.bus = &gameport_bus;
30978 gameport->dev.release = gameport_release_port;
30979 if (gameport->parent)
30980 diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
30981 --- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30982 +++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30983 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30984 */
30985 int input_register_device(struct input_dev *dev)
30986 {
30987 - static atomic_t input_no = ATOMIC_INIT(0);
30988 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30989 struct input_handler *handler;
30990 const char *path;
30991 int error;
30992 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30993 dev->setkeycode = input_default_setkeycode;
30994
30995 dev_set_name(&dev->dev, "input%ld",
30996 - (unsigned long) atomic_inc_return(&input_no) - 1);
30997 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30998
30999 error = device_add(&dev->dev);
31000 if (error)
31001 diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31002 --- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31003 +++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31004 @@ -30,6 +30,7 @@
31005 #include <linux/kernel.h>
31006 #include <linux/module.h>
31007 #include <linux/slab.h>
31008 +#include <linux/sched.h>
31009 #include <linux/init.h>
31010 #include <linux/input.h>
31011 #include <linux/gameport.h>
31012 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31013 unsigned char buf[SW_LENGTH];
31014 int i;
31015
31016 + pax_track_stack();
31017 +
31018 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31019
31020 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31021 diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31022 --- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31023 +++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31024 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31025
31026 static int xpad_led_probe(struct usb_xpad *xpad)
31027 {
31028 - static atomic_t led_seq = ATOMIC_INIT(0);
31029 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31030 long led_no;
31031 struct xpad_led *led;
31032 struct led_classdev *led_cdev;
31033 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31034 if (!led)
31035 return -ENOMEM;
31036
31037 - led_no = (long)atomic_inc_return(&led_seq) - 1;
31038 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31039
31040 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31041 led->xpad = xpad;
31042 diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31043 --- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31044 +++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31045 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
31046 */
31047 static void serio_init_port(struct serio *serio)
31048 {
31049 - static atomic_t serio_no = ATOMIC_INIT(0);
31050 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31051
31052 __module_get(THIS_MODULE);
31053
31054 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31055 mutex_init(&serio->drv_mutex);
31056 device_initialize(&serio->dev);
31057 dev_set_name(&serio->dev, "serio%ld",
31058 - (long)atomic_inc_return(&serio_no) - 1);
31059 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
31060 serio->dev.bus = &serio_bus;
31061 serio->dev.release = serio_release_port;
31062 if (serio->parent) {
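gameport, input, xpad and serio all name newly registered devices from a static counter via atomic_inc_return(...) - 1; since the value is only used to build a name, the patch switches those counters to the _unchecked variants as well. The underlying pattern, fetch-and-increment to hand out unique monotonically increasing IDs, looks like this in portable C11 (names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong next_port_no;    /* static counter, starts at 0 */

/* Equivalent in spirit to "atomic_inc_return(&counter) - 1" above:
 * returns the pre-increment value, so IDs start at 0. */
static unsigned long alloc_port_no(void)
{
    return atomic_fetch_add(&next_port_no, 1UL);
}

int main(void)
{
    char name[32];

    for (int i = 0; i < 3; i++) {
        snprintf(name, sizeof(name), "gameport%lu", alloc_port_no());
        printf("%s\n", name);
    }
    return 0;
}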
31063 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31064 --- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31065 +++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31066 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31067 cs->commands_pending = 0;
31068 cs->cur_at_seq = 0;
31069 cs->gotfwver = -1;
31070 - cs->open_count = 0;
31071 + local_set(&cs->open_count, 0);
31072 cs->dev = NULL;
31073 cs->tty = NULL;
31074 cs->tty_dev = NULL;
31075 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31076 --- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31077 +++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31078 @@ -34,6 +34,7 @@
31079 #include <linux/tty_driver.h>
31080 #include <linux/list.h>
31081 #include <asm/atomic.h>
31082 +#include <asm/local.h>
31083
31084 #define GIG_VERSION {0,5,0,0}
31085 #define GIG_COMPAT {0,4,0,0}
31086 @@ -446,7 +447,7 @@ struct cardstate {
31087 spinlock_t cmdlock;
31088 unsigned curlen, cmdbytes;
31089
31090 - unsigned open_count;
31091 + local_t open_count;
31092 struct tty_struct *tty;
31093 struct tasklet_struct if_wake_tasklet;
31094 unsigned control_state;
31095 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31096 --- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31097 +++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31098 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31099 return -ERESTARTSYS; // FIXME -EINTR?
31100 tty->driver_data = cs;
31101
31102 - ++cs->open_count;
31103 -
31104 - if (cs->open_count == 1) {
31105 + if (local_inc_return(&cs->open_count) == 1) {
31106 spin_lock_irqsave(&cs->lock, flags);
31107 cs->tty = tty;
31108 spin_unlock_irqrestore(&cs->lock, flags);
31109 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31110
31111 if (!cs->connected)
31112 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31113 - else if (!cs->open_count)
31114 + else if (!local_read(&cs->open_count))
31115 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31116 else {
31117 - if (!--cs->open_count) {
31118 + if (!local_dec_return(&cs->open_count)) {
31119 spin_lock_irqsave(&cs->lock, flags);
31120 cs->tty = NULL;
31121 spin_unlock_irqrestore(&cs->lock, flags);
31122 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31123 if (!cs->connected) {
31124 gig_dbg(DEBUG_IF, "not connected");
31125 retval = -ENODEV;
31126 - } else if (!cs->open_count)
31127 + } else if (!local_read(&cs->open_count))
31128 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31129 else {
31130 retval = 0;
31131 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31132 if (!cs->connected) {
31133 gig_dbg(DEBUG_IF, "not connected");
31134 retval = -ENODEV;
31135 - } else if (!cs->open_count)
31136 + } else if (!local_read(&cs->open_count))
31137 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31138 else if (cs->mstate != MS_LOCKED) {
31139 dev_warn(cs->dev, "can't write to unlocked device\n");
31140 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31141 if (!cs->connected) {
31142 gig_dbg(DEBUG_IF, "not connected");
31143 retval = -ENODEV;
31144 - } else if (!cs->open_count)
31145 + } else if (!local_read(&cs->open_count))
31146 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31147 else if (cs->mstate != MS_LOCKED) {
31148 dev_warn(cs->dev, "can't write to unlocked device\n");
31149 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31150
31151 if (!cs->connected)
31152 gig_dbg(DEBUG_IF, "not connected");
31153 - else if (!cs->open_count)
31154 + else if (!local_read(&cs->open_count))
31155 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31156 else if (cs->mstate != MS_LOCKED)
31157 dev_warn(cs->dev, "can't write to unlocked device\n");
31158 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31159
31160 if (!cs->connected)
31161 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31162 - else if (!cs->open_count)
31163 + else if (!local_read(&cs->open_count))
31164 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31165 else {
31166 //FIXME
31167 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31168
31169 if (!cs->connected)
31170 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31171 - else if (!cs->open_count)
31172 + else if (!local_read(&cs->open_count))
31173 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31174 else {
31175 //FIXME
31176 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31177 goto out;
31178 }
31179
31180 - if (!cs->open_count) {
31181 + if (!local_read(&cs->open_count)) {
31182 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31183 goto out;
31184 }
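The gigaset driver kept open_count as a plain unsigned and used separate increment-then-test and decrement-then-test sequences ("++cs->open_count; if (cs->open_count == 1)" on open, "if (!--cs->open_count)" on close). The patch turns the field into a local_t and switches to local_inc_return()/local_dec_return(), so each first-open and last-close decision is based on the value returned by the update itself. local_t is a kernel-internal type; the sketch below expresses the same counting pattern with C11 atomics and invented names:

#include <stdatomic.h>
#include <stdio.h>

struct cardstate_like {
    atomic_int open_count;
};

static void if_open(struct cardstate_like *cs)
{
    if (atomic_fetch_add(&cs->open_count, 1) + 1 == 1)
        printf("first opener: attach tty, bring the device up\n");
}

static void if_close(struct cardstate_like *cs)
{
    if (atomic_fetch_sub(&cs->open_count, 1) - 1 == 0)
        printf("last closer: detach tty, tear the device down\n");
}

int main(void)
{
    struct cardstate_like cs = { .open_count = 0 };

    if_open(&cs);
    if_open(&cs);
    if_close(&cs);
    if_close(&cs);
    return 0;
}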
31185 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31186 --- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31187 +++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31188 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31189 }
31190 if (left) {
31191 if (t4file->user) {
31192 - if (copy_from_user(buf, dp, left))
31193 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31194 return -EFAULT;
31195 } else {
31196 memcpy(buf, dp, left);
31197 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31198 }
31199 if (left) {
31200 if (config->user) {
31201 - if (copy_from_user(buf, dp, left))
31202 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31203 return -EFAULT;
31204 } else {
31205 memcpy(buf, dp, left);
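b1_load_t4file() and b1_load_config() copy a caller-supplied length into a fixed on-stack buffer; the added "left > sizeof buf ||" guard makes an oversized request fail with -EFAULT instead of overrunning the stack (the icn driver gets the same treatment later in this patch). A minimal user-space sketch of the guard, with invented names and a plain memcpy standing in for copy_from_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 256

/* Copy 'len' bytes from an untrusted source into a fixed buffer,
 * rejecting anything that would not fit. */
static int load_block(const unsigned char *src, size_t len)
{
    unsigned char buf[BUF_SZ];

    if (len > sizeof(buf))
        return -EFAULT;        /* oversized request: refuse, do not overflow */
    memcpy(buf, src, len);

    printf("accepted %zu bytes\n", len);
    return 0;
}

int main(void)
{
    unsigned char big[1024] = { 0 };

    printf("small: %d\n", load_block(big, 128));    /* accepted */
    printf("huge:  %d\n", load_block(big, 1024));   /* rejected with -EFAULT */
    return 0;
}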
31206 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31207 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31208 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31209 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31210 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31211 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31212
31213 + pax_track_stack();
31214
31215 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31216 {
31217 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31218 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31219 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31220 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31221 IDI_SYNC_REQ req;
31222 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31223
31224 + pax_track_stack();
31225 +
31226 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31227
31228 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31229 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31230 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31231 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31232 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31233 IDI_SYNC_REQ req;
31234 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31235
31236 + pax_track_stack();
31237 +
31238 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31239
31240 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31241 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31242 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31243 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31244 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31245 IDI_SYNC_REQ req;
31246 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31247
31248 + pax_track_stack();
31249 +
31250 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31251
31252 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31253 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31254 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31255 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31256 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31257 } diva_didd_add_adapter_t;
31258 typedef struct _diva_didd_remove_adapter {
31259 IDI_CALL p_request;
31260 -} diva_didd_remove_adapter_t;
31261 +} __no_const diva_didd_remove_adapter_t;
31262 typedef struct _diva_didd_read_adapter_array {
31263 void * buffer;
31264 dword length;
31265 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31266 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31267 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31268 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31269 IDI_SYNC_REQ req;
31270 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31271
31272 + pax_track_stack();
31273 +
31274 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31275
31276 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31277 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31278 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31279 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31280 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31281 dword d;
31282 word w;
31283
31284 + pax_track_stack();
31285 +
31286 a = plci->adapter;
31287 Id = ((word)plci->Id<<8)|a->Id;
31288 PUT_WORD(&SS_Ind[4],0x0000);
31289 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31290 word j, n, w;
31291 dword d;
31292
31293 + pax_track_stack();
31294 +
31295
31296 for(i=0;i<8;i++) bp_parms[i].length = 0;
31297 for(i=0;i<2;i++) global_config[i].length = 0;
31298 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31299 const byte llc3[] = {4,3,2,2,6,6,0};
31300 const byte header[] = {0,2,3,3,0,0,0};
31301
31302 + pax_track_stack();
31303 +
31304 for(i=0;i<8;i++) bp_parms[i].length = 0;
31305 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31306 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31307 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31308 word appl_number_group_type[MAX_APPL];
31309 PLCI *auxplci;
31310
31311 + pax_track_stack();
31312 +
31313 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31314
31315 if(!a->group_optimization_enabled)
31316 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31317 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31318 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31319 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31320 IDI_SYNC_REQ req;
31321 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31322
31323 + pax_track_stack();
31324 +
31325 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31326
31327 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31328 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31329 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31330 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31331 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31332 typedef struct _diva_os_idi_adapter_interface {
31333 diva_init_card_proc_t cleanup_adapter_proc;
31334 diva_cmd_card_proc_t cmd_proc;
31335 -} diva_os_idi_adapter_interface_t;
31336 +} __no_const diva_os_idi_adapter_interface_t;
31337
31338 typedef struct _diva_os_xdi_adapter {
31339 struct list_head link;
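divasync.h and xdi_adapter.h tag two structures with __no_const. Under grsecurity's constify plugin, structures containing only function pointers are made const automatically; these two are assigned at run time, so the annotation opts them out. The trade-off is easy to see in plain C with a hypothetical ops pair:

#include <stdio.h>

struct ops {
    void (*cmd)(void);
};

static void real_cmd(void)
{
    puts("cmd");
}

static const struct ops fixed_ops = { .cmd = real_cmd };  /* constified: read-only */
static struct ops runtime_ops;                            /* __no_const-style: filled in later */

int main(void)
{
    runtime_ops.cmd = real_cmd;     /* fine: the object is writable */
    /* fixed_ops.cmd = real_cmd; */ /* would not compile: assignment to const */

    fixed_ops.cmd();
    runtime_ops.cmd();
    return 0;
}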
31340 diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31341 --- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31342 +++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31343 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31344 } iocpar;
31345 void __user *argp = (void __user *)arg;
31346
31347 + pax_track_stack();
31348 +
31349 #define name iocpar.name
31350 #define bname iocpar.bname
31351 #define iocts iocpar.iocts
31352 diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31353 --- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31354 +++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31355 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31356 if (count > len)
31357 count = len;
31358 if (user) {
31359 - if (copy_from_user(msg, buf, count))
31360 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31361 return -EFAULT;
31362 } else
31363 memcpy(msg, buf, count);
31364 diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31365 --- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31366 +++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31367 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31368 if (dev) {
31369 struct mISDN_devinfo di;
31370
31371 + memset(&di, 0, sizeof(di));
31372 di.id = dev->id;
31373 di.Dprotocols = dev->Dprotocols;
31374 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31375 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31376 if (dev) {
31377 struct mISDN_devinfo di;
31378
31379 + memset(&di, 0, sizeof(di));
31380 di.id = dev->id;
31381 di.Dprotocols = dev->Dprotocols;
31382 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31383 diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31384 --- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31385 +++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31386 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31387 }
31388 else if(callid>=0x0000 && callid<=0x7FFF)
31389 {
31390 + int len;
31391 +
31392 pr_debug("%s: Got Incoming Call\n",
31393 sc_adapter[card]->devicename);
31394 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31395 - strcpy(setup.eazmsn,
31396 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31397 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31398 + sizeof(setup.phone));
31399 + if (len >= sizeof(setup.phone))
31400 + continue;
31401 + len = strlcpy(setup.eazmsn,
31402 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31403 + sizeof(setup.eazmsn));
31404 + if (len >= sizeof(setup.eazmsn))
31405 + continue;
31406 setup.si1 = 7;
31407 setup.si2 = 0;
31408 setup.plan = 0;
31409 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31410 * Handle a GetMyNumber Rsp
31411 */
31412 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31413 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31414 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31415 + rcvmsg.msg_data.byte_array,
31416 + sizeof(rcvmsg.msg_data.byte_array));
31417 continue;
31418 }
31419
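The sc ISDN interrupt handler used strcpy() to copy a dialled number of unknown length into the fixed-size setup.phone and setup.eazmsn fields; the patch switches to strlcpy() and skips the message when the return value reports truncation. strlcpy() is a kernel/BSD helper; the sketch below gets the same copy-then-detect-truncation behaviour from standard snprintf():

#include <stdbool.h>
#include <stdio.h>

/* Copy src into dst[dstlen]; return false if it did not fit in full.
 * snprintf() returns the length the output wanted to be, which is the
 * same truncation signal strlcpy() provides. */
static bool copy_number(char *dst, size_t dstlen, const char *src)
{
    int needed = snprintf(dst, dstlen, "%s", src);

    return needed >= 0 && (size_t)needed < dstlen;
}

int main(void)
{
    char phone[8];

    printf("fits:      %d -> '%s'\n",
           copy_number(phone, sizeof(phone), "5551212"), phone);
    printf("truncated: %d -> '%s'\n",
           copy_number(phone, sizeof(phone), "015551212345"), phone);
    return 0;
}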
31420 diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31421 --- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31422 +++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31423 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
31424 * it's worked so far. The end address needs +1 because __get_vm_area
31425 * allocates an extra guard page, so we need space for that.
31426 */
31427 +
31428 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31429 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31430 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31431 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31432 +#else
31433 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31434 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31435 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31436 +#endif
31437 +
31438 if (!switcher_vma) {
31439 err = -ENOMEM;
31440 printk("lguest: could not map switcher pages high\n");
31441 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
31442 * Now the Switcher is mapped at the right address, we can't fail!
31443 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31444 */
31445 - memcpy(switcher_vma->addr, start_switcher_text,
31446 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31447 end_switcher_text - start_switcher_text);
31448
31449 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31450 diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31451 --- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31452 +++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31453 @@ -59,7 +59,7 @@ static struct {
31454 /* Offset from where switcher.S was compiled to where we've copied it */
31455 static unsigned long switcher_offset(void)
31456 {
31457 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31458 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31459 }
31460
31461 /* This cpu's struct lguest_pages. */
31462 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31463 * These copies are pretty cheap, so we do them unconditionally: */
31464 /* Save the current Host top-level page directory.
31465 */
31466 +
31467 +#ifdef CONFIG_PAX_PER_CPU_PGD
31468 + pages->state.host_cr3 = read_cr3();
31469 +#else
31470 pages->state.host_cr3 = __pa(current->mm->pgd);
31471 +#endif
31472 +
31473 /*
31474 * Set up the Guest's page tables to see this CPU's pages (and no
31475 * other CPU's pages).
31476 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31477 * compiled-in switcher code and the high-mapped copy we just made.
31478 */
31479 for (i = 0; i < IDT_ENTRIES; i++)
31480 - default_idt_entries[i] += switcher_offset();
31481 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31482
31483 /*
31484 * Set up the Switcher's per-cpu areas.
31485 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31486 * it will be undisturbed when we switch. To change %cs and jump we
31487 * need this structure to feed to Intel's "lcall" instruction.
31488 */
31489 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31490 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31491 lguest_entry.segment = LGUEST_CS;
31492
31493 /*
31494 diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31495 --- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31496 +++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31497 @@ -87,6 +87,7 @@
31498 #include <asm/page.h>
31499 #include <asm/segment.h>
31500 #include <asm/lguest.h>
31501 +#include <asm/processor-flags.h>
31502
31503 // We mark the start of the code to copy
31504 // It's placed in .text tho it's never run here
31505 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31506 // Changes type when we load it: damn Intel!
31507 // For after we switch over our page tables
31508 // That entry will be read-only: we'd crash.
31509 +
31510 +#ifdef CONFIG_PAX_KERNEXEC
31511 + mov %cr0, %edx
31512 + xor $X86_CR0_WP, %edx
31513 + mov %edx, %cr0
31514 +#endif
31515 +
31516 movl $(GDT_ENTRY_TSS*8), %edx
31517 ltr %dx
31518
31519 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31520 // Let's clear it again for our return.
31521 // The GDT descriptor of the Host
31522 // Points to the table after two "size" bytes
31523 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31524 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31525 // Clear "used" from type field (byte 5, bit 2)
31526 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31527 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31528 +
31529 +#ifdef CONFIG_PAX_KERNEXEC
31530 + mov %cr0, %eax
31531 + xor $X86_CR0_WP, %eax
31532 + mov %eax, %cr0
31533 +#endif
31534
31535 // Once our page table's switched, the Guest is live!
31536 // The Host fades as we run this final step.
31537 @@ -295,13 +309,12 @@ deliver_to_host:
31538 // I consulted gcc, and it gave
31539 // These instructions, which I gladly credit:
31540 leal (%edx,%ebx,8), %eax
31541 - movzwl (%eax),%edx
31542 - movl 4(%eax), %eax
31543 - xorw %ax, %ax
31544 - orl %eax, %edx
31545 + movl 4(%eax), %edx
31546 + movw (%eax), %dx
31547 // Now the address of the handler's in %edx
31548 // We call it now: its "iret" drops us home.
31549 - jmp *%edx
31550 + ljmp $__KERNEL_CS, $1f
31551 +1: jmp *%edx
31552
31553 // Every interrupt can come to us here
31554 // But we must truly tell each apart.
31555 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31556 --- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31557 +++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31558 @@ -15,7 +15,7 @@
31559
31560 #define MAX_PMU_LEVEL 0xFF
31561
31562 -static struct backlight_ops pmu_backlight_data;
31563 +static const struct backlight_ops pmu_backlight_data;
31564 static DEFINE_SPINLOCK(pmu_backlight_lock);
31565 static int sleeping, uses_pmu_bl;
31566 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31567 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31568 return bd->props.brightness;
31569 }
31570
31571 -static struct backlight_ops pmu_backlight_data = {
31572 +static const struct backlight_ops pmu_backlight_data = {
31573 .get_brightness = pmu_backlight_get_brightness,
31574 .update_status = pmu_backlight_update_status,
31575
31576 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31577 --- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31578 +++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31579 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31580 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31581 }
31582
31583 -static struct platform_suspend_ops pmu_pm_ops = {
31584 +static const struct platform_suspend_ops pmu_pm_ops = {
31585 .enter = powerbook_sleep,
31586 .valid = pmu_sleep_valid,
31587 };
31588 diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31589 --- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31590 +++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31591 @@ -165,9 +165,9 @@ struct mapped_device {
31592 /*
31593 * Event handling.
31594 */
31595 - atomic_t event_nr;
31596 + atomic_unchecked_t event_nr;
31597 wait_queue_head_t eventq;
31598 - atomic_t uevent_seq;
31599 + atomic_unchecked_t uevent_seq;
31600 struct list_head uevent_list;
31601 spinlock_t uevent_lock; /* Protect access to uevent_list */
31602
31603 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31604 rwlock_init(&md->map_lock);
31605 atomic_set(&md->holders, 1);
31606 atomic_set(&md->open_count, 0);
31607 - atomic_set(&md->event_nr, 0);
31608 - atomic_set(&md->uevent_seq, 0);
31609 + atomic_set_unchecked(&md->event_nr, 0);
31610 + atomic_set_unchecked(&md->uevent_seq, 0);
31611 INIT_LIST_HEAD(&md->uevent_list);
31612 spin_lock_init(&md->uevent_lock);
31613
31614 @@ -1927,7 +1927,7 @@ static void event_callback(void *context
31615
31616 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31617
31618 - atomic_inc(&md->event_nr);
31619 + atomic_inc_unchecked(&md->event_nr);
31620 wake_up(&md->eventq);
31621 }
31622
31623 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31624
31625 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31626 {
31627 - return atomic_add_return(1, &md->uevent_seq);
31628 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31629 }
31630
31631 uint32_t dm_get_event_nr(struct mapped_device *md)
31632 {
31633 - return atomic_read(&md->event_nr);
31634 + return atomic_read_unchecked(&md->event_nr);
31635 }
31636
31637 int dm_wait_event(struct mapped_device *md, int event_nr)
31638 {
31639 return wait_event_interruptible(md->eventq,
31640 - (event_nr != atomic_read(&md->event_nr)));
31641 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31642 }
31643
31644 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31645 diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31646 --- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31647 +++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31648 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31649 cmd == DM_LIST_VERSIONS_CMD)
31650 return 0;
31651
31652 - if ((cmd == DM_DEV_CREATE_CMD)) {
31653 + if (cmd == DM_DEV_CREATE_CMD) {
31654 if (!*param->name) {
31655 DMWARN("name not supplied when creating device");
31656 return -EINVAL;
31657 diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31658 --- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31659 +++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31660 @@ -41,7 +41,7 @@ enum dm_raid1_error {
31661
31662 struct mirror {
31663 struct mirror_set *ms;
31664 - atomic_t error_count;
31665 + atomic_unchecked_t error_count;
31666 unsigned long error_type;
31667 struct dm_dev *dev;
31668 sector_t offset;
31669 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31670 * simple way to tell if a device has encountered
31671 * errors.
31672 */
31673 - atomic_inc(&m->error_count);
31674 + atomic_inc_unchecked(&m->error_count);
31675
31676 if (test_and_set_bit(error_type, &m->error_type))
31677 return;
31678 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31679 }
31680
31681 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31682 - if (!atomic_read(&new->error_count)) {
31683 + if (!atomic_read_unchecked(&new->error_count)) {
31684 set_default_mirror(new);
31685 break;
31686 }
31687 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31688 struct mirror *m = get_default_mirror(ms);
31689
31690 do {
31691 - if (likely(!atomic_read(&m->error_count)))
31692 + if (likely(!atomic_read_unchecked(&m->error_count)))
31693 return m;
31694
31695 if (m-- == ms->mirror)
31696 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31697 {
31698 struct mirror *default_mirror = get_default_mirror(m->ms);
31699
31700 - return !atomic_read(&default_mirror->error_count);
31701 + return !atomic_read_unchecked(&default_mirror->error_count);
31702 }
31703
31704 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31705 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31706 */
31707 if (likely(region_in_sync(ms, region, 1)))
31708 m = choose_mirror(ms, bio->bi_sector);
31709 - else if (m && atomic_read(&m->error_count))
31710 + else if (m && atomic_read_unchecked(&m->error_count))
31711 m = NULL;
31712
31713 if (likely(m))
31714 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31715 }
31716
31717 ms->mirror[mirror].ms = ms;
31718 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31719 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31720 ms->mirror[mirror].error_type = 0;
31721 ms->mirror[mirror].offset = offset;
31722
31723 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31724 */
31725 static char device_status_char(struct mirror *m)
31726 {
31727 - if (!atomic_read(&(m->error_count)))
31728 + if (!atomic_read_unchecked(&(m->error_count)))
31729 return 'A';
31730
31731 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31732 diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31733 --- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31734 +++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31735 @@ -20,7 +20,7 @@ struct stripe {
31736 struct dm_dev *dev;
31737 sector_t physical_start;
31738
31739 - atomic_t error_count;
31740 + atomic_unchecked_t error_count;
31741 };
31742
31743 struct stripe_c {
31744 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31745 kfree(sc);
31746 return r;
31747 }
31748 - atomic_set(&(sc->stripe[i].error_count), 0);
31749 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31750 }
31751
31752 ti->private = sc;
31753 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31754 DMEMIT("%d ", sc->stripes);
31755 for (i = 0; i < sc->stripes; i++) {
31756 DMEMIT("%s ", sc->stripe[i].dev->name);
31757 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31758 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31759 'D' : 'A';
31760 }
31761 buffer[i] = '\0';
31762 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31763 */
31764 for (i = 0; i < sc->stripes; i++)
31765 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31766 - atomic_inc(&(sc->stripe[i].error_count));
31767 - if (atomic_read(&(sc->stripe[i].error_count)) <
31768 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31769 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31770 DM_IO_ERROR_THRESHOLD)
31771 queue_work(kstriped, &sc->kstriped_ws);
31772 }
31773 diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31774 --- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31775 +++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31776 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31777 NULL,
31778 };
31779
31780 -static struct sysfs_ops dm_sysfs_ops = {
31781 +static const struct sysfs_ops dm_sysfs_ops = {
31782 .show = dm_attr_show,
31783 };
31784
31785 diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
31786 --- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31787 +++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31788 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31789 if (!dev_size)
31790 return 0;
31791
31792 - if ((start >= dev_size) || (start + len > dev_size)) {
31793 + if ((start >= dev_size) || (len > dev_size - start)) {
31794 DMWARN("%s: %s too small for target: "
31795 "start=%llu, len=%llu, dev_size=%llu",
31796 dm_device_name(ti->table->md), bdevname(bdev, b),
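device_area_is_invalid() originally tested start + len > dev_size; with unsigned sector arithmetic a huge len can wrap the sum around and slip past the check. Rewriting it as len > dev_size - start (after start >= dev_size has already been ruled out, so the subtraction cannot underflow) makes the comparison overflow-proof. A small demonstration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Naive check: start + len can wrap around and look "in range". */
static bool in_range_naive(uint64_t start, uint64_t len, uint64_t size)
{
    return start < size && start + len <= size;
}

/* Overflow-safe form, as the dm-table hunk rewrites it. */
static bool in_range_safe(uint64_t start, uint64_t len, uint64_t size)
{
    return start < size && len <= size - start;
}

int main(void)
{
    uint64_t size = 1000, start = 10;
    uint64_t evil_len = UINT64_MAX - 5;    /* start + evil_len wraps to 4 */

    printf("naive: %d (wrongly accepts)\n", in_range_naive(start, evil_len, size));
    printf("safe:  %d (rejects)\n", in_range_safe(start, evil_len, size));
    return 0;
}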
31797 diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
31798 --- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31799 +++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31800 @@ -153,10 +153,10 @@ static int start_readonly;
31801 * start build, activate spare
31802 */
31803 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31804 -static atomic_t md_event_count;
31805 +static atomic_unchecked_t md_event_count;
31806 void md_new_event(mddev_t *mddev)
31807 {
31808 - atomic_inc(&md_event_count);
31809 + atomic_inc_unchecked(&md_event_count);
31810 wake_up(&md_event_waiters);
31811 }
31812 EXPORT_SYMBOL_GPL(md_new_event);
31813 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31814 */
31815 static void md_new_event_inintr(mddev_t *mddev)
31816 {
31817 - atomic_inc(&md_event_count);
31818 + atomic_inc_unchecked(&md_event_count);
31819 wake_up(&md_event_waiters);
31820 }
31821
31822 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31823
31824 rdev->preferred_minor = 0xffff;
31825 rdev->data_offset = le64_to_cpu(sb->data_offset);
31826 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31827 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31828
31829 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31830 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31831 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31832 else
31833 sb->resync_offset = cpu_to_le64(0);
31834
31835 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31836 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31837
31838 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31839 sb->size = cpu_to_le64(mddev->dev_sectors);
31840 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31841 static ssize_t
31842 errors_show(mdk_rdev_t *rdev, char *page)
31843 {
31844 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31845 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31846 }
31847
31848 static ssize_t
31849 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31850 char *e;
31851 unsigned long n = simple_strtoul(buf, &e, 10);
31852 if (*buf && (*e == 0 || *e == '\n')) {
31853 - atomic_set(&rdev->corrected_errors, n);
31854 + atomic_set_unchecked(&rdev->corrected_errors, n);
31855 return len;
31856 }
31857 return -EINVAL;
31858 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31859 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31860 kfree(rdev);
31861 }
31862 -static struct sysfs_ops rdev_sysfs_ops = {
31863 +static const struct sysfs_ops rdev_sysfs_ops = {
31864 .show = rdev_attr_show,
31865 .store = rdev_attr_store,
31866 };
31867 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31868 rdev->data_offset = 0;
31869 rdev->sb_events = 0;
31870 atomic_set(&rdev->nr_pending, 0);
31871 - atomic_set(&rdev->read_errors, 0);
31872 - atomic_set(&rdev->corrected_errors, 0);
31873 + atomic_set_unchecked(&rdev->read_errors, 0);
31874 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31875
31876 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31877 if (!size) {
31878 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31879 kfree(mddev);
31880 }
31881
31882 -static struct sysfs_ops md_sysfs_ops = {
31883 +static const struct sysfs_ops md_sysfs_ops = {
31884 .show = md_attr_show,
31885 .store = md_attr_store,
31886 };
31887 @@ -4474,7 +4474,8 @@ out:
31888 err = 0;
31889 blk_integrity_unregister(disk);
31890 md_new_event(mddev);
31891 - sysfs_notify_dirent(mddev->sysfs_state);
31892 + if (mddev->sysfs_state)
31893 + sysfs_notify_dirent(mddev->sysfs_state);
31894 return err;
31895 }
31896
31897 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31898
31899 spin_unlock(&pers_lock);
31900 seq_printf(seq, "\n");
31901 - mi->event = atomic_read(&md_event_count);
31902 + mi->event = atomic_read_unchecked(&md_event_count);
31903 return 0;
31904 }
31905 if (v == (void*)2) {
31906 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31907 chunk_kb ? "KB" : "B");
31908 if (bitmap->file) {
31909 seq_printf(seq, ", file: ");
31910 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31911 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31912 }
31913
31914 seq_printf(seq, "\n");
31915 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31916 else {
31917 struct seq_file *p = file->private_data;
31918 p->private = mi;
31919 - mi->event = atomic_read(&md_event_count);
31920 + mi->event = atomic_read_unchecked(&md_event_count);
31921 }
31922 return error;
31923 }
31924 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31925 /* always allow read */
31926 mask = POLLIN | POLLRDNORM;
31927
31928 - if (mi->event != atomic_read(&md_event_count))
31929 + if (mi->event != atomic_read_unchecked(&md_event_count))
31930 mask |= POLLERR | POLLPRI;
31931 return mask;
31932 }
31933 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31934 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31935 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31936 (int)part_stat_read(&disk->part0, sectors[1]) -
31937 - atomic_read(&disk->sync_io);
31938 + atomic_read_unchecked(&disk->sync_io);
31939 /* sync IO will cause sync_io to increase before the disk_stats
31940 * as sync_io is counted when a request starts, and
31941 * disk_stats is counted when it completes.
31942 diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
31943 --- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31944 +++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31945 @@ -94,10 +94,10 @@ struct mdk_rdev_s
31946 * only maintained for arrays that
31947 * support hot removal
31948 */
31949 - atomic_t read_errors; /* number of consecutive read errors that
31950 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31951 * we have tried to ignore.
31952 */
31953 - atomic_t corrected_errors; /* number of corrected read errors,
31954 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31955 * for reporting to userspace and storing
31956 * in superblock.
31957 */
31958 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31959
31960 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31961 {
31962 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31963 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31964 }
31965
31966 struct mdk_personality
31967 diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
31968 --- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31969 +++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31970 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31971 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31972 set_bit(R10BIO_Uptodate, &r10_bio->state);
31973 else {
31974 - atomic_add(r10_bio->sectors,
31975 + atomic_add_unchecked(r10_bio->sectors,
31976 &conf->mirrors[d].rdev->corrected_errors);
31977 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31978 md_error(r10_bio->mddev,
31979 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31980 test_bit(In_sync, &rdev->flags)) {
31981 atomic_inc(&rdev->nr_pending);
31982 rcu_read_unlock();
31983 - atomic_add(s, &rdev->corrected_errors);
31984 + atomic_add_unchecked(s, &rdev->corrected_errors);
31985 if (sync_page_io(rdev->bdev,
31986 r10_bio->devs[sl].addr +
31987 sect + rdev->data_offset,
31988 diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
31989 --- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31990 +++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31991 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31992 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31993 continue;
31994 rdev = conf->mirrors[d].rdev;
31995 - atomic_add(s, &rdev->corrected_errors);
31996 + atomic_add_unchecked(s, &rdev->corrected_errors);
31997 if (sync_page_io(rdev->bdev,
31998 sect + rdev->data_offset,
31999 s<<9,
32000 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32001 /* Well, this device is dead */
32002 md_error(mddev, rdev);
32003 else {
32004 - atomic_add(s, &rdev->corrected_errors);
32005 + atomic_add_unchecked(s, &rdev->corrected_errors);
32006 printk(KERN_INFO
32007 "raid1:%s: read error corrected "
32008 "(%d sectors at %llu on %s)\n",
32009 diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32010 --- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32011 +++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32012 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32013 bi->bi_next = NULL;
32014 if ((rw & WRITE) &&
32015 test_bit(R5_ReWrite, &sh->dev[i].flags))
32016 - atomic_add(STRIPE_SECTORS,
32017 + atomic_add_unchecked(STRIPE_SECTORS,
32018 &rdev->corrected_errors);
32019 generic_make_request(bi);
32020 } else {
32021 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32022 clear_bit(R5_ReadError, &sh->dev[i].flags);
32023 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32024 }
32025 - if (atomic_read(&conf->disks[i].rdev->read_errors))
32026 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
32027 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32028 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32029 } else {
32030 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32031 int retry = 0;
32032 rdev = conf->disks[i].rdev;
32033
32034 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32035 - atomic_inc(&rdev->read_errors);
32036 + atomic_inc_unchecked(&rdev->read_errors);
32037 if (conf->mddev->degraded >= conf->max_degraded)
32038 printk_rl(KERN_WARNING
32039 "raid5:%s: read error not correctable "
32040 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32041 (unsigned long long)(sh->sector
32042 + rdev->data_offset),
32043 bdn);
32044 - else if (atomic_read(&rdev->read_errors)
32045 + else if (atomic_read_unchecked(&rdev->read_errors)
32046 > conf->max_nr_stripes)
32047 printk(KERN_WARNING
32048 "raid5:%s: Too many read errors, failing device %s.\n",
32049 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32050 sector_t r_sector;
32051 struct stripe_head sh2;
32052
32053 + pax_track_stack();
32054
32055 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32056 stripe = new_sector;
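
Several hunks in this patch, including the compute_blocknr() change above, insert pax_track_stack() at the top of functions with unusually large stack frames. The call lets the PaX stack-leak defense record how deep the kernel stack actually grew, so the used region can be cleared before returning to user space. A rough, hedged illustration of that bookkeeping (track_stack and lowest_stack are illustrative names, not the PaX code):

static unsigned long lowest_stack = ~0UL;       /* per-task, reset per syscall in the real feature */

static inline void track_stack(void)
{
        unsigned long sp = (unsigned long)&sp;  /* approximate current stack pointer */

        if (sp < lowest_stack)
                lowest_stack = sp;              /* remember the deepest point reached */
}
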
32057 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_fops.c linux-2.6.32.45/drivers/media/common/saa7146_fops.c
32058 --- linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
32059 +++ linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
32060 @@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
32061 ERR(("out of memory. aborting.\n"));
32062 return -ENOMEM;
32063 }
32064 - ext_vv->ops = saa7146_video_ioctl_ops;
32065 + memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32066 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32067
32068 DEB_EE(("dev:%p\n",dev));
32069 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32070 --- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32071 +++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32072 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
32073
32074 int x[32], y[32], w[32], h[32];
32075
32076 + pax_track_stack();
32077 +
32078 /* clear out memory */
32079 memset(&line_list[0], 0x00, sizeof(u32)*32);
32080 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32081 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32082 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32083 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32084 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32085 u8 buf[HOST_LINK_BUF_SIZE];
32086 int i;
32087
32088 + pax_track_stack();
32089 +
32090 dprintk("%s\n", __func__);
32091
32092 /* check if we have space for a link buf in the rx_buffer */
32093 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32094 unsigned long timeout;
32095 int written;
32096
32097 + pax_track_stack();
32098 +
32099 dprintk("%s\n", __func__);
32100
32101 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32102 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32103 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32104 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32105 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
32106 union {
32107 dmx_ts_cb ts;
32108 dmx_section_cb sec;
32109 - } cb;
32110 + } __no_const cb;
32111
32112 struct dvb_demux *demux;
32113 void *priv;
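
The __no_const annotation added above (and to several frontend structs further down) pairs with the constify compiler plugin used by this patch set: structures made up only of function pointers are normally forced const, and __no_const opts out the ones that genuinely are written at run time, such as this callback union that is filled in when a feed is set up. A hedged sketch of how such a marker can be wired up, assuming the plugin recognises a no_const attribute:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))    /* understood by the plugin only */
#else
#define __no_const                              /* plain compilers: expands to nothing */
#endif

typedef int (*dmx_ts_cb)(const unsigned char *buf, unsigned long len);
typedef int (*dmx_section_cb)(const unsigned char *buf, unsigned long len);

struct feed_callbacks {
        union {
                dmx_ts_cb ts;
                dmx_section_cb sec;
        } __no_const cb;                        /* stays writable despite being all pointers */
};
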
32114 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32115 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32116 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32117 @@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32118 dvbdev->fops = dvbdevfops;
32119 init_waitqueue_head (&dvbdev->wait_queue);
32120
32121 - memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32122 - dvbdevfops->owner = adap->module;
32123 + memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32124 + *(void **)&dvbdevfops->owner = adap->module;
32125
32126 list_add_tail (&dvbdev->list_head, &adap->device_list);
32127
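
The dvbdev change above shows the companion idiom to constification: when a structure such as file_operations is treated as read-only by the toolchain, the few fields that really must be written at registration time are assigned through an explicit cast, which keeps the compiler's const checking for every other access. A small sketch of the pattern on an assumed ops structure (dev_ops, register_ops and my_open are illustrative names, and the object behind the pointer is assumed to live in writable memory, as it does in the kernel case):

struct dev_ops {
        int (*open)(void);
        const void *owner;
};

static int my_open(void) { return 0; }

/* ops is treated as const everywhere else; only these two intentional
 * writes go through casts, mirroring the *(void **)& idiom in the patch */
static void register_ops(const struct dev_ops *ops, const void *module)
{
        *(void **)&ops->open = (void *)my_open;
        *(const void **)&ops->owner = module;
}
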
32128 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32129 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32130 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32131 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32132 struct dib0700_adapter_state {
32133 int (*set_param_save) (struct dvb_frontend *,
32134 struct dvb_frontend_parameters *);
32135 -};
32136 +} __no_const;
32137
32138 static int dib7070_set_param_override(struct dvb_frontend *fe,
32139 struct dvb_frontend_parameters *fep)
32140 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32141 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32142 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32143 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32144
32145 u8 buf[260];
32146
32147 + pax_track_stack();
32148 +
32149 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32150 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32151
32152 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32153 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32154 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32155 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32156
32157 struct dib0700_adapter_state {
32158 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32159 -};
32160 +} __no_const;
32161
32162 /* Hauppauge Nova-T 500 (aka Bristol)
32163 * has a LNA on GPIO0 which is enabled by setting 1 */
32164 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32165 --- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32166 +++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32167 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32168 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32169 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32170 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32171 -};
32172 +} __no_const;
32173
32174 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32175 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32176 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32177 --- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32178 +++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32179 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32180 u8 tudata[585];
32181 int i;
32182
32183 + pax_track_stack();
32184 +
32185 dprintk("Firmware is %zd bytes\n",fw->size);
32186
32187 /* Get eprom data */
32188 diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c
32189 --- linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32190 +++ linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32191 @@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32192 ERR(("cannot init capture device. skipping.\n"));
32193 return -ENODEV;
32194 }
32195 - vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32196 - vv_data->ops.vidioc_g_input = vidioc_g_input;
32197 - vv_data->ops.vidioc_s_input = vidioc_s_input;
32198 - vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32199 - vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32200 - vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32201 - vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32202 - vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32203 - vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32204 - vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32205 - vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32206 - vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32207 + *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32208 + *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32209 + *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32210 + *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32211 + *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32212 + *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32213 + *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32214 + *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32215 + *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32216 + *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32217 + *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32218 + *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32219
32220 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32221 ERR(("cannot register capture device. skipping.\n"));
32222 diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c
32223 --- linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32224 +++ linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32225 @@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32226 ERR(("cannot init vv subsystem.\n"));
32227 return err;
32228 }
32229 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32230 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32231 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32232 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32233 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32234 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32235
32236 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32237 /* fixme: proper cleanup here */
32238 diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32239 --- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32240 +++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32241 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32242 while (i < count && dev->rdsin != dev->rdsout)
32243 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32244
32245 - if (copy_to_user(data, readbuf, i))
32246 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32247 return -EFAULT;
32248 return i;
32249 }
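
The radio-cadet change above adds a sanity bound on i before the data is copied out: even if the RDS ring-buffer indices race or are corrupted, the copy can never read past the on-stack readbuf. The same defensive shape, sketched with assumed names and sizes (RDS_CHUNK and copy_out are illustrative, and memcpy stands in for copy_to_user):

#include <string.h>

#define RDS_CHUNK 96

/* returns bytes copied, or -1 on a bad length (stands in for -EFAULT) */
static long copy_out(char *dst, const char readbuf[RDS_CHUNK], unsigned long i)
{
        if (i > RDS_CHUNK)              /* never trust i to stay within the buffer */
                return -1;
        memcpy(dst, readbuf, i);
        return (long)i;
}
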
32250 diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32251 --- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32252 +++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32253 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32254
32255 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32256
32257 -static atomic_t cx18_instance = ATOMIC_INIT(0);
32258 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32259
32260 /* Parameter declarations */
32261 static int cardtype[CX18_MAX_CARDS];
32262 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32263 struct i2c_client c;
32264 u8 eedata[256];
32265
32266 + pax_track_stack();
32267 +
32268 memset(&c, 0, sizeof(c));
32269 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32270 c.adapter = &cx->i2c_adap[0];
32271 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32272 struct cx18 *cx;
32273
32274 /* FIXME - module parameter arrays constrain max instances */
32275 - i = atomic_inc_return(&cx18_instance) - 1;
32276 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32277 if (i >= CX18_MAX_CARDS) {
32278 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32279 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32280 diff -urNp linux-2.6.32.45/drivers/media/video/hexium_gemini.c linux-2.6.32.45/drivers/media/video/hexium_gemini.c
32281 --- linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32282 +++ linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32283 @@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32284 hexium->cur_input = 0;
32285
32286 saa7146_vv_init(dev, &vv_data);
32287 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32288 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32289 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32290 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32291 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32292 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32293 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32294 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32295 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32296 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32297 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32298 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32299 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32300 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32301 return -1;
32302 diff -urNp linux-2.6.32.45/drivers/media/video/hexium_orion.c linux-2.6.32.45/drivers/media/video/hexium_orion.c
32303 --- linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32304 +++ linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32305 @@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32306 DEB_EE((".\n"));
32307
32308 saa7146_vv_init(dev, &vv_data);
32309 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32310 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32311 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32312 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32313 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32314 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32315 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32316 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32317 return -1;
32318 diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32319 --- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32320 +++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32321 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32322 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32323
32324 /* ivtv instance counter */
32325 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
32326 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32327
32328 /* Parameter declarations */
32329 static int cardtype[IVTV_MAX_CARDS];
32330 diff -urNp linux-2.6.32.45/drivers/media/video/mxb.c linux-2.6.32.45/drivers/media/video/mxb.c
32331 --- linux-2.6.32.45/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32332 +++ linux-2.6.32.45/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32333 @@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32334 already did this in "mxb_vl42_probe" */
32335
32336 saa7146_vv_init(dev, &vv_data);
32337 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32338 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32339 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32340 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32341 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32342 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32343 - vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32344 - vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32345 - vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32346 - vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32347 - vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32348 - vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32349 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32350 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32351 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32352 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32353 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32354 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32355 + *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32356 + *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32357 + *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32358 + *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32359 + *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32360 + *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32361 #ifdef CONFIG_VIDEO_ADV_DEBUG
32362 - vv_data.ops.vidioc_g_register = vidioc_g_register;
32363 - vv_data.ops.vidioc_s_register = vidioc_s_register;
32364 + *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32365 + *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32366 #endif
32367 - vv_data.ops.vidioc_default = vidioc_default;
32368 + *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32369 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32370 ERR(("cannot register capture v4l2 device. skipping.\n"));
32371 return -1;
32372 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32373 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32374 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32375 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32376 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32377
32378 do_gettimeofday(&vb->ts);
32379 - vb->field_count = atomic_add_return(2, &fh->field_count);
32380 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32381 if (csr & csr_error) {
32382 vb->state = VIDEOBUF_ERROR;
32383 if (!atomic_read(&fh->cam->in_reset)) {
32384 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32385 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32386 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32387 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32388 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32389 struct videobuf_queue vbq;
32390 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32391 - atomic_t field_count; /* field counter for videobuf_buffer */
32392 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32393 /* accessing cam here doesn't need serialisation: it's constant */
32394 struct omap24xxcam_device *cam;
32395 };
32396 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32397 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32398 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32399 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32400 u8 *eeprom;
32401 struct tveeprom tvdata;
32402
32403 + pax_track_stack();
32404 +
32405 memset(&tvdata,0,sizeof(tvdata));
32406
32407 eeprom = pvr2_eeprom_fetch(hdw);
32408 diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32409 --- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32410 +++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32411 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32412 unsigned char localPAT[256];
32413 unsigned char localPMT[256];
32414
32415 + pax_track_stack();
32416 +
32417 /* Set video format - must be done first as it resets other settings */
32418 set_reg8(client, 0x41, h->video_format);
32419
32420 diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32421 --- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32422 +++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32423 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32424 wait_queue_head_t *q = 0;
32425 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32426
32427 + pax_track_stack();
32428 +
32429 /* While any outstand message on the bus exists... */
32430 do {
32431
32432 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32433 u8 tmp[512];
32434 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32435
32436 + pax_track_stack();
32437 +
32438 while (loop) {
32439
32440 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32441 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32442 --- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32443 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32444 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32445 static int __init ibmcam_init(void)
32446 {
32447 struct usbvideo_cb cbTbl;
32448 - memset(&cbTbl, 0, sizeof(cbTbl));
32449 - cbTbl.probe = ibmcam_probe;
32450 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
32451 - cbTbl.videoStart = ibmcam_video_start;
32452 - cbTbl.videoStop = ibmcam_video_stop;
32453 - cbTbl.processData = ibmcam_ProcessIsocData;
32454 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32455 - cbTbl.adjustPicture = ibmcam_adjust_picture;
32456 - cbTbl.getFPS = ibmcam_calculate_fps;
32457 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32458 + *(void **)&cbTbl.probe = ibmcam_probe;
32459 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32460 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
32461 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32462 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32463 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32464 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32465 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32466 return usbvideo_register(
32467 &cams,
32468 MAX_IBMCAM,
32469 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32470 --- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32471 +++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32472 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32473 int error;
32474
32475 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32476 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32477 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32478
32479 cam->input = input_dev = input_allocate_device();
32480 if (!input_dev) {
32481 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32482 struct usbvideo_cb cbTbl;
32483 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32484 DRIVER_DESC "\n");
32485 - memset(&cbTbl, 0, sizeof(cbTbl));
32486 - cbTbl.probe = konicawc_probe;
32487 - cbTbl.setupOnOpen = konicawc_setup_on_open;
32488 - cbTbl.processData = konicawc_process_isoc;
32489 - cbTbl.getFPS = konicawc_calculate_fps;
32490 - cbTbl.setVideoMode = konicawc_set_video_mode;
32491 - cbTbl.startDataPump = konicawc_start_data;
32492 - cbTbl.stopDataPump = konicawc_stop_data;
32493 - cbTbl.adjustPicture = konicawc_adjust_picture;
32494 - cbTbl.userFree = konicawc_free_uvd;
32495 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
32496 + *(void **)&cbTbl.probe = konicawc_probe;
32497 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32498 + *(void **)&cbTbl.processData = konicawc_process_isoc;
32499 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32500 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32501 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
32502 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32503 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32504 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
32505 return usbvideo_register(
32506 &cams,
32507 MAX_CAMERAS,
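
The konicawc hunk above (and the quickcam_messenger hunk that follows) replaces strncat() with strlcat() when appending "/input0" to the physical path. The difference matters because strncat's size argument bounds the bytes appended, not the total destination size, so passing sizeof(buf) to strncat can still overflow a buffer that already holds a path; strlcat takes the full destination size and truncates instead. In kernel terms (append_input_suffix is an illustrative name):

#include <linux/string.h>

static char physname[64];

static void append_input_suffix(void)
{
        /* strncat(physname, "/input0", sizeof(physname));
         *   bounds only the appended bytes, so the result can still overrun
         *   physname once it already contains a long USB path */

        strlcat(physname, "/input0", sizeof(physname));
        /* bounds the total length: the result is NUL-terminated and never
         * longer than sizeof(physname) - 1 */
}
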
32508 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32509 --- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32510 +++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32511 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32512 int error;
32513
32514 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32515 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32516 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32517
32518 cam->input = input_dev = input_allocate_device();
32519 if (!input_dev) {
32520 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32521 --- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32522 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32523 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32524 {
32525 struct usbvideo_cb cbTbl;
32526 memset(&cbTbl, 0, sizeof(cbTbl));
32527 - cbTbl.probe = ultracam_probe;
32528 - cbTbl.setupOnOpen = ultracam_setup_on_open;
32529 - cbTbl.videoStart = ultracam_video_start;
32530 - cbTbl.videoStop = ultracam_video_stop;
32531 - cbTbl.processData = ultracam_ProcessIsocData;
32532 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32533 - cbTbl.adjustPicture = ultracam_adjust_picture;
32534 - cbTbl.getFPS = ultracam_calculate_fps;
32535 + *(void **)&cbTbl.probe = ultracam_probe;
32536 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32537 + *(void **)&cbTbl.videoStart = ultracam_video_start;
32538 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
32539 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32540 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32541 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32542 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32543 return usbvideo_register(
32544 &cams,
32545 MAX_CAMERAS,
32546 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32547 --- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32548 +++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32549 @@ -697,15 +697,15 @@ int usbvideo_register(
32550 __func__, cams, base_size, num_cams);
32551
32552 /* Copy callbacks, apply defaults for those that are not set */
32553 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32554 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32555 if (cams->cb.getFrame == NULL)
32556 - cams->cb.getFrame = usbvideo_GetFrame;
32557 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32558 if (cams->cb.disconnect == NULL)
32559 - cams->cb.disconnect = usbvideo_Disconnect;
32560 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32561 if (cams->cb.startDataPump == NULL)
32562 - cams->cb.startDataPump = usbvideo_StartDataPump;
32563 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32564 if (cams->cb.stopDataPump == NULL)
32565 - cams->cb.stopDataPump = usbvideo_StopDataPump;
32566 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32567
32568 cams->num_cameras = num_cams;
32569 cams->cam = (struct uvd *) &cams[1];
32570 diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32571 --- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32572 +++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32573 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32574 unsigned char rv, gv, bv;
32575 static unsigned char *Y, *U, *V;
32576
32577 + pax_track_stack();
32578 +
32579 frame = usbvision->curFrame;
32580 imageSize = frame->frmwidth * frame->frmheight;
32581 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32582 diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32583 --- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32584 +++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32585 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32586 EXPORT_SYMBOL_GPL(v4l2_device_register);
32587
32588 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32589 - atomic_t *instance)
32590 + atomic_unchecked_t *instance)
32591 {
32592 - int num = atomic_inc_return(instance) - 1;
32593 + int num = atomic_inc_return_unchecked(instance) - 1;
32594 int len = strlen(basename);
32595
32596 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32597 diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32598 --- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32599 +++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32600 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32601 {
32602 struct videobuf_queue q;
32603
32604 + pax_track_stack();
32605 +
32606 /* Required to make generic handler to call __videobuf_alloc */
32607 q.int_ops = &sg_ops;
32608
32609 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32610 --- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32611 +++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32612 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32613 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32614 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32615
32616 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32617 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32618 + NULL, NULL);
32619 +#else
32620 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32621 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32622 +#endif
32623 +
32624 /*
32625 * Rounding UP to nearest 4-kB boundary here...
32626 */
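
The mptbase hunk above is part of the GRKERNSEC_HIDESYM option: kernel virtual and DMA addresses are useful to an attacker building an exploit, so the procfs output prints NULL in their place when the option is enabled. The same effect could be factored into a small helper; hide_kptr() below is a hypothetical name used only for illustration, not a helper this patch introduces:

static inline const void *hide_kptr(const void *p)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
        return NULL;            /* never reveal kernel addresses to user space */
#else
        return p;
#endif
}

/* usage sketch:
 *   len += sprintf(buf + len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
 *                  hide_kptr(ioc->req_frames),
 *                  hide_kptr((void *)(ulong)ioc->req_frames_dma));
 */
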
32627 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32628 --- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32629 +++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32630 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32631 return 0;
32632 }
32633
32634 +static inline void
32635 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32636 +{
32637 + if (phy_info->port_details) {
32638 + phy_info->port_details->rphy = rphy;
32639 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32640 + ioc->name, rphy));
32641 + }
32642 +
32643 + if (rphy) {
32644 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32645 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32646 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32647 + ioc->name, rphy, rphy->dev.release));
32648 + }
32649 +}
32650 +
32651 /* no mutex */
32652 static void
32653 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32654 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32655 return NULL;
32656 }
32657
32658 -static inline void
32659 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32660 -{
32661 - if (phy_info->port_details) {
32662 - phy_info->port_details->rphy = rphy;
32663 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32664 - ioc->name, rphy));
32665 - }
32666 -
32667 - if (rphy) {
32668 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32669 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32670 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32671 - ioc->name, rphy, rphy->dev.release));
32672 - }
32673 -}
32674 -
32675 static inline struct sas_port *
32676 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32677 {
32678 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32679 --- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32680 +++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32681 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32682
32683 h = shost_priv(SChost);
32684
32685 - if (h) {
32686 - if (h->info_kbuf == NULL)
32687 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32688 - return h->info_kbuf;
32689 - h->info_kbuf[0] = '\0';
32690 + if (!h)
32691 + return NULL;
32692
32693 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32694 - h->info_kbuf[size-1] = '\0';
32695 - }
32696 + if (h->info_kbuf == NULL)
32697 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32698 + return h->info_kbuf;
32699 + h->info_kbuf[0] = '\0';
32700 +
32701 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32702 + h->info_kbuf[size-1] = '\0';
32703
32704 return h->info_kbuf;
32705 }
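
The mptscsih_info() rewrite above is a pure restructuring: the "if (h) { ... }" wrapper becomes an early "if (!h) return NULL;", removing a nesting level so the normal path reads straight down. The same transformation in miniature (info, fill and the function names are illustrative):

struct info { char *buf; };

static void fill(struct info *h) { }

static char *info_nested(struct info *h)
{
        if (h) {
                fill(h);
        }
        return h ? h->buf : (char *)0;
}

/* equivalent, with the guard clause up front */
static char *info_guarded(struct info *h)
{
        if (!h)
                return (char *)0;
        fill(h);
        return h->buf;
}
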
32706 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32707 --- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32708 +++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32709 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32710 struct i2o_message *msg;
32711 unsigned int iop;
32712
32713 + pax_track_stack();
32714 +
32715 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32716 return -EFAULT;
32717
32718 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32719 --- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32720 +++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32721 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32722 "Array Controller Device"
32723 };
32724
32725 -static char *chtostr(u8 * chars, int n)
32726 -{
32727 - char tmp[256];
32728 - tmp[0] = 0;
32729 - return strncat(tmp, (char *)chars, n);
32730 -}
32731 -
32732 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32733 char *group)
32734 {
32735 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32736
32737 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32738 seq_printf(seq, "%-#8x", ddm_table.module_id);
32739 - seq_printf(seq, "%-29s",
32740 - chtostr(ddm_table.module_name_version, 28));
32741 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32742 seq_printf(seq, "%9d ", ddm_table.data_size);
32743 seq_printf(seq, "%8d", ddm_table.code_size);
32744
32745 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32746
32747 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32748 seq_printf(seq, "%-#8x", dst->module_id);
32749 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32750 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32751 + seq_printf(seq, "%-.28s", dst->module_name_version);
32752 + seq_printf(seq, "%-.8s", dst->date);
32753 seq_printf(seq, "%8d ", dst->module_size);
32754 seq_printf(seq, "%8d ", dst->mpb_size);
32755 seq_printf(seq, "0x%04x", dst->module_flags);
32756 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32757 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32758 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32759 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32760 - seq_printf(seq, "Vendor info : %s\n",
32761 - chtostr((u8 *) (work32 + 2), 16));
32762 - seq_printf(seq, "Product info : %s\n",
32763 - chtostr((u8 *) (work32 + 6), 16));
32764 - seq_printf(seq, "Description : %s\n",
32765 - chtostr((u8 *) (work32 + 10), 16));
32766 - seq_printf(seq, "Product rev. : %s\n",
32767 - chtostr((u8 *) (work32 + 14), 8));
32768 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32769 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32770 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32771 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32772
32773 seq_printf(seq, "Serial number : ");
32774 print_serial_number(seq, (u8 *) (work32 + 16),
32775 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32776 }
32777
32778 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32779 - seq_printf(seq, "Module name : %s\n",
32780 - chtostr(result.module_name, 24));
32781 - seq_printf(seq, "Module revision : %s\n",
32782 - chtostr(result.module_rev, 8));
32783 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32784 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32785
32786 seq_printf(seq, "Serial number : ");
32787 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32788 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32789 return 0;
32790 }
32791
32792 - seq_printf(seq, "Device name : %s\n",
32793 - chtostr(result.device_name, 64));
32794 - seq_printf(seq, "Service name : %s\n",
32795 - chtostr(result.service_name, 64));
32796 - seq_printf(seq, "Physical name : %s\n",
32797 - chtostr(result.physical_location, 64));
32798 - seq_printf(seq, "Instance number : %s\n",
32799 - chtostr(result.instance_number, 4));
32800 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32801 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32802 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32803 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32804
32805 return 0;
32806 }
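
The i2o_proc changes above remove chtostr(), which copied unterminated ID fields into a 256-byte stack temporary and returned a pointer to that local array (a dangling pointer once the function returns), and instead print the fields directly with an explicit precision. A "%.Ns" conversion reads at most N bytes, so fixed-width fields that may lack a terminating NUL can be formatted safely without any copy. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* 28-byte field copied straight out of a descriptor, not NUL-terminated */
        char module_name_version[28];

        memset(module_name_version, 'A', sizeof(module_name_version));

        /* the precision caps the read at 28 bytes even without a terminator */
        printf("%-.28s\n", module_name_version);
        return 0;
}
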
32807 diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32808 --- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32809 +++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32810 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32811
32812 spin_lock_irqsave(&c->context_list_lock, flags);
32813
32814 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32815 - atomic_inc(&c->context_list_counter);
32816 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32817 + atomic_inc_unchecked(&c->context_list_counter);
32818
32819 - entry->context = atomic_read(&c->context_list_counter);
32820 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32821
32822 list_add(&entry->list, &c->context_list);
32823
32824 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32825
32826 #if BITS_PER_LONG == 64
32827 spin_lock_init(&c->context_list_lock);
32828 - atomic_set(&c->context_list_counter, 0);
32829 + atomic_set_unchecked(&c->context_list_counter, 0);
32830 INIT_LIST_HEAD(&c->context_list);
32831 #endif
32832
32833 diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32834 --- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32835 +++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32836 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32837 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32838 int ret;
32839
32840 + pax_track_stack();
32841 +
32842 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32843 return -EINVAL;
32844
32845 diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32846 --- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32847 +++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32848 @@ -118,7 +118,7 @@
32849 } while (0)
32850 #define MAX_CONFIG_LEN 40
32851
32852 -static struct kgdb_io kgdbts_io_ops;
32853 +static const struct kgdb_io kgdbts_io_ops;
32854 static char get_buf[BUFMAX];
32855 static int get_buf_cnt;
32856 static char put_buf[BUFMAX];
32857 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32858 module_put(THIS_MODULE);
32859 }
32860
32861 -static struct kgdb_io kgdbts_io_ops = {
32862 +static const struct kgdb_io kgdbts_io_ops = {
32863 .name = "kgdbts",
32864 .read_char = kgdbts_get_char,
32865 .write_char = kgdbts_put_char,
32866 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32867 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32868 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32869 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32870
32871 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32872 {
32873 - atomic_long_inc(&mcs_op_statistics[op].count);
32874 - atomic_long_add(clks, &mcs_op_statistics[op].total);
32875 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32876 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32877 if (mcs_op_statistics[op].max < clks)
32878 mcs_op_statistics[op].max = clks;
32879 }
32880 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32881 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32882 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32883 @@ -32,9 +32,9 @@
32884
32885 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32886
32887 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32888 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32889 {
32890 - unsigned long val = atomic_long_read(v);
32891 + unsigned long val = atomic_long_read_unchecked(v);
32892
32893 if (val)
32894 seq_printf(s, "%16lu %s\n", val, id);
32895 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32896 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32897
32898 for (op = 0; op < mcsop_last; op++) {
32899 - count = atomic_long_read(&mcs_op_statistics[op].count);
32900 - total = atomic_long_read(&mcs_op_statistics[op].total);
32901 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32902 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32903 max = mcs_op_statistics[op].max;
32904 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32905 count ? total / count : 0, max);
32906 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32907 --- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32908 +++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32909 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32910 * GRU statistics.
32911 */
32912 struct gru_stats_s {
32913 - atomic_long_t vdata_alloc;
32914 - atomic_long_t vdata_free;
32915 - atomic_long_t gts_alloc;
32916 - atomic_long_t gts_free;
32917 - atomic_long_t vdata_double_alloc;
32918 - atomic_long_t gts_double_allocate;
32919 - atomic_long_t assign_context;
32920 - atomic_long_t assign_context_failed;
32921 - atomic_long_t free_context;
32922 - atomic_long_t load_user_context;
32923 - atomic_long_t load_kernel_context;
32924 - atomic_long_t lock_kernel_context;
32925 - atomic_long_t unlock_kernel_context;
32926 - atomic_long_t steal_user_context;
32927 - atomic_long_t steal_kernel_context;
32928 - atomic_long_t steal_context_failed;
32929 - atomic_long_t nopfn;
32930 - atomic_long_t break_cow;
32931 - atomic_long_t asid_new;
32932 - atomic_long_t asid_next;
32933 - atomic_long_t asid_wrap;
32934 - atomic_long_t asid_reuse;
32935 - atomic_long_t intr;
32936 - atomic_long_t intr_mm_lock_failed;
32937 - atomic_long_t call_os;
32938 - atomic_long_t call_os_offnode_reference;
32939 - atomic_long_t call_os_check_for_bug;
32940 - atomic_long_t call_os_wait_queue;
32941 - atomic_long_t user_flush_tlb;
32942 - atomic_long_t user_unload_context;
32943 - atomic_long_t user_exception;
32944 - atomic_long_t set_context_option;
32945 - atomic_long_t migrate_check;
32946 - atomic_long_t migrated_retarget;
32947 - atomic_long_t migrated_unload;
32948 - atomic_long_t migrated_unload_delay;
32949 - atomic_long_t migrated_nopfn_retarget;
32950 - atomic_long_t migrated_nopfn_unload;
32951 - atomic_long_t tlb_dropin;
32952 - atomic_long_t tlb_dropin_fail_no_asid;
32953 - atomic_long_t tlb_dropin_fail_upm;
32954 - atomic_long_t tlb_dropin_fail_invalid;
32955 - atomic_long_t tlb_dropin_fail_range_active;
32956 - atomic_long_t tlb_dropin_fail_idle;
32957 - atomic_long_t tlb_dropin_fail_fmm;
32958 - atomic_long_t tlb_dropin_fail_no_exception;
32959 - atomic_long_t tlb_dropin_fail_no_exception_war;
32960 - atomic_long_t tfh_stale_on_fault;
32961 - atomic_long_t mmu_invalidate_range;
32962 - atomic_long_t mmu_invalidate_page;
32963 - atomic_long_t mmu_clear_flush_young;
32964 - atomic_long_t flush_tlb;
32965 - atomic_long_t flush_tlb_gru;
32966 - atomic_long_t flush_tlb_gru_tgh;
32967 - atomic_long_t flush_tlb_gru_zero_asid;
32968 -
32969 - atomic_long_t copy_gpa;
32970 -
32971 - atomic_long_t mesq_receive;
32972 - atomic_long_t mesq_receive_none;
32973 - atomic_long_t mesq_send;
32974 - atomic_long_t mesq_send_failed;
32975 - atomic_long_t mesq_noop;
32976 - atomic_long_t mesq_send_unexpected_error;
32977 - atomic_long_t mesq_send_lb_overflow;
32978 - atomic_long_t mesq_send_qlimit_reached;
32979 - atomic_long_t mesq_send_amo_nacked;
32980 - atomic_long_t mesq_send_put_nacked;
32981 - atomic_long_t mesq_qf_not_full;
32982 - atomic_long_t mesq_qf_locked;
32983 - atomic_long_t mesq_qf_noop_not_full;
32984 - atomic_long_t mesq_qf_switch_head_failed;
32985 - atomic_long_t mesq_qf_unexpected_error;
32986 - atomic_long_t mesq_noop_unexpected_error;
32987 - atomic_long_t mesq_noop_lb_overflow;
32988 - atomic_long_t mesq_noop_qlimit_reached;
32989 - atomic_long_t mesq_noop_amo_nacked;
32990 - atomic_long_t mesq_noop_put_nacked;
32991 + atomic_long_unchecked_t vdata_alloc;
32992 + atomic_long_unchecked_t vdata_free;
32993 + atomic_long_unchecked_t gts_alloc;
32994 + atomic_long_unchecked_t gts_free;
32995 + atomic_long_unchecked_t vdata_double_alloc;
32996 + atomic_long_unchecked_t gts_double_allocate;
32997 + atomic_long_unchecked_t assign_context;
32998 + atomic_long_unchecked_t assign_context_failed;
32999 + atomic_long_unchecked_t free_context;
33000 + atomic_long_unchecked_t load_user_context;
33001 + atomic_long_unchecked_t load_kernel_context;
33002 + atomic_long_unchecked_t lock_kernel_context;
33003 + atomic_long_unchecked_t unlock_kernel_context;
33004 + atomic_long_unchecked_t steal_user_context;
33005 + atomic_long_unchecked_t steal_kernel_context;
33006 + atomic_long_unchecked_t steal_context_failed;
33007 + atomic_long_unchecked_t nopfn;
33008 + atomic_long_unchecked_t break_cow;
33009 + atomic_long_unchecked_t asid_new;
33010 + atomic_long_unchecked_t asid_next;
33011 + atomic_long_unchecked_t asid_wrap;
33012 + atomic_long_unchecked_t asid_reuse;
33013 + atomic_long_unchecked_t intr;
33014 + atomic_long_unchecked_t intr_mm_lock_failed;
33015 + atomic_long_unchecked_t call_os;
33016 + atomic_long_unchecked_t call_os_offnode_reference;
33017 + atomic_long_unchecked_t call_os_check_for_bug;
33018 + atomic_long_unchecked_t call_os_wait_queue;
33019 + atomic_long_unchecked_t user_flush_tlb;
33020 + atomic_long_unchecked_t user_unload_context;
33021 + atomic_long_unchecked_t user_exception;
33022 + atomic_long_unchecked_t set_context_option;
33023 + atomic_long_unchecked_t migrate_check;
33024 + atomic_long_unchecked_t migrated_retarget;
33025 + atomic_long_unchecked_t migrated_unload;
33026 + atomic_long_unchecked_t migrated_unload_delay;
33027 + atomic_long_unchecked_t migrated_nopfn_retarget;
33028 + atomic_long_unchecked_t migrated_nopfn_unload;
33029 + atomic_long_unchecked_t tlb_dropin;
33030 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33031 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33032 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33033 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33034 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33035 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33036 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33037 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33038 + atomic_long_unchecked_t tfh_stale_on_fault;
33039 + atomic_long_unchecked_t mmu_invalidate_range;
33040 + atomic_long_unchecked_t mmu_invalidate_page;
33041 + atomic_long_unchecked_t mmu_clear_flush_young;
33042 + atomic_long_unchecked_t flush_tlb;
33043 + atomic_long_unchecked_t flush_tlb_gru;
33044 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33045 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33046 +
33047 + atomic_long_unchecked_t copy_gpa;
33048 +
33049 + atomic_long_unchecked_t mesq_receive;
33050 + atomic_long_unchecked_t mesq_receive_none;
33051 + atomic_long_unchecked_t mesq_send;
33052 + atomic_long_unchecked_t mesq_send_failed;
33053 + atomic_long_unchecked_t mesq_noop;
33054 + atomic_long_unchecked_t mesq_send_unexpected_error;
33055 + atomic_long_unchecked_t mesq_send_lb_overflow;
33056 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33057 + atomic_long_unchecked_t mesq_send_amo_nacked;
33058 + atomic_long_unchecked_t mesq_send_put_nacked;
33059 + atomic_long_unchecked_t mesq_qf_not_full;
33060 + atomic_long_unchecked_t mesq_qf_locked;
33061 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33062 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33063 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33064 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33065 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33066 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33067 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33068 + atomic_long_unchecked_t mesq_noop_put_nacked;
33069
33070 };
33071
33072 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33073 cchop_deallocate, tghop_invalidate, mcsop_last};
33074
33075 struct mcs_op_statistic {
33076 - atomic_long_t count;
33077 - atomic_long_t total;
33078 + atomic_long_unchecked_t count;
33079 + atomic_long_unchecked_t total;
33080 unsigned long max;
33081 };
33082
33083 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33084
33085 #define STAT(id) do { \
33086 if (gru_options & OPT_STATS) \
33087 - atomic_long_inc(&gru_stats.id); \
33088 + atomic_long_inc_unchecked(&gru_stats.id); \
33089 } while (0)
33090
33091 #ifdef CONFIG_SGI_GRU_DEBUG
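The grutables.h hunk above moves the GRU statistics counters from atomic_long_t to PaX's atomic_long_unchecked_t and switches the STAT() macro to atomic_long_inc_unchecked(). Under the REFCOUNT feature the regular atomic types trap on overflow, so counters that are mere statistics and may legitimately wrap are converted to the unchecked variants. A minimal user-space analog of that distinction (illustrative only; checked_counter, unchecked_counter and main() are invented here, and nothing below is kernel or PaX code):

/* Analog only -- not the PaX implementation. A "checked" counter refuses
 * to wrap (as REFCOUNT does for reference counts), while an "unchecked"
 * counter is allowed to, which is harmless for pure statistics such as
 * the GRU counters patched above. */
#include <limits.h>
#include <stdio.h>

struct checked_counter   { unsigned long v; };
struct unchecked_counter { unsigned long v; };

static int checked_inc(struct checked_counter *c)
{
        if (c->v == ULONG_MAX)          /* would wrap: refuse and report */
                return -1;
        c->v++;
        return 0;
}

static void unchecked_inc(struct unchecked_counter *c)
{
        c->v++;                         /* wrapping is acceptable here */
}

int main(void)
{
        struct checked_counter   refs  = { ULONG_MAX };
        struct unchecked_counter stats = { ULONG_MAX };

        if (checked_inc(&refs))
                puts("checked counter: overflow caught");
        unchecked_inc(&stats);          /* silently wraps to 0 */
        printf("stat counter now %lu\n", stats.v);
        return 0;
}
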
33092 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33093 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33094 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33095 @@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33096 /* found in xpc_main.c */
33097 extern struct device *xpc_part;
33098 extern struct device *xpc_chan;
33099 -extern struct xpc_arch_operations xpc_arch_ops;
33100 +extern const struct xpc_arch_operations xpc_arch_ops;
33101 extern int xpc_disengage_timelimit;
33102 extern int xpc_disengage_timedout;
33103 extern int xpc_activate_IRQ_rcvd;
33104 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33105 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33106 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33107 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33108 .notifier_call = xpc_system_die,
33109 };
33110
33111 -struct xpc_arch_operations xpc_arch_ops;
33112 +const struct xpc_arch_operations xpc_arch_ops;
33113
33114 /*
33115 * Timer function to enforce the timelimit on the partition disengage.
33116 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33117 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33118 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33119 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33120 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33121 }
33122
33123 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33124 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33125 .setup_partitions = xpc_setup_partitions_sn2,
33126 .teardown_partitions = xpc_teardown_partitions_sn2,
33127 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33128 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33129 int ret;
33130 size_t buf_size;
33131
33132 - xpc_arch_ops = xpc_arch_ops_sn2;
33133 + pax_open_kernel();
33134 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33135 + pax_close_kernel();
33136
33137 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33138 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33139 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33140 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33141 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33142 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33143 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33144 }
33145
33146 -static struct xpc_arch_operations xpc_arch_ops_uv = {
33147 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
33148 .setup_partitions = xpc_setup_partitions_uv,
33149 .teardown_partitions = xpc_teardown_partitions_uv,
33150 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33151 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33152 int
33153 xpc_init_uv(void)
33154 {
33155 - xpc_arch_ops = xpc_arch_ops_uv;
33156 + pax_open_kernel();
33157 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33158 + pax_close_kernel();
33159
33160 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33161 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33162 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33163 --- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33164 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33165 @@ -289,7 +289,7 @@ struct xpc_interface {
33166 xpc_notify_func, void *);
33167 void (*received) (short, int, void *);
33168 enum xp_retval (*partid_to_nasids) (short, void *);
33169 -};
33170 +} __no_const;
33171
33172 extern struct xpc_interface xpc_interface;
33173
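The sgi-xp hunks above constify the xpc_arch_ops tables and replace the plain structure assignments in xpc_init_sn2() and xpc_init_uv() with a memcpy bracketed by pax_open_kernel()/pax_close_kernel(), while xp.h's xpc_interface keeps runtime-patchable pointers and is tagged __no_const instead. The idea is that ops tables normally sit in read-only memory and the open/close pair lifts the write protection only for the one legitimate copy at init time. A rough user-space analog, using mprotect() in place of the PaX helpers (purely illustrative; struct ops, say_hello() and the mmap()ed page are stand-ins, not the kernel mechanism):

/* Keep an "ops" table read-only and make it writable just long enough
 * to install the chosen implementation, mirroring xpc_init_sn2(). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
        void (*hello)(void);
};

static void say_hello(void) { puts("hello from installed op"); }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* One read-only page standing in for the const ops object. */
        struct ops *ops = mmap(NULL, pagesz, PROT_READ,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        const struct ops ops_impl = { .hello = say_hello };

        if (ops == MAP_FAILED)
                return 1;

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* "open" */
        memcpy(ops, &ops_impl, sizeof(ops_impl));       /* install */
        mprotect(ops, pagesz, PROT_READ);               /* "close" */

        ops->hello();                                   /* read-only use */
        munmap(ops, pagesz);
        return 0;
}
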
33174 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33175 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33176 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33177 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33178 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33179 unsigned long timeo = jiffies + HZ;
33180
33181 + pax_track_stack();
33182 +
33183 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33184 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33185 goto sleep;
33186 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33187 unsigned long initial_adr;
33188 int initial_len = len;
33189
33190 + pax_track_stack();
33191 +
33192 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33193 adr += chip->start;
33194 initial_adr = adr;
33195 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33196 int retries = 3;
33197 int ret;
33198
33199 + pax_track_stack();
33200 +
33201 adr += chip->start;
33202
33203 retry:
33204 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33205 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33206 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33207 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33208 unsigned long cmd_addr;
33209 struct cfi_private *cfi = map->fldrv_priv;
33210
33211 + pax_track_stack();
33212 +
33213 adr += chip->start;
33214
33215 /* Ensure cmd read/writes are aligned. */
33216 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33217 DECLARE_WAITQUEUE(wait, current);
33218 int wbufsize, z;
33219
33220 + pax_track_stack();
33221 +
33222 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33223 if (adr & (map_bankwidth(map)-1))
33224 return -EINVAL;
33225 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33226 DECLARE_WAITQUEUE(wait, current);
33227 int ret = 0;
33228
33229 + pax_track_stack();
33230 +
33231 adr += chip->start;
33232
33233 /* Let's determine this according to the interleave only once */
33234 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33235 unsigned long timeo = jiffies + HZ;
33236 DECLARE_WAITQUEUE(wait, current);
33237
33238 + pax_track_stack();
33239 +
33240 adr += chip->start;
33241
33242 /* Let's determine this according to the interleave only once */
33243 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33244 unsigned long timeo = jiffies + HZ;
33245 DECLARE_WAITQUEUE(wait, current);
33246
33247 + pax_track_stack();
33248 +
33249 adr += chip->start;
33250
33251 /* Let's determine this according to the interleave only once */
33252 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33253 --- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33254 +++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33255 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33256
33257 /* The ECC will not be calculated correctly if less than 512 is written */
33258 /* DBB-
33259 - if (len != 0x200 && eccbuf)
33260 + if (len != 0x200)
33261 printk(KERN_WARNING
33262 "ECC needs a full sector write (adr: %lx size %lx)\n",
33263 (long) to, (long) len);
33264 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33265 --- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33266 +++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33267 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33268 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33269
33270 /* Don't allow read past end of device */
33271 - if (from >= this->totlen)
33272 + if (from >= this->totlen || !len)
33273 return -EINVAL;
33274
33275 /* Don't allow a single read to cross a 512-byte block boundary */
33276 diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33277 --- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33278 +++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33279 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33280 loff_t offset;
33281 uint16_t srcunitswap = cpu_to_le16(srcunit);
33282
33283 + pax_track_stack();
33284 +
33285 eun = &part->EUNInfo[srcunit];
33286 xfer = &part->XferInfo[xferunit];
33287 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33288 diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33289 --- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33290 +++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33291 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33292 struct inftl_oob oob;
33293 size_t retlen;
33294
33295 + pax_track_stack();
33296 +
33297 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33298 "pending=%d)\n", inftl, thisVUC, pendingblock);
33299
33300 diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33301 --- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33302 +++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33303 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33304 struct INFTLPartition *ip;
33305 size_t retlen;
33306
33307 + pax_track_stack();
33308 +
33309 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33310
33311 /*
33312 diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33313 --- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33314 +++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33315 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33316 {
33317 map_word pfow_val[4];
33318
33319 + pax_track_stack();
33320 +
33321 /* Check identification string */
33322 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33323 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33324 diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33325 --- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33326 +++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33327 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33328 u_long size;
33329 struct mtd_info_user info;
33330
33331 + pax_track_stack();
33332 +
33333 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33334
33335 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33336 diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33337 --- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33338 +++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33339 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33340 int inplace = 1;
33341 size_t retlen;
33342
33343 + pax_track_stack();
33344 +
33345 memset(BlockMap, 0xff, sizeof(BlockMap));
33346 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33347
33348 diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33349 --- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33350 +++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33351 @@ -23,6 +23,7 @@
33352 #include <asm/errno.h>
33353 #include <linux/delay.h>
33354 #include <linux/slab.h>
33355 +#include <linux/sched.h>
33356 #include <linux/mtd/mtd.h>
33357 #include <linux/mtd/nand.h>
33358 #include <linux/mtd/nftl.h>
33359 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33360 struct mtd_info *mtd = nftl->mbd.mtd;
33361 unsigned int i;
33362
33363 + pax_track_stack();
33364 +
33365 /* Assume logical EraseSize == physical erasesize for starting the scan.
33366 We'll sort it out later if we find a MediaHeader which says otherwise */
33367 /* Actually, we won't. The new DiskOnChip driver has already scanned
33368 diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33369 --- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33370 +++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33371 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33372 static int __init bytes_str_to_int(const char *str)
33373 {
33374 char *endp;
33375 - unsigned long result;
33376 + unsigned long result, scale = 1;
33377
33378 result = simple_strtoul(str, &endp, 0);
33379 if (str == endp || result >= INT_MAX) {
33380 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33381
33382 switch (*endp) {
33383 case 'G':
33384 - result *= 1024;
33385 + scale *= 1024;
33386 case 'M':
33387 - result *= 1024;
33388 + scale *= 1024;
33389 case 'K':
33390 - result *= 1024;
33391 + scale *= 1024;
33392 if (endp[1] == 'i' && endp[2] == 'B')
33393 endp += 2;
33394 case '\0':
33395 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33396 return -EINVAL;
33397 }
33398
33399 - return result;
33400 + if ((intoverflow_t)result*scale >= INT_MAX) {
33401 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33402 + str);
33403 + return -EINVAL;
33404 + }
33405 +
33406 + return result*scale;
33407 }
33408
33409 /**
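The ubi/build.c hunk above fixes an integer-overflow bug in bytes_str_to_int(): instead of multiplying result in place for each K/M/G suffix, the multiplier is accumulated in scale and the product is range-checked in a wider type (the patch's intoverflow_t helper) before being returned. A self-contained sketch of the same pattern, offered as an illustration rather than the UBI code, with bytes_str_to_int_checked() invented here and unsigned long long standing in for intoverflow_t:

/* Parse "<number>[K|M|G]" and reject anything that would not fit in int. */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int bytes_str_to_int_checked(const char *str)
{
        char *endp;
        unsigned long result = strtoul(str, &endp, 0);
        unsigned long scale = 1;

        if (endp == str || result >= INT_MAX)
                return -EINVAL;

        switch (*endp) {
        case 'G': scale *= 1024;        /* fall through */
        case 'M': scale *= 1024;        /* fall through */
        case 'K': scale *= 1024;
                  break;
        case '\0':
                  break;
        default:
                  return -EINVAL;
        }

        /* Do the final check in a wider type, as the patch does. */
        if ((unsigned long long)result * scale >= INT_MAX)
                return -EINVAL;

        return (int)(result * scale);
}

int main(void)
{
        printf("%d\n", bytes_str_to_int_checked("4K"));       /* 4096 */
        printf("%d\n", bytes_str_to_int_checked("1048576G")); /* -EINVAL */
        return 0;
}
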
33410 diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33411 --- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33412 +++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33413 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33414 int rc = 0;
33415 u32 magic, csum;
33416
33417 + pax_track_stack();
33418 +
33419 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33420 goto test_nvram_done;
33421
33422 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33423 --- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33424 +++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33425 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33426 */
33427 struct l2t_skb_cb {
33428 arp_failure_handler_func arp_failure_handler;
33429 -};
33430 +} __no_const;
33431
33432 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33433
33434 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33435 --- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33436 +++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33437 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33438 int i, addr, ret;
33439 struct t3_vpd vpd;
33440
33441 + pax_track_stack();
33442 +
33443 /*
33444 * Card information is normally at VPD_BASE but some early cards had
33445 * it at 0.
33446 diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33447 --- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33448 +++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33449 @@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33450 /* check for link */
33451 switch (hw->phy.media_type) {
33452 case e1000_media_type_copper:
33453 - func->setup_physical_interface = e1000_setup_copper_link_82571;
33454 - func->check_for_link = e1000e_check_for_copper_link;
33455 - func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33456 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33457 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33458 + *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33459 break;
33460 case e1000_media_type_fiber:
33461 - func->setup_physical_interface =
33462 + *(void **)&func->setup_physical_interface =
33463 e1000_setup_fiber_serdes_link_82571;
33464 - func->check_for_link = e1000e_check_for_fiber_link;
33465 - func->get_link_up_info =
33466 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33467 + *(void **)&func->get_link_up_info =
33468 e1000e_get_speed_and_duplex_fiber_serdes;
33469 break;
33470 case e1000_media_type_internal_serdes:
33471 - func->setup_physical_interface =
33472 + *(void **)&func->setup_physical_interface =
33473 e1000_setup_fiber_serdes_link_82571;
33474 - func->check_for_link = e1000_check_for_serdes_link_82571;
33475 - func->get_link_up_info =
33476 + *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33477 + *(void **)&func->get_link_up_info =
33478 e1000e_get_speed_and_duplex_fiber_serdes;
33479 break;
33480 default:
33481 @@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33482 switch (hw->mac.type) {
33483 case e1000_82574:
33484 case e1000_82583:
33485 - func->check_mng_mode = e1000_check_mng_mode_82574;
33486 - func->led_on = e1000_led_on_82574;
33487 + *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33488 + *(void **)&func->led_on = e1000_led_on_82574;
33489 break;
33490 default:
33491 - func->check_mng_mode = e1000e_check_mng_mode_generic;
33492 - func->led_on = e1000e_led_on_generic;
33493 + *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33494 + *(void **)&func->led_on = e1000e_led_on_generic;
33495 break;
33496 }
33497
33498 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33499 temp = er32(ICRXDMTC);
33500 }
33501
33502 -static struct e1000_mac_operations e82571_mac_ops = {
33503 +static const struct e1000_mac_operations e82571_mac_ops = {
33504 /* .check_mng_mode: mac type dependent */
33505 /* .check_for_link: media type dependent */
33506 .id_led_init = e1000e_id_led_init,
33507 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33508 .setup_led = e1000e_setup_led_generic,
33509 };
33510
33511 -static struct e1000_phy_operations e82_phy_ops_igp = {
33512 +static const struct e1000_phy_operations e82_phy_ops_igp = {
33513 .acquire_phy = e1000_get_hw_semaphore_82571,
33514 .check_reset_block = e1000e_check_reset_block_generic,
33515 .commit_phy = NULL,
33516 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33517 .cfg_on_link_up = NULL,
33518 };
33519
33520 -static struct e1000_phy_operations e82_phy_ops_m88 = {
33521 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
33522 .acquire_phy = e1000_get_hw_semaphore_82571,
33523 .check_reset_block = e1000e_check_reset_block_generic,
33524 .commit_phy = e1000e_phy_sw_reset,
33525 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33526 .cfg_on_link_up = NULL,
33527 };
33528
33529 -static struct e1000_phy_operations e82_phy_ops_bm = {
33530 +static const struct e1000_phy_operations e82_phy_ops_bm = {
33531 .acquire_phy = e1000_get_hw_semaphore_82571,
33532 .check_reset_block = e1000e_check_reset_block_generic,
33533 .commit_phy = e1000e_phy_sw_reset,
33534 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33535 .cfg_on_link_up = NULL,
33536 };
33537
33538 -static struct e1000_nvm_operations e82571_nvm_ops = {
33539 +static const struct e1000_nvm_operations e82571_nvm_ops = {
33540 .acquire_nvm = e1000_acquire_nvm_82571,
33541 .read_nvm = e1000e_read_nvm_eerd,
33542 .release_nvm = e1000_release_nvm_82571,
33543 diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33544 --- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33545 +++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33546 @@ -375,9 +375,9 @@ struct e1000_info {
33547 u32 pba;
33548 u32 max_hw_frame_size;
33549 s32 (*get_variants)(struct e1000_adapter *);
33550 - struct e1000_mac_operations *mac_ops;
33551 - struct e1000_phy_operations *phy_ops;
33552 - struct e1000_nvm_operations *nvm_ops;
33553 + const struct e1000_mac_operations *mac_ops;
33554 + const struct e1000_phy_operations *phy_ops;
33555 + const struct e1000_nvm_operations *nvm_ops;
33556 };
33557
33558 /* hardware capability, feature, and workaround flags */
33559 diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33560 --- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33561 +++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
33562 @@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
33563 /* check for link */
33564 switch (hw->phy.media_type) {
33565 case e1000_media_type_copper:
33566 - func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33567 - func->check_for_link = e1000e_check_for_copper_link;
33568 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33569 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33570 break;
33571 case e1000_media_type_fiber:
33572 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33573 - func->check_for_link = e1000e_check_for_fiber_link;
33574 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33575 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33576 break;
33577 case e1000_media_type_internal_serdes:
33578 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33579 - func->check_for_link = e1000e_check_for_serdes_link;
33580 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33581 + *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
33582 break;
33583 default:
33584 return -E1000_ERR_CONFIG;
33585 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33586 temp = er32(ICRXDMTC);
33587 }
33588
33589 -static struct e1000_mac_operations es2_mac_ops = {
33590 +static const struct e1000_mac_operations es2_mac_ops = {
33591 .id_led_init = e1000e_id_led_init,
33592 .check_mng_mode = e1000e_check_mng_mode_generic,
33593 /* check_for_link dependent on media type */
33594 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33595 .setup_led = e1000e_setup_led_generic,
33596 };
33597
33598 -static struct e1000_phy_operations es2_phy_ops = {
33599 +static const struct e1000_phy_operations es2_phy_ops = {
33600 .acquire_phy = e1000_acquire_phy_80003es2lan,
33601 .check_reset_block = e1000e_check_reset_block_generic,
33602 .commit_phy = e1000e_phy_sw_reset,
33603 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33604 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33605 };
33606
33607 -static struct e1000_nvm_operations es2_nvm_ops = {
33608 +static const struct e1000_nvm_operations es2_nvm_ops = {
33609 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33610 .read_nvm = e1000e_read_nvm_eerd,
33611 .release_nvm = e1000_release_nvm_80003es2lan,
33612 diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33613 --- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33614 +++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
33615 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
33616
33617 /* Function pointers for the PHY. */
33618 struct e1000_phy_operations {
33619 - s32 (*acquire_phy)(struct e1000_hw *);
33620 - s32 (*check_polarity)(struct e1000_hw *);
33621 - s32 (*check_reset_block)(struct e1000_hw *);
33622 - s32 (*commit_phy)(struct e1000_hw *);
33623 - s32 (*force_speed_duplex)(struct e1000_hw *);
33624 - s32 (*get_cfg_done)(struct e1000_hw *hw);
33625 - s32 (*get_cable_length)(struct e1000_hw *);
33626 - s32 (*get_phy_info)(struct e1000_hw *);
33627 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
33628 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33629 - void (*release_phy)(struct e1000_hw *);
33630 - s32 (*reset_phy)(struct e1000_hw *);
33631 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
33632 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33633 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
33634 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33635 - s32 (*cfg_on_link_up)(struct e1000_hw *);
33636 + s32 (* acquire_phy)(struct e1000_hw *);
33637 + s32 (* check_polarity)(struct e1000_hw *);
33638 + s32 (* check_reset_block)(struct e1000_hw *);
33639 + s32 (* commit_phy)(struct e1000_hw *);
33640 + s32 (* force_speed_duplex)(struct e1000_hw *);
33641 + s32 (* get_cfg_done)(struct e1000_hw *hw);
33642 + s32 (* get_cable_length)(struct e1000_hw *);
33643 + s32 (* get_phy_info)(struct e1000_hw *);
33644 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
33645 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33646 + void (* release_phy)(struct e1000_hw *);
33647 + s32 (* reset_phy)(struct e1000_hw *);
33648 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
33649 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
33650 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
33651 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33652 + s32 (* cfg_on_link_up)(struct e1000_hw *);
33653 };
33654
33655 /* Function pointers for the NVM. */
33656 struct e1000_nvm_operations {
33657 - s32 (*acquire_nvm)(struct e1000_hw *);
33658 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33659 - void (*release_nvm)(struct e1000_hw *);
33660 - s32 (*update_nvm)(struct e1000_hw *);
33661 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
33662 - s32 (*validate_nvm)(struct e1000_hw *);
33663 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33664 + s32 (* const acquire_nvm)(struct e1000_hw *);
33665 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33666 + void (* const release_nvm)(struct e1000_hw *);
33667 + s32 (* const update_nvm)(struct e1000_hw *);
33668 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
33669 + s32 (* const validate_nvm)(struct e1000_hw *);
33670 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33671 };
33672
33673 struct e1000_mac_info {
33674 diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33675 --- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33676 +++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
33677 @@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
33678 phy->addr = 1;
33679 phy->reset_delay_us = 100;
33680
33681 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33682 - phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33683 - phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33684 - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33685 - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33686 - phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33687 - phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33688 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33689 + *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33690 + *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33691 + *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33692 + *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33693 + *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33694 + *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33695 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33696
33697 /*
33698 @@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
33699 phy->type = e1000e_get_phy_type_from_id(phy->id);
33700
33701 if (phy->type == e1000_phy_82577) {
33702 - phy->ops.check_polarity = e1000_check_polarity_82577;
33703 - phy->ops.force_speed_duplex =
33704 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
33705 + *(void **)&phy->ops.force_speed_duplex =
33706 e1000_phy_force_speed_duplex_82577;
33707 - phy->ops.get_cable_length = e1000_get_cable_length_82577;
33708 - phy->ops.get_phy_info = e1000_get_phy_info_82577;
33709 - phy->ops.commit_phy = e1000e_phy_sw_reset;
33710 + *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
33711 + *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
33712 + *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
33713 }
33714
33715 out:
33716 @@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
33717 */
33718 ret_val = e1000e_determine_phy_address(hw);
33719 if (ret_val) {
33720 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33721 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33722 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33723 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33724 ret_val = e1000e_determine_phy_address(hw);
33725 if (ret_val)
33726 return ret_val;
33727 @@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
33728 case IGP03E1000_E_PHY_ID:
33729 phy->type = e1000_phy_igp_3;
33730 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33731 - phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33732 - phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33733 + *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33734 + *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33735 break;
33736 case IFE_E_PHY_ID:
33737 case IFE_PLUS_E_PHY_ID:
33738 @@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
33739 case BME1000_E_PHY_ID:
33740 phy->type = e1000_phy_bm;
33741 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33742 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33743 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33744 - hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33745 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33746 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33747 + *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33748 break;
33749 default:
33750 return -E1000_ERR_PHY;
33751 break;
33752 }
33753
33754 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33755 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33756
33757 return 0;
33758 }
33759 @@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
33760 case e1000_ich9lan:
33761 case e1000_ich10lan:
33762 /* ID LED init */
33763 - mac->ops.id_led_init = e1000e_id_led_init;
33764 + *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
33765 /* setup LED */
33766 - mac->ops.setup_led = e1000e_setup_led_generic;
33767 + *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
33768 /* cleanup LED */
33769 - mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33770 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33771 /* turn on/off LED */
33772 - mac->ops.led_on = e1000_led_on_ich8lan;
33773 - mac->ops.led_off = e1000_led_off_ich8lan;
33774 + *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
33775 + *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
33776 break;
33777 case e1000_pchlan:
33778 /* ID LED init */
33779 - mac->ops.id_led_init = e1000_id_led_init_pchlan;
33780 + *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
33781 /* setup LED */
33782 - mac->ops.setup_led = e1000_setup_led_pchlan;
33783 + *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
33784 /* cleanup LED */
33785 - mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33786 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33787 /* turn on/off LED */
33788 - mac->ops.led_on = e1000_led_on_pchlan;
33789 - mac->ops.led_off = e1000_led_off_pchlan;
33790 + *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
33791 + *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
33792 break;
33793 default:
33794 break;
33795 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33796 }
33797 }
33798
33799 -static struct e1000_mac_operations ich8_mac_ops = {
33800 +static const struct e1000_mac_operations ich8_mac_ops = {
33801 .id_led_init = e1000e_id_led_init,
33802 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33803 .check_for_link = e1000_check_for_copper_link_ich8lan,
33804 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33805 /* id_led_init dependent on mac type */
33806 };
33807
33808 -static struct e1000_phy_operations ich8_phy_ops = {
33809 +static const struct e1000_phy_operations ich8_phy_ops = {
33810 .acquire_phy = e1000_acquire_swflag_ich8lan,
33811 .check_reset_block = e1000_check_reset_block_ich8lan,
33812 .commit_phy = NULL,
33813 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33814 .write_phy_reg = e1000e_write_phy_reg_igp,
33815 };
33816
33817 -static struct e1000_nvm_operations ich8_nvm_ops = {
33818 +static const struct e1000_nvm_operations ich8_nvm_ops = {
33819 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33820 .read_nvm = e1000_read_nvm_ich8lan,
33821 .release_nvm = e1000_release_nvm_ich8lan,
33822 diff -urNp linux-2.6.32.45/drivers/net/e1000e/netdev.c linux-2.6.32.45/drivers/net/e1000e/netdev.c
33823 --- linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
33824 +++ linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
33825 @@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
33826
33827 err = -EIO;
33828
33829 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33830 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33831 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33832 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33833 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33834 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33835
33836 err = ei->get_variants(adapter);
33837 if (err)
33838 diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33839 --- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33840 +++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33841 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33842 unsigned char buf[512];
33843 int count1;
33844
33845 + pax_track_stack();
33846 +
33847 if (!count)
33848 return;
33849
33850 diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33851 --- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33852 +++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33853 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33854 NULL,
33855 };
33856
33857 -static struct sysfs_ops veth_pool_ops = {
33858 +static const struct sysfs_ops veth_pool_ops = {
33859 .show = veth_pool_show,
33860 .store = veth_pool_store,
33861 };
33862 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33863 --- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33864 +++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
33865 @@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
33866 ? true : false;
33867
33868 /* physical interface link setup */
33869 - mac->ops.setup_physical_interface =
33870 + *(void **)&mac->ops.setup_physical_interface =
33871 (hw->phy.media_type == e1000_media_type_copper)
33872 ? igb_setup_copper_link_82575
33873 : igb_setup_serdes_link_82575;
33874 @@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
33875
33876 /* PHY function pointers */
33877 if (igb_sgmii_active_82575(hw)) {
33878 - phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33879 - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33880 - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33881 + *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33882 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33883 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33884 } else {
33885 - phy->ops.reset = igb_phy_hw_reset;
33886 - phy->ops.read_reg = igb_read_phy_reg_igp;
33887 - phy->ops.write_reg = igb_write_phy_reg_igp;
33888 + *(void **)&phy->ops.reset = igb_phy_hw_reset;
33889 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
33890 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
33891 }
33892
33893 /* set lan id */
33894 @@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
33895 switch (phy->id) {
33896 case M88E1111_I_PHY_ID:
33897 phy->type = e1000_phy_m88;
33898 - phy->ops.get_phy_info = igb_get_phy_info_m88;
33899 - phy->ops.get_cable_length = igb_get_cable_length_m88;
33900 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33901 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
33902 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
33903 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33904 break;
33905 case IGP03E1000_E_PHY_ID:
33906 phy->type = e1000_phy_igp_3;
33907 - phy->ops.get_phy_info = igb_get_phy_info_igp;
33908 - phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33909 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33910 - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33911 - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33912 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
33913 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33914 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33915 + *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33916 + *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33917 break;
33918 default:
33919 return -E1000_ERR_PHY;
33920 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33921 wr32(E1000_VT_CTL, vt_ctl);
33922 }
33923
33924 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
33925 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33926 .reset_hw = igb_reset_hw_82575,
33927 .init_hw = igb_init_hw_82575,
33928 .check_for_link = igb_check_for_link_82575,
33929 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33930 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33931 };
33932
33933 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
33934 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33935 .acquire = igb_acquire_phy_82575,
33936 .get_cfg_done = igb_get_cfg_done_82575,
33937 .release = igb_release_phy_82575,
33938 };
33939
33940 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33941 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33942 .acquire = igb_acquire_nvm_82575,
33943 .read = igb_read_nvm_eerd,
33944 .release = igb_release_nvm_82575,
33945 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33946 --- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33947 +++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
33948 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
33949 };
33950
33951 struct e1000_nvm_operations {
33952 - s32 (*acquire)(struct e1000_hw *);
33953 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
33954 - void (*release)(struct e1000_hw *);
33955 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33956 + s32 (* const acquire)(struct e1000_hw *);
33957 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
33958 + void (* const release)(struct e1000_hw *);
33959 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
33960 };
33961
33962 struct e1000_info {
33963 s32 (*get_invariants)(struct e1000_hw *);
33964 - struct e1000_mac_operations *mac_ops;
33965 - struct e1000_phy_operations *phy_ops;
33966 - struct e1000_nvm_operations *nvm_ops;
33967 + const struct e1000_mac_operations *mac_ops;
33968 + const struct e1000_phy_operations *phy_ops;
33969 + const struct e1000_nvm_operations *nvm_ops;
33970 };
33971
33972 extern const struct e1000_info e1000_82575_info;
33973 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_mbx.c linux-2.6.32.45/drivers/net/igb/e1000_mbx.c
33974 --- linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
33975 +++ linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
33976 @@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
33977
33978 mbx->size = E1000_VFMAILBOX_SIZE;
33979
33980 - mbx->ops.read = igb_read_mbx_pf;
33981 - mbx->ops.write = igb_write_mbx_pf;
33982 - mbx->ops.read_posted = igb_read_posted_mbx;
33983 - mbx->ops.write_posted = igb_write_posted_mbx;
33984 - mbx->ops.check_for_msg = igb_check_for_msg_pf;
33985 - mbx->ops.check_for_ack = igb_check_for_ack_pf;
33986 - mbx->ops.check_for_rst = igb_check_for_rst_pf;
33987 + *(void **)&mbx->ops.read = igb_read_mbx_pf;
33988 + *(void **)&mbx->ops.write = igb_write_mbx_pf;
33989 + *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
33990 + *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
33991 + *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
33992 + *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
33993 + *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
33994
33995 mbx->stats.msgs_tx = 0;
33996 mbx->stats.msgs_rx = 0;
33997 diff -urNp linux-2.6.32.45/drivers/net/igb/igb_main.c linux-2.6.32.45/drivers/net/igb/igb_main.c
33998 --- linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
33999 +++ linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
34000 @@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
34001 /* setup the private structure */
34002 hw->back = adapter;
34003 /* Copy the default MAC, PHY and NVM function pointers */
34004 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34005 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34006 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34007 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34008 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34009 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34010 /* Initialize skew-specific constants */
34011 err = ei->get_invariants(hw);
34012 if (err)
34013 diff -urNp linux-2.6.32.45/drivers/net/igbvf/mbx.c linux-2.6.32.45/drivers/net/igbvf/mbx.c
34014 --- linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
34015 +++ linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
34016 @@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
34017
34018 mbx->size = E1000_VFMAILBOX_SIZE;
34019
34020 - mbx->ops.read = e1000_read_mbx_vf;
34021 - mbx->ops.write = e1000_write_mbx_vf;
34022 - mbx->ops.read_posted = e1000_read_posted_mbx;
34023 - mbx->ops.write_posted = e1000_write_posted_mbx;
34024 - mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34025 - mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34026 - mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34027 + *(void **)&mbx->ops.read = e1000_read_mbx_vf;
34028 + *(void **)&mbx->ops.write = e1000_write_mbx_vf;
34029 + *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
34030 + *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
34031 + *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34032 + *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34033 + *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34034
34035 mbx->stats.msgs_tx = 0;
34036 mbx->stats.msgs_rx = 0;
34037 diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.c linux-2.6.32.45/drivers/net/igbvf/vf.c
34038 --- linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
34039 +++ linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
34040 @@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
34041
34042 /* Function pointers */
34043 /* reset */
34044 - mac->ops.reset_hw = e1000_reset_hw_vf;
34045 + *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
34046 /* hw initialization */
34047 - mac->ops.init_hw = e1000_init_hw_vf;
34048 + *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
34049 /* check for link */
34050 - mac->ops.check_for_link = e1000_check_for_link_vf;
34051 + *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
34052 /* link info */
34053 - mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34054 + *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34055 /* multicast address update */
34056 - mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34057 + *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34058 /* set mac address */
34059 - mac->ops.rar_set = e1000_rar_set_vf;
34060 + *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
34061 /* read mac address */
34062 - mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34063 + *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34064 /* set vlan filter table array */
34065 - mac->ops.set_vfta = e1000_set_vfta_vf;
34066 + *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34067
34068 return E1000_SUCCESS;
34069 }
34070 @@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34071 **/
34072 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34073 {
34074 - hw->mac.ops.init_params = e1000_init_mac_params_vf;
34075 - hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34076 + *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34077 + *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34078 }
34079
34080 /**
34081 diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
34082 --- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34083 +++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34084 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34085 NULL
34086 };
34087
34088 -static struct sysfs_ops veth_cnx_sysfs_ops = {
34089 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
34090 .show = veth_cnx_attribute_show
34091 };
34092
34093 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34094 NULL
34095 };
34096
34097 -static struct sysfs_ops veth_port_sysfs_ops = {
34098 +static const struct sysfs_ops veth_port_sysfs_ops = {
34099 .show = veth_port_attribute_show
34100 };
34101
34102 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
34103 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34104 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34105 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34106 u32 rctl;
34107 int i;
34108
34109 + pax_track_stack();
34110 +
34111 /* Check for Promiscuous and All Multicast modes */
34112
34113 rctl = IXGB_READ_REG(hw, RCTL);
34114 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
34115 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34116 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34117 @@ -260,6 +260,9 @@ void __devinit
34118 ixgb_check_options(struct ixgb_adapter *adapter)
34119 {
34120 int bd = adapter->bd_number;
34121 +
34122 + pax_track_stack();
34123 +
34124 if (bd >= IXGB_MAX_NIC) {
34125 printk(KERN_NOTICE
34126 "Warning: no configuration for board #%i\n", bd);
34127 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c
34128 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34129 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34130 @@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34131
34132 /* Overwrite the link function pointers if copper PHY */
34133 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34134 - mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34135 - mac->ops.get_link_capabilities =
34136 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34137 + *(void **)&mac->ops.get_link_capabilities =
34138 &ixgbe_get_copper_link_capabilities_82598;
34139 }
34140
34141 switch (hw->phy.type) {
34142 case ixgbe_phy_tn:
34143 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34144 - phy->ops.get_firmware_version =
34145 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34146 + *(void **)&phy->ops.get_firmware_version =
34147 &ixgbe_get_phy_firmware_version_tnx;
34148 break;
34149 case ixgbe_phy_nl:
34150 - phy->ops.reset = &ixgbe_reset_phy_nl;
34151 + *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34152
34153 /* Call SFP+ identify routine to get the SFP+ module type */
34154 ret_val = phy->ops.identify_sfp(hw);
34155 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c
34156 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34157 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34158 @@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34159 struct ixgbe_mac_info *mac = &hw->mac;
34160 if (hw->phy.multispeed_fiber) {
34161 /* Set up dual speed SFP+ support */
34162 - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34163 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34164 } else {
34165 - mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34166 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34167 }
34168 }
34169
34170 @@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34171 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34172 ixgbe_init_mac_link_ops_82599(hw);
34173
34174 - hw->phy.ops.reset = NULL;
34175 + *(void **)&hw->phy.ops.reset = NULL;
34176
34177 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34178 &data_offset);
34179 @@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34180
34181 /* If copper media, overwrite with copper function pointers */
34182 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34183 - mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34184 - mac->ops.get_link_capabilities =
34185 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34186 + *(void **)&mac->ops.get_link_capabilities =
34187 &ixgbe_get_copper_link_capabilities_82599;
34188 }
34189
34190 /* Set necessary function pointers based on phy type */
34191 switch (hw->phy.type) {
34192 case ixgbe_phy_tn:
34193 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34194 - phy->ops.get_firmware_version =
34195 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34196 + *(void **)&phy->ops.get_firmware_version =
34197 &ixgbe_get_phy_firmware_version_tnx;
34198 break;
34199 default:
34200 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c
34201 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34202 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34203 @@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34204 adapter->bd_number = cards_found;
34205
34206 /* Setup hw api */
34207 - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34208 + memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34209 hw->mac.type = ii->mac;
34210
34211 /* EEPROM */
34212 - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34213 + memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34214 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34215 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34216 if (!(eec & (1 << 8)))
34217 - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34218 + *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34219
34220 /* PHY */
34221 - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34222 + memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34223 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34224 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34225 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
34226 diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
34227 --- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34228 +++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34229 @@ -38,6 +38,7 @@
34230 #include <linux/errno.h>
34231 #include <linux/pci.h>
34232 #include <linux/dma-mapping.h>
34233 +#include <linux/sched.h>
34234
34235 #include <linux/mlx4/device.h>
34236 #include <linux/mlx4/doorbell.h>
34237 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34238 u64 icm_size;
34239 int err;
34240
34241 + pax_track_stack();
34242 +
34243 err = mlx4_QUERY_FW(dev);
34244 if (err) {
34245 if (err == -EACCES)
34246 diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
34247 --- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34248 +++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34249 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34250 int i, num_irqs, err;
34251 u8 first_ldg;
34252
34253 + pax_track_stack();
34254 +
34255 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34256 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34257 ldg_num_map[i] = first_ldg + i;
34258 diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34259 --- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34260 +++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34261 @@ -79,7 +79,7 @@ static int cards_found;
34262 /*
34263 * VLB I/O addresses
34264 */
34265 -static unsigned int pcnet32_portlist[] __initdata =
34266 +static unsigned int pcnet32_portlist[] __devinitdata =
34267 { 0x300, 0x320, 0x340, 0x360, 0 };
34268
34269 static int pcnet32_debug = 0;
34270 @@ -267,7 +267,7 @@ struct pcnet32_private {
34271 struct sk_buff **rx_skbuff;
34272 dma_addr_t *tx_dma_addr;
34273 dma_addr_t *rx_dma_addr;
34274 - struct pcnet32_access a;
34275 + struct pcnet32_access *a;
34276 spinlock_t lock; /* Guard lock */
34277 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34278 unsigned int rx_ring_size; /* current rx ring size */
34279 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34280 u16 val;
34281
34282 netif_wake_queue(dev);
34283 - val = lp->a.read_csr(ioaddr, CSR3);
34284 + val = lp->a->read_csr(ioaddr, CSR3);
34285 val &= 0x00ff;
34286 - lp->a.write_csr(ioaddr, CSR3, val);
34287 + lp->a->write_csr(ioaddr, CSR3, val);
34288 napi_enable(&lp->napi);
34289 }
34290
34291 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34292 r = mii_link_ok(&lp->mii_if);
34293 } else if (lp->chip_version >= PCNET32_79C970A) {
34294 ulong ioaddr = dev->base_addr; /* card base I/O address */
34295 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34296 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34297 } else { /* can not detect link on really old chips */
34298 r = 1;
34299 }
34300 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34301 pcnet32_netif_stop(dev);
34302
34303 spin_lock_irqsave(&lp->lock, flags);
34304 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34305 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34306
34307 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34308
34309 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34310 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34311 {
34312 struct pcnet32_private *lp = netdev_priv(dev);
34313 - struct pcnet32_access *a = &lp->a; /* access to registers */
34314 + struct pcnet32_access *a = lp->a; /* access to registers */
34315 ulong ioaddr = dev->base_addr; /* card base I/O address */
34316 struct sk_buff *skb; /* sk buff */
34317 int x, i; /* counters */
34318 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34319 pcnet32_netif_stop(dev);
34320
34321 spin_lock_irqsave(&lp->lock, flags);
34322 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34323 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34324
34325 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34326
34327 /* Reset the PCNET32 */
34328 - lp->a.reset(ioaddr);
34329 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34330 + lp->a->reset(ioaddr);
34331 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34332
34333 /* switch pcnet32 to 32bit mode */
34334 - lp->a.write_bcr(ioaddr, 20, 2);
34335 + lp->a->write_bcr(ioaddr, 20, 2);
34336
34337 /* purge & init rings but don't actually restart */
34338 pcnet32_restart(dev, 0x0000);
34339
34340 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34341 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34342
34343 /* Initialize Transmit buffers. */
34344 size = data_len + 15;
34345 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34346
34347 /* set int loopback in CSR15 */
34348 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34349 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34350 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34351
34352 teststatus = cpu_to_le16(0x8000);
34353 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34354 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34355
34356 /* Check status of descriptors */
34357 for (x = 0; x < numbuffs; x++) {
34358 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34359 }
34360 }
34361
34362 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34363 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34364 wmb();
34365 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34366 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34367 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34368 pcnet32_restart(dev, CSR0_NORMAL);
34369 } else {
34370 pcnet32_purge_rx_ring(dev);
34371 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34372 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34373 }
34374 spin_unlock_irqrestore(&lp->lock, flags);
34375
34376 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34377 static void pcnet32_led_blink_callback(struct net_device *dev)
34378 {
34379 struct pcnet32_private *lp = netdev_priv(dev);
34380 - struct pcnet32_access *a = &lp->a;
34381 + struct pcnet32_access *a = lp->a;
34382 ulong ioaddr = dev->base_addr;
34383 unsigned long flags;
34384 int i;
34385 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34386 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34387 {
34388 struct pcnet32_private *lp = netdev_priv(dev);
34389 - struct pcnet32_access *a = &lp->a;
34390 + struct pcnet32_access *a = lp->a;
34391 ulong ioaddr = dev->base_addr;
34392 unsigned long flags;
34393 int i, regs[4];
34394 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34395 {
34396 int csr5;
34397 struct pcnet32_private *lp = netdev_priv(dev);
34398 - struct pcnet32_access *a = &lp->a;
34399 + struct pcnet32_access *a = lp->a;
34400 ulong ioaddr = dev->base_addr;
34401 int ticks;
34402
34403 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34404 spin_lock_irqsave(&lp->lock, flags);
34405 if (pcnet32_tx(dev)) {
34406 /* reset the chip to clear the error condition, then restart */
34407 - lp->a.reset(ioaddr);
34408 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34409 + lp->a->reset(ioaddr);
34410 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34411 pcnet32_restart(dev, CSR0_START);
34412 netif_wake_queue(dev);
34413 }
34414 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34415 __napi_complete(napi);
34416
34417 /* clear interrupt masks */
34418 - val = lp->a.read_csr(ioaddr, CSR3);
34419 + val = lp->a->read_csr(ioaddr, CSR3);
34420 val &= 0x00ff;
34421 - lp->a.write_csr(ioaddr, CSR3, val);
34422 + lp->a->write_csr(ioaddr, CSR3, val);
34423
34424 /* Set interrupt enable. */
34425 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34426 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34427
34428 spin_unlock_irqrestore(&lp->lock, flags);
34429 }
34430 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34431 int i, csr0;
34432 u16 *buff = ptr;
34433 struct pcnet32_private *lp = netdev_priv(dev);
34434 - struct pcnet32_access *a = &lp->a;
34435 + struct pcnet32_access *a = lp->a;
34436 ulong ioaddr = dev->base_addr;
34437 unsigned long flags;
34438
34439 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34440 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34441 if (lp->phymask & (1 << j)) {
34442 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34443 - lp->a.write_bcr(ioaddr, 33,
34444 + lp->a->write_bcr(ioaddr, 33,
34445 (j << 5) | i);
34446 - *buff++ = lp->a.read_bcr(ioaddr, 34);
34447 + *buff++ = lp->a->read_bcr(ioaddr, 34);
34448 }
34449 }
34450 }
34451 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34452 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34453 lp->options |= PCNET32_PORT_FD;
34454
34455 - lp->a = *a;
34456 + lp->a = a;
34457
34458 /* prior to register_netdev, dev->name is not yet correct */
34459 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34460 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34461 if (lp->mii) {
34462 /* lp->phycount and lp->phymask are set to 0 by memset above */
34463
34464 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34465 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34466 /* scan for PHYs */
34467 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34468 unsigned short id1, id2;
34469 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34470 "Found PHY %04x:%04x at address %d.\n",
34471 id1, id2, i);
34472 }
34473 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34474 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34475 if (lp->phycount > 1) {
34476 lp->options |= PCNET32_PORT_MII;
34477 }
34478 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34479 }
34480
34481 /* Reset the PCNET32 */
34482 - lp->a.reset(ioaddr);
34483 + lp->a->reset(ioaddr);
34484
34485 /* switch pcnet32 to 32bit mode */
34486 - lp->a.write_bcr(ioaddr, 20, 2);
34487 + lp->a->write_bcr(ioaddr, 20, 2);
34488
34489 if (netif_msg_ifup(lp))
34490 printk(KERN_DEBUG
34491 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34492 (u32) (lp->init_dma_addr));
34493
34494 /* set/reset autoselect bit */
34495 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
34496 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
34497 if (lp->options & PCNET32_PORT_ASEL)
34498 val |= 2;
34499 - lp->a.write_bcr(ioaddr, 2, val);
34500 + lp->a->write_bcr(ioaddr, 2, val);
34501
34502 /* handle full duplex setting */
34503 if (lp->mii_if.full_duplex) {
34504 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
34505 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
34506 if (lp->options & PCNET32_PORT_FD) {
34507 val |= 1;
34508 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34509 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34510 if (lp->chip_version == 0x2627)
34511 val |= 3;
34512 }
34513 - lp->a.write_bcr(ioaddr, 9, val);
34514 + lp->a->write_bcr(ioaddr, 9, val);
34515 }
34516
34517 /* set/reset GPSI bit in test register */
34518 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34519 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34520 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34521 val |= 0x10;
34522 - lp->a.write_csr(ioaddr, 124, val);
34523 + lp->a->write_csr(ioaddr, 124, val);
34524
34525 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34526 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34527 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34528 * duplex, and/or enable auto negotiation, and clear DANAS
34529 */
34530 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34531 - lp->a.write_bcr(ioaddr, 32,
34532 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
34533 + lp->a->write_bcr(ioaddr, 32,
34534 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
34535 /* disable Auto Negotiation, set 10Mpbs, HD */
34536 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34537 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34538 if (lp->options & PCNET32_PORT_FD)
34539 val |= 0x10;
34540 if (lp->options & PCNET32_PORT_100)
34541 val |= 0x08;
34542 - lp->a.write_bcr(ioaddr, 32, val);
34543 + lp->a->write_bcr(ioaddr, 32, val);
34544 } else {
34545 if (lp->options & PCNET32_PORT_ASEL) {
34546 - lp->a.write_bcr(ioaddr, 32,
34547 - lp->a.read_bcr(ioaddr,
34548 + lp->a->write_bcr(ioaddr, 32,
34549 + lp->a->read_bcr(ioaddr,
34550 32) | 0x0080);
34551 /* enable auto negotiate, setup, disable fd */
34552 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34553 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34554 val |= 0x20;
34555 - lp->a.write_bcr(ioaddr, 32, val);
34556 + lp->a->write_bcr(ioaddr, 32, val);
34557 }
34558 }
34559 } else {
34560 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34561 * There is really no good other way to handle multiple PHYs
34562 * other than turning off all automatics
34563 */
34564 - val = lp->a.read_bcr(ioaddr, 2);
34565 - lp->a.write_bcr(ioaddr, 2, val & ~2);
34566 - val = lp->a.read_bcr(ioaddr, 32);
34567 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34568 + val = lp->a->read_bcr(ioaddr, 2);
34569 + lp->a->write_bcr(ioaddr, 2, val & ~2);
34570 + val = lp->a->read_bcr(ioaddr, 32);
34571 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34572
34573 if (!(lp->options & PCNET32_PORT_ASEL)) {
34574 /* setup ecmd */
34575 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34576 ecmd.speed =
34577 lp->
34578 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34579 - bcr9 = lp->a.read_bcr(ioaddr, 9);
34580 + bcr9 = lp->a->read_bcr(ioaddr, 9);
34581
34582 if (lp->options & PCNET32_PORT_FD) {
34583 ecmd.duplex = DUPLEX_FULL;
34584 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34585 ecmd.duplex = DUPLEX_HALF;
34586 bcr9 |= ~(1 << 0);
34587 }
34588 - lp->a.write_bcr(ioaddr, 9, bcr9);
34589 + lp->a->write_bcr(ioaddr, 9, bcr9);
34590 }
34591
34592 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34593 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34594
34595 #ifdef DO_DXSUFLO
34596 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34597 - val = lp->a.read_csr(ioaddr, CSR3);
34598 + val = lp->a->read_csr(ioaddr, CSR3);
34599 val |= 0x40;
34600 - lp->a.write_csr(ioaddr, CSR3, val);
34601 + lp->a->write_csr(ioaddr, CSR3, val);
34602 }
34603 #endif
34604
34605 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34606 napi_enable(&lp->napi);
34607
34608 /* Re-initialize the PCNET32, and start it when done. */
34609 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34610 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34611 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34612 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34613
34614 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34615 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34616 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34617 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34618
34619 netif_start_queue(dev);
34620
34621 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34622
34623 i = 0;
34624 while (i++ < 100)
34625 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34626 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34627 break;
34628 /*
34629 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34630 * reports that doing so triggers a bug in the '974.
34631 */
34632 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34633 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34634
34635 if (netif_msg_ifup(lp))
34636 printk(KERN_DEBUG
34637 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34638 dev->name, i,
34639 (u32) (lp->init_dma_addr),
34640 - lp->a.read_csr(ioaddr, CSR0));
34641 + lp->a->read_csr(ioaddr, CSR0));
34642
34643 spin_unlock_irqrestore(&lp->lock, flags);
34644
34645 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34646 * Switch back to 16bit mode to avoid problems with dumb
34647 * DOS packet driver after a warm reboot
34648 */
34649 - lp->a.write_bcr(ioaddr, 20, 4);
34650 + lp->a->write_bcr(ioaddr, 20, 4);
34651
34652 err_free_irq:
34653 spin_unlock_irqrestore(&lp->lock, flags);
34654 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34655
34656 /* wait for stop */
34657 for (i = 0; i < 100; i++)
34658 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34659 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34660 break;
34661
34662 if (i >= 100 && netif_msg_drv(lp))
34663 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34664 return;
34665
34666 /* ReInit Ring */
34667 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34668 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34669 i = 0;
34670 while (i++ < 1000)
34671 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34672 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34673 break;
34674
34675 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34676 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34677 }
34678
34679 static void pcnet32_tx_timeout(struct net_device *dev)
34680 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34681 if (pcnet32_debug & NETIF_MSG_DRV)
34682 printk(KERN_ERR
34683 "%s: transmit timed out, status %4.4x, resetting.\n",
34684 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34685 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34686 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34687 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34688 dev->stats.tx_errors++;
34689 if (netif_msg_tx_err(lp)) {
34690 int i;
34691 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34692 if (netif_msg_tx_queued(lp)) {
34693 printk(KERN_DEBUG
34694 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34695 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34696 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34697 }
34698
34699 /* Default status -- will not enable Successful-TxDone
34700 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34701 dev->stats.tx_bytes += skb->len;
34702
34703 /* Trigger an immediate send poll. */
34704 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34705 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34706
34707 dev->trans_start = jiffies;
34708
34709 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34710
34711 spin_lock(&lp->lock);
34712
34713 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34714 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34715 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34716 if (csr0 == 0xffff) {
34717 break; /* PCMCIA remove happened */
34718 }
34719 /* Acknowledge all of the current interrupt sources ASAP. */
34720 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34721 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34722
34723 if (netif_msg_intr(lp))
34724 printk(KERN_DEBUG
34725 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34726 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34727 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34728
34729 /* Log misc errors. */
34730 if (csr0 & 0x4000)
34731 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34732 if (napi_schedule_prep(&lp->napi)) {
34733 u16 val;
34734 /* set interrupt masks */
34735 - val = lp->a.read_csr(ioaddr, CSR3);
34736 + val = lp->a->read_csr(ioaddr, CSR3);
34737 val |= 0x5f00;
34738 - lp->a.write_csr(ioaddr, CSR3, val);
34739 + lp->a->write_csr(ioaddr, CSR3, val);
34740
34741 __napi_schedule(&lp->napi);
34742 break;
34743 }
34744 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34745 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34746 }
34747
34748 if (netif_msg_intr(lp))
34749 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34750 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34751 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34752
34753 spin_unlock(&lp->lock);
34754
34755 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34756
34757 spin_lock_irqsave(&lp->lock, flags);
34758
34759 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34760 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34761
34762 if (netif_msg_ifdown(lp))
34763 printk(KERN_DEBUG
34764 "%s: Shutting down ethercard, status was %2.2x.\n",
34765 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34766 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34767
34768 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34769 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34770 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34771
34772 /*
34773 * Switch back to 16bit mode to avoid problems with dumb
34774 * DOS packet driver after a warm reboot
34775 */
34776 - lp->a.write_bcr(ioaddr, 20, 4);
34777 + lp->a->write_bcr(ioaddr, 20, 4);
34778
34779 spin_unlock_irqrestore(&lp->lock, flags);
34780
34781 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34782 unsigned long flags;
34783
34784 spin_lock_irqsave(&lp->lock, flags);
34785 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34786 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34787 spin_unlock_irqrestore(&lp->lock, flags);
34788
34789 return &dev->stats;
34790 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34791 if (dev->flags & IFF_ALLMULTI) {
34792 ib->filter[0] = cpu_to_le32(~0U);
34793 ib->filter[1] = cpu_to_le32(~0U);
34794 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34795 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34796 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34797 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34798 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34799 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34800 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34801 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34802 return;
34803 }
34804 /* clear the multicast filter */
34805 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34806 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34807 }
34808 for (i = 0; i < 4; i++)
34809 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34810 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34811 le16_to_cpu(mcast_table[i]));
34812 return;
34813 }
34814 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34815
34816 spin_lock_irqsave(&lp->lock, flags);
34817 suspended = pcnet32_suspend(dev, &flags, 0);
34818 - csr15 = lp->a.read_csr(ioaddr, CSR15);
34819 + csr15 = lp->a->read_csr(ioaddr, CSR15);
34820 if (dev->flags & IFF_PROMISC) {
34821 /* Log any net taps. */
34822 if (netif_msg_hw(lp))
34823 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34824 lp->init_block->mode =
34825 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34826 7);
34827 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34828 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34829 } else {
34830 lp->init_block->mode =
34831 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34832 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34833 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34834 pcnet32_load_multicast(dev);
34835 }
34836
34837 if (suspended) {
34838 int csr5;
34839 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34840 - csr5 = lp->a.read_csr(ioaddr, CSR5);
34841 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34842 + csr5 = lp->a->read_csr(ioaddr, CSR5);
34843 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34844 } else {
34845 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34846 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34847 pcnet32_restart(dev, CSR0_NORMAL);
34848 netif_wake_queue(dev);
34849 }
34850 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34851 if (!lp->mii)
34852 return 0;
34853
34854 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34855 - val_out = lp->a.read_bcr(ioaddr, 34);
34856 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34857 + val_out = lp->a->read_bcr(ioaddr, 34);
34858
34859 return val_out;
34860 }
34861 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34862 if (!lp->mii)
34863 return;
34864
34865 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34866 - lp->a.write_bcr(ioaddr, 34, val);
34867 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34868 + lp->a->write_bcr(ioaddr, 34, val);
34869 }
34870
34871 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34872 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34873 curr_link = mii_link_ok(&lp->mii_if);
34874 } else {
34875 ulong ioaddr = dev->base_addr; /* card base I/O address */
34876 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34877 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34878 }
34879 if (!curr_link) {
34880 if (prev_link || verbose) {
34881 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34882 (ecmd.duplex ==
34883 DUPLEX_FULL) ? "full" : "half");
34884 }
34885 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34886 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34887 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34888 if (lp->mii_if.full_duplex)
34889 bcr9 |= (1 << 0);
34890 else
34891 bcr9 &= ~(1 << 0);
34892 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34893 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34894 }
34895 } else {
34896 if (netif_msg_link(lp))
34897 diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34898 --- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34899 +++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34900 @@ -95,6 +95,7 @@
34901 #define CHIPREV_ID_5750_A0 0x4000
34902 #define CHIPREV_ID_5750_A1 0x4001
34903 #define CHIPREV_ID_5750_A3 0x4003
34904 +#define CHIPREV_ID_5750_C1 0x4201
34905 #define CHIPREV_ID_5750_C2 0x4202
34906 #define CHIPREV_ID_5752_A0_HW 0x5000
34907 #define CHIPREV_ID_5752_A0 0x6000
34908 diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34909 --- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34910 +++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34911 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34912
34913 static int __init abyss_init (void)
34914 {
34915 - abyss_netdev_ops = tms380tr_netdev_ops;
34916 + pax_open_kernel();
34917 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34918
34919 - abyss_netdev_ops.ndo_open = abyss_open;
34920 - abyss_netdev_ops.ndo_stop = abyss_close;
34921 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34922 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34923 + pax_close_kernel();
34924
34925 return pci_register_driver(&abyss_driver);
34926 }
34927 diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34928 --- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34929 +++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34930 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34931
34932 static int __init madgemc_init (void)
34933 {
34934 - madgemc_netdev_ops = tms380tr_netdev_ops;
34935 - madgemc_netdev_ops.ndo_open = madgemc_open;
34936 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34937 + pax_open_kernel();
34938 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34939 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34940 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34941 + pax_close_kernel();
34942
34943 return mca_register_driver (&madgemc_driver);
34944 }
34945 diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34946 --- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34947 +++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34948 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34949 struct platform_device *pdev;
34950 int i, num = 0, err = 0;
34951
34952 - proteon_netdev_ops = tms380tr_netdev_ops;
34953 - proteon_netdev_ops.ndo_open = proteon_open;
34954 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34955 + pax_open_kernel();
34956 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34957 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34958 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34959 + pax_close_kernel();
34960
34961 err = platform_driver_register(&proteon_driver);
34962 if (err)
34963 diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34964 --- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34965 +++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34966 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34967 struct platform_device *pdev;
34968 int i, num = 0, err = 0;
34969
34970 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34971 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34972 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34973 + pax_open_kernel();
34974 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34975 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34976 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34977 + pax_close_kernel();
34978
34979 err = platform_driver_register(&sk_isa_driver);
34980 if (err)
34981 diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34982 --- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34983 +++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34984 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34985 struct de_srom_info_leaf *il;
34986 void *bufp;
34987
34988 + pax_track_stack();
34989 +
34990 /* download entire eeprom */
34991 for (i = 0; i < DE_EEPROM_WORDS; i++)
34992 ((__le16 *)ee_data)[i] =
34993 diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34994 --- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34995 +++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34996 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34997 for (i=0; i<ETH_ALEN; i++) {
34998 tmp.addr[i] = dev->dev_addr[i];
34999 }
35000 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35001 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35002 break;
35003
35004 case DE4X5_SET_HWADDR: /* Set the hardware address */
35005 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
35006 spin_lock_irqsave(&lp->lock, flags);
35007 memcpy(&statbuf, &lp->pktStats, ioc->len);
35008 spin_unlock_irqrestore(&lp->lock, flags);
35009 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
35010 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35011 return -EFAULT;
35012 break;
35013 }
35014 diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
35015 --- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
35016 +++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
35017 @@ -71,7 +71,7 @@
35018 #include <asm/byteorder.h>
35019 #include <linux/serial_core.h>
35020 #include <linux/serial.h>
35021 -
35022 +#include <asm/local.h>
35023
35024 #define DRIVER_VERSION "1.2"
35025 #define MOD_AUTHOR "Option Wireless"
35026 @@ -258,7 +258,7 @@ struct hso_serial {
35027
35028 /* from usb_serial_port */
35029 struct tty_struct *tty;
35030 - int open_count;
35031 + local_t open_count;
35032 spinlock_t serial_lock;
35033
35034 int (*write_data) (struct hso_serial *serial);
35035 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35036 struct urb *urb;
35037
35038 urb = serial->rx_urb[0];
35039 - if (serial->open_count > 0) {
35040 + if (local_read(&serial->open_count) > 0) {
35041 count = put_rxbuf_data(urb, serial);
35042 if (count == -1)
35043 return;
35044 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35045 DUMP1(urb->transfer_buffer, urb->actual_length);
35046
35047 /* Anyone listening? */
35048 - if (serial->open_count == 0)
35049 + if (local_read(&serial->open_count) == 0)
35050 return;
35051
35052 if (status == 0) {
35053 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35054 spin_unlock_irq(&serial->serial_lock);
35055
35056 /* check for port already opened, if not set the termios */
35057 - serial->open_count++;
35058 - if (serial->open_count == 1) {
35059 + if (local_inc_return(&serial->open_count) == 1) {
35060 tty->low_latency = 1;
35061 serial->rx_state = RX_IDLE;
35062 /* Force default termio settings */
35063 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35064 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35065 if (result) {
35066 hso_stop_serial_device(serial->parent);
35067 - serial->open_count--;
35068 + local_dec(&serial->open_count);
35069 kref_put(&serial->parent->ref, hso_serial_ref_free);
35070 }
35071 } else {
35072 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35073
35074 /* reset the rts and dtr */
35075 /* do the actual close */
35076 - serial->open_count--;
35077 + local_dec(&serial->open_count);
35078
35079 - if (serial->open_count <= 0) {
35080 - serial->open_count = 0;
35081 + if (local_read(&serial->open_count) <= 0) {
35082 + local_set(&serial->open_count, 0);
35083 spin_lock_irq(&serial->serial_lock);
35084 if (serial->tty == tty) {
35085 serial->tty->driver_data = NULL;
35086 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35087
35088 /* the actual setup */
35089 spin_lock_irqsave(&serial->serial_lock, flags);
35090 - if (serial->open_count)
35091 + if (local_read(&serial->open_count))
35092 _hso_serial_set_termios(tty, old);
35093 else
35094 tty->termios = old;
35095 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35096 /* Start all serial ports */
35097 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35098 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35099 - if (dev2ser(serial_table[i])->open_count) {
35100 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35101 result =
35102 hso_start_serial_device(serial_table[i], GFP_NOIO);
35103 hso_kick_transmit(dev2ser(serial_table[i]));
35104 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
35105 --- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35106 +++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35107 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35108 void (*link_down)(struct __vxge_hw_device *devh);
35109 void (*crit_err)(struct __vxge_hw_device *devh,
35110 enum vxge_hw_event type, u64 ext_data);
35111 -};
35112 +} __no_const;
35113
35114 /*
35115 * struct __vxge_hw_blockpool_entry - Block private data structure
35116 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
35117 --- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35118 +++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35119 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35120 struct sk_buff *completed[NR_SKB_COMPLETED];
35121 int more;
35122
35123 + pax_track_stack();
35124 +
35125 do {
35126 more = 0;
35127 skb_ptr = completed;
35128 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35129 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35130 int index;
35131
35132 + pax_track_stack();
35133 +
35134 /*
35135 * Filling
35136 * - itable with bucket numbers
35137 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
35138 --- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35139 +++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35140 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35141 struct vxge_hw_mempool_dma *dma_object,
35142 u32 index,
35143 u32 is_last);
35144 -};
35145 +} __no_const;
35146
35147 void
35148 __vxge_hw_mempool_destroy(
35149 diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
35150 --- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35151 +++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35152 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35153 unsigned char hex[1024],
35154 * phex = hex;
35155
35156 + pax_track_stack();
35157 +
35158 if (len >= (sizeof(hex) / 2))
35159 len = (sizeof(hex) / 2) - 1;
35160
35161 diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
35162 --- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35163 +++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35164 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35165
35166 static int x25_open(struct net_device *dev)
35167 {
35168 - struct lapb_register_struct cb;
35169 + static struct lapb_register_struct cb = {
35170 + .connect_confirmation = x25_connected,
35171 + .connect_indication = x25_connected,
35172 + .disconnect_confirmation = x25_disconnected,
35173 + .disconnect_indication = x25_disconnected,
35174 + .data_indication = x25_data_indication,
35175 + .data_transmit = x25_data_transmit
35176 + };
35177 int result;
35178
35179 - cb.connect_confirmation = x25_connected;
35180 - cb.connect_indication = x25_connected;
35181 - cb.disconnect_confirmation = x25_disconnected;
35182 - cb.disconnect_indication = x25_disconnected;
35183 - cb.data_indication = x25_data_indication;
35184 - cb.data_transmit = x25_data_transmit;
35185 -
35186 result = lapb_register(dev, &cb);
35187 if (result != LAPB_OK)
35188 return result;
35189 diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
35190 --- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35191 +++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35192 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35193 int do_autopm = 1;
35194 DECLARE_COMPLETION_ONSTACK(notif_completion);
35195
35196 + pax_track_stack();
35197 +
35198 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35199 i2400m, ack, ack_size);
35200 BUG_ON(_ack == i2400m->bm_ack_buf);
35201 diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
35202 --- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35203 +++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35204 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35205 BSSListElement * loop_net;
35206 BSSListElement * tmp_net;
35207
35208 + pax_track_stack();
35209 +
35210 /* Blow away current list of scan results */
35211 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35212 list_move_tail (&loop_net->list, &ai->network_free_list);
35213 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35214 WepKeyRid wkr;
35215 int rc;
35216
35217 + pax_track_stack();
35218 +
35219 memset( &mySsid, 0, sizeof( mySsid ) );
35220 kfree (ai->flash);
35221 ai->flash = NULL;
35222 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35223 __le32 *vals = stats.vals;
35224 int len;
35225
35226 + pax_track_stack();
35227 +
35228 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35229 return -ENOMEM;
35230 data = (struct proc_data *)file->private_data;
35231 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35232 /* If doLoseSync is not 1, we won't do a Lose Sync */
35233 int doLoseSync = -1;
35234
35235 + pax_track_stack();
35236 +
35237 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35238 return -ENOMEM;
35239 data = (struct proc_data *)file->private_data;
35240 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35241 int i;
35242 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35243
35244 + pax_track_stack();
35245 +
35246 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35247 if (!qual)
35248 return -ENOMEM;
35249 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35250 CapabilityRid cap_rid;
35251 __le32 *vals = stats_rid.vals;
35252
35253 + pax_track_stack();
35254 +
35255 /* Get stats out of the card */
35256 clear_bit(JOB_WSTATS, &local->jobs);
35257 if (local->power.event) {
35258 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35259 --- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35260 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35261 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35262 unsigned int v;
35263 u64 tsf;
35264
35265 + pax_track_stack();
35266 +
35267 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35268 len += snprintf(buf+len, sizeof(buf)-len,
35269 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35270 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35271 unsigned int len = 0;
35272 unsigned int i;
35273
35274 + pax_track_stack();
35275 +
35276 len += snprintf(buf+len, sizeof(buf)-len,
35277 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35278
35279 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35280 --- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35281 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35282 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35283 char buf[512];
35284 unsigned int len = 0;
35285
35286 + pax_track_stack();
35287 +
35288 len += snprintf(buf + len, sizeof(buf) - len,
35289 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35290 len += snprintf(buf + len, sizeof(buf) - len,
35291 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35292 int i;
35293 u8 addr[ETH_ALEN];
35294
35295 + pax_track_stack();
35296 +
35297 len += snprintf(buf + len, sizeof(buf) - len,
35298 "primary: %s (%s chan=%d ht=%d)\n",
35299 wiphy_name(sc->pri_wiphy->hw->wiphy),
35300 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35301 --- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35302 +++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35303 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
35304 struct b43_debugfs_fops {
35305 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35306 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35307 - struct file_operations fops;
35308 + const struct file_operations fops;
35309 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35310 size_t file_struct_offset;
35311 };
35312 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35313 --- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35314 +++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35315 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
35316 struct b43legacy_debugfs_fops {
35317 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35318 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35319 - struct file_operations fops;
35320 + const struct file_operations fops;
35321 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35322 size_t file_struct_offset;
35323 /* Take wl->irq_lock before calling read/write? */
35324 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35325 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35326 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35327 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35328 int err;
35329 DECLARE_SSID_BUF(ssid);
35330
35331 + pax_track_stack();
35332 +
35333 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35334
35335 if (ssid_len)
35336 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35337 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35338 int err;
35339
35340 + pax_track_stack();
35341 +
35342 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35343 idx, keylen, len);
35344
35345 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35346 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35347 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35348 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35349 unsigned long flags;
35350 DECLARE_SSID_BUF(ssid);
35351
35352 + pax_track_stack();
35353 +
35354 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35355 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35356 print_ssid(ssid, info_element->data, info_element->len),
35357 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35358 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35359 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35360 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35361 },
35362 };
35363
35364 -static struct iwl_ops iwl1000_ops = {
35365 +static const struct iwl_ops iwl1000_ops = {
35366 .ucode = &iwl5000_ucode,
35367 .lib = &iwl1000_lib,
35368 .hcmd = &iwl5000_hcmd,
35369 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35370 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35371 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35372 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35373 */
35374 if (iwl3945_mod_params.disable_hw_scan) {
35375 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35376 - iwl3945_hw_ops.hw_scan = NULL;
35377 + pax_open_kernel();
35378 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35379 + pax_close_kernel();
35380 }
35381
35382
35383 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35384 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35385 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35386 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35387 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35388 };
35389
35390 -static struct iwl_ops iwl3945_ops = {
35391 +static const struct iwl_ops iwl3945_ops = {
35392 .ucode = &iwl3945_ucode,
35393 .lib = &iwl3945_lib,
35394 .hcmd = &iwl3945_hcmd,
35395 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35396 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35397 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35398 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35399 },
35400 };
35401
35402 -static struct iwl_ops iwl4965_ops = {
35403 +static const struct iwl_ops iwl4965_ops = {
35404 .ucode = &iwl4965_ucode,
35405 .lib = &iwl4965_lib,
35406 .hcmd = &iwl4965_hcmd,
35407 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35408 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35409 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35410 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35411 },
35412 };
35413
35414 -struct iwl_ops iwl5000_ops = {
35415 +const struct iwl_ops iwl5000_ops = {
35416 .ucode = &iwl5000_ucode,
35417 .lib = &iwl5000_lib,
35418 .hcmd = &iwl5000_hcmd,
35419 .utils = &iwl5000_hcmd_utils,
35420 };
35421
35422 -static struct iwl_ops iwl5150_ops = {
35423 +static const struct iwl_ops iwl5150_ops = {
35424 .ucode = &iwl5000_ucode,
35425 .lib = &iwl5150_lib,
35426 .hcmd = &iwl5000_hcmd,
35427 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35428 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35429 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35430 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35431 .calc_rssi = iwl5000_calc_rssi,
35432 };
35433
35434 -static struct iwl_ops iwl6000_ops = {
35435 +static const struct iwl_ops iwl6000_ops = {
35436 .ucode = &iwl5000_ucode,
35437 .lib = &iwl6000_lib,
35438 .hcmd = &iwl5000_hcmd,
35439 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35440 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35441 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35442 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35443 if (iwl_debug_level & IWL_DL_INFO)
35444 dev_printk(KERN_DEBUG, &(pdev->dev),
35445 "Disabling hw_scan\n");
35446 - iwl_hw_ops.hw_scan = NULL;
35447 + pax_open_kernel();
35448 + *(void **)&iwl_hw_ops.hw_scan = NULL;
35449 + pax_close_kernel();
35450 }
35451
35452 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35453 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35454 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35455 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35456 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35457 u8 active_index = 0;
35458 s32 tpt = 0;
35459
35460 + pax_track_stack();
35461 +
35462 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35463
35464 if (!ieee80211_is_data(hdr->frame_control) ||
35465 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35466 u8 valid_tx_ant = 0;
35467 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35468
35469 + pax_track_stack();
35470 +
35471 /* Override starting rate (index 0) if needed for debug purposes */
35472 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35473
35474 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35475 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35476 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35477 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35478 int pos = 0;
35479 const size_t bufsz = sizeof(buf);
35480
35481 + pax_track_stack();
35482 +
35483 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35484 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35485 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35486 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35487 const size_t bufsz = sizeof(buf);
35488 ssize_t ret;
35489
35490 + pax_track_stack();
35491 +
35492 for (i = 0; i < AC_NUM; i++) {
35493 pos += scnprintf(buf + pos, bufsz - pos,
35494 "\tcw_min\tcw_max\taifsn\ttxop\n");
35495 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35496 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35497 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35498 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35499 #endif
35500
35501 #else
35502 -#define IWL_DEBUG(__priv, level, fmt, args...)
35503 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35504 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35505 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35506 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35507 void *p, u32 len)
35508 {}
35509 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35510 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35511 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35512 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
35513
35514 /* shared structures from iwl-5000.c */
35515 extern struct iwl_mod_params iwl50_mod_params;
35516 -extern struct iwl_ops iwl5000_ops;
35517 +extern const struct iwl_ops iwl5000_ops;
35518 extern struct iwl_ucode_ops iwl5000_ucode;
35519 extern struct iwl_lib_ops iwl5000_lib;
35520 extern struct iwl_hcmd_ops iwl5000_hcmd;
35521 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35522 --- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35523 +++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35524 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35525 int buf_len = 512;
35526 size_t len = 0;
35527
35528 + pax_track_stack();
35529 +
35530 if (*ppos != 0)
35531 return 0;
35532 if (count < sizeof(buf))
35533 diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35534 --- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35535 +++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35536 @@ -708,7 +708,7 @@ out_unlock:
35537 struct lbs_debugfs_files {
35538 const char *name;
35539 int perm;
35540 - struct file_operations fops;
35541 + const struct file_operations fops;
35542 };
35543
35544 static const struct lbs_debugfs_files debugfs_files[] = {
35545 diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35546 --- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35547 +++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35548 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35549
35550 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35551
35552 - if (rts_threshold < 0 || rts_threshold > 2347)
35553 + if (rts_threshold > 2347)
35554 rts_threshold = 2347;
35555
35556 tmp = cpu_to_le32(rts_threshold);
35557 diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35558 --- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35559 +++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35560 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35561 if (cookie == NO_COOKIE)
35562 offset = pc;
35563 if (cookie == INVALID_COOKIE) {
35564 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35565 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35566 offset = pc;
35567 }
35568 if (cookie != last_cookie) {
35569 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35570 /* add userspace sample */
35571
35572 if (!mm) {
35573 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35574 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35575 return 0;
35576 }
35577
35578 cookie = lookup_dcookie(mm, s->eip, &offset);
35579
35580 if (cookie == INVALID_COOKIE) {
35581 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35582 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35583 return 0;
35584 }
35585
35586 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35587 /* ignore backtraces if failed to add a sample */
35588 if (state == sb_bt_start) {
35589 state = sb_bt_ignore;
35590 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35591 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35592 }
35593 }
35594 release_mm(mm);
35595 diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35596 --- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35597 +++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35598 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35599 }
35600
35601 if (buffer_pos == buffer_size) {
35602 - atomic_inc(&oprofile_stats.event_lost_overflow);
35603 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35604 return;
35605 }
35606
35607 diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35608 --- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35609 +++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35610 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35611 if (oprofile_ops.switch_events())
35612 return;
35613
35614 - atomic_inc(&oprofile_stats.multiplex_counter);
35615 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35616 start_switch_worker();
35617 }
35618
35619 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35620 --- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35621 +++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35622 @@ -187,7 +187,7 @@ static const struct file_operations atom
35623
35624
35625 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35626 - char const *name, atomic_t *val)
35627 + char const *name, atomic_unchecked_t *val)
35628 {
35629 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35630 &atomic_ro_fops, 0444);
35631 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35632 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35633 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35634 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35635 cpu_buf->sample_invalid_eip = 0;
35636 }
35637
35638 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35639 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35640 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35641 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35642 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35643 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35644 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35645 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35646 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35647 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35648 }
35649
35650
35651 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35652 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35653 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35654 @@ -13,11 +13,11 @@
35655 #include <asm/atomic.h>
35656
35657 struct oprofile_stat_struct {
35658 - atomic_t sample_lost_no_mm;
35659 - atomic_t sample_lost_no_mapping;
35660 - atomic_t bt_lost_no_mapping;
35661 - atomic_t event_lost_overflow;
35662 - atomic_t multiplex_counter;
35663 + atomic_unchecked_t sample_lost_no_mm;
35664 + atomic_unchecked_t sample_lost_no_mapping;
35665 + atomic_unchecked_t bt_lost_no_mapping;
35666 + atomic_unchecked_t event_lost_overflow;
35667 + atomic_unchecked_t multiplex_counter;
35668 };
35669
35670 extern struct oprofile_stat_struct oprofile_stats;
35671 diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35672 --- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35673 +++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35674 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35675 return ret;
35676 }
35677
35678 -static struct sysfs_ops pdcspath_attr_ops = {
35679 +static const struct sysfs_ops pdcspath_attr_ops = {
35680 .show = pdcspath_attr_show,
35681 .store = pdcspath_attr_store,
35682 };
35683 diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35684 --- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35685 +++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35686 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35687
35688 *ppos += len;
35689
35690 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35691 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35692 }
35693
35694 #ifdef CONFIG_PARPORT_1284
35695 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35696
35697 *ppos += len;
35698
35699 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35700 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35701 }
35702 #endif /* IEEE1284.3 support. */
35703
35704 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35705 --- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35706 +++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35707 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35708 }
35709
35710
35711 -static struct acpi_dock_ops acpiphp_dock_ops = {
35712 +static const struct acpi_dock_ops acpiphp_dock_ops = {
35713 .handler = handle_hotplug_event_func,
35714 };
35715
35716 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35717 --- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35718 +++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35719 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35720 int (*hardware_test) (struct slot* slot, u32 value);
35721 u8 (*get_power) (struct slot* slot);
35722 int (*set_power) (struct slot* slot, int value);
35723 -};
35724 +} __no_const;
35725
35726 struct cpci_hp_controller {
35727 unsigned int irq;
35728 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35729 --- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35730 +++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35731 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35732
35733 void compaq_nvram_init (void __iomem *rom_start)
35734 {
35735 +
35736 +#ifndef CONFIG_PAX_KERNEXEC
35737 if (rom_start) {
35738 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35739 }
35740 +#endif
35741 +
35742 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35743
35744 /* initialize our int15 lock */
35745 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35746 --- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35747 +++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35748 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35749 }
35750
35751 static struct kobj_type legacy_ktype = {
35752 - .sysfs_ops = &(struct sysfs_ops){
35753 + .sysfs_ops = &(const struct sysfs_ops){
35754 .store = legacy_store, .show = legacy_show
35755 },
35756 .release = &legacy_release,
35757 diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35758 --- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35759 +++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35760 @@ -2643,7 +2643,7 @@ error:
35761 return 0;
35762 }
35763
35764 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35765 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
35766 unsigned long offset, size_t size,
35767 enum dma_data_direction dir,
35768 struct dma_attrs *attrs)
35769 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35770 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35771 }
35772
35773 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35774 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35775 size_t size, enum dma_data_direction dir,
35776 struct dma_attrs *attrs)
35777 {
35778 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35779 }
35780 }
35781
35782 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35783 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
35784 dma_addr_t *dma_handle, gfp_t flags)
35785 {
35786 void *vaddr;
35787 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35788 return NULL;
35789 }
35790
35791 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35792 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35793 dma_addr_t dma_handle)
35794 {
35795 int order;
35796 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35797 free_pages((unsigned long)vaddr, order);
35798 }
35799
35800 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35801 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35802 int nelems, enum dma_data_direction dir,
35803 struct dma_attrs *attrs)
35804 {
35805 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35806 return nelems;
35807 }
35808
35809 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35810 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35811 enum dma_data_direction dir, struct dma_attrs *attrs)
35812 {
35813 int i;
35814 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35815 return nelems;
35816 }
35817
35818 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35819 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35820 {
35821 return !dma_addr;
35822 }
35823
35824 -struct dma_map_ops intel_dma_ops = {
35825 +const struct dma_map_ops intel_dma_ops = {
35826 .alloc_coherent = intel_alloc_coherent,
35827 .free_coherent = intel_free_coherent,
35828 .map_sg = intel_map_sg,
35829 diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35830 --- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35831 +++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35832 @@ -27,9 +27,9 @@
35833 #define MODULE_PARAM_PREFIX "pcie_aspm."
35834
35835 /* Note: those are not register definitions */
35836 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35837 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35838 -#define ASPM_STATE_L1 (4) /* L1 state */
35839 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35840 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35841 +#define ASPM_STATE_L1 (4U) /* L1 state */
35842 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35843 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35844
35845 diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35846 --- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35847 +++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35848 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35849 return ret;
35850 }
35851
35852 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35853 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35854 struct device_attribute *attr,
35855 char *buf)
35856 {
35857 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35858 }
35859
35860 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35861 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35862 struct device_attribute *attr,
35863 char *buf)
35864 {
35865 diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35866 --- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35867 +++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35868 @@ -480,7 +480,16 @@ static const struct file_operations proc
35869 static int __init pci_proc_init(void)
35870 {
35871 struct pci_dev *dev = NULL;
35872 +
35873 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35874 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35875 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35876 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35877 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35878 +#endif
35879 +#else
35880 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35881 +#endif
35882 proc_create("devices", 0, proc_bus_pci_dir,
35883 &proc_bus_pci_dev_operations);
35884 proc_initialized = 1;
35885 diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35886 --- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35887 +++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35888 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35889 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35890 }
35891
35892 -static struct sysfs_ops pci_slot_sysfs_ops = {
35893 +static const struct sysfs_ops pci_slot_sysfs_ops = {
35894 .show = pci_slot_attr_show,
35895 .store = pci_slot_attr_store,
35896 };
35897 diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35898 --- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35899 +++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35900 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35901 return -EFAULT;
35902 }
35903 }
35904 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35905 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35906 if (!buf)
35907 return -ENOMEM;
35908
35909 diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35910 --- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35911 +++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35912 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35913 return 0;
35914 }
35915
35916 -static struct backlight_ops acer_bl_ops = {
35917 +static const struct backlight_ops acer_bl_ops = {
35918 .get_brightness = read_brightness,
35919 .update_status = update_bl_status,
35920 };
35921 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35922 --- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35923 +++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35924 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35925 return 0;
35926 }
35927
35928 -static struct backlight_ops asus_backlight_data = {
35929 +static const struct backlight_ops asus_backlight_data = {
35930 .get_brightness = read_brightness,
35931 .update_status = set_brightness_status,
35932 };
35933 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35934 --- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35935 +++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35936 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35937 */
35938 static int read_brightness(struct backlight_device *bd);
35939 static int update_bl_status(struct backlight_device *bd);
35940 -static struct backlight_ops asusbl_ops = {
35941 +static const struct backlight_ops asusbl_ops = {
35942 .get_brightness = read_brightness,
35943 .update_status = update_bl_status,
35944 };
35945 diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35946 --- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35947 +++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35948 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35949 return set_lcd_level(b->props.brightness);
35950 }
35951
35952 -static struct backlight_ops compalbl_ops = {
35953 +static const struct backlight_ops compalbl_ops = {
35954 .get_brightness = bl_get_brightness,
35955 .update_status = bl_update_status,
35956 };
35957 diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35958 --- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35959 +++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35960 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35961 return buffer.output[1];
35962 }
35963
35964 -static struct backlight_ops dell_ops = {
35965 +static const struct backlight_ops dell_ops = {
35966 .get_brightness = dell_get_intensity,
35967 .update_status = dell_send_intensity,
35968 };
35969 diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35970 --- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35971 +++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35972 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35973 */
35974 static int read_brightness(struct backlight_device *bd);
35975 static int update_bl_status(struct backlight_device *bd);
35976 -static struct backlight_ops eeepcbl_ops = {
35977 +static const struct backlight_ops eeepcbl_ops = {
35978 .get_brightness = read_brightness,
35979 .update_status = update_bl_status,
35980 };
35981 diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35982 --- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35983 +++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35984 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35985 return ret;
35986 }
35987
35988 -static struct backlight_ops fujitsubl_ops = {
35989 +static const struct backlight_ops fujitsubl_ops = {
35990 .get_brightness = bl_get_brightness,
35991 .update_status = bl_update_status,
35992 };
35993 diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35994 --- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35995 +++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35996 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35997 return set_lcd_level(b->props.brightness);
35998 }
35999
36000 -static struct backlight_ops msibl_ops = {
36001 +static const struct backlight_ops msibl_ops = {
36002 .get_brightness = bl_get_brightness,
36003 .update_status = bl_update_status,
36004 };
36005 diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
36006 --- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
36007 +++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
36008 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
36009 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
36010 }
36011
36012 -static struct backlight_ops pcc_backlight_ops = {
36013 +static const struct backlight_ops pcc_backlight_ops = {
36014 .get_brightness = bl_get,
36015 .update_status = bl_set_status,
36016 };
36017 diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
36018 --- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
36019 +++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
36020 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
36021 }
36022
36023 static struct backlight_device *sony_backlight_device;
36024 -static struct backlight_ops sony_backlight_ops = {
36025 +static const struct backlight_ops sony_backlight_ops = {
36026 .update_status = sony_backlight_update_status,
36027 .get_brightness = sony_backlight_get_brightness,
36028 };
36029 diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
36030 --- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
36031 +++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
36032 @@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
36033 return 0;
36034 }
36035
36036 -void static hotkey_mask_warn_incomplete_mask(void)
36037 +static void hotkey_mask_warn_incomplete_mask(void)
36038 {
36039 /* log only what the user can fix... */
36040 const u32 wantedmask = hotkey_driver_mask &
36041 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
36042 BACKLIGHT_UPDATE_HOTKEY);
36043 }
36044
36045 -static struct backlight_ops ibm_backlight_data = {
36046 +static const struct backlight_ops ibm_backlight_data = {
36047 .get_brightness = brightness_get,
36048 .update_status = brightness_update_status,
36049 };
36050 diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
36051 --- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
36052 +++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
36053 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36054 return AE_OK;
36055 }
36056
36057 -static struct backlight_ops toshiba_backlight_data = {
36058 +static const struct backlight_ops toshiba_backlight_data = {
36059 .get_brightness = get_lcd,
36060 .update_status = set_lcd_status,
36061 };
36062 diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
36063 --- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36064 +++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36065 @@ -60,7 +60,7 @@ do { \
36066 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36067 } while(0)
36068
36069 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36070 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36071 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36072
36073 /*
36074 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36075
36076 cpu = get_cpu();
36077 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36078 +
36079 + pax_open_kernel();
36080 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36081 + pax_close_kernel();
36082
36083 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36084 spin_lock_irqsave(&pnp_bios_lock, flags);
36085 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36086 :"memory");
36087 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36088
36089 + pax_open_kernel();
36090 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36091 + pax_close_kernel();
36092 +
36093 put_cpu();
36094
36095 /* If we get here and this is set then the PnP BIOS faulted on us. */
36096 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36097 return status;
36098 }
36099
36100 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36101 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36102 {
36103 int i;
36104
36105 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36106 pnp_bios_callpoint.offset = header->fields.pm16offset;
36107 pnp_bios_callpoint.segment = PNP_CS16;
36108
36109 + pax_open_kernel();
36110 +
36111 for_each_possible_cpu(i) {
36112 struct desc_struct *gdt = get_cpu_gdt_table(i);
36113 if (!gdt)
36114 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36115 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36116 (unsigned long)__va(header->fields.pm16dseg));
36117 }
36118 +
36119 + pax_close_kernel();
36120 }
36121 diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
36122 --- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36123 +++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36124 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36125 return 1;
36126
36127 /* check if the resource is valid */
36128 - if (*irq < 0 || *irq > 15)
36129 + if (*irq > 15)
36130 return 0;
36131
36132 /* check if the resource is reserved */
36133 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36134 return 1;
36135
36136 /* check if the resource is valid */
36137 - if (*dma < 0 || *dma == 4 || *dma > 7)
36138 + if (*dma == 4 || *dma > 7)
36139 return 0;
36140
36141 /* check if the resource is reserved */
36142 diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
36143 --- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36144 +++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36145 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
36146 struct bq27x00_access_methods {
36147 int (*read)(u8 reg, int *rt_value, int b_single,
36148 struct bq27x00_device_info *di);
36149 -};
36150 +} __no_const;
36151
36152 struct bq27x00_device_info {
36153 struct device *dev;
36154 diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
36155 --- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36156 +++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36157 @@ -14,6 +14,7 @@
36158 #include <linux/module.h>
36159 #include <linux/rtc.h>
36160 #include <linux/sched.h>
36161 +#include <linux/grsecurity.h>
36162 #include "rtc-core.h"
36163
36164 static dev_t rtc_devt;
36165 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36166 if (copy_from_user(&tm, uarg, sizeof(tm)))
36167 return -EFAULT;
36168
36169 + gr_log_timechange();
36170 +
36171 return rtc_set_time(rtc, &tm);
36172
36173 case RTC_PIE_ON:
36174 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
36175 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36176 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36177 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36178 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36179 {
36180 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36181 - (long)atomic_long_read(&perf_stats.qdio_int));
36182 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36183 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36184 - (long)atomic_long_read(&perf_stats.pci_int));
36185 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36186 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36187 - (long)atomic_long_read(&perf_stats.thin_int));
36188 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36189 seq_printf(m, "\n");
36190 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36191 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
36192 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36193 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36194 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
36195 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36196 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36197 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
36198 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36199 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36200 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36201 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36202 - (long)atomic_long_read(&perf_stats.thinint_inbound),
36203 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36204 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36205 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36206 seq_printf(m, "\n");
36207 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36208 - (long)atomic_long_read(&perf_stats.siga_in));
36209 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36210 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36211 - (long)atomic_long_read(&perf_stats.siga_out));
36212 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36213 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36214 - (long)atomic_long_read(&perf_stats.siga_sync));
36215 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36216 seq_printf(m, "\n");
36217 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36218 - (long)atomic_long_read(&perf_stats.inbound_handler));
36219 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36220 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36221 - (long)atomic_long_read(&perf_stats.outbound_handler));
36222 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36223 seq_printf(m, "\n");
36224 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36225 - (long)atomic_long_read(&perf_stats.fast_requeue));
36226 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36227 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36228 - (long)atomic_long_read(&perf_stats.outbound_target_full));
36229 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36230 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36231 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36232 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36233 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36234 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
36235 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36236 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36237 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36238 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36239 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36240 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36241 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36242 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36243 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36244 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36245 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36246 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36247 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36248 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36249 seq_printf(m, "\n");
36250 return 0;
36251 }
36252 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36253 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36254 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36255 @@ -13,46 +13,46 @@
36256
36257 struct qdio_perf_stats {
36258 /* interrupt handler calls */
36259 - atomic_long_t qdio_int;
36260 - atomic_long_t pci_int;
36261 - atomic_long_t thin_int;
36262 + atomic_long_unchecked_t qdio_int;
36263 + atomic_long_unchecked_t pci_int;
36264 + atomic_long_unchecked_t thin_int;
36265
36266 /* tasklet runs */
36267 - atomic_long_t tasklet_inbound;
36268 - atomic_long_t tasklet_outbound;
36269 - atomic_long_t tasklet_thinint;
36270 - atomic_long_t tasklet_thinint_loop;
36271 - atomic_long_t thinint_inbound;
36272 - atomic_long_t thinint_inbound_loop;
36273 - atomic_long_t thinint_inbound_loop2;
36274 + atomic_long_unchecked_t tasklet_inbound;
36275 + atomic_long_unchecked_t tasklet_outbound;
36276 + atomic_long_unchecked_t tasklet_thinint;
36277 + atomic_long_unchecked_t tasklet_thinint_loop;
36278 + atomic_long_unchecked_t thinint_inbound;
36279 + atomic_long_unchecked_t thinint_inbound_loop;
36280 + atomic_long_unchecked_t thinint_inbound_loop2;
36281
36282 /* signal adapter calls */
36283 - atomic_long_t siga_out;
36284 - atomic_long_t siga_in;
36285 - atomic_long_t siga_sync;
36286 + atomic_long_unchecked_t siga_out;
36287 + atomic_long_unchecked_t siga_in;
36288 + atomic_long_unchecked_t siga_sync;
36289
36290 /* misc */
36291 - atomic_long_t inbound_handler;
36292 - atomic_long_t outbound_handler;
36293 - atomic_long_t fast_requeue;
36294 - atomic_long_t outbound_target_full;
36295 + atomic_long_unchecked_t inbound_handler;
36296 + atomic_long_unchecked_t outbound_handler;
36297 + atomic_long_unchecked_t fast_requeue;
36298 + atomic_long_unchecked_t outbound_target_full;
36299
36300 /* for debugging */
36301 - atomic_long_t debug_tl_out_timer;
36302 - atomic_long_t debug_stop_polling;
36303 - atomic_long_t debug_eqbs_all;
36304 - atomic_long_t debug_eqbs_incomplete;
36305 - atomic_long_t debug_sqbs_all;
36306 - atomic_long_t debug_sqbs_incomplete;
36307 + atomic_long_unchecked_t debug_tl_out_timer;
36308 + atomic_long_unchecked_t debug_stop_polling;
36309 + atomic_long_unchecked_t debug_eqbs_all;
36310 + atomic_long_unchecked_t debug_eqbs_incomplete;
36311 + atomic_long_unchecked_t debug_sqbs_all;
36312 + atomic_long_unchecked_t debug_sqbs_incomplete;
36313 };
36314
36315 extern struct qdio_perf_stats perf_stats;
36316 extern int qdio_performance_stats;
36317
36318 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
36319 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36320 {
36321 if (qdio_performance_stats)
36322 - atomic_long_inc(count);
36323 + atomic_long_inc_unchecked(count);
36324 }
36325
36326 int qdio_setup_perf_stats(void);
36327 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36328 --- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36329 +++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36330 @@ -471,7 +471,7 @@ struct adapter_ops
36331 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36332 /* Administrative operations */
36333 int (*adapter_comm)(struct aac_dev * dev, int comm);
36334 -};
36335 +} __no_const;
36336
36337 /*
36338 * Define which interrupt handler needs to be installed
36339 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36340 --- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36341 +++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36342 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36343 u32 actual_fibsize64, actual_fibsize = 0;
36344 int i;
36345
36346 + pax_track_stack();
36347
36348 if (dev->in_reset) {
36349 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36350 diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36351 --- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36352 +++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36353 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36354 flash_error_table[i].reason);
36355 }
36356
36357 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36358 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36359 asd_show_update_bios, asd_store_update_bios);
36360
36361 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36362 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36363 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36364 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36365 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
36366 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36367 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36368 u32 *nvecs, u32 *maxvec);
36369 -};
36370 +} __no_const;
36371 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36372
36373 struct bfa_iocfc_s {
36374 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36375 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36376 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36377 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36378 bfa_ioc_disable_cbfn_t disable_cbfn;
36379 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36380 bfa_ioc_reset_cbfn_t reset_cbfn;
36381 -};
36382 +} __no_const;
36383
36384 /**
36385 * Heartbeat failure notification queue element.
36386 diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36387 --- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36388 +++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36389 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36390 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36391 *PrototypeHostAdapter)
36392 {
36393 + pax_track_stack();
36394 +
36395 /*
36396 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36397 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36398 diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36399 --- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36400 +++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36401 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36402 dma_addr_t addr;
36403 ulong flags = 0;
36404
36405 + pax_track_stack();
36406 +
36407 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36408 // get user msg size in u32s
36409 if(get_user(size, &user_msg[0])){
36410 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36411 s32 rcode;
36412 dma_addr_t addr;
36413
36414 + pax_track_stack();
36415 +
36416 memset(msg, 0 , sizeof(msg));
36417 len = scsi_bufflen(cmd);
36418 direction = 0x00000000;
36419 diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36420 --- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36421 +++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36422 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36423 struct hostdata *ha;
36424 char name[16];
36425
36426 + pax_track_stack();
36427 +
36428 sprintf(name, "%s%d", driver_name, j);
36429
36430 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36431 diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36432 --- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36433 +++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36434 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36435 size_t rlen;
36436 size_t dlen;
36437
36438 + pax_track_stack();
36439 +
36440 fiph = (struct fip_header *)skb->data;
36441 sub = fiph->fip_subcode;
36442 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36443 diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36444 --- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36445 +++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36446 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36447 /* Start local port initiatialization */
36448
36449 lp->link_up = 0;
36450 - lp->tt = fnic_transport_template;
36451 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36452
36453 lp->max_retry_count = fnic->config.flogi_retries;
36454 lp->max_rport_retry_count = fnic->config.plogi_retries;
36455 diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36456 --- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36457 +++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36458 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36459 ulong flags;
36460 gdth_ha_str *ha;
36461
36462 + pax_track_stack();
36463 +
36464 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36465 return -EFAULT;
36466 ha = gdth_find_ha(ldrv.ionode);
36467 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36468 gdth_ha_str *ha;
36469 int rval;
36470
36471 + pax_track_stack();
36472 +
36473 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36474 res.number >= MAX_HDRIVES)
36475 return -EFAULT;
36476 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36477 gdth_ha_str *ha;
36478 int rval;
36479
36480 + pax_track_stack();
36481 +
36482 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36483 return -EFAULT;
36484 ha = gdth_find_ha(gen.ionode);
36485 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36486 int i;
36487 gdth_cmd_str gdtcmd;
36488 char cmnd[MAX_COMMAND_SIZE];
36489 +
36490 + pax_track_stack();
36491 +
36492 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36493
36494 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36495 diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36496 --- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36497 +++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36498 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36499 ulong64 paddr;
36500
36501 char cmnd[MAX_COMMAND_SIZE];
36502 +
36503 + pax_track_stack();
36504 +
36505 memset(cmnd, 0xff, 12);
36506 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36507
36508 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36509 gdth_hget_str *phg;
36510 char cmnd[MAX_COMMAND_SIZE];
36511
36512 + pax_track_stack();
36513 +
36514 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36515 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36516 if (!gdtcmd || !estr)
36517 diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36518 --- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36519 +++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36520 @@ -40,7 +40,7 @@
36521 #include "scsi_logging.h"
36522
36523
36524 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36525 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36526
36527
36528 static void scsi_host_cls_release(struct device *dev)
36529 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36530 * subtract one because we increment first then return, but we need to
36531 * know what the next host number was before increment
36532 */
36533 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36534 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36535 shost->dma_channel = 0xff;
36536
36537 /* These three are default values which can be overridden */
36538 diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36539 --- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36540 +++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36541 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36542 return true;
36543 }
36544
36545 -static struct ata_port_operations ipr_sata_ops = {
36546 +static const struct ata_port_operations ipr_sata_ops = {
36547 .phy_reset = ipr_ata_phy_reset,
36548 .hardreset = ipr_sata_reset,
36549 .post_internal_cmd = ipr_ata_post_internal,
36550 diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36551 --- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36552 +++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36553 @@ -1027,7 +1027,7 @@ typedef struct {
36554 int (*intr)(struct ips_ha *);
36555 void (*enableint)(struct ips_ha *);
36556 uint32_t (*statupd)(struct ips_ha *);
36557 -} ips_hw_func_t;
36558 +} __no_const ips_hw_func_t;
36559
36560 typedef struct ips_ha {
36561 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36562 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c
36563 --- linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
36564 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
36565 @@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
36566 struct fc_disc *disc;
36567
36568 if (!lport->tt.disc_start)
36569 - lport->tt.disc_start = fc_disc_start;
36570 + *(void **)&lport->tt.disc_start = fc_disc_start;
36571
36572 if (!lport->tt.disc_stop)
36573 - lport->tt.disc_stop = fc_disc_stop;
36574 + *(void **)&lport->tt.disc_stop = fc_disc_stop;
36575
36576 if (!lport->tt.disc_stop_final)
36577 - lport->tt.disc_stop_final = fc_disc_stop_final;
36578 + *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
36579
36580 if (!lport->tt.disc_recv_req)
36581 - lport->tt.disc_recv_req = fc_disc_recv_req;
36582 + *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
36583
36584 disc = &lport->disc;
36585 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
36586 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c
36587 --- linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
36588 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
36589 @@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
36590 int fc_elsct_init(struct fc_lport *lport)
36591 {
36592 if (!lport->tt.elsct_send)
36593 - lport->tt.elsct_send = fc_elsct_send;
36594 + *(void **)&lport->tt.elsct_send = fc_elsct_send;
36595
36596 return 0;
36597 }
36598 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36599 --- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36600 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
36601 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
36602 * all together if not used XXX
36603 */
36604 struct {
36605 - atomic_t no_free_exch;
36606 - atomic_t no_free_exch_xid;
36607 - atomic_t xid_not_found;
36608 - atomic_t xid_busy;
36609 - atomic_t seq_not_found;
36610 - atomic_t non_bls_resp;
36611 + atomic_unchecked_t no_free_exch;
36612 + atomic_unchecked_t no_free_exch_xid;
36613 + atomic_unchecked_t xid_not_found;
36614 + atomic_unchecked_t xid_busy;
36615 + atomic_unchecked_t seq_not_found;
36616 + atomic_unchecked_t non_bls_resp;
36617 } stats;
36618 };
36619 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36620 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36621 /* allocate memory for exchange */
36622 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36623 if (!ep) {
36624 - atomic_inc(&mp->stats.no_free_exch);
36625 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36626 goto out;
36627 }
36628 memset(ep, 0, sizeof(*ep));
36629 @@ -557,7 +557,7 @@ out:
36630 return ep;
36631 err:
36632 spin_unlock_bh(&pool->lock);
36633 - atomic_inc(&mp->stats.no_free_exch_xid);
36634 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36635 mempool_free(ep, mp->ep_pool);
36636 return NULL;
36637 }
36638 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36639 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36640 ep = fc_exch_find(mp, xid);
36641 if (!ep) {
36642 - atomic_inc(&mp->stats.xid_not_found);
36643 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36644 reject = FC_RJT_OX_ID;
36645 goto out;
36646 }
36647 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36648 ep = fc_exch_find(mp, xid);
36649 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36650 if (ep) {
36651 - atomic_inc(&mp->stats.xid_busy);
36652 + atomic_inc_unchecked(&mp->stats.xid_busy);
36653 reject = FC_RJT_RX_ID;
36654 goto rel;
36655 }
36656 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36657 }
36658 xid = ep->xid; /* get our XID */
36659 } else if (!ep) {
36660 - atomic_inc(&mp->stats.xid_not_found);
36661 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36662 reject = FC_RJT_RX_ID; /* XID not found */
36663 goto out;
36664 }
36665 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36666 } else {
36667 sp = &ep->seq;
36668 if (sp->id != fh->fh_seq_id) {
36669 - atomic_inc(&mp->stats.seq_not_found);
36670 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36671 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36672 goto rel;
36673 }
36674 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36675
36676 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36677 if (!ep) {
36678 - atomic_inc(&mp->stats.xid_not_found);
36679 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36680 goto out;
36681 }
36682 if (ep->esb_stat & ESB_ST_COMPLETE) {
36683 - atomic_inc(&mp->stats.xid_not_found);
36684 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36685 goto out;
36686 }
36687 if (ep->rxid == FC_XID_UNKNOWN)
36688 ep->rxid = ntohs(fh->fh_rx_id);
36689 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36690 - atomic_inc(&mp->stats.xid_not_found);
36691 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36692 goto rel;
36693 }
36694 if (ep->did != ntoh24(fh->fh_s_id) &&
36695 ep->did != FC_FID_FLOGI) {
36696 - atomic_inc(&mp->stats.xid_not_found);
36697 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36698 goto rel;
36699 }
36700 sof = fr_sof(fp);
36701 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36702 } else {
36703 sp = &ep->seq;
36704 if (sp->id != fh->fh_seq_id) {
36705 - atomic_inc(&mp->stats.seq_not_found);
36706 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36707 goto rel;
36708 }
36709 }
36710 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36711 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36712
36713 if (!sp)
36714 - atomic_inc(&mp->stats.xid_not_found);
36715 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36716 else
36717 - atomic_inc(&mp->stats.non_bls_resp);
36718 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36719
36720 fc_frame_free(fp);
36721 }
36722 @@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
36723 int fc_exch_init(struct fc_lport *lp)
36724 {
36725 if (!lp->tt.seq_start_next)
36726 - lp->tt.seq_start_next = fc_seq_start_next;
36727 + *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
36728
36729 if (!lp->tt.exch_seq_send)
36730 - lp->tt.exch_seq_send = fc_exch_seq_send;
36731 + *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
36732
36733 if (!lp->tt.seq_send)
36734 - lp->tt.seq_send = fc_seq_send;
36735 + *(void **)&lp->tt.seq_send = fc_seq_send;
36736
36737 if (!lp->tt.seq_els_rsp_send)
36738 - lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36739 + *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36740
36741 if (!lp->tt.exch_done)
36742 - lp->tt.exch_done = fc_exch_done;
36743 + *(void **)&lp->tt.exch_done = fc_exch_done;
36744
36745 if (!lp->tt.exch_mgr_reset)
36746 - lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36747 + *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36748
36749 if (!lp->tt.seq_exch_abort)
36750 - lp->tt.seq_exch_abort = fc_seq_exch_abort;
36751 + *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
36752
36753 /*
36754 * Initialize fc_cpu_mask and fc_cpu_order. The
36755 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c
36756 --- linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
36757 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
36758 @@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
36759 struct fc_fcp_internal *si;
36760
36761 if (!lp->tt.fcp_cmd_send)
36762 - lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36763 + *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36764
36765 if (!lp->tt.fcp_cleanup)
36766 - lp->tt.fcp_cleanup = fc_fcp_cleanup;
36767 + *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
36768
36769 if (!lp->tt.fcp_abort_io)
36770 - lp->tt.fcp_abort_io = fc_fcp_abort_io;
36771 + *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
36772
36773 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
36774 if (!si)
36775 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c
36776 --- linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
36777 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
36778 @@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
36779 mutex_lock(&lport->lp_mutex);
36780 lport->state = LPORT_ST_DISABLED;
36781 lport->link_up = 0;
36782 - lport->tt.frame_send = fc_frame_drop;
36783 + *(void **)&lport->tt.frame_send = fc_frame_drop;
36784 mutex_unlock(&lport->lp_mutex);
36785
36786 lport->tt.fcp_abort_io(lport);
36787 @@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
36788 int fc_lport_init(struct fc_lport *lport)
36789 {
36790 if (!lport->tt.lport_recv)
36791 - lport->tt.lport_recv = fc_lport_recv_req;
36792 + *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
36793
36794 if (!lport->tt.lport_reset)
36795 - lport->tt.lport_reset = fc_lport_reset;
36796 + *(void **)&lport->tt.lport_reset = fc_lport_reset;
36797
36798 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
36799 fc_host_node_name(lport->host) = lport->wwnn;
36800 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c
36801 --- linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
36802 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
36803 @@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
36804 int fc_rport_init(struct fc_lport *lport)
36805 {
36806 if (!lport->tt.rport_lookup)
36807 - lport->tt.rport_lookup = fc_rport_lookup;
36808 + *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
36809
36810 if (!lport->tt.rport_create)
36811 - lport->tt.rport_create = fc_rport_create;
36812 + *(void **)&lport->tt.rport_create = fc_rport_create;
36813
36814 if (!lport->tt.rport_login)
36815 - lport->tt.rport_login = fc_rport_login;
36816 + *(void **)&lport->tt.rport_login = fc_rport_login;
36817
36818 if (!lport->tt.rport_logoff)
36819 - lport->tt.rport_logoff = fc_rport_logoff;
36820 + *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
36821
36822 if (!lport->tt.rport_recv_req)
36823 - lport->tt.rport_recv_req = fc_rport_recv_req;
36824 + *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
36825
36826 if (!lport->tt.rport_flush_queue)
36827 - lport->tt.rport_flush_queue = fc_rport_flush_queue;
36828 + *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
36829
36830 if (!lport->tt.rport_destroy)
36831 - lport->tt.rport_destroy = fc_rport_destroy;
36832 + *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
36833
36834 return 0;
36835 }
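
The libfc hunks above (fc_exch.c, fc_fcp.c, fc_lport.c, fc_rport.c) all rewrite "lp->tt.<op> = <fn>" as "*(void **)&lp->tt.<op> = <fn>". With the constification applied elsewhere in this patch, the template's function-pointer members are treated as const, so a direct assignment would no longer type-check; storing through a void ** view of the member keeps the one-time default initialisation compiling. A minimal user-space sketch of the idiom, with made-up names (fc_template, default_send) standing in for the kernel structures:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a libfc-style template: in the patched tree members like
 * these end up const-qualified, so a plain "tt->send = fn;" is rejected
 * by the compiler. */
struct fc_template {
	int (* const send)(int);
	int (* const recv)(int);
};

static int default_send(int x) { return x + 1; }
static int default_recv(int x) { return x * 2; }

/* Mirrors fc_fcp_init()/fc_rport_init(): fill in any hook the caller left
 * unset by writing through a void ** view of the member.  Converting a
 * function pointer to void * and writing past the const qualifier are
 * GCC-isms the kernel relies on, not portable ISO C. */
static void fc_template_init(struct fc_template *tt)
{
	if (!tt->send)
		*(void **)&tt->send = (void *)default_send;
	if (!tt->recv)
		*(void **)&tt->recv = (void *)default_recv;
}

int main(void)
{
	struct fc_template *tt = calloc(1, sizeof(*tt));

	if (!tt)
		return 1;
	fc_template_init(tt);
	printf("send(1)=%d recv(3)=%d\n", tt->send(1), tt->recv(3));
	free(tt);
	return 0;
}

The cast only defeats the compile-time qualifier; when the object additionally lives in read-only memory the write also has to be bracketed by pax_open_kernel()/pax_close_kernel(), as in the lpfc_init.c hunk further down.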
36836 diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36837 --- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36838 +++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36839 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36840 }
36841 }
36842
36843 -static struct ata_port_operations sas_sata_ops = {
36844 +static const struct ata_port_operations sas_sata_ops = {
36845 .phy_reset = sas_ata_phy_reset,
36846 .post_internal_cmd = sas_ata_post_internal,
36847 .qc_defer = ata_std_qc_defer,
36848 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36849 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36850 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36851 @@ -124,7 +124,7 @@ struct lpfc_debug {
36852 int len;
36853 };
36854
36855 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36856 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36857 static unsigned long lpfc_debugfs_start_time = 0L;
36858
36859 /**
36860 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36861 lpfc_debugfs_enable = 0;
36862
36863 len = 0;
36864 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36865 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36866 (lpfc_debugfs_max_disc_trc - 1);
36867 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36868 dtp = vport->disc_trc + i;
36869 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36870 lpfc_debugfs_enable = 0;
36871
36872 len = 0;
36873 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36874 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36875 (lpfc_debugfs_max_slow_ring_trc - 1);
36876 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36877 dtp = phba->slow_ring_trc + i;
36878 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36879 uint32_t *ptr;
36880 char buffer[1024];
36881
36882 + pax_track_stack();
36883 +
36884 off = 0;
36885 spin_lock_irq(&phba->hbalock);
36886
36887 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36888 !vport || !vport->disc_trc)
36889 return;
36890
36891 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36892 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36893 (lpfc_debugfs_max_disc_trc - 1);
36894 dtp = vport->disc_trc + index;
36895 dtp->fmt = fmt;
36896 dtp->data1 = data1;
36897 dtp->data2 = data2;
36898 dtp->data3 = data3;
36899 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36900 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36901 dtp->jif = jiffies;
36902 #endif
36903 return;
36904 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36905 !phba || !phba->slow_ring_trc)
36906 return;
36907
36908 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36909 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36910 (lpfc_debugfs_max_slow_ring_trc - 1);
36911 dtp = phba->slow_ring_trc + index;
36912 dtp->fmt = fmt;
36913 dtp->data1 = data1;
36914 dtp->data2 = data2;
36915 dtp->data3 = data3;
36916 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36917 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36918 dtp->jif = jiffies;
36919 #endif
36920 return;
36921 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36922 "slow_ring buffer\n");
36923 goto debug_failed;
36924 }
36925 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36926 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36927 memset(phba->slow_ring_trc, 0,
36928 (sizeof(struct lpfc_debugfs_trc) *
36929 lpfc_debugfs_max_slow_ring_trc));
36930 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36931 "buffer\n");
36932 goto debug_failed;
36933 }
36934 - atomic_set(&vport->disc_trc_cnt, 0);
36935 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36936
36937 snprintf(name, sizeof(name), "discovery_trace");
36938 vport->debug_disc_trc =
36939 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36940 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36941 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36942 @@ -400,7 +400,7 @@ struct lpfc_vport {
36943 struct dentry *debug_nodelist;
36944 struct dentry *vport_debugfs_root;
36945 struct lpfc_debugfs_trc *disc_trc;
36946 - atomic_t disc_trc_cnt;
36947 + atomic_unchecked_t disc_trc_cnt;
36948 #endif
36949 uint8_t stat_data_enabled;
36950 uint8_t stat_data_blocked;
36951 @@ -725,8 +725,8 @@ struct lpfc_hba {
36952 struct timer_list fabric_block_timer;
36953 unsigned long bit_flags;
36954 #define FABRIC_COMANDS_BLOCKED 0
36955 - atomic_t num_rsrc_err;
36956 - atomic_t num_cmd_success;
36957 + atomic_unchecked_t num_rsrc_err;
36958 + atomic_unchecked_t num_cmd_success;
36959 unsigned long last_rsrc_error_time;
36960 unsigned long last_ramp_down_time;
36961 unsigned long last_ramp_up_time;
36962 @@ -740,7 +740,7 @@ struct lpfc_hba {
36963 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36964 struct dentry *debug_slow_ring_trc;
36965 struct lpfc_debugfs_trc *slow_ring_trc;
36966 - atomic_t slow_ring_trc_cnt;
36967 + atomic_unchecked_t slow_ring_trc_cnt;
36968 #endif
36969
36970 /* Used for deferred freeing of ELS data buffers */
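
The lpfc hunks above move trace indices and I/O statistics (disc_trc_cnt, slow_ring_trc_cnt, num_rsrc_err, num_cmd_success) from atomic_t to atomic_unchecked_t and switch their accessors to the _unchecked variants. In this patch series ordinary atomic_t arithmetic gains overflow detection aimed at reference counts, so counters that are expected to wrap have to opt out via the unchecked type. A rough user-space model of the distinction; checked_t, unchecked_t and their helpers are illustrative, not the kernel's definitions:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Very rough model of the two counter flavours distinguished above.  In the
 * patched kernel the "checked" behaviour belongs to plain atomic_t (an
 * overflow is trapped rather than wrapped) and atomic_unchecked_t opts a
 * counter out of that protection. */
typedef struct { int counter; } checked_t;
typedef struct { int counter; } unchecked_t;

/* Checked: refuse to go past INT_MAX, the way a protected refcount
 * increment would be refused and reported. */
static bool checked_inc(checked_t *v)
{
	if (v->counter == INT_MAX)
		return false;		/* would overflow */
	v->counter++;
	return true;
}

/* Unchecked: wrapping is expected and harmless, which is all a trace index
 * or an I/O statistic needs.  The wrap is done in unsigned arithmetic and
 * converted back (the usual two's-complement result on Linux targets). */
static void unchecked_inc(unchecked_t *v)
{
	v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
	checked_t ref = { INT_MAX };
	unchecked_t stat = { INT_MAX };

	printf("checked inc at INT_MAX: %s\n",
	       checked_inc(&ref) ? "ok" : "rejected");
	unchecked_inc(&stat);
	printf("unchecked inc at INT_MAX wraps to %d\n", stat.counter);
	return 0;
}

The same conversion repeats through the rest of this patch (pmcraid, qla4xxx, the SCSI midlayer iorequest/iodone/ioerr counters, the transport classes, and several staging drivers) wherever a counter's only job is to wrap or to hand out sequence numbers.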
36971 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36972 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36973 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36974 @@ -8021,8 +8021,10 @@ lpfc_init(void)
36975 printk(LPFC_COPYRIGHT "\n");
36976
36977 if (lpfc_enable_npiv) {
36978 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36979 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36980 + pax_open_kernel();
36981 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36982 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36983 + pax_close_kernel();
36984 }
36985 lpfc_transport_template =
36986 fc_attach_transport(&lpfc_transport_functions);
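
The lpfc_init() hunk above brackets the writes to lpfc_transport_functions with pax_open_kernel()/pax_close_kernel(): the template is kept in read-only memory, so the one-time NPIV setup has to make it briefly writable. A user-space analogue of that bracketing, using mprotect() on an ordinary page as a stand-in for the real helpers (which flip kernel page permissions, not user mappings):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* The "template" lives on a page that stays read-only except around a
 * deliberate update, mirroring the pax_open_kernel()/pax_close_kernel()
 * pattern in the hunk above.  open_rw()/close_rw() are stand-ins, not the
 * kernel helpers. */

struct transport_template {
	int (*vport_create)(int);
	int (*vport_delete)(int);
};

static struct transport_template *tmpl;
static size_t pagesz;

static void open_rw(void)  { mprotect(tmpl, pagesz, PROT_READ | PROT_WRITE); }
static void close_rw(void) { mprotect(tmpl, pagesz, PROT_READ); }

static int my_vport_create(int id) { return id; }
static int my_vport_delete(int id) { return -id; }

int main(void)
{
	pagesz = (size_t)sysconf(_SC_PAGESIZE);
	tmpl = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tmpl == MAP_FAILED)
		return 1;
	memset(tmpl, 0, sizeof(*tmpl));
	close_rw();			/* template is now read-only */

	open_rw();			/* "pax_open_kernel()" */
	tmpl->vport_create = my_vport_create;
	tmpl->vport_delete = my_vport_delete;
	close_rw();			/* "pax_close_kernel()" */

	printf("%d %d\n", tmpl->vport_create(7), tmpl->vport_delete(7));
	return 0;
}

In the kernel hunk the stores additionally go through *(void **)& because the members are const-qualified; the two mechanisms cover the compile-time and the runtime side of the same protection.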
36987 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36988 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36989 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36990 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36991 uint32_t evt_posted;
36992
36993 spin_lock_irqsave(&phba->hbalock, flags);
36994 - atomic_inc(&phba->num_rsrc_err);
36995 + atomic_inc_unchecked(&phba->num_rsrc_err);
36996 phba->last_rsrc_error_time = jiffies;
36997
36998 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36999 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
37000 unsigned long flags;
37001 struct lpfc_hba *phba = vport->phba;
37002 uint32_t evt_posted;
37003 - atomic_inc(&phba->num_cmd_success);
37004 + atomic_inc_unchecked(&phba->num_cmd_success);
37005
37006 if (vport->cfg_lun_queue_depth <= queue_depth)
37007 return;
37008 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37009 int i;
37010 struct lpfc_rport_data *rdata;
37011
37012 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37013 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37014 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37015 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37016
37017 vports = lpfc_create_vport_work_array(phba);
37018 if (vports != NULL)
37019 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37020 }
37021 }
37022 lpfc_destroy_vport_work_array(phba, vports);
37023 - atomic_set(&phba->num_rsrc_err, 0);
37024 - atomic_set(&phba->num_cmd_success, 0);
37025 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37026 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37027 }
37028
37029 /**
37030 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37031 }
37032 }
37033 lpfc_destroy_vport_work_array(phba, vports);
37034 - atomic_set(&phba->num_rsrc_err, 0);
37035 - atomic_set(&phba->num_cmd_success, 0);
37036 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37037 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37038 }
37039
37040 /**
37041 diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
37042 --- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
37043 +++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
37044 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37045 int rval;
37046 int i;
37047
37048 + pax_track_stack();
37049 +
37050 // Allocate memory for the base list of scb for management module.
37051 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37052
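
Hunks like this one and the earlier lpfc_debugfs ones add a bare pax_track_stack() call at the top of functions with unusually large on-stack buffers. Within this patch series the call appears tied to its stack-sanitising feature: it records how deep the current frame reaches so that a later clearing pass covers it. The user-space model below only captures that low-water-mark idea; track_stack() and handle_request() are invented names, not the patch's implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Global low-water mark for the deepest stack address observed, assuming the
 * usual downward-growing stack. */
static uintptr_t lowest_sp = UINTPTR_MAX;

/* Stand-in for pax_track_stack(): remember the deepest stack address seen so
 * far.  Functions with large frames call it explicitly because their locals
 * reach well below the typical depth. */
static void track_stack(void)
{
	char marker;
	uintptr_t sp = (uintptr_t)&marker;

	if (sp < lowest_sp)
		lowest_sp = sp;
}

/* Analogue of megaraid_cmm_register() or lpfc_debugfs_dumpHBASlim_data():
 * a big local buffer, announced to the tracker on entry. */
static unsigned char handle_request(unsigned char fill)
{
	unsigned char buffer[4096];

	track_stack();
	memset(buffer, fill, sizeof(buffer));
	return buffer[sizeof(buffer) - 1];
}

int main(void)
{
	char top;
	unsigned char last = handle_request(0xAA);

	printf("deepest frame reached %zu bytes below main(), last byte 0x%02x\n",
	       (size_t)((uintptr_t)&top - lowest_sp), (unsigned int)last);
	return 0;
}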
37053 diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
37054 --- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
37055 +++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
37056 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37057 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37058 int ret;
37059
37060 + pax_track_stack();
37061 +
37062 or = osd_start_request(od, GFP_KERNEL);
37063 if (!or)
37064 return -ENOMEM;
37065 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
37066 --- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
37067 +++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
37068 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37069 res->scsi_dev = scsi_dev;
37070 scsi_dev->hostdata = res;
37071 res->change_detected = 0;
37072 - atomic_set(&res->read_failures, 0);
37073 - atomic_set(&res->write_failures, 0);
37074 + atomic_set_unchecked(&res->read_failures, 0);
37075 + atomic_set_unchecked(&res->write_failures, 0);
37076 rc = 0;
37077 }
37078 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37079 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37080
37081 /* If this was a SCSI read/write command keep count of errors */
37082 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37083 - atomic_inc(&res->read_failures);
37084 + atomic_inc_unchecked(&res->read_failures);
37085 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37086 - atomic_inc(&res->write_failures);
37087 + atomic_inc_unchecked(&res->write_failures);
37088
37089 if (!RES_IS_GSCSI(res->cfg_entry) &&
37090 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37091 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37092
37093 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37094 /* add resources only after host is added into system */
37095 - if (!atomic_read(&pinstance->expose_resources))
37096 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37097 return;
37098
37099 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37100 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37101 init_waitqueue_head(&pinstance->reset_wait_q);
37102
37103 atomic_set(&pinstance->outstanding_cmds, 0);
37104 - atomic_set(&pinstance->expose_resources, 0);
37105 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37106
37107 INIT_LIST_HEAD(&pinstance->free_res_q);
37108 INIT_LIST_HEAD(&pinstance->used_res_q);
37109 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37110 /* Schedule worker thread to handle CCN and take care of adding and
37111 * removing devices to OS
37112 */
37113 - atomic_set(&pinstance->expose_resources, 1);
37114 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37115 schedule_work(&pinstance->worker_q);
37116 return rc;
37117
37118 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
37119 --- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37120 +++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37121 @@ -690,7 +690,7 @@ struct pmcraid_instance {
37122 atomic_t outstanding_cmds;
37123
37124 /* should add/delete resources to mid-layer now ?*/
37125 - atomic_t expose_resources;
37126 + atomic_unchecked_t expose_resources;
37127
37128 /* Tasklet to handle deferred processing */
37129 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37130 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37131 struct list_head queue; /* link to "to be exposed" resources */
37132 struct pmcraid_config_table_entry cfg_entry;
37133 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37134 - atomic_t read_failures; /* count of failed READ commands */
37135 - atomic_t write_failures; /* count of failed WRITE commands */
37136 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37137 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37138
37139 /* To indicate add/delete/modify during CCN */
37140 u8 change_detected;
37141 diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
37142 --- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37143 +++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37144 @@ -2089,7 +2089,7 @@ struct isp_operations {
37145
37146 int (*get_flash_version) (struct scsi_qla_host *, void *);
37147 int (*start_scsi) (srb_t *);
37148 -};
37149 +} __no_const;
37150
37151 /* MSI-X Support *************************************************************/
37152
37153 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
37154 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37155 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37156 @@ -240,7 +240,7 @@ struct ddb_entry {
37157 atomic_t retry_relogin_timer; /* Min Time between relogins
37158 * (4000 only) */
37159 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37160 - atomic_t relogin_retry_count; /* Num of times relogin has been
37161 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37162 * retried */
37163
37164 uint16_t port;
37165 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
37166 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37167 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37168 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37169 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37170 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37171 atomic_set(&ddb_entry->relogin_timer, 0);
37172 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37173 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37174 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37175 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37176 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37177 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37178 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37179 atomic_set(&ddb_entry->port_down_timer,
37180 ha->port_down_retry_count);
37181 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37182 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37183 atomic_set(&ddb_entry->relogin_timer, 0);
37184 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37185 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37186 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
37187 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37188 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37189 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37190 ddb_entry->fw_ddb_device_state ==
37191 DDB_DS_SESSION_FAILED) {
37192 /* Reset retry relogin timer */
37193 - atomic_inc(&ddb_entry->relogin_retry_count);
37194 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37195 DEBUG2(printk("scsi%ld: index[%d] relogin"
37196 " timed out-retrying"
37197 " relogin (%d)\n",
37198 ha->host_no,
37199 ddb_entry->fw_ddb_index,
37200 - atomic_read(&ddb_entry->
37201 + atomic_read_unchecked(&ddb_entry->
37202 relogin_retry_count))
37203 );
37204 start_dpc++;
37205 diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
37206 --- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37207 +++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37208 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37209 unsigned long timeout;
37210 int rtn = 0;
37211
37212 - atomic_inc(&cmd->device->iorequest_cnt);
37213 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37214
37215 /* check if the device is still usable */
37216 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37217 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
37218 --- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37219 +++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37220 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37221 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37222 unsigned char *cmd = (unsigned char *)scp->cmnd;
37223
37224 + pax_track_stack();
37225 +
37226 if ((errsts = check_readiness(scp, 1, devip)))
37227 return errsts;
37228 memset(arr, 0, sizeof(arr));
37229 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37230 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37231 unsigned char *cmd = (unsigned char *)scp->cmnd;
37232
37233 + pax_track_stack();
37234 +
37235 if ((errsts = check_readiness(scp, 1, devip)))
37236 return errsts;
37237 memset(arr, 0, sizeof(arr));
37238 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
37239 --- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37240 +++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37241 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37242
37243 scsi_init_cmd_errh(cmd);
37244 cmd->result = DID_NO_CONNECT << 16;
37245 - atomic_inc(&cmd->device->iorequest_cnt);
37246 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37247
37248 /*
37249 * SCSI request completion path will do scsi_device_unbusy(),
37250 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37251 */
37252 cmd->serial_number = 0;
37253
37254 - atomic_inc(&cmd->device->iodone_cnt);
37255 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37256 if (cmd->result)
37257 - atomic_inc(&cmd->device->ioerr_cnt);
37258 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37259
37260 disposition = scsi_decide_disposition(cmd);
37261 if (disposition != SUCCESS &&
37262 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
37263 --- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37264 +++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37265 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37266 char *buf) \
37267 { \
37268 struct scsi_device *sdev = to_scsi_device(dev); \
37269 - unsigned long long count = atomic_read(&sdev->field); \
37270 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37271 return snprintf(buf, 20, "0x%llx\n", count); \
37272 } \
37273 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37274 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
37275 --- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37276 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37277 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37278 * Netlink Infrastructure
37279 */
37280
37281 -static atomic_t fc_event_seq;
37282 +static atomic_unchecked_t fc_event_seq;
37283
37284 /**
37285 * fc_get_event_number - Obtain the next sequential FC event number
37286 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37287 u32
37288 fc_get_event_number(void)
37289 {
37290 - return atomic_add_return(1, &fc_event_seq);
37291 + return atomic_add_return_unchecked(1, &fc_event_seq);
37292 }
37293 EXPORT_SYMBOL(fc_get_event_number);
37294
37295 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37296 {
37297 int error;
37298
37299 - atomic_set(&fc_event_seq, 0);
37300 + atomic_set_unchecked(&fc_event_seq, 0);
37301
37302 error = transport_class_register(&fc_host_class);
37303 if (error)
37304 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
37305 --- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37306 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37307 @@ -81,7 +81,7 @@ struct iscsi_internal {
37308 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37309 };
37310
37311 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37312 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37313 static struct workqueue_struct *iscsi_eh_timer_workq;
37314
37315 /*
37316 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37317 int err;
37318
37319 ihost = shost->shost_data;
37320 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37321 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37322
37323 if (id == ISCSI_MAX_TARGET) {
37324 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37325 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37326 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37327 ISCSI_TRANSPORT_VERSION);
37328
37329 - atomic_set(&iscsi_session_nr, 0);
37330 + atomic_set_unchecked(&iscsi_session_nr, 0);
37331
37332 err = class_register(&iscsi_transport_class);
37333 if (err)
37334 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
37335 --- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37336 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37337 @@ -33,7 +33,7 @@
37338 #include "scsi_transport_srp_internal.h"
37339
37340 struct srp_host_attrs {
37341 - atomic_t next_port_id;
37342 + atomic_unchecked_t next_port_id;
37343 };
37344 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37345
37346 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37347 struct Scsi_Host *shost = dev_to_shost(dev);
37348 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37349
37350 - atomic_set(&srp_host->next_port_id, 0);
37351 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37352 return 0;
37353 }
37354
37355 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37356 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37357 rport->roles = ids->roles;
37358
37359 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37360 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37361 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37362
37363 transport_setup_device(&rport->dev);
37364 diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
37365 --- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37366 +++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37367 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37368 const struct file_operations * fops;
37369 };
37370
37371 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37372 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37373 {"allow_dio", &adio_fops},
37374 {"debug", &debug_fops},
37375 {"def_reserved_size", &dressz_fops},
37376 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
37377 {
37378 int k, mask;
37379 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37380 - struct sg_proc_leaf * leaf;
37381 + const struct sg_proc_leaf * leaf;
37382
37383 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37384 if (!sg_proc_sgp)
37385 diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
37386 --- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37387 +++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37388 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37389 int do_iounmap = 0;
37390 int do_disable_device = 1;
37391
37392 + pax_track_stack();
37393 +
37394 memset(&sym_dev, 0, sizeof(sym_dev));
37395 memset(&nvram, 0, sizeof(nvram));
37396 sym_dev.pdev = pdev;
37397 diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37398 --- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37399 +++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37400 @@ -18,7 +18,7 @@
37401
37402 #define MAX_CONFIG_LEN 40
37403
37404 -static struct kgdb_io kgdboc_io_ops;
37405 +static const struct kgdb_io kgdboc_io_ops;
37406
37407 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37408 static int configured = -1;
37409 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37410 module_put(THIS_MODULE);
37411 }
37412
37413 -static struct kgdb_io kgdboc_io_ops = {
37414 +static const struct kgdb_io kgdboc_io_ops = {
37415 .name = "kgdboc",
37416 .read_char = kgdboc_get_char,
37417 .write_char = kgdboc_put_char,
37418 diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37419 --- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37420 +++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37421 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37422 EXPORT_SYMBOL_GPL(spi_sync);
37423
37424 /* portable code must never pass more than 32 bytes */
37425 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37426 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37427
37428 static u8 *buf;
37429
37430 diff -urNp linux-2.6.32.45/drivers/ssb/driver_gige.c linux-2.6.32.45/drivers/ssb/driver_gige.c
37431 --- linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37432 +++ linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37433 @@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37434 dev->pci_controller.io_resource = &dev->io_resource;
37435 dev->pci_controller.mem_resource = &dev->mem_resource;
37436 dev->pci_controller.io_map_base = 0x800;
37437 - dev->pci_ops.read = ssb_gige_pci_read_config;
37438 - dev->pci_ops.write = ssb_gige_pci_write_config;
37439 + *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37440 + *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37441
37442 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37443 dev->io_resource.start = 0x800;
37444 diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37445 --- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37446 +++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37447 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37448 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37449 }
37450
37451 -static struct vm_operations_struct binder_vm_ops = {
37452 +static const struct vm_operations_struct binder_vm_ops = {
37453 .open = binder_vma_open,
37454 .close = binder_vma_close,
37455 };
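
The binder.c hunk above, like dozens of staging hunks that follow (b3dfg, comedi, the dream qdsp5 audio drivers, panel, poch, sep, vme_user, and others), only adds const to an operations table that is initialised statically and never modified afterwards. That moves the table into read-only data, so its function pointers cannot be retargeted at runtime. A small sketch of the pattern; demo_fops and its handlers are stand-ins, not the kernel's file_operations or vm_operations_struct:

#include <stdio.h>

/* An operations table that is only ever initialised statically: declaring it
 * const places it in .rodata, which is the point of the hunks above. */
struct demo_fops {
	int (*open)(const char *name);
	int (*release)(const char *name);
};

static int demo_open(const char *name)
{
	printf("open %s\n", name);
	return 0;
}

static int demo_release(const char *name)
{
	printf("release %s\n", name);
	return 0;
}

static const struct demo_fops binder_like_fops = {
	.open    = demo_open,
	.release = demo_release,
};

int main(void)
{
	/* binder_like_fops.open = NULL;  would now be a compile error */
	binder_like_fops.open("dev0");
	binder_like_fops.release("dev0");
	return 0;
}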
37456 diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37457 --- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37458 +++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37459 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37460 return VM_FAULT_NOPAGE;
37461 }
37462
37463 -static struct vm_operations_struct b3dfg_vm_ops = {
37464 +static const struct vm_operations_struct b3dfg_vm_ops = {
37465 .fault = b3dfg_vma_fault,
37466 };
37467
37468 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37469 return r;
37470 }
37471
37472 -static struct file_operations b3dfg_fops = {
37473 +static const struct file_operations b3dfg_fops = {
37474 .owner = THIS_MODULE,
37475 .open = b3dfg_open,
37476 .release = b3dfg_release,
37477 diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37478 --- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37479 +++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37480 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37481 mutex_unlock(&dev->mutex);
37482 }
37483
37484 -static struct vm_operations_struct comedi_vm_ops = {
37485 +static const struct vm_operations_struct comedi_vm_ops = {
37486 .close = comedi_unmap,
37487 };
37488
37489 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37490 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37491 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37492 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37493 static dev_t adsp_devno;
37494 static struct class *adsp_class;
37495
37496 -static struct file_operations adsp_fops = {
37497 +static const struct file_operations adsp_fops = {
37498 .owner = THIS_MODULE,
37499 .open = adsp_open,
37500 .unlocked_ioctl = adsp_ioctl,
37501 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37502 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37503 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37504 @@ -1022,7 +1022,7 @@ done:
37505 return rc;
37506 }
37507
37508 -static struct file_operations audio_aac_fops = {
37509 +static const struct file_operations audio_aac_fops = {
37510 .owner = THIS_MODULE,
37511 .open = audio_open,
37512 .release = audio_release,
37513 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37514 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37515 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37516 @@ -833,7 +833,7 @@ done:
37517 return rc;
37518 }
37519
37520 -static struct file_operations audio_amrnb_fops = {
37521 +static const struct file_operations audio_amrnb_fops = {
37522 .owner = THIS_MODULE,
37523 .open = audamrnb_open,
37524 .release = audamrnb_release,
37525 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37526 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37527 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37528 @@ -805,7 +805,7 @@ dma_fail:
37529 return rc;
37530 }
37531
37532 -static struct file_operations audio_evrc_fops = {
37533 +static const struct file_operations audio_evrc_fops = {
37534 .owner = THIS_MODULE,
37535 .open = audevrc_open,
37536 .release = audevrc_release,
37537 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37538 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37539 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37540 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37541 return 0;
37542 }
37543
37544 -static struct file_operations audio_fops = {
37545 +static const struct file_operations audio_fops = {
37546 .owner = THIS_MODULE,
37547 .open = audio_in_open,
37548 .release = audio_in_release,
37549 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
37550 .unlocked_ioctl = audio_in_ioctl,
37551 };
37552
37553 -static struct file_operations audpre_fops = {
37554 +static const struct file_operations audpre_fops = {
37555 .owner = THIS_MODULE,
37556 .open = audpre_open,
37557 .unlocked_ioctl = audpre_ioctl,
37558 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37559 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37560 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37561 @@ -941,7 +941,7 @@ done:
37562 return rc;
37563 }
37564
37565 -static struct file_operations audio_mp3_fops = {
37566 +static const struct file_operations audio_mp3_fops = {
37567 .owner = THIS_MODULE,
37568 .open = audio_open,
37569 .release = audio_release,
37570 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37571 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37572 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37573 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37574 return 0;
37575 }
37576
37577 -static struct file_operations audio_fops = {
37578 +static const struct file_operations audio_fops = {
37579 .owner = THIS_MODULE,
37580 .open = audio_open,
37581 .release = audio_release,
37582 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
37583 .unlocked_ioctl = audio_ioctl,
37584 };
37585
37586 -static struct file_operations audpp_fops = {
37587 +static const struct file_operations audpp_fops = {
37588 .owner = THIS_MODULE,
37589 .open = audpp_open,
37590 .unlocked_ioctl = audpp_ioctl,
37591 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37592 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37593 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37594 @@ -816,7 +816,7 @@ err:
37595 return rc;
37596 }
37597
37598 -static struct file_operations audio_qcelp_fops = {
37599 +static const struct file_operations audio_qcelp_fops = {
37600 .owner = THIS_MODULE,
37601 .open = audqcelp_open,
37602 .release = audqcelp_release,
37603 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37604 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37605 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37606 @@ -242,7 +242,7 @@ err:
37607 return rc;
37608 }
37609
37610 -static struct file_operations snd_fops = {
37611 +static const struct file_operations snd_fops = {
37612 .owner = THIS_MODULE,
37613 .open = snd_open,
37614 .release = snd_release,
37615 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37616 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37617 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37618 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37619 return 0;
37620 }
37621
37622 -static struct file_operations qmi_fops = {
37623 +static const struct file_operations qmi_fops = {
37624 .owner = THIS_MODULE,
37625 .read = qmi_read,
37626 .write = qmi_write,
37627 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37628 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37629 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37630 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37631 return rc;
37632 }
37633
37634 -static struct file_operations rpcrouter_server_fops = {
37635 +static const struct file_operations rpcrouter_server_fops = {
37636 .owner = THIS_MODULE,
37637 .open = rpcrouter_open,
37638 .release = rpcrouter_release,
37639 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37640 .unlocked_ioctl = rpcrouter_ioctl,
37641 };
37642
37643 -static struct file_operations rpcrouter_router_fops = {
37644 +static const struct file_operations rpcrouter_router_fops = {
37645 .owner = THIS_MODULE,
37646 .open = rpcrouter_open,
37647 .release = rpcrouter_release,
37648 diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37649 --- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37650 +++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37651 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37652 return 0;
37653 }
37654
37655 -static struct block_device_operations dst_blk_ops = {
37656 +static const struct block_device_operations dst_blk_ops = {
37657 .open = dst_bdev_open,
37658 .release = dst_bdev_release,
37659 .owner = THIS_MODULE,
37660 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37661 n->size = ctl->size;
37662
37663 atomic_set(&n->refcnt, 1);
37664 - atomic_long_set(&n->gen, 0);
37665 + atomic_long_set_unchecked(&n->gen, 0);
37666 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37667
37668 err = dst_node_sysfs_init(n);
37669 diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37670 --- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37671 +++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37672 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37673 t->error = 0;
37674 t->retries = 0;
37675 atomic_set(&t->refcnt, 1);
37676 - t->gen = atomic_long_inc_return(&n->gen);
37677 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
37678
37679 t->enc = bio_data_dir(bio);
37680 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37681 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37682 --- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37683 +++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37684 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37685 struct net_device_stats *stats = &etdev->net_stats;
37686
37687 if (pMpTcb->Flags & fMP_DEST_BROAD)
37688 - atomic_inc(&etdev->Stats.brdcstxmt);
37689 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37690 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37691 - atomic_inc(&etdev->Stats.multixmt);
37692 + atomic_inc_unchecked(&etdev->Stats.multixmt);
37693 else
37694 - atomic_inc(&etdev->Stats.unixmt);
37695 + atomic_inc_unchecked(&etdev->Stats.unixmt);
37696
37697 if (pMpTcb->Packet) {
37698 stats->tx_bytes += pMpTcb->Packet->len;
37699 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37700 --- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37701 +++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37702 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37703 * operations
37704 */
37705 u32 unircv; /* # multicast packets received */
37706 - atomic_t unixmt; /* # multicast packets for Tx */
37707 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37708 u32 multircv; /* # multicast packets received */
37709 - atomic_t multixmt; /* # multicast packets for Tx */
37710 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37711 u32 brdcstrcv; /* # broadcast packets received */
37712 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
37713 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37714 u32 norcvbuf; /* # Rx packets discarded */
37715 u32 noxmtbuf; /* # Tx packets discarded */
37716
37717 diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37718 --- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37719 +++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37720 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37721 return 0;
37722 }
37723
37724 -static struct vm_operations_struct go7007_vm_ops = {
37725 +static const struct vm_operations_struct go7007_vm_ops = {
37726 .open = go7007_vm_open,
37727 .close = go7007_vm_close,
37728 .fault = go7007_vm_fault,
37729 diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37730 --- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37731 +++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37732 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37733 /* The one and only one */
37734 static struct blkvsc_driver_context g_blkvsc_drv;
37735
37736 -static struct block_device_operations block_ops = {
37737 +static const struct block_device_operations block_ops = {
37738 .owner = THIS_MODULE,
37739 .open = blkvsc_open,
37740 .release = blkvsc_release,
37741 diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37742 --- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37743 +++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37744 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37745
37746 DPRINT_ENTER(VMBUS);
37747
37748 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37749 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
37750 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37751 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37752
37753 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37754 ASSERT(msgInfo != NULL);
37755 diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37756 --- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37757 +++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37758 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37759 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37760 u32 outputAddressHi = outputAddress >> 32;
37761 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37762 - volatile void *hypercallPage = gHvContext.HypercallPage;
37763 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37764
37765 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37766 Control, Input, Output);
37767 diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37768 --- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37769 +++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37770 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37771 to_device_context(root_device_obj);
37772 struct device_context *child_device_ctx =
37773 to_device_context(child_device_obj);
37774 - static atomic_t device_num = ATOMIC_INIT(0);
37775 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37776
37777 DPRINT_ENTER(VMBUS_DRV);
37778
37779 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37780
37781 /* Set the device name. Otherwise, device_register() will fail. */
37782 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37783 - atomic_inc_return(&device_num));
37784 + atomic_inc_return_unchecked(&device_num));
37785
37786 /* The new device belongs to this bus */
37787 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37788 diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37789 --- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37790 +++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37791 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37792 struct VMBUS_CONNECTION {
37793 enum VMBUS_CONNECT_STATE ConnectState;
37794
37795 - atomic_t NextGpadlHandle;
37796 + atomic_unchecked_t NextGpadlHandle;
37797
37798 /*
37799 * Represents channel interrupts. Each bit position represents a
37800 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37801 --- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37802 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37803 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37804 * since the RX tasklet also increments it.
37805 */
37806 #ifdef CONFIG_64BIT
37807 - atomic64_add(rx_status.dropped_packets,
37808 - (atomic64_t *)&priv->stats.rx_dropped);
37809 + atomic64_add_unchecked(rx_status.dropped_packets,
37810 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37811 #else
37812 - atomic_add(rx_status.dropped_packets,
37813 - (atomic_t *)&priv->stats.rx_dropped);
37814 + atomic_add_unchecked(rx_status.dropped_packets,
37815 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37816 #endif
37817 }
37818
37819 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37820 --- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37821 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37822 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37823 /* Increment RX stats for virtual ports */
37824 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37825 #ifdef CONFIG_64BIT
37826 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37827 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37828 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37829 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37830 #else
37831 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37832 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37833 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37834 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37835 #endif
37836 }
37837 netif_receive_skb(skb);
37838 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37839 dev->name);
37840 */
37841 #ifdef CONFIG_64BIT
37842 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37843 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37844 #else
37845 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37846 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37847 #endif
37848 dev_kfree_skb_irq(skb);
37849 }
37850 diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37851 --- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37852 +++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37853 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37854 return 0;
37855 }
37856
37857 -static struct file_operations lcd_fops = {
37858 +static const struct file_operations lcd_fops = {
37859 .write = lcd_write,
37860 .open = lcd_open,
37861 .release = lcd_release,
37862 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37863 return 0;
37864 }
37865
37866 -static struct file_operations keypad_fops = {
37867 +static const struct file_operations keypad_fops = {
37868 .read = keypad_read, /* read */
37869 .open = keypad_open, /* open */
37870 .release = keypad_release, /* close */
37871 diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37872 --- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37873 +++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37874 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37875 ATA_BMDMA_SHT(DRV_NAME),
37876 };
37877
37878 -static struct ata_port_operations phison_ops = {
37879 +static const struct ata_port_operations phison_ops = {
37880 .inherits = &ata_bmdma_port_ops,
37881 .prereset = phison_pre_reset,
37882 };
37883 diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37884 --- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37885 +++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37886 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37887 return 0;
37888 }
37889
37890 -static struct file_operations poch_fops = {
37891 +static const struct file_operations poch_fops = {
37892 .owner = THIS_MODULE,
37893 .open = poch_open,
37894 .release = poch_release,
37895 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37896 --- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37897 +++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37898 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37899 mutex_init(&psb->mcache_lock);
37900 psb->mcache_root = RB_ROOT;
37901 psb->mcache_timeout = msecs_to_jiffies(5000);
37902 - atomic_long_set(&psb->mcache_gen, 0);
37903 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37904
37905 psb->trans_max_pages = 100;
37906
37907 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37908 INIT_LIST_HEAD(&psb->crypto_ready_list);
37909 INIT_LIST_HEAD(&psb->crypto_active_list);
37910
37911 - atomic_set(&psb->trans_gen, 1);
37912 + atomic_set_unchecked(&psb->trans_gen, 1);
37913 atomic_long_set(&psb->total_inodes, 0);
37914
37915 mutex_init(&psb->state_lock);
37916 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37917 --- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37918 +++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37919 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37920 m->data = data;
37921 m->start = start;
37922 m->size = size;
37923 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37924 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37925
37926 mutex_lock(&psb->mcache_lock);
37927 err = pohmelfs_mcache_insert(psb, m);
37928 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37929 --- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37930 +++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37931 @@ -570,14 +570,14 @@ struct pohmelfs_config;
37932 struct pohmelfs_sb {
37933 struct rb_root mcache_root;
37934 struct mutex mcache_lock;
37935 - atomic_long_t mcache_gen;
37936 + atomic_long_unchecked_t mcache_gen;
37937 unsigned long mcache_timeout;
37938
37939 unsigned int idx;
37940
37941 unsigned int trans_retries;
37942
37943 - atomic_t trans_gen;
37944 + atomic_unchecked_t trans_gen;
37945
37946 unsigned int crypto_attached_size;
37947 unsigned int crypto_align_size;
37948 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37949 --- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37950 +++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37951 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37952 int err;
37953 struct netfs_cmd *cmd = t->iovec.iov_base;
37954
37955 - t->gen = atomic_inc_return(&psb->trans_gen);
37956 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37957
37958 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37959 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37960 diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37961 --- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37962 +++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37963 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37964 static dev_t sep_devno;
37965
37966 /* the files operations structure of the driver */
37967 -static struct file_operations sep_file_operations = {
37968 +static const struct file_operations sep_file_operations = {
37969 .owner = THIS_MODULE,
37970 .ioctl = sep_ioctl,
37971 .poll = sep_poll,
37972 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37973 --- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37974 +++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37975 @@ -92,7 +92,7 @@ struct vhci_hcd {
37976 unsigned resuming:1;
37977 unsigned long re_timeout;
37978
37979 - atomic_t seqnum;
37980 + atomic_unchecked_t seqnum;
37981
37982 /*
37983 * NOTE:
37984 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37985 --- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37986 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37987 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37988 return;
37989 }
37990
37991 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37992 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37993 if (priv->seqnum == 0xffff)
37994 usbip_uinfo("seqnum max\n");
37995
37996 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37997 return -ENOMEM;
37998 }
37999
38000 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38001 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38002 if (unlink->seqnum == 0xffff)
38003 usbip_uinfo("seqnum max\n");
38004
38005 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
38006 vdev->rhport = rhport;
38007 }
38008
38009 - atomic_set(&vhci->seqnum, 0);
38010 + atomic_set_unchecked(&vhci->seqnum, 0);
38011 spin_lock_init(&vhci->lock);
38012
38013
38014 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
38015 --- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
38016 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
38017 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
38018 usbip_uerr("cannot find a urb of seqnum %u\n",
38019 pdu->base.seqnum);
38020 usbip_uinfo("max seqnum %d\n",
38021 - atomic_read(&the_controller->seqnum));
38022 + atomic_read_unchecked(&the_controller->seqnum));
38023 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38024 return;
38025 }
38026 diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
38027 --- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
38028 +++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
38029 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38030 static int __init vme_user_probe(struct device *, int, int);
38031 static int __exit vme_user_remove(struct device *, int, int);
38032
38033 -static struct file_operations vme_user_fops = {
38034 +static const struct file_operations vme_user_fops = {
38035 .open = vme_user_open,
38036 .release = vme_user_release,
38037 .read = vme_user_read,
38038 diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
38039 --- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
38040 +++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
38041 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38042 bool mContinue;
38043 char *pIn, *pOut;
38044
38045 + pax_track_stack();
38046 +
38047 if (!SCI_Prepare(j))
38048 return 0;
38049
38050 diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
38051 --- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
38052 +++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
38053 @@ -23,6 +23,7 @@
38054 #include <linux/string.h>
38055 #include <linux/kobject.h>
38056 #include <linux/uio_driver.h>
38057 +#include <asm/local.h>
38058
38059 #define UIO_MAX_DEVICES 255
38060
38061 @@ -30,10 +31,10 @@ struct uio_device {
38062 struct module *owner;
38063 struct device *dev;
38064 int minor;
38065 - atomic_t event;
38066 + atomic_unchecked_t event;
38067 struct fasync_struct *async_queue;
38068 wait_queue_head_t wait;
38069 - int vma_count;
38070 + local_t vma_count;
38071 struct uio_info *info;
38072 struct kobject *map_dir;
38073 struct kobject *portio_dir;
38074 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38075 return entry->show(mem, buf);
38076 }
38077
38078 -static struct sysfs_ops map_sysfs_ops = {
38079 +static const struct sysfs_ops map_sysfs_ops = {
38080 .show = map_type_show,
38081 };
38082
38083 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38084 return entry->show(port, buf);
38085 }
38086
38087 -static struct sysfs_ops portio_sysfs_ops = {
38088 +static const struct sysfs_ops portio_sysfs_ops = {
38089 .show = portio_type_show,
38090 };
38091
38092 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38093 struct uio_device *idev = dev_get_drvdata(dev);
38094 if (idev)
38095 return sprintf(buf, "%u\n",
38096 - (unsigned int)atomic_read(&idev->event));
38097 + (unsigned int)atomic_read_unchecked(&idev->event));
38098 else
38099 return -ENODEV;
38100 }
38101 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38102 {
38103 struct uio_device *idev = info->uio_dev;
38104
38105 - atomic_inc(&idev->event);
38106 + atomic_inc_unchecked(&idev->event);
38107 wake_up_interruptible(&idev->wait);
38108 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38109 }
38110 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38111 }
38112
38113 listener->dev = idev;
38114 - listener->event_count = atomic_read(&idev->event);
38115 + listener->event_count = atomic_read_unchecked(&idev->event);
38116 filep->private_data = listener;
38117
38118 if (idev->info->open) {
38119 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38120 return -EIO;
38121
38122 poll_wait(filep, &idev->wait, wait);
38123 - if (listener->event_count != atomic_read(&idev->event))
38124 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38125 return POLLIN | POLLRDNORM;
38126 return 0;
38127 }
38128 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38129 do {
38130 set_current_state(TASK_INTERRUPTIBLE);
38131
38132 - event_count = atomic_read(&idev->event);
38133 + event_count = atomic_read_unchecked(&idev->event);
38134 if (event_count != listener->event_count) {
38135 if (copy_to_user(buf, &event_count, count))
38136 retval = -EFAULT;
38137 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38138 static void uio_vma_open(struct vm_area_struct *vma)
38139 {
38140 struct uio_device *idev = vma->vm_private_data;
38141 - idev->vma_count++;
38142 + local_inc(&idev->vma_count);
38143 }
38144
38145 static void uio_vma_close(struct vm_area_struct *vma)
38146 {
38147 struct uio_device *idev = vma->vm_private_data;
38148 - idev->vma_count--;
38149 + local_dec(&idev->vma_count);
38150 }
38151
38152 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38153 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
38154 idev->owner = owner;
38155 idev->info = info;
38156 init_waitqueue_head(&idev->wait);
38157 - atomic_set(&idev->event, 0);
38158 + atomic_set_unchecked(&idev->event, 0);
38159
38160 ret = uio_get_minor(idev);
38161 if (ret)
38162 diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
38163 --- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38164 +++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38165 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38166 if (printk_ratelimit())
38167 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38168 __func__, vpi, vci);
38169 - atomic_inc(&vcc->stats->rx_err);
38170 + atomic_inc_unchecked(&vcc->stats->rx_err);
38171 return;
38172 }
38173
38174 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38175 if (length > ATM_MAX_AAL5_PDU) {
38176 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38177 __func__, length, vcc);
38178 - atomic_inc(&vcc->stats->rx_err);
38179 + atomic_inc_unchecked(&vcc->stats->rx_err);
38180 goto out;
38181 }
38182
38183 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38184 if (sarb->len < pdu_length) {
38185 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38186 __func__, pdu_length, sarb->len, vcc);
38187 - atomic_inc(&vcc->stats->rx_err);
38188 + atomic_inc_unchecked(&vcc->stats->rx_err);
38189 goto out;
38190 }
38191
38192 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38193 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38194 __func__, vcc);
38195 - atomic_inc(&vcc->stats->rx_err);
38196 + atomic_inc_unchecked(&vcc->stats->rx_err);
38197 goto out;
38198 }
38199
38200 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38201 if (printk_ratelimit())
38202 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38203 __func__, length);
38204 - atomic_inc(&vcc->stats->rx_drop);
38205 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38206 goto out;
38207 }
38208
38209 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38210
38211 vcc->push(vcc, skb);
38212
38213 - atomic_inc(&vcc->stats->rx);
38214 + atomic_inc_unchecked(&vcc->stats->rx);
38215 out:
38216 skb_trim(sarb, 0);
38217 }
38218 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38219 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38220
38221 usbatm_pop(vcc, skb);
38222 - atomic_inc(&vcc->stats->tx);
38223 + atomic_inc_unchecked(&vcc->stats->tx);
38224
38225 skb = skb_dequeue(&instance->sndqueue);
38226 }
38227 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38228 if (!left--)
38229 return sprintf(page,
38230 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38231 - atomic_read(&atm_dev->stats.aal5.tx),
38232 - atomic_read(&atm_dev->stats.aal5.tx_err),
38233 - atomic_read(&atm_dev->stats.aal5.rx),
38234 - atomic_read(&atm_dev->stats.aal5.rx_err),
38235 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38236 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38237 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38238 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38239 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38240 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38241
38242 if (!left--) {
38243 if (instance->disconnected)
38244 diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
38245 --- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38246 +++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38247 @@ -314,7 +314,7 @@ static ssize_t wdm_write
38248 if (r < 0)
38249 goto outnp;
38250
38251 - if (!file->f_flags && O_NONBLOCK)
38252 + if (!(file->f_flags & O_NONBLOCK))
38253 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38254 &desc->flags));
38255 else
38256 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
38257 --- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38258 +++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38259 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38260
38261 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38262
38263 -struct usb_mon_operations *mon_ops;
38264 +const struct usb_mon_operations *mon_ops;
38265
38266 /*
38267 * The registration is unlocked.
38268 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38269 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38270 */
38271
38272 -int usb_mon_register (struct usb_mon_operations *ops)
38273 +int usb_mon_register (const struct usb_mon_operations *ops)
38274 {
38275
38276 if (mon_ops)
38277 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
38278 --- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38279 +++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38280 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38281 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38282
38283 struct usb_mon_operations {
38284 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38285 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38286 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38287 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38288 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38289 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38290 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38291 };
38292
38293 -extern struct usb_mon_operations *mon_ops;
38294 +extern const struct usb_mon_operations *mon_ops;
38295
38296 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38297 {
38298 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38299 (*mon_ops->urb_complete)(bus, urb, status);
38300 }
38301
38302 -int usb_mon_register(struct usb_mon_operations *ops);
38303 +int usb_mon_register(const struct usb_mon_operations *ops);
38304 void usb_mon_deregister(void);
38305
38306 #else
38307 diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
38308 --- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38309 +++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38310 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38311 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38312 if (buf) {
38313 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38314 - if (len > 0) {
38315 - smallbuf = kmalloc(++len, GFP_NOIO);
38316 + if (len++ > 0) {
38317 + smallbuf = kmalloc(len, GFP_NOIO);
38318 if (!smallbuf)
38319 return buf;
38320 memcpy(smallbuf, buf, len);
38321 diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
38322 --- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38323 +++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38324 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38325 return pdata->msgdata[1];
38326 }
38327
38328 -static struct backlight_ops appledisplay_bl_data = {
38329 +static const struct backlight_ops appledisplay_bl_data = {
38330 .get_brightness = appledisplay_bl_get_brightness,
38331 .update_status = appledisplay_bl_update_status,
38332 };
38333 diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
38334 --- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38335 +++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38336 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38337 /*
38338 * Ops
38339 */
38340 -static struct usb_mon_operations mon_ops_0 = {
38341 +static const struct usb_mon_operations mon_ops_0 = {
38342 .urb_submit = mon_submit,
38343 .urb_submit_error = mon_submit_error,
38344 .urb_complete = mon_complete,
38345 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
38346 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38347 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38348 @@ -192,7 +192,7 @@ struct wahc {
38349 struct list_head xfer_delayed_list;
38350 spinlock_t xfer_list_lock;
38351 struct work_struct xfer_work;
38352 - atomic_t xfer_id_count;
38353 + atomic_unchecked_t xfer_id_count;
38354 };
38355
38356
38357 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38358 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38359 spin_lock_init(&wa->xfer_list_lock);
38360 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38361 - atomic_set(&wa->xfer_id_count, 1);
38362 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38363 }
38364
38365 /**
38366 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
38367 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38368 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38369 @@ -293,7 +293,7 @@ out:
38370 */
38371 static void wa_xfer_id_init(struct wa_xfer *xfer)
38372 {
38373 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38374 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38375 }
38376
38377 /*
38378 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38379 --- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38380 +++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38381 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38382 size_t len = skb->len;
38383 size_t used;
38384 ssize_t result;
38385 - struct wlp_nonce enonce, rnonce;
38386 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38387 enum wlp_assc_error assc_err;
38388 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38389 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38390 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38391 --- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38392 +++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38393 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38394 return ret;
38395 }
38396
38397 -static
38398 -struct sysfs_ops wss_sysfs_ops = {
38399 +static const struct sysfs_ops wss_sysfs_ops = {
38400 .show = wlp_wss_attr_show,
38401 .store = wlp_wss_attr_store,
38402 };
38403 diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38404 --- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38405 +++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38406 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38407 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38408 }
38409
38410 -static struct backlight_ops atmel_lcdc_bl_ops = {
38411 +static const struct backlight_ops atmel_lcdc_bl_ops = {
38412 .update_status = atmel_bl_update_status,
38413 .get_brightness = atmel_bl_get_brightness,
38414 };
38415 diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38416 --- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38417 +++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38418 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38419 return bd->props.brightness;
38420 }
38421
38422 -static struct backlight_ops aty128_bl_data = {
38423 +static const struct backlight_ops aty128_bl_data = {
38424 .get_brightness = aty128_bl_get_brightness,
38425 .update_status = aty128_bl_update_status,
38426 };
38427 diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38428 --- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38429 +++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38430 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38431 return bd->props.brightness;
38432 }
38433
38434 -static struct backlight_ops aty_bl_data = {
38435 +static const struct backlight_ops aty_bl_data = {
38436 .get_brightness = aty_bl_get_brightness,
38437 .update_status = aty_bl_update_status,
38438 };
38439 diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38440 --- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38441 +++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38442 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38443 return bd->props.brightness;
38444 }
38445
38446 -static struct backlight_ops radeon_bl_data = {
38447 +static const struct backlight_ops radeon_bl_data = {
38448 .get_brightness = radeon_bl_get_brightness,
38449 .update_status = radeon_bl_update_status,
38450 };
38451 diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38452 --- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38453 +++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38454 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38455 return error ? data->current_brightness : reg_val;
38456 }
38457
38458 -static struct backlight_ops adp5520_bl_ops = {
38459 +static const struct backlight_ops adp5520_bl_ops = {
38460 .update_status = adp5520_bl_update_status,
38461 .get_brightness = adp5520_bl_get_brightness,
38462 };
38463 diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38464 --- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38465 +++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38466 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38467 return 1;
38468 }
38469
38470 -static struct backlight_ops adx_backlight_ops = {
38471 +static const struct backlight_ops adx_backlight_ops = {
38472 .options = 0,
38473 .update_status = adx_backlight_update_status,
38474 .get_brightness = adx_backlight_get_brightness,
38475 diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38476 --- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38477 +++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38478 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38479 return pwm_channel_enable(&pwmbl->pwmc);
38480 }
38481
38482 -static struct backlight_ops atmel_pwm_bl_ops = {
38483 +static const struct backlight_ops atmel_pwm_bl_ops = {
38484 .get_brightness = atmel_pwm_bl_get_intensity,
38485 .update_status = atmel_pwm_bl_set_intensity,
38486 };
38487 diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38488 --- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38489 +++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38490 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38491 * ERR_PTR() or a pointer to the newly allocated device.
38492 */
38493 struct backlight_device *backlight_device_register(const char *name,
38494 - struct device *parent, void *devdata, struct backlight_ops *ops)
38495 + struct device *parent, void *devdata, const struct backlight_ops *ops)
38496 {
38497 struct backlight_device *new_bd;
38498 int rc;
38499 diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38500 --- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38501 +++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38502 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38503 }
38504 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38505
38506 -static struct backlight_ops corgi_bl_ops = {
38507 +static const struct backlight_ops corgi_bl_ops = {
38508 .get_brightness = corgi_bl_get_intensity,
38509 .update_status = corgi_bl_update_status,
38510 };
38511 diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38512 --- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38513 +++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38514 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38515 return intensity;
38516 }
38517
38518 -static struct backlight_ops cr_backlight_ops = {
38519 +static const struct backlight_ops cr_backlight_ops = {
38520 .get_brightness = cr_backlight_get_intensity,
38521 .update_status = cr_backlight_set_intensity,
38522 };
38523 diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38524 --- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38525 +++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38526 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38527 return data->current_brightness;
38528 }
38529
38530 -static struct backlight_ops da903x_backlight_ops = {
38531 +static const struct backlight_ops da903x_backlight_ops = {
38532 .update_status = da903x_backlight_update_status,
38533 .get_brightness = da903x_backlight_get_brightness,
38534 };
38535 diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38536 --- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38537 +++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38538 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38539 }
38540 EXPORT_SYMBOL(corgibl_limit_intensity);
38541
38542 -static struct backlight_ops genericbl_ops = {
38543 +static const struct backlight_ops genericbl_ops = {
38544 .options = BL_CORE_SUSPENDRESUME,
38545 .get_brightness = genericbl_get_intensity,
38546 .update_status = genericbl_send_intensity,
38547 diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38548 --- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38549 +++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38550 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38551 return current_intensity;
38552 }
38553
38554 -static struct backlight_ops hp680bl_ops = {
38555 +static const struct backlight_ops hp680bl_ops = {
38556 .get_brightness = hp680bl_get_intensity,
38557 .update_status = hp680bl_set_intensity,
38558 };
38559 diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38560 --- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38561 +++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38562 @@ -93,7 +93,7 @@ out:
38563 return ret;
38564 }
38565
38566 -static struct backlight_ops jornada_bl_ops = {
38567 +static const struct backlight_ops jornada_bl_ops = {
38568 .get_brightness = jornada_bl_get_brightness,
38569 .update_status = jornada_bl_update_status,
38570 .options = BL_CORE_SUSPENDRESUME,
38571 diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38572 --- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38573 +++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38574 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38575 return kb3886bl_intensity;
38576 }
38577
38578 -static struct backlight_ops kb3886bl_ops = {
38579 +static const struct backlight_ops kb3886bl_ops = {
38580 .get_brightness = kb3886bl_get_intensity,
38581 .update_status = kb3886bl_send_intensity,
38582 };
38583 diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38584 --- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38585 +++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38586 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38587 return current_intensity;
38588 }
38589
38590 -static struct backlight_ops locomobl_data = {
38591 +static const struct backlight_ops locomobl_data = {
38592 .get_brightness = locomolcd_get_intensity,
38593 .update_status = locomolcd_set_intensity,
38594 };
38595 diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38596 --- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38597 +++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38598 @@ -33,7 +33,7 @@ struct dmi_match_data {
38599 unsigned long iostart;
38600 unsigned long iolen;
38601 /* Backlight operations structure. */
38602 - struct backlight_ops backlight_ops;
38603 + const struct backlight_ops backlight_ops;
38604 };
38605
38606 /* Module parameters. */
38607 diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38608 --- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38609 +++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38610 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38611 return bl->current_intensity;
38612 }
38613
38614 -static struct backlight_ops omapbl_ops = {
38615 +static const struct backlight_ops omapbl_ops = {
38616 .get_brightness = omapbl_get_intensity,
38617 .update_status = omapbl_update_status,
38618 };
38619 diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38620 --- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38621 +++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38622 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38623 return intensity - HW_LEVEL_MIN;
38624 }
38625
38626 -static struct backlight_ops progearbl_ops = {
38627 +static const struct backlight_ops progearbl_ops = {
38628 .get_brightness = progearbl_get_intensity,
38629 .update_status = progearbl_set_intensity,
38630 };
38631 diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38632 --- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38633 +++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38634 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38635 return bl->props.brightness;
38636 }
38637
38638 -static struct backlight_ops pwm_backlight_ops = {
38639 +static const struct backlight_ops pwm_backlight_ops = {
38640 .update_status = pwm_backlight_update_status,
38641 .get_brightness = pwm_backlight_get_brightness,
38642 };
38643 diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38644 --- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38645 +++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38646 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38647 return props->brightness;
38648 }
38649
38650 -static struct backlight_ops bl_ops = {
38651 +static const struct backlight_ops bl_ops = {
38652 .get_brightness = tosa_bl_get_brightness,
38653 .update_status = tosa_bl_update_status,
38654 };
38655 diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38656 --- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38657 +++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38658 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38659 return data->current_brightness;
38660 }
38661
38662 -static struct backlight_ops wm831x_backlight_ops = {
38663 +static const struct backlight_ops wm831x_backlight_ops = {
38664 .options = BL_CORE_SUSPENDRESUME,
38665 .update_status = wm831x_backlight_update_status,
38666 .get_brightness = wm831x_backlight_get_brightness,
38667 diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38668 --- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38669 +++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38670 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38671 return 0;
38672 }
38673
38674 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38675 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38676 .get_brightness = bl_get_brightness,
38677 };
38678
38679 diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38680 --- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38681 +++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38682 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38683 return 0;
38684 }
38685
38686 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38687 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38688 .get_brightness = bl_get_brightness,
38689 };
38690
38691 diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38692 --- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38693 +++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38694 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38695 rc = -ENODEV;
38696 goto out;
38697 }
38698 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38699 - !info->fbops->fb_setcmap)) {
38700 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38701 rc = -EINVAL;
38702 goto out1;
38703 }
38704 diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38705 --- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38706 +++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38707 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38708 image->dx += image->width + 8;
38709 }
38710 } else if (rotate == FB_ROTATE_UD) {
38711 - for (x = 0; x < num && image->dx >= 0; x++) {
38712 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38713 info->fbops->fb_imageblit(info, image);
38714 image->dx -= image->width + 8;
38715 }
38716 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38717 image->dy += image->height + 8;
38718 }
38719 } else if (rotate == FB_ROTATE_CCW) {
38720 - for (x = 0; x < num && image->dy >= 0; x++) {
38721 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38722 info->fbops->fb_imageblit(info, image);
38723 image->dy -= image->height + 8;
38724 }
38725 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38726 int flags = info->flags;
38727 int ret = 0;
38728
38729 + pax_track_stack();
38730 +
38731 if (var->activate & FB_ACTIVATE_INV_MODE) {
38732 struct fb_videomode mode1, mode2;
38733
38734 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38735 void __user *argp = (void __user *)arg;
38736 long ret = 0;
38737
38738 + pax_track_stack();
38739 +
38740 switch (cmd) {
38741 case FBIOGET_VSCREENINFO:
38742 if (!lock_fb_info(info))
38743 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38744 return -EFAULT;
38745 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38746 return -EINVAL;
38747 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38748 + if (con2fb.framebuffer >= FB_MAX)
38749 return -EINVAL;
38750 if (!registered_fb[con2fb.framebuffer])
38751 request_module("fb%d", con2fb.framebuffer);
38752 diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38753 --- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38754 +++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38755 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38756 }
38757 }
38758 printk("ringbuffer lockup!!!\n");
38759 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38760 i810_report_error(mmio);
38761 par->dev_flags |= LOCKUP;
38762 info->pixmap.scan_align = 1;
38763 diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38764 --- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38765 +++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38766 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38767 return bd->props.brightness;
38768 }
38769
38770 -static struct backlight_ops nvidia_bl_ops = {
38771 +static const struct backlight_ops nvidia_bl_ops = {
38772 .get_brightness = nvidia_bl_get_brightness,
38773 .update_status = nvidia_bl_update_status,
38774 };
38775 diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38776 --- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38777 +++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38778 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38779 return bd->props.brightness;
38780 }
38781
38782 -static struct backlight_ops riva_bl_ops = {
38783 +static const struct backlight_ops riva_bl_ops = {
38784 .get_brightness = riva_bl_get_brightness,
38785 .update_status = riva_bl_update_status,
38786 };
38787 diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38788 --- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38789 +++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38790 @@ -18,6 +18,7 @@
38791 #include <linux/fb.h>
38792 #include <linux/io.h>
38793 #include <linux/mutex.h>
38794 +#include <linux/moduleloader.h>
38795 #include <video/edid.h>
38796 #include <video/uvesafb.h>
38797 #ifdef CONFIG_X86
38798 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38799 NULL,
38800 };
38801
38802 - return call_usermodehelper(v86d_path, argv, envp, 1);
38803 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38804 }
38805
38806 /*
38807 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38808 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38809 par->pmi_setpal = par->ypan = 0;
38810 } else {
38811 +
38812 +#ifdef CONFIG_PAX_KERNEXEC
38813 +#ifdef CONFIG_MODULES
38814 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38815 +#endif
38816 + if (!par->pmi_code) {
38817 + par->pmi_setpal = par->ypan = 0;
38818 + return 0;
38819 + }
38820 +#endif
38821 +
38822 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38823 + task->t.regs.edi);
38824 +
38825 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38826 + pax_open_kernel();
38827 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38828 + pax_close_kernel();
38829 +
38830 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38831 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38832 +#else
38833 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38834 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38835 +#endif
38836 +
38837 printk(KERN_INFO "uvesafb: protected mode interface info at "
38838 "%04x:%04x\n",
38839 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38840 @@ -1799,6 +1822,11 @@ out:
38841 if (par->vbe_modes)
38842 kfree(par->vbe_modes);
38843
38844 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38845 + if (par->pmi_code)
38846 + module_free_exec(NULL, par->pmi_code);
38847 +#endif
38848 +
38849 framebuffer_release(info);
38850 return err;
38851 }
38852 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38853 kfree(par->vbe_state_orig);
38854 if (par->vbe_state_saved)
38855 kfree(par->vbe_state_saved);
38856 +
38857 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38858 + if (par->pmi_code)
38859 + module_free_exec(NULL, par->pmi_code);
38860 +#endif
38861 +
38862 }
38863
38864 framebuffer_release(info);
38865 diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38866 --- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38867 +++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38868 @@ -9,6 +9,7 @@
38869 */
38870
38871 #include <linux/module.h>
38872 +#include <linux/moduleloader.h>
38873 #include <linux/kernel.h>
38874 #include <linux/errno.h>
38875 #include <linux/string.h>
38876 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38877 static int vram_total __initdata; /* Set total amount of memory */
38878 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38879 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38880 -static void (*pmi_start)(void) __read_mostly;
38881 -static void (*pmi_pal) (void) __read_mostly;
38882 +static void (*pmi_start)(void) __read_only;
38883 +static void (*pmi_pal) (void) __read_only;
38884 static int depth __read_mostly;
38885 static int vga_compat __read_mostly;
38886 /* --------------------------------------------------------------------- */
38887 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38888 unsigned int size_vmode;
38889 unsigned int size_remap;
38890 unsigned int size_total;
38891 + void *pmi_code = NULL;
38892
38893 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38894 return -ENODEV;
38895 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38896 size_remap = size_total;
38897 vesafb_fix.smem_len = size_remap;
38898
38899 -#ifndef __i386__
38900 - screen_info.vesapm_seg = 0;
38901 -#endif
38902 -
38903 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38904 printk(KERN_WARNING
38905 "vesafb: cannot reserve video memory at 0x%lx\n",
38906 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38907 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38908 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38909
38910 +#ifdef __i386__
38911 +
38912 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38913 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
38914 + if (!pmi_code)
38915 +#elif !defined(CONFIG_PAX_KERNEXEC)
38916 + if (0)
38917 +#endif
38918 +
38919 +#endif
38920 + screen_info.vesapm_seg = 0;
38921 +
38922 if (screen_info.vesapm_seg) {
38923 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38924 - screen_info.vesapm_seg,screen_info.vesapm_off);
38925 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38926 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38927 }
38928
38929 if (screen_info.vesapm_seg < 0xc000)
38930 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38931
38932 if (ypan || pmi_setpal) {
38933 unsigned short *pmi_base;
38934 +
38935 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38936 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38937 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38938 +
38939 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38940 + pax_open_kernel();
38941 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38942 +#else
38943 + pmi_code = pmi_base;
38944 +#endif
38945 +
38946 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38947 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38948 +
38949 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38950 + pmi_start = ktva_ktla(pmi_start);
38951 + pmi_pal = ktva_ktla(pmi_pal);
38952 + pax_close_kernel();
38953 +#endif
38954 +
38955 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38956 if (pmi_base[3]) {
38957 printk(KERN_INFO "vesafb: pmi: ports = ");
38958 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38959 info->node, info->fix.id);
38960 return 0;
38961 err:
38962 +
38963 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38964 + module_free_exec(NULL, pmi_code);
38965 +#endif
38966 +
38967 if (info->screen_base)
38968 iounmap(info->screen_base);
38969 framebuffer_release(info);
38970 diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38971 --- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38972 +++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38973 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38974 return 0;
38975 }
38976
38977 -static struct sysfs_ops hyp_sysfs_ops = {
38978 +static const struct sysfs_ops hyp_sysfs_ops = {
38979 .show = hyp_sysfs_show,
38980 .store = hyp_sysfs_store,
38981 };
38982 diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38983 --- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38984 +++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38985 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38986 static void
38987 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38988 {
38989 - char *s = nd_get_link(nd);
38990 + const char *s = nd_get_link(nd);
38991
38992 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38993 IS_ERR(s) ? "<error>" : s);
38994 diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38995 --- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38996 +++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38997 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38998 size += sizeof(struct io_event) * nr_events;
38999 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39000
39001 - if (nr_pages < 0)
39002 + if (nr_pages <= 0)
39003 return -EINVAL;
39004
39005 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39006 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
39007 struct aio_timeout to;
39008 int retry = 0;
39009
39010 + pax_track_stack();
39011 +
39012 /* needed to zero any padding within an entry (there shouldn't be
39013 * any, but C is fun!
39014 */
39015 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
39016 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
39017 {
39018 ssize_t ret;
39019 + struct iovec iovstack;
39020
39021 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
39022 kiocb->ki_nbytes, 1,
39023 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
39024 + &iovstack, &kiocb->ki_iovec);
39025 if (ret < 0)
39026 goto out;
39027
39028 + if (kiocb->ki_iovec == &iovstack) {
39029 + kiocb->ki_inline_vec = iovstack;
39030 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39031 + }
39032 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39033 kiocb->ki_cur_seg = 0;
39034 /* ki_nbytes/left now reflect bytes instead of segs */
39035 diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
39036 --- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
39037 +++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
39038 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
39039 unsigned long limit;
39040
39041 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
39042 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39043 if (limit != RLIM_INFINITY && offset > limit)
39044 goto out_sig;
39045 if (offset > inode->i_sb->s_maxbytes)
39046 diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
39047 --- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
39048 +++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
39049 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
39050 set_bit(n,sbi->symlink_bitmap);
39051 sl = &sbi->symlink[n];
39052 sl->len = strlen(symname);
39053 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
39054 + slsize = sl->len+1;
39055 + sl->data = kmalloc(slsize, GFP_KERNEL);
39056 if (!sl->data) {
39057 clear_bit(n,sbi->symlink_bitmap);
39058 unlock_kernel();
39059 diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
39060 --- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
39061 +++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
39062 @@ -15,7 +15,7 @@
39063 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39064 {
39065 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39066 - nd_set_link(nd, (char *)ino->u.symlink);
39067 + nd_set_link(nd, ino->u.symlink);
39068 return NULL;
39069 }
39070
39071 diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
39072 --- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39073 +++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39074 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39075 {
39076 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39077 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39078 - char *link = nd_get_link(nd);
39079 + const char *link = nd_get_link(nd);
39080 if (!IS_ERR(link))
39081 kfree(link);
39082 }
39083 diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
39084 --- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39085 +++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39086 @@ -16,6 +16,7 @@
39087 #include <linux/string.h>
39088 #include <linux/fs.h>
39089 #include <linux/file.h>
39090 +#include <linux/security.h>
39091 #include <linux/stat.h>
39092 #include <linux/fcntl.h>
39093 #include <linux/ptrace.h>
39094 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39095 #endif
39096 # define START_STACK(u) (u.start_stack)
39097
39098 + memset(&dump, 0, sizeof(dump));
39099 +
39100 fs = get_fs();
39101 set_fs(KERNEL_DS);
39102 has_dumped = 1;
39103 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39104
39105 /* If the size of the dump file exceeds the rlimit, then see what would happen
39106 if we wrote the stack, but not the data area. */
39107 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39108 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39109 dump.u_dsize = 0;
39110
39111 /* Make sure we have enough room to write the stack and data areas. */
39112 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39113 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39114 dump.u_ssize = 0;
39115
39116 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39117 dump_size = dump.u_ssize << PAGE_SHIFT;
39118 DUMP_WRITE(dump_start,dump_size);
39119 }
39120 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
39121 - set_fs(KERNEL_DS);
39122 - DUMP_WRITE(current,sizeof(*current));
39123 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39124 end_coredump:
39125 set_fs(fs);
39126 return has_dumped;
39127 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39128 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39129 if (rlim >= RLIM_INFINITY)
39130 rlim = ~0;
39131 +
39132 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39133 if (ex.a_data + ex.a_bss > rlim)
39134 return -ENOMEM;
39135
39136 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39137 install_exec_creds(bprm);
39138 current->flags &= ~PF_FORKNOEXEC;
39139
39140 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39141 + current->mm->pax_flags = 0UL;
39142 +#endif
39143 +
39144 +#ifdef CONFIG_PAX_PAGEEXEC
39145 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39146 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39147 +
39148 +#ifdef CONFIG_PAX_EMUTRAMP
39149 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39150 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39151 +#endif
39152 +
39153 +#ifdef CONFIG_PAX_MPROTECT
39154 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39155 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39156 +#endif
39157 +
39158 + }
39159 +#endif
39160 +
39161 if (N_MAGIC(ex) == OMAGIC) {
39162 unsigned long text_addr, map_size;
39163 loff_t pos;
39164 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39165
39166 down_write(&current->mm->mmap_sem);
39167 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39168 - PROT_READ | PROT_WRITE | PROT_EXEC,
39169 + PROT_READ | PROT_WRITE,
39170 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39171 fd_offset + ex.a_text);
39172 up_write(&current->mm->mmap_sem);
39173 diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
39174 --- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39175 +++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39176 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39177 #define elf_core_dump NULL
39178 #endif
39179
39180 +#ifdef CONFIG_PAX_MPROTECT
39181 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39182 +#endif
39183 +
39184 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39185 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39186 #else
39187 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39188 .load_binary = load_elf_binary,
39189 .load_shlib = load_elf_library,
39190 .core_dump = elf_core_dump,
39191 +
39192 +#ifdef CONFIG_PAX_MPROTECT
39193 + .handle_mprotect= elf_handle_mprotect,
39194 +#endif
39195 +
39196 .min_coredump = ELF_EXEC_PAGESIZE,
39197 .hasvdso = 1
39198 };
39199 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39200
39201 static int set_brk(unsigned long start, unsigned long end)
39202 {
39203 + unsigned long e = end;
39204 +
39205 start = ELF_PAGEALIGN(start);
39206 end = ELF_PAGEALIGN(end);
39207 if (end > start) {
39208 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39209 if (BAD_ADDR(addr))
39210 return addr;
39211 }
39212 - current->mm->start_brk = current->mm->brk = end;
39213 + current->mm->start_brk = current->mm->brk = e;
39214 return 0;
39215 }
39216
39217 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39218 elf_addr_t __user *u_rand_bytes;
39219 const char *k_platform = ELF_PLATFORM;
39220 const char *k_base_platform = ELF_BASE_PLATFORM;
39221 - unsigned char k_rand_bytes[16];
39222 + u32 k_rand_bytes[4];
39223 int items;
39224 elf_addr_t *elf_info;
39225 int ei_index = 0;
39226 const struct cred *cred = current_cred();
39227 struct vm_area_struct *vma;
39228 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39229 +
39230 + pax_track_stack();
39231
39232 /*
39233 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39234 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39235 * Generate 16 random bytes for userspace PRNG seeding.
39236 */
39237 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39238 - u_rand_bytes = (elf_addr_t __user *)
39239 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39240 + srandom32(k_rand_bytes[0] ^ random32());
39241 + srandom32(k_rand_bytes[1] ^ random32());
39242 + srandom32(k_rand_bytes[2] ^ random32());
39243 + srandom32(k_rand_bytes[3] ^ random32());
39244 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39245 + u_rand_bytes = (elf_addr_t __user *) p;
39246 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39247 return -EFAULT;
39248
39249 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39250 return -EFAULT;
39251 current->mm->env_end = p;
39252
39253 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39254 +
39255 /* Put the elf_info on the stack in the right place. */
39256 sp = (elf_addr_t __user *)envp + 1;
39257 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39258 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39259 return -EFAULT;
39260 return 0;
39261 }
39262 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39263 {
39264 struct elf_phdr *elf_phdata;
39265 struct elf_phdr *eppnt;
39266 - unsigned long load_addr = 0;
39267 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39268 int load_addr_set = 0;
39269 unsigned long last_bss = 0, elf_bss = 0;
39270 - unsigned long error = ~0UL;
39271 + unsigned long error = -EINVAL;
39272 unsigned long total_size;
39273 int retval, i, size;
39274
39275 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39276 goto out_close;
39277 }
39278
39279 +#ifdef CONFIG_PAX_SEGMEXEC
39280 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39281 + pax_task_size = SEGMEXEC_TASK_SIZE;
39282 +#endif
39283 +
39284 eppnt = elf_phdata;
39285 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39286 if (eppnt->p_type == PT_LOAD) {
39287 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39288 k = load_addr + eppnt->p_vaddr;
39289 if (BAD_ADDR(k) ||
39290 eppnt->p_filesz > eppnt->p_memsz ||
39291 - eppnt->p_memsz > TASK_SIZE ||
39292 - TASK_SIZE - eppnt->p_memsz < k) {
39293 + eppnt->p_memsz > pax_task_size ||
39294 + pax_task_size - eppnt->p_memsz < k) {
39295 error = -ENOMEM;
39296 goto out_close;
39297 }
39298 @@ -532,6 +557,194 @@ out:
39299 return error;
39300 }
39301
39302 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39303 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39304 +{
39305 + unsigned long pax_flags = 0UL;
39306 +
39307 +#ifdef CONFIG_PAX_PAGEEXEC
39308 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39309 + pax_flags |= MF_PAX_PAGEEXEC;
39310 +#endif
39311 +
39312 +#ifdef CONFIG_PAX_SEGMEXEC
39313 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39314 + pax_flags |= MF_PAX_SEGMEXEC;
39315 +#endif
39316 +
39317 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39318 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39319 + if (nx_enabled)
39320 + pax_flags &= ~MF_PAX_SEGMEXEC;
39321 + else
39322 + pax_flags &= ~MF_PAX_PAGEEXEC;
39323 + }
39324 +#endif
39325 +
39326 +#ifdef CONFIG_PAX_EMUTRAMP
39327 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39328 + pax_flags |= MF_PAX_EMUTRAMP;
39329 +#endif
39330 +
39331 +#ifdef CONFIG_PAX_MPROTECT
39332 + if (elf_phdata->p_flags & PF_MPROTECT)
39333 + pax_flags |= MF_PAX_MPROTECT;
39334 +#endif
39335 +
39336 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39337 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39338 + pax_flags |= MF_PAX_RANDMMAP;
39339 +#endif
39340 +
39341 + return pax_flags;
39342 +}
39343 +#endif
39344 +
39345 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39346 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39347 +{
39348 + unsigned long pax_flags = 0UL;
39349 +
39350 +#ifdef CONFIG_PAX_PAGEEXEC
39351 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39352 + pax_flags |= MF_PAX_PAGEEXEC;
39353 +#endif
39354 +
39355 +#ifdef CONFIG_PAX_SEGMEXEC
39356 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39357 + pax_flags |= MF_PAX_SEGMEXEC;
39358 +#endif
39359 +
39360 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39361 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39362 + if (nx_enabled)
39363 + pax_flags &= ~MF_PAX_SEGMEXEC;
39364 + else
39365 + pax_flags &= ~MF_PAX_PAGEEXEC;
39366 + }
39367 +#endif
39368 +
39369 +#ifdef CONFIG_PAX_EMUTRAMP
39370 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39371 + pax_flags |= MF_PAX_EMUTRAMP;
39372 +#endif
39373 +
39374 +#ifdef CONFIG_PAX_MPROTECT
39375 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39376 + pax_flags |= MF_PAX_MPROTECT;
39377 +#endif
39378 +
39379 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39380 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39381 + pax_flags |= MF_PAX_RANDMMAP;
39382 +#endif
39383 +
39384 + return pax_flags;
39385 +}
39386 +#endif
39387 +
39388 +#ifdef CONFIG_PAX_EI_PAX
39389 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39390 +{
39391 + unsigned long pax_flags = 0UL;
39392 +
39393 +#ifdef CONFIG_PAX_PAGEEXEC
39394 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39395 + pax_flags |= MF_PAX_PAGEEXEC;
39396 +#endif
39397 +
39398 +#ifdef CONFIG_PAX_SEGMEXEC
39399 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39400 + pax_flags |= MF_PAX_SEGMEXEC;
39401 +#endif
39402 +
39403 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39404 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39405 + if (nx_enabled)
39406 + pax_flags &= ~MF_PAX_SEGMEXEC;
39407 + else
39408 + pax_flags &= ~MF_PAX_PAGEEXEC;
39409 + }
39410 +#endif
39411 +
39412 +#ifdef CONFIG_PAX_EMUTRAMP
39413 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39414 + pax_flags |= MF_PAX_EMUTRAMP;
39415 +#endif
39416 +
39417 +#ifdef CONFIG_PAX_MPROTECT
39418 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39419 + pax_flags |= MF_PAX_MPROTECT;
39420 +#endif
39421 +
39422 +#ifdef CONFIG_PAX_ASLR
39423 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39424 + pax_flags |= MF_PAX_RANDMMAP;
39425 +#endif
39426 +
39427 + return pax_flags;
39428 +}
39429 +#endif
39430 +
39431 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39432 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39433 +{
39434 + unsigned long pax_flags = 0UL;
39435 +
39436 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39437 + unsigned long i;
39438 + int found_flags = 0;
39439 +#endif
39440 +
39441 +#ifdef CONFIG_PAX_EI_PAX
39442 + pax_flags = pax_parse_ei_pax(elf_ex);
39443 +#endif
39444 +
39445 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39446 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39447 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39448 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39449 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39450 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39451 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39452 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39453 + return -EINVAL;
39454 +
39455 +#ifdef CONFIG_PAX_SOFTMODE
39456 + if (pax_softmode)
39457 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39458 + else
39459 +#endif
39460 +
39461 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39462 + found_flags = 1;
39463 + break;
39464 + }
39465 +#endif
39466 +
39467 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39468 + if (found_flags == 0) {
39469 + struct elf_phdr phdr;
39470 + memset(&phdr, 0, sizeof(phdr));
39471 + phdr.p_flags = PF_NOEMUTRAMP;
39472 +#ifdef CONFIG_PAX_SOFTMODE
39473 + if (pax_softmode)
39474 + pax_flags = pax_parse_softmode(&phdr);
39475 + else
39476 +#endif
39477 + pax_flags = pax_parse_hardmode(&phdr);
39478 + }
39479 +#endif
39480 +
39481 +
39482 + if (0 > pax_check_flags(&pax_flags))
39483 + return -EINVAL;
39484 +
39485 + current->mm->pax_flags = pax_flags;
39486 + return 0;
39487 +}
39488 +#endif
39489 +
39490 /*
39491 * These are the functions used to load ELF style executables and shared
39492 * libraries. There is no binary dependent code anywhere else.
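The pax_parse_softmode()/pax_parse_hardmode() pair added in this hunk applies two complementary policies to a PT_PAX_FLAGS program header: in soft mode a feature is enabled only if its PF_* bit is set (opt-in), in hard mode a feature stays enabled unless its PF_NO* bit is set (opt-out), and pax_parse_elf_flags() rejects headers that set both bits of a pair. The standalone C sketch below models only that table-driven decision; parse_pax_pflags(), the FEAT_* values and the bit layout in the example table are invented for illustration (they are not the kernel's PF_*/MF_PAX_* definitions), and the nx_enabled and randomize_va_space special cases are omitted.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative feature bits -- not the kernel's MF_PAX_* values. */
    enum {
        FEAT_PAGEEXEC = 1 << 0,
        FEAT_SEGMEXEC = 1 << 1,
        FEAT_EMUTRAMP = 1 << 2,
        FEAT_MPROTECT = 1 << 3,
        FEAT_RANDMMAP = 1 << 4,
    };

    /* One enable/disable bit pair per feature, as carried in p_flags. */
    struct pax_bit { uint32_t on, off; unsigned feat; };

    /* Soft mode: feature on only if the enable bit is set (opt-in).
     * Hard mode: feature on unless the disable bit is set (opt-out).
     * Setting both bits of a pair is contradictory and rejected. */
    static int parse_pax_pflags(uint32_t p_flags, const struct pax_bit *tbl,
                                size_t n, bool softmode)
    {
        unsigned feats = 0;

        for (size_t i = 0; i < n; i++) {
            if ((p_flags & tbl[i].on) && (p_flags & tbl[i].off))
                return -1;
            if (softmode ? (p_flags & tbl[i].on) != 0
                         : (p_flags & tbl[i].off) == 0)
                feats |= tbl[i].feat;
        }
        return (int)feats;
    }

    int main(void)
    {
        /* Hypothetical layout: bit 2k enables feature k, bit 2k+1 disables it. */
        static const struct pax_bit tbl[] = {
            { 1u << 0, 1u << 1, FEAT_PAGEEXEC },
            { 1u << 2, 1u << 3, FEAT_SEGMEXEC },
            { 1u << 4, 1u << 5, FEAT_EMUTRAMP },
            { 1u << 6, 1u << 7, FEAT_MPROTECT },
            { 1u << 8, 1u << 9, FEAT_RANDMMAP },
        };
        size_t n = sizeof(tbl) / sizeof(tbl[0]);
        uint32_t p_flags = 1u << 7;   /* header only says "disable MPROTECT" */

        printf("hard mode features: %#x\n",
               (unsigned)parse_pax_pflags(p_flags, tbl, n, false));
        printf("soft mode features: %#x\n",
               (unsigned)parse_pax_pflags(p_flags, tbl, n, true));
        return 0;
    }

Defaulting to "on" in hard mode is what keeps unmarked binaries protected: an all-zero p_flags yields full protection there, while in soft mode it yields none.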
39493 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39494 {
39495 unsigned int random_variable = 0;
39496
39497 +#ifdef CONFIG_PAX_RANDUSTACK
39498 + if (randomize_va_space)
39499 + return stack_top - current->mm->delta_stack;
39500 +#endif
39501 +
39502 if ((current->flags & PF_RANDOMIZE) &&
39503 !(current->personality & ADDR_NO_RANDOMIZE)) {
39504 random_variable = get_random_int() & STACK_RND_MASK;
39505 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39506 unsigned long load_addr = 0, load_bias = 0;
39507 int load_addr_set = 0;
39508 char * elf_interpreter = NULL;
39509 - unsigned long error;
39510 + unsigned long error = 0;
39511 struct elf_phdr *elf_ppnt, *elf_phdata;
39512 unsigned long elf_bss, elf_brk;
39513 int retval, i;
39514 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39515 unsigned long start_code, end_code, start_data, end_data;
39516 unsigned long reloc_func_desc = 0;
39517 int executable_stack = EXSTACK_DEFAULT;
39518 - unsigned long def_flags = 0;
39519 struct {
39520 struct elfhdr elf_ex;
39521 struct elfhdr interp_elf_ex;
39522 } *loc;
39523 + unsigned long pax_task_size = TASK_SIZE;
39524
39525 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39526 if (!loc) {
39527 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39528
39529 /* OK, This is the point of no return */
39530 current->flags &= ~PF_FORKNOEXEC;
39531 - current->mm->def_flags = def_flags;
39532 +
39533 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39534 + current->mm->pax_flags = 0UL;
39535 +#endif
39536 +
39537 +#ifdef CONFIG_PAX_DLRESOLVE
39538 + current->mm->call_dl_resolve = 0UL;
39539 +#endif
39540 +
39541 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39542 + current->mm->call_syscall = 0UL;
39543 +#endif
39544 +
39545 +#ifdef CONFIG_PAX_ASLR
39546 + current->mm->delta_mmap = 0UL;
39547 + current->mm->delta_stack = 0UL;
39548 +#endif
39549 +
39550 + current->mm->def_flags = 0;
39551 +
39552 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39553 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39554 + send_sig(SIGKILL, current, 0);
39555 + goto out_free_dentry;
39556 + }
39557 +#endif
39558 +
39559 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39560 + pax_set_initial_flags(bprm);
39561 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39562 + if (pax_set_initial_flags_func)
39563 + (pax_set_initial_flags_func)(bprm);
39564 +#endif
39565 +
39566 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39567 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39568 + current->mm->context.user_cs_limit = PAGE_SIZE;
39569 + current->mm->def_flags |= VM_PAGEEXEC;
39570 + }
39571 +#endif
39572 +
39573 +#ifdef CONFIG_PAX_SEGMEXEC
39574 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39575 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39576 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39577 + pax_task_size = SEGMEXEC_TASK_SIZE;
39578 + }
39579 +#endif
39580 +
39581 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39582 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39583 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39584 + put_cpu();
39585 + }
39586 +#endif
39587
39588 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39589 may depend on the personality. */
39590 SET_PERSONALITY(loc->elf_ex);
39591 +
39592 +#ifdef CONFIG_PAX_ASLR
39593 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39594 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39595 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39596 + }
39597 +#endif
39598 +
39599 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39600 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39601 + executable_stack = EXSTACK_DISABLE_X;
39602 + current->personality &= ~READ_IMPLIES_EXEC;
39603 + } else
39604 +#endif
39605 +
39606 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39607 current->personality |= READ_IMPLIES_EXEC;
39608
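The MF_PAX_RANDMMAP branch above computes delta_mmap and delta_stack by keeping PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN random bits and shifting them left by PAGE_SHIFT, so each delta is page-aligned and spans 2^(LEN+PAGE_SHIFT) bytes. A small userspace illustration of that arithmetic follows; make_delta() and the *_LEN widths below are placeholders (the real PAX_DELTA_*_LEN values are set per architecture elsewhere in the patch), and rand() merely stands in for pax_get_random_long().

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT      12u   /* 4 KiB pages assumed */
    #define DELTA_MMAP_LEN  16u   /* placeholder widths; the real PAX_DELTA_*_LEN */
    #define DELTA_STACK_LEN 12u   /* values are per-architecture                  */

    /* Keep `len` low-order random bits, then page-align the result. */
    static uint64_t make_delta(uint64_t rnd, unsigned len)
    {
        return (rnd & ((UINT64_C(1) << len) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
        uint64_t rnd = ((uint64_t)rand() << 32) ^ (uint64_t)rand();   /* stand-in RNG */

        printf("mmap  window: %" PRIu64 " MiB, sample delta %#" PRIx64 "\n",
               (UINT64_C(1) << (DELTA_MMAP_LEN + PAGE_SHIFT)) >> 20,
               make_delta(rnd, DELTA_MMAP_LEN));
        printf("stack window: %" PRIu64 " MiB, sample delta %#" PRIx64 "\n",
               (UINT64_C(1) << (DELTA_STACK_LEN + PAGE_SHIFT)) >> 20,
               make_delta(rnd, DELTA_STACK_LEN));
        return 0;
    }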
39609 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39610 #else
39611 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39612 #endif
39613 +
39614 +#ifdef CONFIG_PAX_RANDMMAP
39615 + /* PaX: randomize base address at the default exe base if requested */
39616 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39617 +#ifdef CONFIG_SPARC64
39618 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39619 +#else
39620 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39621 +#endif
39622 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39623 + elf_flags |= MAP_FIXED;
39624 + }
39625 +#endif
39626 +
39627 }
39628
39629 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39630 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39631 * allowed task size. Note that p_filesz must always be
39632 * <= p_memsz so it is only necessary to check p_memsz.
39633 */
39634 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39635 - elf_ppnt->p_memsz > TASK_SIZE ||
39636 - TASK_SIZE - elf_ppnt->p_memsz < k) {
39637 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39638 + elf_ppnt->p_memsz > pax_task_size ||
39639 + pax_task_size - elf_ppnt->p_memsz < k) {
39640 /* set_brk can never work. Avoid overflows. */
39641 send_sig(SIGKILL, current, 0);
39642 retval = -EINVAL;
39643 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39644 start_data += load_bias;
39645 end_data += load_bias;
39646
39647 +#ifdef CONFIG_PAX_RANDMMAP
39648 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39649 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39650 +#endif
39651 +
39652 /* Calling set_brk effectively mmaps the pages that we need
39653 * for the bss and break sections. We must do this before
39654 * mapping in the interpreter, to make sure it doesn't wind
39655 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39656 goto out_free_dentry;
39657 }
39658 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39659 - send_sig(SIGSEGV, current, 0);
39660 - retval = -EFAULT; /* Nobody gets to see this, but.. */
39661 - goto out_free_dentry;
39662 + /*
39663 + * This bss-zeroing can fail if the ELF
39664 + * file specifies odd protections. So
39665 + * we don't check the return value
39666 + */
39667 }
39668
39669 if (elf_interpreter) {
39670 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39671 unsigned long n = off;
39672 if (n > PAGE_SIZE)
39673 n = PAGE_SIZE;
39674 - if (!dump_write(file, buf, n))
39675 + if (!dump_write(file, buf, n)) {
39676 + free_page((unsigned long)buf);
39677 return 0;
39678 + }
39679 off -= n;
39680 }
39681 free_page((unsigned long)buf);
39682 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39683 * Decide what to dump of a segment, part, all or none.
39684 */
39685 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39686 - unsigned long mm_flags)
39687 + unsigned long mm_flags, long signr)
39688 {
39689 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39690
39691 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39692 if (vma->vm_file == NULL)
39693 return 0;
39694
39695 - if (FILTER(MAPPED_PRIVATE))
39696 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39697 goto whole;
39698
39699 /*
39700 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39701 #undef DUMP_WRITE
39702
39703 #define DUMP_WRITE(addr, nr) \
39704 + do { \
39705 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39706 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39707 - goto end_coredump;
39708 + goto end_coredump; \
39709 + } while (0);
39710
39711 static void fill_elf_header(struct elfhdr *elf, int segs,
39712 u16 machine, u32 flags, u8 osabi)
39713 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39714 {
39715 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39716 int i = 0;
39717 - do
39718 + do {
39719 i += 2;
39720 - while (auxv[i - 2] != AT_NULL);
39721 + } while (auxv[i - 2] != AT_NULL);
39722 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39723 }
39724
39725 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39726 phdr.p_offset = offset;
39727 phdr.p_vaddr = vma->vm_start;
39728 phdr.p_paddr = 0;
39729 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
39730 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39731 phdr.p_memsz = vma->vm_end - vma->vm_start;
39732 offset += phdr.p_filesz;
39733 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39734 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39735 unsigned long addr;
39736 unsigned long end;
39737
39738 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
39739 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39740
39741 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39742 struct page *page;
39743 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39744 page = get_dump_page(addr);
39745 if (page) {
39746 void *kaddr = kmap(page);
39747 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39748 stop = ((size += PAGE_SIZE) > limit) ||
39749 !dump_write(file, kaddr, PAGE_SIZE);
39750 kunmap(page);
39751 @@ -2042,6 +2356,97 @@ out:
39752
39753 #endif /* USE_ELF_CORE_DUMP */
39754
39755 +#ifdef CONFIG_PAX_MPROTECT
39756 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
39757 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39758 + * we'll remove VM_MAYWRITE for good on RELRO segments.
39759 + *
39760 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39761 + * basis because we want to allow the common case and not the special ones.
39762 + */
39763 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39764 +{
39765 + struct elfhdr elf_h;
39766 + struct elf_phdr elf_p;
39767 + unsigned long i;
39768 + unsigned long oldflags;
39769 + bool is_textrel_rw, is_textrel_rx, is_relro;
39770 +
39771 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39772 + return;
39773 +
39774 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39775 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39776 +
39777 +#ifdef CONFIG_PAX_ELFRELOCS
39778 + /* possible TEXTREL */
39779 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39780 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39781 +#else
39782 + is_textrel_rw = false;
39783 + is_textrel_rx = false;
39784 +#endif
39785 +
39786 + /* possible RELRO */
39787 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39788 +
39789 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39790 + return;
39791 +
39792 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39793 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39794 +
39795 +#ifdef CONFIG_PAX_ETEXECRELOCS
39796 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39797 +#else
39798 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39799 +#endif
39800 +
39801 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39802 + !elf_check_arch(&elf_h) ||
39803 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39804 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39805 + return;
39806 +
39807 + for (i = 0UL; i < elf_h.e_phnum; i++) {
39808 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39809 + return;
39810 + switch (elf_p.p_type) {
39811 + case PT_DYNAMIC:
39812 + if (!is_textrel_rw && !is_textrel_rx)
39813 + continue;
39814 + i = 0UL;
39815 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39816 + elf_dyn dyn;
39817 +
39818 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39819 + return;
39820 + if (dyn.d_tag == DT_NULL)
39821 + return;
39822 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39823 + gr_log_textrel(vma);
39824 + if (is_textrel_rw)
39825 + vma->vm_flags |= VM_MAYWRITE;
39826 + else
39827 +						/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39828 + vma->vm_flags &= ~VM_MAYWRITE;
39829 + return;
39830 + }
39831 + i++;
39832 + }
39833 + return;
39834 +
39835 + case PT_GNU_RELRO:
39836 + if (!is_relro)
39837 + continue;
39838 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39839 + vma->vm_flags &= ~VM_MAYWRITE;
39840 + return;
39841 + }
39842 + }
39843 +}
39844 +#endif
39845 +
39846 static int __init init_elf_binfmt(void)
39847 {
39848 return register_binfmt(&elf_format);
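The elf_handle_mprotect() helper added at the end of this file re-reads the ELF object that backs a vma and looks for two markers: a PT_DYNAMIC entry DT_TEXTREL (or DT_FLAGS with DF_TEXTREL), which signals genuine text relocations, and a PT_GNU_RELRO segment, which marks data that turns read-only once relocation is finished. The same markers can be checked offline; the sketch below is an ordinary userspace scan of a 64-bit ELF using glibc's <elf.h> and plain stdio, and it leaves out the vma-flag and bounds checks the kernel version performs.

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <elf64-file>\n", argv[0]);
            return 1;
        }

        FILE *f = fopen(argv[1], "rb");
        if (!f) {
            perror("fopen");
            return 1;
        }

        Elf64_Ehdr eh;
        if (fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
            eh.e_ident[EI_CLASS] != ELFCLASS64) {
            fprintf(stderr, "not a 64-bit ELF file\n");
            fclose(f);
            return 1;
        }

        int textrel = 0, relro = 0;

        for (unsigned i = 0; i < eh.e_phnum; i++) {
            Elf64_Phdr ph;

            if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET) != 0 ||
                fread(&ph, sizeof(ph), 1, f) != 1)
                break;

            if (ph.p_type == PT_GNU_RELRO)
                relro = 1;                  /* becomes read-only after relocation */

            if (ph.p_type != PT_DYNAMIC)
                continue;

            /* walk the dynamic entries, much as the patch does with kernel_read() */
            for (Elf64_Off off = 0; off + sizeof(Elf64_Dyn) <= ph.p_filesz;
                 off += sizeof(Elf64_Dyn)) {
                Elf64_Dyn dyn;

                if (fseek(f, (long)(ph.p_offset + off), SEEK_SET) != 0 ||
                    fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
                    break;
                if (dyn.d_tag == DT_TEXTREL ||
                    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
                    textrel = 1;            /* relocations patch the text segment */
            }
        }

        fclose(f);
        printf("%s: TEXTREL=%d GNU_RELRO=%d\n", argv[1], textrel, relro);
        return 0;
    }

readelf -d (look for TEXTREL, or the TEXTREL bit in FLAGS) and readelf -l (look for a GNU_RELRO segment) report the same information without writing any code.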
39849 diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39850 --- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39851 +++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39852 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39853 realdatastart = (unsigned long) -ENOMEM;
39854 printk("Unable to allocate RAM for process data, errno %d\n",
39855 (int)-realdatastart);
39856 + down_write(&current->mm->mmap_sem);
39857 do_munmap(current->mm, textpos, text_len);
39858 + up_write(&current->mm->mmap_sem);
39859 ret = realdatastart;
39860 goto err;
39861 }
39862 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39863 }
39864 if (IS_ERR_VALUE(result)) {
39865 printk("Unable to read data+bss, errno %d\n", (int)-result);
39866 + down_write(&current->mm->mmap_sem);
39867 do_munmap(current->mm, textpos, text_len);
39868 do_munmap(current->mm, realdatastart, data_len + extra);
39869 + up_write(&current->mm->mmap_sem);
39870 ret = result;
39871 goto err;
39872 }
39873 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39874 }
39875 if (IS_ERR_VALUE(result)) {
39876 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39877 + down_write(&current->mm->mmap_sem);
39878 do_munmap(current->mm, textpos, text_len + data_len + extra +
39879 MAX_SHARED_LIBS * sizeof(unsigned long));
39880 + up_write(&current->mm->mmap_sem);
39881 ret = result;
39882 goto err;
39883 }
39884 diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39885 --- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39886 +++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39887 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39888
39889 i = 0;
39890 while (i < bio_slab_nr) {
39891 - struct bio_slab *bslab = &bio_slabs[i];
39892 + bslab = &bio_slabs[i];
39893
39894 if (!bslab->slab && entry == -1)
39895 entry = i;
39896 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39897 const int read = bio_data_dir(bio) == READ;
39898 struct bio_map_data *bmd = bio->bi_private;
39899 int i;
39900 - char *p = bmd->sgvecs[0].iov_base;
39901 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
39902
39903 __bio_for_each_segment(bvec, bio, i, 0) {
39904 char *addr = page_address(bvec->bv_page);
39905 diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39906 --- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39907 +++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39908 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39909 else if (bdev->bd_contains == bdev)
39910 res = 0; /* is a whole device which isn't held */
39911
39912 - else if (bdev->bd_contains->bd_holder == bd_claim)
39913 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39914 res = 0; /* is a partition of a device that is being partitioned */
39915 else if (bdev->bd_contains->bd_holder != NULL)
39916 res = -EBUSY; /* is a partition of a held device */
39917 diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39918 --- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39919 +++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39920 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39921 free_extent_buffer(buf);
39922 add_root_to_dirty_list(root);
39923 } else {
39924 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39925 - parent_start = parent->start;
39926 - else
39927 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39928 + if (parent)
39929 + parent_start = parent->start;
39930 + else
39931 + parent_start = 0;
39932 + } else
39933 parent_start = 0;
39934
39935 WARN_ON(trans->transid != btrfs_header_generation(parent));
39936 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39937
39938 ret = 0;
39939 if (slot == 0) {
39940 - struct btrfs_disk_key disk_key;
39941 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39942 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39943 }
39944 diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39945 --- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39946 +++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39947 @@ -39,7 +39,7 @@
39948 #include "tree-log.h"
39949 #include "free-space-cache.h"
39950
39951 -static struct extent_io_ops btree_extent_io_ops;
39952 +static const struct extent_io_ops btree_extent_io_ops;
39953 static void end_workqueue_fn(struct btrfs_work *work);
39954 static void free_fs_root(struct btrfs_root *root);
39955
39956 @@ -2607,7 +2607,7 @@ out:
39957 return 0;
39958 }
39959
39960 -static struct extent_io_ops btree_extent_io_ops = {
39961 +static const struct extent_io_ops btree_extent_io_ops = {
39962 .write_cache_pages_lock_hook = btree_lock_page_hook,
39963 .readpage_end_io_hook = btree_readpage_end_io_hook,
39964 .submit_bio_hook = btree_submit_bio_hook,
39965 diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39966 --- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39967 +++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39968 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39969 struct bio *bio, int mirror_num,
39970 unsigned long bio_flags);
39971 struct extent_io_ops {
39972 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39973 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39974 u64 start, u64 end, int *page_started,
39975 unsigned long *nr_written);
39976 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39977 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39978 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39979 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39980 extent_submit_bio_hook_t *submit_bio_hook;
39981 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
39982 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39983 size_t size, struct bio *bio,
39984 unsigned long bio_flags);
39985 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39986 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39987 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39988 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39989 u64 start, u64 end,
39990 struct extent_state *state);
39991 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39992 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39993 u64 start, u64 end,
39994 struct extent_state *state);
39995 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39996 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39997 struct extent_state *state);
39998 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39999 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40000 struct extent_state *state, int uptodate);
40001 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
40002 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
40003 unsigned long old, unsigned long bits);
40004 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
40005 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
40006 unsigned long bits);
40007 - int (*merge_extent_hook)(struct inode *inode,
40008 + int (* const merge_extent_hook)(struct inode *inode,
40009 struct extent_state *new,
40010 struct extent_state *other);
40011 - int (*split_extent_hook)(struct inode *inode,
40012 + int (* const split_extent_hook)(struct inode *inode,
40013 struct extent_state *orig, u64 split);
40014 - int (*write_cache_pages_lock_hook)(struct page *page);
40015 + int (* const write_cache_pages_lock_hook)(struct page *page);
40016 };
40017
40018 struct extent_io_tree {
40019 @@ -88,7 +88,7 @@ struct extent_io_tree {
40020 u64 dirty_bytes;
40021 spinlock_t lock;
40022 spinlock_t buffer_lock;
40023 - struct extent_io_ops *ops;
40024 + const struct extent_io_ops *ops;
40025 };
40026
40027 struct extent_state {
40028 diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
40029 --- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
40030 +++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
40031 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
40032 u64 group_start = group->key.objectid;
40033 new_extents = kmalloc(sizeof(*new_extents),
40034 GFP_NOFS);
40035 + if (!new_extents) {
40036 + ret = -ENOMEM;
40037 + goto out;
40038 + }
40039 nr_extents = 1;
40040 ret = get_new_locations(reloc_inode,
40041 extent_key,
40042 diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
40043 --- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
40044 +++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
40045 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
40046
40047 while(1) {
40048 if (entry->bytes < bytes || entry->offset < min_start) {
40049 - struct rb_node *node;
40050 -
40051 node = rb_next(&entry->offset_index);
40052 if (!node)
40053 break;
40054 @@ -1226,7 +1224,7 @@ again:
40055 */
40056 while (entry->bitmap || found_bitmap ||
40057 (!entry->bitmap && entry->bytes < min_bytes)) {
40058 - struct rb_node *node = rb_next(&entry->offset_index);
40059 + node = rb_next(&entry->offset_index);
40060
40061 if (entry->bitmap && entry->bytes > bytes + empty_size) {
40062 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40063 diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
40064 --- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40065 +++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40066 @@ -63,7 +63,7 @@ static const struct inode_operations btr
40067 static const struct address_space_operations btrfs_aops;
40068 static const struct address_space_operations btrfs_symlink_aops;
40069 static const struct file_operations btrfs_dir_file_operations;
40070 -static struct extent_io_ops btrfs_extent_io_ops;
40071 +static const struct extent_io_ops btrfs_extent_io_ops;
40072
40073 static struct kmem_cache *btrfs_inode_cachep;
40074 struct kmem_cache *btrfs_trans_handle_cachep;
40075 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40076 1, 0, NULL, GFP_NOFS);
40077 while (start < end) {
40078 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40079 + BUG_ON(!async_cow);
40080 async_cow->inode = inode;
40081 async_cow->root = root;
40082 async_cow->locked_page = locked_page;
40083 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40084 inline_size = btrfs_file_extent_inline_item_len(leaf,
40085 btrfs_item_nr(leaf, path->slots[0]));
40086 tmp = kmalloc(inline_size, GFP_NOFS);
40087 + if (!tmp)
40088 + return -ENOMEM;
40089 ptr = btrfs_file_extent_inline_start(item);
40090
40091 read_extent_buffer(leaf, tmp, ptr, inline_size);
40092 @@ -5410,7 +5413,7 @@ fail:
40093 return -ENOMEM;
40094 }
40095
40096 -static int btrfs_getattr(struct vfsmount *mnt,
40097 +int btrfs_getattr(struct vfsmount *mnt,
40098 struct dentry *dentry, struct kstat *stat)
40099 {
40100 struct inode *inode = dentry->d_inode;
40101 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40102 return 0;
40103 }
40104
40105 +EXPORT_SYMBOL(btrfs_getattr);
40106 +
40107 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40108 +{
40109 + return BTRFS_I(inode)->root->anon_super.s_dev;
40110 +}
40111 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40112 +
40113 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40114 struct inode *new_dir, struct dentry *new_dentry)
40115 {
40116 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40117 .fsync = btrfs_sync_file,
40118 };
40119
40120 -static struct extent_io_ops btrfs_extent_io_ops = {
40121 +static const struct extent_io_ops btrfs_extent_io_ops = {
40122 .fill_delalloc = run_delalloc_range,
40123 .submit_bio_hook = btrfs_submit_bio_hook,
40124 .merge_bio_hook = btrfs_merge_bio_hook,
40125 diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
40126 --- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40127 +++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40128 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40129 }
40130 spin_unlock(&rc->reloc_root_tree.lock);
40131
40132 - BUG_ON((struct btrfs_root *)node->data != root);
40133 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40134
40135 if (!del) {
40136 spin_lock(&rc->reloc_root_tree.lock);
40137 diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
40138 --- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40139 +++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40140 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40141 complete(&root->kobj_unregister);
40142 }
40143
40144 -static struct sysfs_ops btrfs_super_attr_ops = {
40145 +static const struct sysfs_ops btrfs_super_attr_ops = {
40146 .show = btrfs_super_attr_show,
40147 .store = btrfs_super_attr_store,
40148 };
40149
40150 -static struct sysfs_ops btrfs_root_attr_ops = {
40151 +static const struct sysfs_ops btrfs_root_attr_ops = {
40152 .show = btrfs_root_attr_show,
40153 .store = btrfs_root_attr_store,
40154 };
40155 diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
40156 --- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40157 +++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40158 @@ -25,6 +25,7 @@
40159 #include <linux/percpu.h>
40160 #include <linux/slab.h>
40161 #include <linux/capability.h>
40162 +#include <linux/security.h>
40163 #include <linux/blkdev.h>
40164 #include <linux/file.h>
40165 #include <linux/quotaops.h>
40166 diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
40167 --- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40168 +++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40169 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40170 args);
40171
40172 /* start by checking things over */
40173 - ASSERT(cache->fstop_percent >= 0 &&
40174 - cache->fstop_percent < cache->fcull_percent &&
40175 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40176 cache->fcull_percent < cache->frun_percent &&
40177 cache->frun_percent < 100);
40178
40179 - ASSERT(cache->bstop_percent >= 0 &&
40180 - cache->bstop_percent < cache->bcull_percent &&
40181 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40182 cache->bcull_percent < cache->brun_percent &&
40183 cache->brun_percent < 100);
40184
40185 diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
40186 --- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40187 +++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40188 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40189 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40190 return -EIO;
40191
40192 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40193 + if (datalen > PAGE_SIZE - 1)
40194 return -EOPNOTSUPP;
40195
40196 /* drag the command string into the kernel so we can parse it */
40197 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40198 if (args[0] != '%' || args[1] != '\0')
40199 return -EINVAL;
40200
40201 - if (fstop < 0 || fstop >= cache->fcull_percent)
40202 + if (fstop >= cache->fcull_percent)
40203 return cachefiles_daemon_range_error(cache, args);
40204
40205 cache->fstop_percent = fstop;
40206 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40207 if (args[0] != '%' || args[1] != '\0')
40208 return -EINVAL;
40209
40210 - if (bstop < 0 || bstop >= cache->bcull_percent)
40211 + if (bstop >= cache->bcull_percent)
40212 return cachefiles_daemon_range_error(cache, args);
40213
40214 cache->bstop_percent = bstop;
40215 diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
40216 --- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40217 +++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40218 @@ -56,7 +56,7 @@ struct cachefiles_cache {
40219 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40220 struct rb_root active_nodes; /* active nodes (can't be culled) */
40221 rwlock_t active_lock; /* lock for active_nodes */
40222 - atomic_t gravecounter; /* graveyard uniquifier */
40223 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40224 unsigned frun_percent; /* when to stop culling (% files) */
40225 unsigned fcull_percent; /* when to start culling (% files) */
40226 unsigned fstop_percent; /* when to stop allocating (% files) */
40227 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40228 * proc.c
40229 */
40230 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40231 -extern atomic_t cachefiles_lookup_histogram[HZ];
40232 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40233 -extern atomic_t cachefiles_create_histogram[HZ];
40234 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40235 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40236 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40237
40238 extern int __init cachefiles_proc_init(void);
40239 extern void cachefiles_proc_cleanup(void);
40240 static inline
40241 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40242 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40243 {
40244 unsigned long jif = jiffies - start_jif;
40245 if (jif >= HZ)
40246 jif = HZ - 1;
40247 - atomic_inc(&histogram[jif]);
40248 + atomic_inc_unchecked(&histogram[jif]);
40249 }
40250
40251 #else
40252 diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
40253 --- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40254 +++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40255 @@ -250,7 +250,7 @@ try_again:
40256 /* first step is to make up a grave dentry in the graveyard */
40257 sprintf(nbuffer, "%08x%08x",
40258 (uint32_t) get_seconds(),
40259 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40260 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40261
40262 /* do the multiway lock magic */
40263 trap = lock_rename(cache->graveyard, dir);
40264 diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
40265 --- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40266 +++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40267 @@ -14,9 +14,9 @@
40268 #include <linux/seq_file.h>
40269 #include "internal.h"
40270
40271 -atomic_t cachefiles_lookup_histogram[HZ];
40272 -atomic_t cachefiles_mkdir_histogram[HZ];
40273 -atomic_t cachefiles_create_histogram[HZ];
40274 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40275 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40276 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40277
40278 /*
40279 * display the latency histogram
40280 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40281 return 0;
40282 default:
40283 index = (unsigned long) v - 3;
40284 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40285 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40286 - z = atomic_read(&cachefiles_create_histogram[index]);
40287 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40288 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40289 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40290 if (x == 0 && y == 0 && z == 0)
40291 return 0;
40292
40293 diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
40294 --- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40295 +++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40296 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40297 old_fs = get_fs();
40298 set_fs(KERNEL_DS);
40299 ret = file->f_op->write(
40300 - file, (const void __user *) data, len, &pos);
40301 + file, (__force const void __user *) data, len, &pos);
40302 set_fs(old_fs);
40303 kunmap(page);
40304 if (ret != len)
40305 diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
40306 --- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40307 +++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40308 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40309 tcon = list_entry(tmp3,
40310 struct cifsTconInfo,
40311 tcon_list);
40312 - atomic_set(&tcon->num_smbs_sent, 0);
40313 - atomic_set(&tcon->num_writes, 0);
40314 - atomic_set(&tcon->num_reads, 0);
40315 - atomic_set(&tcon->num_oplock_brks, 0);
40316 - atomic_set(&tcon->num_opens, 0);
40317 - atomic_set(&tcon->num_posixopens, 0);
40318 - atomic_set(&tcon->num_posixmkdirs, 0);
40319 - atomic_set(&tcon->num_closes, 0);
40320 - atomic_set(&tcon->num_deletes, 0);
40321 - atomic_set(&tcon->num_mkdirs, 0);
40322 - atomic_set(&tcon->num_rmdirs, 0);
40323 - atomic_set(&tcon->num_renames, 0);
40324 - atomic_set(&tcon->num_t2renames, 0);
40325 - atomic_set(&tcon->num_ffirst, 0);
40326 - atomic_set(&tcon->num_fnext, 0);
40327 - atomic_set(&tcon->num_fclose, 0);
40328 - atomic_set(&tcon->num_hardlinks, 0);
40329 - atomic_set(&tcon->num_symlinks, 0);
40330 - atomic_set(&tcon->num_locks, 0);
40331 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40332 + atomic_set_unchecked(&tcon->num_writes, 0);
40333 + atomic_set_unchecked(&tcon->num_reads, 0);
40334 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40335 + atomic_set_unchecked(&tcon->num_opens, 0);
40336 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40337 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40338 + atomic_set_unchecked(&tcon->num_closes, 0);
40339 + atomic_set_unchecked(&tcon->num_deletes, 0);
40340 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40341 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40342 + atomic_set_unchecked(&tcon->num_renames, 0);
40343 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40344 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40345 + atomic_set_unchecked(&tcon->num_fnext, 0);
40346 + atomic_set_unchecked(&tcon->num_fclose, 0);
40347 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40348 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40349 + atomic_set_unchecked(&tcon->num_locks, 0);
40350 }
40351 }
40352 }
40353 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40354 if (tcon->need_reconnect)
40355 seq_puts(m, "\tDISCONNECTED ");
40356 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40357 - atomic_read(&tcon->num_smbs_sent),
40358 - atomic_read(&tcon->num_oplock_brks));
40359 + atomic_read_unchecked(&tcon->num_smbs_sent),
40360 + atomic_read_unchecked(&tcon->num_oplock_brks));
40361 seq_printf(m, "\nReads: %d Bytes: %lld",
40362 - atomic_read(&tcon->num_reads),
40363 + atomic_read_unchecked(&tcon->num_reads),
40364 (long long)(tcon->bytes_read));
40365 seq_printf(m, "\nWrites: %d Bytes: %lld",
40366 - atomic_read(&tcon->num_writes),
40367 + atomic_read_unchecked(&tcon->num_writes),
40368 (long long)(tcon->bytes_written));
40369 seq_printf(m, "\nFlushes: %d",
40370 - atomic_read(&tcon->num_flushes));
40371 + atomic_read_unchecked(&tcon->num_flushes));
40372 seq_printf(m, "\nLocks: %d HardLinks: %d "
40373 "Symlinks: %d",
40374 - atomic_read(&tcon->num_locks),
40375 - atomic_read(&tcon->num_hardlinks),
40376 - atomic_read(&tcon->num_symlinks));
40377 + atomic_read_unchecked(&tcon->num_locks),
40378 + atomic_read_unchecked(&tcon->num_hardlinks),
40379 + atomic_read_unchecked(&tcon->num_symlinks));
40380 seq_printf(m, "\nOpens: %d Closes: %d "
40381 "Deletes: %d",
40382 - atomic_read(&tcon->num_opens),
40383 - atomic_read(&tcon->num_closes),
40384 - atomic_read(&tcon->num_deletes));
40385 + atomic_read_unchecked(&tcon->num_opens),
40386 + atomic_read_unchecked(&tcon->num_closes),
40387 + atomic_read_unchecked(&tcon->num_deletes));
40388 seq_printf(m, "\nPosix Opens: %d "
40389 "Posix Mkdirs: %d",
40390 - atomic_read(&tcon->num_posixopens),
40391 - atomic_read(&tcon->num_posixmkdirs));
40392 + atomic_read_unchecked(&tcon->num_posixopens),
40393 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40394 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40395 - atomic_read(&tcon->num_mkdirs),
40396 - atomic_read(&tcon->num_rmdirs));
40397 + atomic_read_unchecked(&tcon->num_mkdirs),
40398 + atomic_read_unchecked(&tcon->num_rmdirs));
40399 seq_printf(m, "\nRenames: %d T2 Renames %d",
40400 - atomic_read(&tcon->num_renames),
40401 - atomic_read(&tcon->num_t2renames));
40402 + atomic_read_unchecked(&tcon->num_renames),
40403 + atomic_read_unchecked(&tcon->num_t2renames));
40404 seq_printf(m, "\nFindFirst: %d FNext %d "
40405 "FClose %d",
40406 - atomic_read(&tcon->num_ffirst),
40407 - atomic_read(&tcon->num_fnext),
40408 - atomic_read(&tcon->num_fclose));
40409 + atomic_read_unchecked(&tcon->num_ffirst),
40410 + atomic_read_unchecked(&tcon->num_fnext),
40411 + atomic_read_unchecked(&tcon->num_fclose));
40412 }
40413 }
40414 }
40415 diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40416 --- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40417 +++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40418 @@ -252,28 +252,28 @@ struct cifsTconInfo {
40419 __u16 Flags; /* optional support bits */
40420 enum statusEnum tidStatus;
40421 #ifdef CONFIG_CIFS_STATS
40422 - atomic_t num_smbs_sent;
40423 - atomic_t num_writes;
40424 - atomic_t num_reads;
40425 - atomic_t num_flushes;
40426 - atomic_t num_oplock_brks;
40427 - atomic_t num_opens;
40428 - atomic_t num_closes;
40429 - atomic_t num_deletes;
40430 - atomic_t num_mkdirs;
40431 - atomic_t num_posixopens;
40432 - atomic_t num_posixmkdirs;
40433 - atomic_t num_rmdirs;
40434 - atomic_t num_renames;
40435 - atomic_t num_t2renames;
40436 - atomic_t num_ffirst;
40437 - atomic_t num_fnext;
40438 - atomic_t num_fclose;
40439 - atomic_t num_hardlinks;
40440 - atomic_t num_symlinks;
40441 - atomic_t num_locks;
40442 - atomic_t num_acl_get;
40443 - atomic_t num_acl_set;
40444 + atomic_unchecked_t num_smbs_sent;
40445 + atomic_unchecked_t num_writes;
40446 + atomic_unchecked_t num_reads;
40447 + atomic_unchecked_t num_flushes;
40448 + atomic_unchecked_t num_oplock_brks;
40449 + atomic_unchecked_t num_opens;
40450 + atomic_unchecked_t num_closes;
40451 + atomic_unchecked_t num_deletes;
40452 + atomic_unchecked_t num_mkdirs;
40453 + atomic_unchecked_t num_posixopens;
40454 + atomic_unchecked_t num_posixmkdirs;
40455 + atomic_unchecked_t num_rmdirs;
40456 + atomic_unchecked_t num_renames;
40457 + atomic_unchecked_t num_t2renames;
40458 + atomic_unchecked_t num_ffirst;
40459 + atomic_unchecked_t num_fnext;
40460 + atomic_unchecked_t num_fclose;
40461 + atomic_unchecked_t num_hardlinks;
40462 + atomic_unchecked_t num_symlinks;
40463 + atomic_unchecked_t num_locks;
40464 + atomic_unchecked_t num_acl_get;
40465 + atomic_unchecked_t num_acl_set;
40466 #ifdef CONFIG_CIFS_STATS2
40467 unsigned long long time_writes;
40468 unsigned long long time_reads;
40469 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40470 }
40471
40472 #ifdef CONFIG_CIFS_STATS
40473 -#define cifs_stats_inc atomic_inc
40474 +#define cifs_stats_inc atomic_inc_unchecked
40475
40476 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40477 unsigned int bytes)
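The counter conversions in this file and in the cachefiles hunks above swap atomic_t for atomic_unchecked_t. Under the patch's reference-count overflow protection, arithmetic on a plain atomic_t is checked so a counter cannot be wrapped by an attacker; counters like these only feed statistics output and may wrap harmlessly, so they use the unchecked variant to opt out. The fragment below is a loose, single-threaded userspace model of that policy split built on the GCC/Clang __builtin_add_overflow helper -- atomic_model_t, atomic_unchecked_model_t and the two helpers are invented names, nothing here is actually atomic, and the real kernel reaction to an overflow differs from this printout.

    #include <limits.h>
    #include <stdio.h>

    /* Invented stand-ins for atomic_t / atomic_unchecked_t (not atomic at all). */
    typedef struct { int counter; } atomic_model_t;
    typedef struct { int counter; } atomic_unchecked_model_t;

    /* "Checked" add: refuse to wrap and report the attempt.  This only models the
     * policy that overflow is an error for reference counts. */
    static void atomic_add_checked(int i, atomic_model_t *v)
    {
        int sum;

        if (__builtin_add_overflow(v->counter, i, &sum)) {
            fprintf(stderr, "refcount overflow detected, value left untouched\n");
            return;
        }
        v->counter = sum;
    }

    /* Unchecked add: ordinary wrapping arithmetic, fine for statistics. */
    static void atomic_add_unchecked(int i, atomic_unchecked_model_t *v)
    {
        v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
    }

    int main(void)
    {
        atomic_model_t ref = { INT_MAX };
        atomic_unchecked_model_t stat = { INT_MAX };

        atomic_add_checked(1, &ref);      /* caught: ref.counter stays INT_MAX      */
        atomic_add_unchecked(1, &stat);   /* wraps silently (typically to INT_MIN)  */

        printf("checked counter: %d, unchecked counter: %d\n",
               ref.counter, stat.counter);
        return 0;
    }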
40478 diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40479 --- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40480 +++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40481 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40482
40483 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40484 {
40485 - char *p = nd_get_link(nd);
40486 + const char *p = nd_get_link(nd);
40487 if (!IS_ERR(p))
40488 kfree(p);
40489 }
40490 diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40491 --- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40492 +++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40493 @@ -24,14 +24,14 @@
40494 #include <linux/coda_fs_i.h>
40495 #include <linux/coda_cache.h>
40496
40497 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40498 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40499
40500 /* replace or extend an acl cache hit */
40501 void coda_cache_enter(struct inode *inode, int mask)
40502 {
40503 struct coda_inode_info *cii = ITOC(inode);
40504
40505 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40506 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40507 if (cii->c_uid != current_fsuid()) {
40508 cii->c_uid = current_fsuid();
40509 cii->c_cached_perm = mask;
40510 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40511 void coda_cache_clear_inode(struct inode *inode)
40512 {
40513 struct coda_inode_info *cii = ITOC(inode);
40514 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40515 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40516 }
40517
40518 /* remove all acl caches */
40519 void coda_cache_clear_all(struct super_block *sb)
40520 {
40521 - atomic_inc(&permission_epoch);
40522 + atomic_inc_unchecked(&permission_epoch);
40523 }
40524
40525
40526 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40527
40528 hit = (mask & cii->c_cached_perm) == mask &&
40529 cii->c_uid == current_fsuid() &&
40530 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40531 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40532
40533 return hit;
40534 }
40535 diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40536 --- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40537 +++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40538 @@ -29,10 +29,12 @@
40539 #undef elfhdr
40540 #undef elf_phdr
40541 #undef elf_note
40542 +#undef elf_dyn
40543 #undef elf_addr_t
40544 #define elfhdr elf32_hdr
40545 #define elf_phdr elf32_phdr
40546 #define elf_note elf32_note
40547 +#define elf_dyn Elf32_Dyn
40548 #define elf_addr_t Elf32_Addr
40549
40550 /*
40551 diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40552 --- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40553 +++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40554 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40555
40556 struct compat_readdir_callback {
40557 struct compat_old_linux_dirent __user *dirent;
40558 + struct file * file;
40559 int result;
40560 };
40561
40562 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40563 buf->result = -EOVERFLOW;
40564 return -EOVERFLOW;
40565 }
40566 +
40567 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40568 + return 0;
40569 +
40570 buf->result++;
40571 dirent = buf->dirent;
40572 if (!access_ok(VERIFY_WRITE, dirent,
40573 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40574
40575 buf.result = 0;
40576 buf.dirent = dirent;
40577 + buf.file = file;
40578
40579 error = vfs_readdir(file, compat_fillonedir, &buf);
40580 if (buf.result)
40581 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
40582 struct compat_getdents_callback {
40583 struct compat_linux_dirent __user *current_dir;
40584 struct compat_linux_dirent __user *previous;
40585 + struct file * file;
40586 int count;
40587 int error;
40588 };
40589 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40590 buf->error = -EOVERFLOW;
40591 return -EOVERFLOW;
40592 }
40593 +
40594 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40595 + return 0;
40596 +
40597 dirent = buf->previous;
40598 if (dirent) {
40599 if (__put_user(offset, &dirent->d_off))
40600 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40601 buf.previous = NULL;
40602 buf.count = count;
40603 buf.error = 0;
40604 + buf.file = file;
40605
40606 error = vfs_readdir(file, compat_filldir, &buf);
40607 if (error >= 0)
40608 @@ -987,6 +999,7 @@ out:
40609 struct compat_getdents_callback64 {
40610 struct linux_dirent64 __user *current_dir;
40611 struct linux_dirent64 __user *previous;
40612 + struct file * file;
40613 int count;
40614 int error;
40615 };
40616 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40617 buf->error = -EINVAL; /* only used if we fail.. */
40618 if (reclen > buf->count)
40619 return -EINVAL;
40620 +
40621 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40622 + return 0;
40623 +
40624 dirent = buf->previous;
40625
40626 if (dirent) {
40627 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40628 buf.previous = NULL;
40629 buf.count = count;
40630 buf.error = 0;
40631 + buf.file = file;
40632
40633 error = vfs_readdir(file, compat_filldir64, &buf);
40634 if (error >= 0)
40635 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40636 * verify all the pointers
40637 */
40638 ret = -EINVAL;
40639 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40640 + if (nr_segs > UIO_MAXIOV)
40641 goto out;
40642 if (!file->f_op)
40643 goto out;
40644 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40645 compat_uptr_t __user *envp,
40646 struct pt_regs * regs)
40647 {
40648 +#ifdef CONFIG_GRKERNSEC
40649 + struct file *old_exec_file;
40650 + struct acl_subject_label *old_acl;
40651 + struct rlimit old_rlim[RLIM_NLIMITS];
40652 +#endif
40653 struct linux_binprm *bprm;
40654 struct file *file;
40655 struct files_struct *displaced;
40656 bool clear_in_exec;
40657 int retval;
40658 + const struct cred *cred = current_cred();
40659 +
40660 + /*
40661 + * We move the actual failure in case of RLIMIT_NPROC excess from
40662 + * set*uid() to execve() because too many poorly written programs
40663 + * don't check setuid() return code. Here we additionally recheck
40664 + * whether NPROC limit is still exceeded.
40665 + */
40666 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40667 +
40668 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40669 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40670 + retval = -EAGAIN;
40671 + goto out_ret;
40672 + }
40673 +
40674 + /* We're below the limit (still or again), so we don't want to make
40675 + * further execve() calls fail. */
40676 + current->flags &= ~PF_NPROC_EXCEEDED;
40677
40678 retval = unshare_files(&displaced);
40679 if (retval)
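The RLIMIT_NPROC block added above defers the over-limit failure from set*uid() to execve(): set*uid() only marks the task (the PF_NPROC_EXCEEDED flag tested here) when the target user is over its process limit, and the next execve() re-checks the limit and fails with -EAGAIN, which even careless programs cannot ignore. The userspace fragment below shows the broken pattern this guards against; the missing error check is deliberate, and /bin/sh and uid 65534 are arbitrary example values.

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Anti-pattern: dropping privileges without checking for failure.
         * setuid() can fail with EAGAIN when the target uid is already over
         * its RLIMIT_NPROC, and ignoring that keeps the original privileges. */
        setuid(65534);                          /* return value ignored on purpose */

        /* The patch therefore repeats the limit check at execve() time, where a
         * failure stops the program instead of letting it continue privileged. */
        execl("/bin/sh", "sh", "-c", "id", (char *)NULL);
        perror("execl");
        return 1;
    }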
40680 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40681 bprm->filename = filename;
40682 bprm->interp = filename;
40683
40684 + if (gr_process_user_ban()) {
40685 + retval = -EPERM;
40686 + goto out_file;
40687 + }
40688 +
40689 + retval = -EACCES;
40690 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40691 + goto out_file;
40692 +
40693 retval = bprm_mm_init(bprm);
40694 if (retval)
40695 goto out_file;
40696 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40697 if (retval < 0)
40698 goto out;
40699
40700 + if (!gr_tpe_allow(file)) {
40701 + retval = -EACCES;
40702 + goto out;
40703 + }
40704 +
40705 + if (gr_check_crash_exec(file)) {
40706 + retval = -EACCES;
40707 + goto out;
40708 + }
40709 +
40710 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40711 +
40712 + gr_handle_exec_args_compat(bprm, argv);
40713 +
40714 +#ifdef CONFIG_GRKERNSEC
40715 + old_acl = current->acl;
40716 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40717 + old_exec_file = current->exec_file;
40718 + get_file(file);
40719 + current->exec_file = file;
40720 +#endif
40721 +
40722 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40723 + bprm->unsafe & LSM_UNSAFE_SHARE);
40724 + if (retval < 0)
40725 + goto out_fail;
40726 +
40727 retval = search_binary_handler(bprm, regs);
40728 if (retval < 0)
40729 - goto out;
40730 + goto out_fail;
40731 +#ifdef CONFIG_GRKERNSEC
40732 + if (old_exec_file)
40733 + fput(old_exec_file);
40734 +#endif
40735
40736 /* execve succeeded */
40737 current->fs->in_exec = 0;
40738 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40739 put_files_struct(displaced);
40740 return retval;
40741
40742 +out_fail:
40743 +#ifdef CONFIG_GRKERNSEC
40744 + current->acl = old_acl;
40745 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40746 + fput(current->exec_file);
40747 + current->exec_file = old_exec_file;
40748 +#endif
40749 +
40750 out:
40751 if (bprm->mm) {
40752 acct_arg_size(bprm, 0);
40753 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40754 struct fdtable *fdt;
40755 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40756
40757 + pax_track_stack();
40758 +
40759 if (n < 0)
40760 goto out_nofds;
40761
40762 diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40763 --- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40764 +++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40765 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40766 up = (struct compat_video_spu_palette __user *) arg;
40767 err = get_user(palp, &up->palette);
40768 err |= get_user(length, &up->length);
40769 + if (err)
40770 + return -EFAULT;
40771
40772 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40773 err = put_user(compat_ptr(palp), &up_native->palette);
40774 diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40775 --- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40776 +++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40777 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40778 }
40779 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40780 struct configfs_dirent *next;
40781 - const char * name;
40782 + const unsigned char * name;
40783 + char d_name[sizeof(next->s_dentry->d_iname)];
40784 int len;
40785
40786 next = list_entry(p, struct configfs_dirent,
40787 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40788 continue;
40789
40790 name = configfs_get_name(next);
40791 - len = strlen(name);
40792 + if (next->s_dentry && name == next->s_dentry->d_iname) {
40793 + len = next->s_dentry->d_name.len;
40794 + memcpy(d_name, name, len);
40795 + name = d_name;
40796 + } else
40797 + len = strlen(name);
40798 if (next->s_dentry)
40799 ino = next->s_dentry->d_inode->i_ino;
40800 else
40801 diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40802 --- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40803 +++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40804 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40805
40806 static struct kmem_cache *dentry_cache __read_mostly;
40807
40808 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40809 -
40810 /*
40811 * This is the single most critical data structure when it comes
40812 * to the dcache: the hashtable for lookups. Somebody should try
40813 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40814 mempages -= reserve;
40815
40816 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40817 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40818 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40819
40820 dcache_init();
40821 inode_init();
40822 diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40823 --- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40824 +++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40825 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40826 kfree(ls);
40827 }
40828
40829 -static struct sysfs_ops dlm_attr_ops = {
40830 +static const struct sysfs_ops dlm_attr_ops = {
40831 .show = dlm_attr_show,
40832 .store = dlm_attr_store,
40833 };
40834 diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40835 --- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40836 +++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40837 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40838 old_fs = get_fs();
40839 set_fs(get_ds());
40840 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40841 - (char __user *)lower_buf,
40842 + (__force char __user *)lower_buf,
40843 lower_bufsiz);
40844 set_fs(old_fs);
40845 if (rc < 0)
40846 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40847 }
40848 old_fs = get_fs();
40849 set_fs(get_ds());
40850 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40851 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40852 set_fs(old_fs);
40853 if (rc < 0)
40854 goto out_free;
40855 diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40856 --- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40857 +++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40858 @@ -56,12 +56,24 @@
40859 #include <linux/fsnotify.h>
40860 #include <linux/fs_struct.h>
40861 #include <linux/pipe_fs_i.h>
40862 +#include <linux/random.h>
40863 +#include <linux/seq_file.h>
40864 +
40865 +#ifdef CONFIG_PAX_REFCOUNT
40866 +#include <linux/kallsyms.h>
40867 +#include <linux/kdebug.h>
40868 +#endif
40869
40870 #include <asm/uaccess.h>
40871 #include <asm/mmu_context.h>
40872 #include <asm/tlb.h>
40873 #include "internal.h"
40874
40875 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40876 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40877 +EXPORT_SYMBOL(pax_set_initial_flags_func);
40878 +#endif
40879 +
40880 int core_uses_pid;
40881 char core_pattern[CORENAME_MAX_SIZE] = "core";
40882 unsigned int core_pipe_limit;
40883 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40884 goto out;
40885
40886 file = do_filp_open(AT_FDCWD, tmp,
40887 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40888 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40889 MAY_READ | MAY_EXEC | MAY_OPEN);
40890 putname(tmp);
40891 error = PTR_ERR(file);
40892 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40893 int write)
40894 {
40895 struct page *page;
40896 - int ret;
40897
40898 -#ifdef CONFIG_STACK_GROWSUP
40899 - if (write) {
40900 - ret = expand_stack_downwards(bprm->vma, pos);
40901 - if (ret < 0)
40902 - return NULL;
40903 - }
40904 -#endif
40905 - ret = get_user_pages(current, bprm->mm, pos,
40906 - 1, write, 1, &page, NULL);
40907 - if (ret <= 0)
40908 + if (0 > expand_stack_downwards(bprm->vma, pos))
40909 + return NULL;
40910 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40911 return NULL;
40912
40913 if (write) {
40914 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40915 vma->vm_end = STACK_TOP_MAX;
40916 vma->vm_start = vma->vm_end - PAGE_SIZE;
40917 vma->vm_flags = VM_STACK_FLAGS;
40918 +
40919 +#ifdef CONFIG_PAX_SEGMEXEC
40920 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40921 +#endif
40922 +
40923 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40924
40925 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40926 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40927 mm->stack_vm = mm->total_vm = 1;
40928 up_write(&mm->mmap_sem);
40929 bprm->p = vma->vm_end - sizeof(void *);
40930 +
40931 +#ifdef CONFIG_PAX_RANDUSTACK
40932 + if (randomize_va_space)
40933 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40934 +#endif
40935 +
40936 return 0;
40937 err:
40938 up_write(&mm->mmap_sem);
40939 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40940 int r;
40941 mm_segment_t oldfs = get_fs();
40942 set_fs(KERNEL_DS);
40943 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
40944 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40945 set_fs(oldfs);
40946 return r;
40947 }
40948 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40949 unsigned long new_end = old_end - shift;
40950 struct mmu_gather *tlb;
40951
40952 - BUG_ON(new_start > new_end);
40953 + if (new_start >= new_end || new_start < mmap_min_addr)
40954 + return -ENOMEM;
40955
40956 /*
40957 * ensure there are no vmas between where we want to go
40958 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40959 if (vma != find_vma(mm, new_start))
40960 return -EFAULT;
40961
40962 +#ifdef CONFIG_PAX_SEGMEXEC
40963 + BUG_ON(pax_find_mirror_vma(vma));
40964 +#endif
40965 +
40966 /*
40967 * cover the whole range: [new_start, old_end)
40968 */
40969 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40970 stack_top = arch_align_stack(stack_top);
40971 stack_top = PAGE_ALIGN(stack_top);
40972
40973 - if (unlikely(stack_top < mmap_min_addr) ||
40974 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40975 - return -ENOMEM;
40976 -
40977 stack_shift = vma->vm_end - stack_top;
40978
40979 bprm->p -= stack_shift;
40980 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40981 bprm->exec -= stack_shift;
40982
40983 down_write(&mm->mmap_sem);
40984 +
40985 + /* Move stack pages down in memory. */
40986 + if (stack_shift) {
40987 + ret = shift_arg_pages(vma, stack_shift);
40988 + if (ret)
40989 + goto out_unlock;
40990 + }
40991 +
40992 vm_flags = VM_STACK_FLAGS;
40993
40994 /*
40995 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40996 vm_flags &= ~VM_EXEC;
40997 vm_flags |= mm->def_flags;
40998
40999 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41000 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41001 + vm_flags &= ~VM_EXEC;
41002 +
41003 +#ifdef CONFIG_PAX_MPROTECT
41004 + if (mm->pax_flags & MF_PAX_MPROTECT)
41005 + vm_flags &= ~VM_MAYEXEC;
41006 +#endif
41007 +
41008 + }
41009 +#endif
41010 +
41011 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
41012 vm_flags);
41013 if (ret)
41014 goto out_unlock;
41015 BUG_ON(prev != vma);
41016
41017 - /* Move stack pages down in memory. */
41018 - if (stack_shift) {
41019 - ret = shift_arg_pages(vma, stack_shift);
41020 - if (ret)
41021 - goto out_unlock;
41022 - }
41023 -
41024 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
41025 stack_size = vma->vm_end - vma->vm_start;
41026 /*
41027 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
41028 int err;
41029
41030 file = do_filp_open(AT_FDCWD, name,
41031 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41032 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41033 MAY_EXEC | MAY_OPEN);
41034 if (IS_ERR(file))
41035 goto out;
41036 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
41037 old_fs = get_fs();
41038 set_fs(get_ds());
41039 /* The cast to a user pointer is valid due to the set_fs() */
41040 - result = vfs_read(file, (void __user *)addr, count, &pos);
41041 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
41042 set_fs(old_fs);
41043 return result;
41044 }
41045 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
41046 }
41047 rcu_read_unlock();
41048
41049 - if (p->fs->users > n_fs) {
41050 + if (atomic_read(&p->fs->users) > n_fs) {
41051 bprm->unsafe |= LSM_UNSAFE_SHARE;
41052 } else {
41053 res = -EAGAIN;
41054 @@ -1347,11 +1376,35 @@ int do_execve(char * filename,
41055 char __user *__user *envp,
41056 struct pt_regs * regs)
41057 {
41058 +#ifdef CONFIG_GRKERNSEC
41059 + struct file *old_exec_file;
41060 + struct acl_subject_label *old_acl;
41061 + struct rlimit old_rlim[RLIM_NLIMITS];
41062 +#endif
41063 struct linux_binprm *bprm;
41064 struct file *file;
41065 struct files_struct *displaced;
41066 bool clear_in_exec;
41067 int retval;
41068 + const struct cred *cred = current_cred();
41069 +
41070 + /*
41071 + * We move the actual failure in case of RLIMIT_NPROC excess from
41072 + * set*uid() to execve() because too many poorly written programs
41073 + * don't check setuid() return code. Here we additionally recheck
41074 + * whether NPROC limit is still exceeded.
41075 + */
41076 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41077 +
41078 + if ((current->flags & PF_NPROC_EXCEEDED) &&
41079 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
41080 + retval = -EAGAIN;
41081 + goto out_ret;
41082 + }
41083 +
41084 + /* We're below the limit (still or again), so we don't want to make
41085 + * further execve() calls fail. */
41086 + current->flags &= ~PF_NPROC_EXCEEDED;
41087
41088 retval = unshare_files(&displaced);
41089 if (retval)
41090 @@ -1383,6 +1436,16 @@ int do_execve(char * filename,
41091 bprm->filename = filename;
41092 bprm->interp = filename;
41093
41094 + if (gr_process_user_ban()) {
41095 + retval = -EPERM;
41096 + goto out_file;
41097 + }
41098 +
41099 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41100 + retval = -EACCES;
41101 + goto out_file;
41102 + }
41103 +
41104 retval = bprm_mm_init(bprm);
41105 if (retval)
41106 goto out_file;
41107 @@ -1412,10 +1475,41 @@ int do_execve(char * filename,
41108 if (retval < 0)
41109 goto out;
41110
41111 + if (!gr_tpe_allow(file)) {
41112 + retval = -EACCES;
41113 + goto out;
41114 + }
41115 +
41116 + if (gr_check_crash_exec(file)) {
41117 + retval = -EACCES;
41118 + goto out;
41119 + }
41120 +
41121 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41122 +
41123 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41124 +
41125 +#ifdef CONFIG_GRKERNSEC
41126 + old_acl = current->acl;
41127 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41128 + old_exec_file = current->exec_file;
41129 + get_file(file);
41130 + current->exec_file = file;
41131 +#endif
41132 +
41133 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41134 + bprm->unsafe & LSM_UNSAFE_SHARE);
41135 + if (retval < 0)
41136 + goto out_fail;
41137 +
41138 current->flags &= ~PF_KTHREAD;
41139 retval = search_binary_handler(bprm,regs);
41140 if (retval < 0)
41141 - goto out;
41142 + goto out_fail;
41143 +#ifdef CONFIG_GRKERNSEC
41144 + if (old_exec_file)
41145 + fput(old_exec_file);
41146 +#endif
41147
41148 /* execve succeeded */
41149 current->fs->in_exec = 0;
41150 @@ -1426,6 +1520,14 @@ int do_execve(char * filename,
41151 put_files_struct(displaced);
41152 return retval;
41153
41154 +out_fail:
41155 +#ifdef CONFIG_GRKERNSEC
41156 + current->acl = old_acl;
41157 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41158 + fput(current->exec_file);
41159 + current->exec_file = old_exec_file;
41160 +#endif
41161 +
41162 out:
41163 if (bprm->mm) {
41164 acct_arg_size(bprm, 0);
41165 @@ -1591,6 +1693,220 @@ out:
41166 return ispipe;
41167 }
41168
41169 +int pax_check_flags(unsigned long *flags)
41170 +{
41171 + int retval = 0;
41172 +
41173 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41174 + if (*flags & MF_PAX_SEGMEXEC)
41175 + {
41176 + *flags &= ~MF_PAX_SEGMEXEC;
41177 + retval = -EINVAL;
41178 + }
41179 +#endif
41180 +
41181 + if ((*flags & MF_PAX_PAGEEXEC)
41182 +
41183 +#ifdef CONFIG_PAX_PAGEEXEC
41184 + && (*flags & MF_PAX_SEGMEXEC)
41185 +#endif
41186 +
41187 + )
41188 + {
41189 + *flags &= ~MF_PAX_PAGEEXEC;
41190 + retval = -EINVAL;
41191 + }
41192 +
41193 + if ((*flags & MF_PAX_MPROTECT)
41194 +
41195 +#ifdef CONFIG_PAX_MPROTECT
41196 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41197 +#endif
41198 +
41199 + )
41200 + {
41201 + *flags &= ~MF_PAX_MPROTECT;
41202 + retval = -EINVAL;
41203 + }
41204 +
41205 + if ((*flags & MF_PAX_EMUTRAMP)
41206 +
41207 +#ifdef CONFIG_PAX_EMUTRAMP
41208 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41209 +#endif
41210 +
41211 + )
41212 + {
41213 + *flags &= ~MF_PAX_EMUTRAMP;
41214 + retval = -EINVAL;
41215 + }
41216 +
41217 + return retval;
41218 +}
41219 +
41220 +EXPORT_SYMBOL(pax_check_flags);
41221 +
41222 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41223 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41224 +{
41225 + struct task_struct *tsk = current;
41226 + struct mm_struct *mm = current->mm;
41227 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41228 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41229 + char *path_exec = NULL;
41230 + char *path_fault = NULL;
41231 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41232 +
41233 + if (buffer_exec && buffer_fault) {
41234 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41235 +
41236 + down_read(&mm->mmap_sem);
41237 + vma = mm->mmap;
41238 + while (vma && (!vma_exec || !vma_fault)) {
41239 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41240 + vma_exec = vma;
41241 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41242 + vma_fault = vma;
41243 + vma = vma->vm_next;
41244 + }
41245 + if (vma_exec) {
41246 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41247 + if (IS_ERR(path_exec))
41248 + path_exec = "<path too long>";
41249 + else {
41250 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41251 + if (path_exec) {
41252 + *path_exec = 0;
41253 + path_exec = buffer_exec;
41254 + } else
41255 + path_exec = "<path too long>";
41256 + }
41257 + }
41258 + if (vma_fault) {
41259 + start = vma_fault->vm_start;
41260 + end = vma_fault->vm_end;
41261 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41262 + if (vma_fault->vm_file) {
41263 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41264 + if (IS_ERR(path_fault))
41265 + path_fault = "<path too long>";
41266 + else {
41267 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41268 + if (path_fault) {
41269 + *path_fault = 0;
41270 + path_fault = buffer_fault;
41271 + } else
41272 + path_fault = "<path too long>";
41273 + }
41274 + } else
41275 + path_fault = "<anonymous mapping>";
41276 + }
41277 + up_read(&mm->mmap_sem);
41278 + }
41279 + if (tsk->signal->curr_ip)
41280 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41281 + else
41282 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41283 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41284 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41285 + task_uid(tsk), task_euid(tsk), pc, sp);
41286 + free_page((unsigned long)buffer_exec);
41287 + free_page((unsigned long)buffer_fault);
41288 + pax_report_insns(pc, sp);
41289 + do_coredump(SIGKILL, SIGKILL, regs);
41290 +}
41291 +#endif
41292 +
41293 +#ifdef CONFIG_PAX_REFCOUNT
41294 +void pax_report_refcount_overflow(struct pt_regs *regs)
41295 +{
41296 + if (current->signal->curr_ip)
41297 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41298 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41299 + else
41300 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41301 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41302 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41303 + show_regs(regs);
41304 + force_sig_specific(SIGKILL, current);
41305 +}
41306 +#endif
41307 +
41308 +#ifdef CONFIG_PAX_USERCOPY
41309 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41310 +int object_is_on_stack(const void *obj, unsigned long len)
41311 +{
41312 + const void * const stack = task_stack_page(current);
41313 + const void * const stackend = stack + THREAD_SIZE;
41314 +
41315 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41316 + const void *frame = NULL;
41317 + const void *oldframe;
41318 +#endif
41319 +
41320 + if (obj + len < obj)
41321 + return -1;
41322 +
41323 + if (obj + len <= stack || stackend <= obj)
41324 + return 0;
41325 +
41326 + if (obj < stack || stackend < obj + len)
41327 + return -1;
41328 +
41329 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41330 + oldframe = __builtin_frame_address(1);
41331 + if (oldframe)
41332 + frame = __builtin_frame_address(2);
41333 + /*
41334 + low ----------------------------------------------> high
41335 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41336 + ^----------------^
41337 + allow copies only within here
41338 + */
41339 + while (stack <= frame && frame < stackend) {
41340 + /* if obj + len extends past the last frame, this
41341 + check won't pass and the next frame will be 0,
41342 + causing us to bail out and correctly report
41343 + the copy as invalid
41344 + */
41345 + if (obj + len <= frame)
41346 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41347 + oldframe = frame;
41348 + frame = *(const void * const *)frame;
41349 + }
41350 + return -1;
41351 +#else
41352 + return 1;
41353 +#endif
41354 +}
41355 +
41356 +
41357 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41358 +{
41359 + if (current->signal->curr_ip)
41360 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41361 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41362 + else
41363 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41364 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41365 +
41366 + dump_stack();
41367 + gr_handle_kernel_exploit();
41368 + do_group_exit(SIGKILL);
41369 +}
41370 +#endif
41371 +
41372 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41373 +void pax_track_stack(void)
41374 +{
41375 + unsigned long sp = (unsigned long)&sp;
41376 + if (sp < current_thread_info()->lowest_stack &&
41377 + sp > (unsigned long)task_stack_page(current))
41378 + current_thread_info()->lowest_stack = sp;
41379 +}
41380 +EXPORT_SYMBOL(pax_track_stack);
41381 +#endif
41382 +
41383 static int zap_process(struct task_struct *start)
41384 {
41385 struct task_struct *t;
41386 @@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41387 pipe = file->f_path.dentry->d_inode->i_pipe;
41388
41389 pipe_lock(pipe);
41390 - pipe->readers++;
41391 - pipe->writers--;
41392 + atomic_inc(&pipe->readers);
41393 + atomic_dec(&pipe->writers);
41394
41395 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41396 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41397 wake_up_interruptible_sync(&pipe->wait);
41398 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41399 pipe_wait(pipe);
41400 }
41401
41402 - pipe->readers--;
41403 - pipe->writers++;
41404 + atomic_dec(&pipe->readers);
41405 + atomic_inc(&pipe->writers);
41406 pipe_unlock(pipe);
41407
41408 }
41409 @@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41410 char **helper_argv = NULL;
41411 int helper_argc = 0;
41412 int dump_count = 0;
41413 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41414 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41415
41416 audit_core_dumps(signr);
41417
41418 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41419 + gr_handle_brute_attach(current, mm->flags);
41420 +
41421 binfmt = mm->binfmt;
41422 if (!binfmt || !binfmt->core_dump)
41423 goto fail;
41424 @@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41425 */
41426 clear_thread_flag(TIF_SIGPENDING);
41427
41428 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41429 +
41430 /*
41431 * lock_kernel() because format_corename() is controlled by sysctl, which
41432 * uses lock_kernel()
41433 @@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41434 goto fail_unlock;
41435 }
41436
41437 - dump_count = atomic_inc_return(&core_dump_count);
41438 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41439 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41440 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41441 task_tgid_vnr(current), current->comm);
41442 @@ -1972,7 +2293,7 @@ close_fail:
41443 filp_close(file, NULL);
41444 fail_dropcount:
41445 if (dump_count)
41446 - atomic_dec(&core_dump_count);
41447 + atomic_dec_unchecked(&core_dump_count);
41448 fail_unlock:
41449 if (helper_argv)
41450 argv_free(helper_argv);
41451 diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41452 --- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41453 +++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41454 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41455
41456 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41457 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41458 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41459 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41460 sbi->s_resuid != current_fsuid() &&
41461 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41462 return 0;
41463 diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41464 --- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41465 +++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41466 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41467
41468 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41469 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41470 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41471 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41472 sbi->s_resuid != current_fsuid() &&
41473 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41474 return 0;
41475 diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41476 --- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41477 +++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41478 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41479 /* Hm, nope. Are (enough) root reserved blocks available? */
41480 if (sbi->s_resuid == current_fsuid() ||
41481 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41482 - capable(CAP_SYS_RESOURCE)) {
41483 + capable_nolog(CAP_SYS_RESOURCE)) {
41484 if (free_blocks >= (nblocks + dirty_blocks))
41485 return 1;
41486 }
41487 diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41488 --- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41489 +++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41490 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41491
41492 /* stats for buddy allocator */
41493 spinlock_t s_mb_pa_lock;
41494 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41495 - atomic_t s_bal_success; /* we found long enough chunks */
41496 - atomic_t s_bal_allocated; /* in blocks */
41497 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41498 - atomic_t s_bal_goals; /* goal hits */
41499 - atomic_t s_bal_breaks; /* too long searches */
41500 - atomic_t s_bal_2orders; /* 2^order hits */
41501 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41502 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41503 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41504 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41505 + atomic_unchecked_t s_bal_goals; /* goal hits */
41506 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41507 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41508 spinlock_t s_bal_lock;
41509 unsigned long s_mb_buddies_generated;
41510 unsigned long long s_mb_generation_time;
41511 - atomic_t s_mb_lost_chunks;
41512 - atomic_t s_mb_preallocated;
41513 - atomic_t s_mb_discarded;
41514 + atomic_unchecked_t s_mb_lost_chunks;
41515 + atomic_unchecked_t s_mb_preallocated;
41516 + atomic_unchecked_t s_mb_discarded;
41517 atomic_t s_lock_busy;
41518
41519 /* locality groups */
41520 diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41521 --- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41522 +++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41523 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41524 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41525
41526 if (EXT4_SB(sb)->s_mb_stats)
41527 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41528 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41529
41530 break;
41531 }
41532 @@ -2131,7 +2131,7 @@ repeat:
41533 ac->ac_status = AC_STATUS_CONTINUE;
41534 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41535 cr = 3;
41536 - atomic_inc(&sbi->s_mb_lost_chunks);
41537 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41538 goto repeat;
41539 }
41540 }
41541 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41542 ext4_grpblk_t counters[16];
41543 } sg;
41544
41545 + pax_track_stack();
41546 +
41547 group--;
41548 if (group == 0)
41549 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41550 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41551 if (sbi->s_mb_stats) {
41552 printk(KERN_INFO
41553 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41554 - atomic_read(&sbi->s_bal_allocated),
41555 - atomic_read(&sbi->s_bal_reqs),
41556 - atomic_read(&sbi->s_bal_success));
41557 + atomic_read_unchecked(&sbi->s_bal_allocated),
41558 + atomic_read_unchecked(&sbi->s_bal_reqs),
41559 + atomic_read_unchecked(&sbi->s_bal_success));
41560 printk(KERN_INFO
41561 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41562 "%u 2^N hits, %u breaks, %u lost\n",
41563 - atomic_read(&sbi->s_bal_ex_scanned),
41564 - atomic_read(&sbi->s_bal_goals),
41565 - atomic_read(&sbi->s_bal_2orders),
41566 - atomic_read(&sbi->s_bal_breaks),
41567 - atomic_read(&sbi->s_mb_lost_chunks));
41568 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41569 + atomic_read_unchecked(&sbi->s_bal_goals),
41570 + atomic_read_unchecked(&sbi->s_bal_2orders),
41571 + atomic_read_unchecked(&sbi->s_bal_breaks),
41572 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41573 printk(KERN_INFO
41574 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41575 sbi->s_mb_buddies_generated++,
41576 sbi->s_mb_generation_time);
41577 printk(KERN_INFO
41578 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41579 - atomic_read(&sbi->s_mb_preallocated),
41580 - atomic_read(&sbi->s_mb_discarded));
41581 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41582 + atomic_read_unchecked(&sbi->s_mb_discarded));
41583 }
41584
41585 free_percpu(sbi->s_locality_groups);
41586 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41587 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41588
41589 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41590 - atomic_inc(&sbi->s_bal_reqs);
41591 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41592 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41593 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41594 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41595 - atomic_inc(&sbi->s_bal_success);
41596 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41597 + atomic_inc_unchecked(&sbi->s_bal_success);
41598 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41599 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41600 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41601 - atomic_inc(&sbi->s_bal_goals);
41602 + atomic_inc_unchecked(&sbi->s_bal_goals);
41603 if (ac->ac_found > sbi->s_mb_max_to_scan)
41604 - atomic_inc(&sbi->s_bal_breaks);
41605 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41606 }
41607
41608 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41609 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41610 trace_ext4_mb_new_inode_pa(ac, pa);
41611
41612 ext4_mb_use_inode_pa(ac, pa);
41613 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41614 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41615
41616 ei = EXT4_I(ac->ac_inode);
41617 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41618 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41619 trace_ext4_mb_new_group_pa(ac, pa);
41620
41621 ext4_mb_use_group_pa(ac, pa);
41622 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41623 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41624
41625 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41626 lg = ac->ac_lg;
41627 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41628 * from the bitmap and continue.
41629 */
41630 }
41631 - atomic_add(free, &sbi->s_mb_discarded);
41632 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
41633
41634 return err;
41635 }
41636 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41637 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41638 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41639 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41640 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41641 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41642
41643 if (ac) {
41644 ac->ac_sb = sb;
41645 diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41646 --- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41647 +++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41648 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41649 }
41650
41651
41652 -static struct sysfs_ops ext4_attr_ops = {
41653 +static const struct sysfs_ops ext4_attr_ops = {
41654 .show = ext4_attr_show,
41655 .store = ext4_attr_store,
41656 };
41657 diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41658 --- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41659 +++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41660 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41661 if (err)
41662 return err;
41663
41664 + if (gr_handle_chroot_fowner(pid, type))
41665 + return -ENOENT;
41666 + if (gr_check_protected_task_fowner(pid, type))
41667 + return -EACCES;
41668 +
41669 f_modown(filp, pid, type, force);
41670 return 0;
41671 }
41672 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41673 switch (cmd) {
41674 case F_DUPFD:
41675 case F_DUPFD_CLOEXEC:
41676 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41677 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41678 break;
41679 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41680 diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41681 --- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41682 +++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41683 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41684 */
41685 filp->f_op = &read_pipefifo_fops;
41686 pipe->r_counter++;
41687 - if (pipe->readers++ == 0)
41688 + if (atomic_inc_return(&pipe->readers) == 1)
41689 wake_up_partner(inode);
41690
41691 - if (!pipe->writers) {
41692 + if (!atomic_read(&pipe->writers)) {
41693 if ((filp->f_flags & O_NONBLOCK)) {
41694 /* suppress POLLHUP until we have
41695 * seen a writer */
41696 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41697 * errno=ENXIO when there is no process reading the FIFO.
41698 */
41699 ret = -ENXIO;
41700 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41701 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41702 goto err;
41703
41704 filp->f_op = &write_pipefifo_fops;
41705 pipe->w_counter++;
41706 - if (!pipe->writers++)
41707 + if (atomic_inc_return(&pipe->writers) == 1)
41708 wake_up_partner(inode);
41709
41710 - if (!pipe->readers) {
41711 + if (!atomic_read(&pipe->readers)) {
41712 wait_for_partner(inode, &pipe->r_counter);
41713 if (signal_pending(current))
41714 goto err_wr;
41715 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41716 */
41717 filp->f_op = &rdwr_pipefifo_fops;
41718
41719 - pipe->readers++;
41720 - pipe->writers++;
41721 + atomic_inc(&pipe->readers);
41722 + atomic_inc(&pipe->writers);
41723 pipe->r_counter++;
41724 pipe->w_counter++;
41725 - if (pipe->readers == 1 || pipe->writers == 1)
41726 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41727 wake_up_partner(inode);
41728 break;
41729
41730 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41731 return 0;
41732
41733 err_rd:
41734 - if (!--pipe->readers)
41735 + if (atomic_dec_and_test(&pipe->readers))
41736 wake_up_interruptible(&pipe->wait);
41737 ret = -ERESTARTSYS;
41738 goto err;
41739
41740 err_wr:
41741 - if (!--pipe->writers)
41742 + if (atomic_dec_and_test(&pipe->writers))
41743 wake_up_interruptible(&pipe->wait);
41744 ret = -ERESTARTSYS;
41745 goto err;
41746
41747 err:
41748 - if (!pipe->readers && !pipe->writers)
41749 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41750 free_pipe_info(inode);
41751
41752 err_nocleanup:
41753 diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41754 --- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41755 +++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41756 @@ -14,6 +14,7 @@
41757 #include <linux/slab.h>
41758 #include <linux/vmalloc.h>
41759 #include <linux/file.h>
41760 +#include <linux/security.h>
41761 #include <linux/fdtable.h>
41762 #include <linux/bitops.h>
41763 #include <linux/interrupt.h>
41764 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41765 * N.B. For clone tasks sharing a files structure, this test
41766 * will limit the total number of files that can be opened.
41767 */
41768 +
41769 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41770 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41771 return -EMFILE;
41772
41773 diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41774 --- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41775 +++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41776 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41777 int len = dot ? dot - name : strlen(name);
41778
41779 fs = __get_fs_type(name, len);
41780 +
41781 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
41782 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41783 +#else
41784 if (!fs && (request_module("%.*s", len, name) == 0))
41785 +#endif
41786 fs = __get_fs_type(name, len);
41787
41788 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41789 diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41790 --- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41791 +++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41792 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41793 parent ? (char *) parent->def->name : "<no-parent>",
41794 def->name, netfs_data);
41795
41796 - fscache_stat(&fscache_n_acquires);
41797 + fscache_stat_unchecked(&fscache_n_acquires);
41798
41799 /* if there's no parent cookie, then we don't create one here either */
41800 if (!parent) {
41801 - fscache_stat(&fscache_n_acquires_null);
41802 + fscache_stat_unchecked(&fscache_n_acquires_null);
41803 _leave(" [no parent]");
41804 return NULL;
41805 }
41806 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41807 /* allocate and initialise a cookie */
41808 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41809 if (!cookie) {
41810 - fscache_stat(&fscache_n_acquires_oom);
41811 + fscache_stat_unchecked(&fscache_n_acquires_oom);
41812 _leave(" [ENOMEM]");
41813 return NULL;
41814 }
41815 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41816
41817 switch (cookie->def->type) {
41818 case FSCACHE_COOKIE_TYPE_INDEX:
41819 - fscache_stat(&fscache_n_cookie_index);
41820 + fscache_stat_unchecked(&fscache_n_cookie_index);
41821 break;
41822 case FSCACHE_COOKIE_TYPE_DATAFILE:
41823 - fscache_stat(&fscache_n_cookie_data);
41824 + fscache_stat_unchecked(&fscache_n_cookie_data);
41825 break;
41826 default:
41827 - fscache_stat(&fscache_n_cookie_special);
41828 + fscache_stat_unchecked(&fscache_n_cookie_special);
41829 break;
41830 }
41831
41832 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41833 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41834 atomic_dec(&parent->n_children);
41835 __fscache_cookie_put(cookie);
41836 - fscache_stat(&fscache_n_acquires_nobufs);
41837 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41838 _leave(" = NULL");
41839 return NULL;
41840 }
41841 }
41842
41843 - fscache_stat(&fscache_n_acquires_ok);
41844 + fscache_stat_unchecked(&fscache_n_acquires_ok);
41845 _leave(" = %p", cookie);
41846 return cookie;
41847 }
41848 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41849 cache = fscache_select_cache_for_object(cookie->parent);
41850 if (!cache) {
41851 up_read(&fscache_addremove_sem);
41852 - fscache_stat(&fscache_n_acquires_no_cache);
41853 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41854 _leave(" = -ENOMEDIUM [no cache]");
41855 return -ENOMEDIUM;
41856 }
41857 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41858 object = cache->ops->alloc_object(cache, cookie);
41859 fscache_stat_d(&fscache_n_cop_alloc_object);
41860 if (IS_ERR(object)) {
41861 - fscache_stat(&fscache_n_object_no_alloc);
41862 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
41863 ret = PTR_ERR(object);
41864 goto error;
41865 }
41866
41867 - fscache_stat(&fscache_n_object_alloc);
41868 + fscache_stat_unchecked(&fscache_n_object_alloc);
41869
41870 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41871
41872 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41873 struct fscache_object *object;
41874 struct hlist_node *_p;
41875
41876 - fscache_stat(&fscache_n_updates);
41877 + fscache_stat_unchecked(&fscache_n_updates);
41878
41879 if (!cookie) {
41880 - fscache_stat(&fscache_n_updates_null);
41881 + fscache_stat_unchecked(&fscache_n_updates_null);
41882 _leave(" [no cookie]");
41883 return;
41884 }
41885 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41886 struct fscache_object *object;
41887 unsigned long event;
41888
41889 - fscache_stat(&fscache_n_relinquishes);
41890 + fscache_stat_unchecked(&fscache_n_relinquishes);
41891 if (retire)
41892 - fscache_stat(&fscache_n_relinquishes_retire);
41893 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41894
41895 if (!cookie) {
41896 - fscache_stat(&fscache_n_relinquishes_null);
41897 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
41898 _leave(" [no cookie]");
41899 return;
41900 }
41901 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41902
41903 /* wait for the cookie to finish being instantiated (or to fail) */
41904 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41905 - fscache_stat(&fscache_n_relinquishes_waitcrt);
41906 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41907 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41908 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41909 }
41910 diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41911 --- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41912 +++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41913 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41914 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41915 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41916
41917 -extern atomic_t fscache_n_op_pend;
41918 -extern atomic_t fscache_n_op_run;
41919 -extern atomic_t fscache_n_op_enqueue;
41920 -extern atomic_t fscache_n_op_deferred_release;
41921 -extern atomic_t fscache_n_op_release;
41922 -extern atomic_t fscache_n_op_gc;
41923 -extern atomic_t fscache_n_op_cancelled;
41924 -extern atomic_t fscache_n_op_rejected;
41925 -
41926 -extern atomic_t fscache_n_attr_changed;
41927 -extern atomic_t fscache_n_attr_changed_ok;
41928 -extern atomic_t fscache_n_attr_changed_nobufs;
41929 -extern atomic_t fscache_n_attr_changed_nomem;
41930 -extern atomic_t fscache_n_attr_changed_calls;
41931 -
41932 -extern atomic_t fscache_n_allocs;
41933 -extern atomic_t fscache_n_allocs_ok;
41934 -extern atomic_t fscache_n_allocs_wait;
41935 -extern atomic_t fscache_n_allocs_nobufs;
41936 -extern atomic_t fscache_n_allocs_intr;
41937 -extern atomic_t fscache_n_allocs_object_dead;
41938 -extern atomic_t fscache_n_alloc_ops;
41939 -extern atomic_t fscache_n_alloc_op_waits;
41940 -
41941 -extern atomic_t fscache_n_retrievals;
41942 -extern atomic_t fscache_n_retrievals_ok;
41943 -extern atomic_t fscache_n_retrievals_wait;
41944 -extern atomic_t fscache_n_retrievals_nodata;
41945 -extern atomic_t fscache_n_retrievals_nobufs;
41946 -extern atomic_t fscache_n_retrievals_intr;
41947 -extern atomic_t fscache_n_retrievals_nomem;
41948 -extern atomic_t fscache_n_retrievals_object_dead;
41949 -extern atomic_t fscache_n_retrieval_ops;
41950 -extern atomic_t fscache_n_retrieval_op_waits;
41951 -
41952 -extern atomic_t fscache_n_stores;
41953 -extern atomic_t fscache_n_stores_ok;
41954 -extern atomic_t fscache_n_stores_again;
41955 -extern atomic_t fscache_n_stores_nobufs;
41956 -extern atomic_t fscache_n_stores_oom;
41957 -extern atomic_t fscache_n_store_ops;
41958 -extern atomic_t fscache_n_store_calls;
41959 -extern atomic_t fscache_n_store_pages;
41960 -extern atomic_t fscache_n_store_radix_deletes;
41961 -extern atomic_t fscache_n_store_pages_over_limit;
41962 -
41963 -extern atomic_t fscache_n_store_vmscan_not_storing;
41964 -extern atomic_t fscache_n_store_vmscan_gone;
41965 -extern atomic_t fscache_n_store_vmscan_busy;
41966 -extern atomic_t fscache_n_store_vmscan_cancelled;
41967 -
41968 -extern atomic_t fscache_n_marks;
41969 -extern atomic_t fscache_n_uncaches;
41970 -
41971 -extern atomic_t fscache_n_acquires;
41972 -extern atomic_t fscache_n_acquires_null;
41973 -extern atomic_t fscache_n_acquires_no_cache;
41974 -extern atomic_t fscache_n_acquires_ok;
41975 -extern atomic_t fscache_n_acquires_nobufs;
41976 -extern atomic_t fscache_n_acquires_oom;
41977 -
41978 -extern atomic_t fscache_n_updates;
41979 -extern atomic_t fscache_n_updates_null;
41980 -extern atomic_t fscache_n_updates_run;
41981 -
41982 -extern atomic_t fscache_n_relinquishes;
41983 -extern atomic_t fscache_n_relinquishes_null;
41984 -extern atomic_t fscache_n_relinquishes_waitcrt;
41985 -extern atomic_t fscache_n_relinquishes_retire;
41986 -
41987 -extern atomic_t fscache_n_cookie_index;
41988 -extern atomic_t fscache_n_cookie_data;
41989 -extern atomic_t fscache_n_cookie_special;
41990 -
41991 -extern atomic_t fscache_n_object_alloc;
41992 -extern atomic_t fscache_n_object_no_alloc;
41993 -extern atomic_t fscache_n_object_lookups;
41994 -extern atomic_t fscache_n_object_lookups_negative;
41995 -extern atomic_t fscache_n_object_lookups_positive;
41996 -extern atomic_t fscache_n_object_lookups_timed_out;
41997 -extern atomic_t fscache_n_object_created;
41998 -extern atomic_t fscache_n_object_avail;
41999 -extern atomic_t fscache_n_object_dead;
42000 -
42001 -extern atomic_t fscache_n_checkaux_none;
42002 -extern atomic_t fscache_n_checkaux_okay;
42003 -extern atomic_t fscache_n_checkaux_update;
42004 -extern atomic_t fscache_n_checkaux_obsolete;
42005 +extern atomic_unchecked_t fscache_n_op_pend;
42006 +extern atomic_unchecked_t fscache_n_op_run;
42007 +extern atomic_unchecked_t fscache_n_op_enqueue;
42008 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42009 +extern atomic_unchecked_t fscache_n_op_release;
42010 +extern atomic_unchecked_t fscache_n_op_gc;
42011 +extern atomic_unchecked_t fscache_n_op_cancelled;
42012 +extern atomic_unchecked_t fscache_n_op_rejected;
42013 +
42014 +extern atomic_unchecked_t fscache_n_attr_changed;
42015 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42016 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42017 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42018 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42019 +
42020 +extern atomic_unchecked_t fscache_n_allocs;
42021 +extern atomic_unchecked_t fscache_n_allocs_ok;
42022 +extern atomic_unchecked_t fscache_n_allocs_wait;
42023 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42024 +extern atomic_unchecked_t fscache_n_allocs_intr;
42025 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42026 +extern atomic_unchecked_t fscache_n_alloc_ops;
42027 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42028 +
42029 +extern atomic_unchecked_t fscache_n_retrievals;
42030 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42031 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42032 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42033 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42034 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42035 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42036 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42037 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42038 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42039 +
42040 +extern atomic_unchecked_t fscache_n_stores;
42041 +extern atomic_unchecked_t fscache_n_stores_ok;
42042 +extern atomic_unchecked_t fscache_n_stores_again;
42043 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42044 +extern atomic_unchecked_t fscache_n_stores_oom;
42045 +extern atomic_unchecked_t fscache_n_store_ops;
42046 +extern atomic_unchecked_t fscache_n_store_calls;
42047 +extern atomic_unchecked_t fscache_n_store_pages;
42048 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42049 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42050 +
42051 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42052 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42053 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42054 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42055 +
42056 +extern atomic_unchecked_t fscache_n_marks;
42057 +extern atomic_unchecked_t fscache_n_uncaches;
42058 +
42059 +extern atomic_unchecked_t fscache_n_acquires;
42060 +extern atomic_unchecked_t fscache_n_acquires_null;
42061 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42062 +extern atomic_unchecked_t fscache_n_acquires_ok;
42063 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42064 +extern atomic_unchecked_t fscache_n_acquires_oom;
42065 +
42066 +extern atomic_unchecked_t fscache_n_updates;
42067 +extern atomic_unchecked_t fscache_n_updates_null;
42068 +extern atomic_unchecked_t fscache_n_updates_run;
42069 +
42070 +extern atomic_unchecked_t fscache_n_relinquishes;
42071 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42072 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42073 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42074 +
42075 +extern atomic_unchecked_t fscache_n_cookie_index;
42076 +extern atomic_unchecked_t fscache_n_cookie_data;
42077 +extern atomic_unchecked_t fscache_n_cookie_special;
42078 +
42079 +extern atomic_unchecked_t fscache_n_object_alloc;
42080 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42081 +extern atomic_unchecked_t fscache_n_object_lookups;
42082 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42083 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42084 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42085 +extern atomic_unchecked_t fscache_n_object_created;
42086 +extern atomic_unchecked_t fscache_n_object_avail;
42087 +extern atomic_unchecked_t fscache_n_object_dead;
42088 +
42089 +extern atomic_unchecked_t fscache_n_checkaux_none;
42090 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42091 +extern atomic_unchecked_t fscache_n_checkaux_update;
42092 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42093
42094 extern atomic_t fscache_n_cop_alloc_object;
42095 extern atomic_t fscache_n_cop_lookup_object;
42096 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42097 atomic_inc(stat);
42098 }
42099
42100 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42101 +{
42102 + atomic_inc_unchecked(stat);
42103 +}
42104 +
42105 static inline void fscache_stat_d(atomic_t *stat)
42106 {
42107 atomic_dec(stat);
42108 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
42109
42110 #define __fscache_stat(stat) (NULL)
42111 #define fscache_stat(stat) do {} while (0)
42112 +#define fscache_stat_unchecked(stat) do {} while (0)
42113 #define fscache_stat_d(stat) do {} while (0)
42114 #endif
42115
42116 diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
42117 --- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42118 +++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42119 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
42120 /* update the object metadata on disk */
42121 case FSCACHE_OBJECT_UPDATING:
42122 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42123 - fscache_stat(&fscache_n_updates_run);
42124 + fscache_stat_unchecked(&fscache_n_updates_run);
42125 fscache_stat(&fscache_n_cop_update_object);
42126 object->cache->ops->update_object(object);
42127 fscache_stat_d(&fscache_n_cop_update_object);
42128 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
42129 spin_lock(&object->lock);
42130 object->state = FSCACHE_OBJECT_DEAD;
42131 spin_unlock(&object->lock);
42132 - fscache_stat(&fscache_n_object_dead);
42133 + fscache_stat_unchecked(&fscache_n_object_dead);
42134 goto terminal_transit;
42135
42136 /* handle the parent cache of this object being withdrawn from
42137 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
42138 spin_lock(&object->lock);
42139 object->state = FSCACHE_OBJECT_DEAD;
42140 spin_unlock(&object->lock);
42141 - fscache_stat(&fscache_n_object_dead);
42142 + fscache_stat_unchecked(&fscache_n_object_dead);
42143 goto terminal_transit;
42144
42145 /* complain about the object being woken up once it is
42146 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42147 parent->cookie->def->name, cookie->def->name,
42148 object->cache->tag->name);
42149
42150 - fscache_stat(&fscache_n_object_lookups);
42151 + fscache_stat_unchecked(&fscache_n_object_lookups);
42152 fscache_stat(&fscache_n_cop_lookup_object);
42153 ret = object->cache->ops->lookup_object(object);
42154 fscache_stat_d(&fscache_n_cop_lookup_object);
42155 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42156 if (ret == -ETIMEDOUT) {
42157 /* probably stuck behind another object, so move this one to
42158 * the back of the queue */
42159 - fscache_stat(&fscache_n_object_lookups_timed_out);
42160 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42161 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42162 }
42163
42164 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42165
42166 spin_lock(&object->lock);
42167 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42168 - fscache_stat(&fscache_n_object_lookups_negative);
42169 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42170
42171 /* transit here to allow write requests to begin stacking up
42172 * and read requests to begin returning ENODATA */
42173 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42174 * result, in which case there may be data available */
42175 spin_lock(&object->lock);
42176 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42177 - fscache_stat(&fscache_n_object_lookups_positive);
42178 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42179
42180 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42181
42182 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42183 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42184 } else {
42185 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42186 - fscache_stat(&fscache_n_object_created);
42187 + fscache_stat_unchecked(&fscache_n_object_created);
42188
42189 object->state = FSCACHE_OBJECT_AVAILABLE;
42190 spin_unlock(&object->lock);
42191 @@ -633,7 +633,7 @@ static void fscache_object_available(str
42192 fscache_enqueue_dependents(object);
42193
42194 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42195 - fscache_stat(&fscache_n_object_avail);
42196 + fscache_stat_unchecked(&fscache_n_object_avail);
42197
42198 _leave("");
42199 }
42200 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42201 enum fscache_checkaux result;
42202
42203 if (!object->cookie->def->check_aux) {
42204 - fscache_stat(&fscache_n_checkaux_none);
42205 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42206 return FSCACHE_CHECKAUX_OKAY;
42207 }
42208
42209 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42210 switch (result) {
42211 /* entry okay as is */
42212 case FSCACHE_CHECKAUX_OKAY:
42213 - fscache_stat(&fscache_n_checkaux_okay);
42214 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42215 break;
42216
42217 /* entry requires update */
42218 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42219 - fscache_stat(&fscache_n_checkaux_update);
42220 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42221 break;
42222
42223 /* entry requires deletion */
42224 case FSCACHE_CHECKAUX_OBSOLETE:
42225 - fscache_stat(&fscache_n_checkaux_obsolete);
42226 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42227 break;
42228
42229 default:
42230 diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
42231 --- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42232 +++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42233 @@ -16,7 +16,7 @@
42234 #include <linux/seq_file.h>
42235 #include "internal.h"
42236
42237 -atomic_t fscache_op_debug_id;
42238 +atomic_unchecked_t fscache_op_debug_id;
42239 EXPORT_SYMBOL(fscache_op_debug_id);
42240
42241 /**
42242 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42243 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42244 ASSERTCMP(atomic_read(&op->usage), >, 0);
42245
42246 - fscache_stat(&fscache_n_op_enqueue);
42247 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42248 switch (op->flags & FSCACHE_OP_TYPE) {
42249 case FSCACHE_OP_FAST:
42250 _debug("queue fast");
42251 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42252 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42253 if (op->processor)
42254 fscache_enqueue_operation(op);
42255 - fscache_stat(&fscache_n_op_run);
42256 + fscache_stat_unchecked(&fscache_n_op_run);
42257 }
42258
42259 /*
42260 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42261 if (object->n_ops > 0) {
42262 atomic_inc(&op->usage);
42263 list_add_tail(&op->pend_link, &object->pending_ops);
42264 - fscache_stat(&fscache_n_op_pend);
42265 + fscache_stat_unchecked(&fscache_n_op_pend);
42266 } else if (!list_empty(&object->pending_ops)) {
42267 atomic_inc(&op->usage);
42268 list_add_tail(&op->pend_link, &object->pending_ops);
42269 - fscache_stat(&fscache_n_op_pend);
42270 + fscache_stat_unchecked(&fscache_n_op_pend);
42271 fscache_start_operations(object);
42272 } else {
42273 ASSERTCMP(object->n_in_progress, ==, 0);
42274 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42275 object->n_exclusive++; /* reads and writes must wait */
42276 atomic_inc(&op->usage);
42277 list_add_tail(&op->pend_link, &object->pending_ops);
42278 - fscache_stat(&fscache_n_op_pend);
42279 + fscache_stat_unchecked(&fscache_n_op_pend);
42280 ret = 0;
42281 } else {
42282 /* not allowed to submit ops in any other state */
42283 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42284 if (object->n_exclusive > 0) {
42285 atomic_inc(&op->usage);
42286 list_add_tail(&op->pend_link, &object->pending_ops);
42287 - fscache_stat(&fscache_n_op_pend);
42288 + fscache_stat_unchecked(&fscache_n_op_pend);
42289 } else if (!list_empty(&object->pending_ops)) {
42290 atomic_inc(&op->usage);
42291 list_add_tail(&op->pend_link, &object->pending_ops);
42292 - fscache_stat(&fscache_n_op_pend);
42293 + fscache_stat_unchecked(&fscache_n_op_pend);
42294 fscache_start_operations(object);
42295 } else {
42296 ASSERTCMP(object->n_exclusive, ==, 0);
42297 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42298 object->n_ops++;
42299 atomic_inc(&op->usage);
42300 list_add_tail(&op->pend_link, &object->pending_ops);
42301 - fscache_stat(&fscache_n_op_pend);
42302 + fscache_stat_unchecked(&fscache_n_op_pend);
42303 ret = 0;
42304 } else if (object->state == FSCACHE_OBJECT_DYING ||
42305 object->state == FSCACHE_OBJECT_LC_DYING ||
42306 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42307 - fscache_stat(&fscache_n_op_rejected);
42308 + fscache_stat_unchecked(&fscache_n_op_rejected);
42309 ret = -ENOBUFS;
42310 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42311 fscache_report_unexpected_submission(object, op, ostate);
42312 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42313
42314 ret = -EBUSY;
42315 if (!list_empty(&op->pend_link)) {
42316 - fscache_stat(&fscache_n_op_cancelled);
42317 + fscache_stat_unchecked(&fscache_n_op_cancelled);
42318 list_del_init(&op->pend_link);
42319 object->n_ops--;
42320 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42321 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42322 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42323 BUG();
42324
42325 - fscache_stat(&fscache_n_op_release);
42326 + fscache_stat_unchecked(&fscache_n_op_release);
42327
42328 if (op->release) {
42329 op->release(op);
42330 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42331 * lock, and defer it otherwise */
42332 if (!spin_trylock(&object->lock)) {
42333 _debug("defer put");
42334 - fscache_stat(&fscache_n_op_deferred_release);
42335 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
42336
42337 cache = object->cache;
42338 spin_lock(&cache->op_gc_list_lock);
42339 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42340
42341 _debug("GC DEFERRED REL OBJ%x OP%x",
42342 object->debug_id, op->debug_id);
42343 - fscache_stat(&fscache_n_op_gc);
42344 + fscache_stat_unchecked(&fscache_n_op_gc);
42345
42346 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42347
42348 diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42349 --- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42350 +++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42351 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42352 val = radix_tree_lookup(&cookie->stores, page->index);
42353 if (!val) {
42354 rcu_read_unlock();
42355 - fscache_stat(&fscache_n_store_vmscan_not_storing);
42356 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42357 __fscache_uncache_page(cookie, page);
42358 return true;
42359 }
42360 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42361 spin_unlock(&cookie->stores_lock);
42362
42363 if (xpage) {
42364 - fscache_stat(&fscache_n_store_vmscan_cancelled);
42365 - fscache_stat(&fscache_n_store_radix_deletes);
42366 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42367 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42368 ASSERTCMP(xpage, ==, page);
42369 } else {
42370 - fscache_stat(&fscache_n_store_vmscan_gone);
42371 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42372 }
42373
42374 wake_up_bit(&cookie->flags, 0);
42375 @@ -106,7 +106,7 @@ page_busy:
42376 /* we might want to wait here, but that could deadlock the allocator as
42377 * the slow-work threads writing to the cache may all end up sleeping
42378 * on memory allocation */
42379 - fscache_stat(&fscache_n_store_vmscan_busy);
42380 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42381 return false;
42382 }
42383 EXPORT_SYMBOL(__fscache_maybe_release_page);
42384 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42385 FSCACHE_COOKIE_STORING_TAG);
42386 if (!radix_tree_tag_get(&cookie->stores, page->index,
42387 FSCACHE_COOKIE_PENDING_TAG)) {
42388 - fscache_stat(&fscache_n_store_radix_deletes);
42389 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42390 xpage = radix_tree_delete(&cookie->stores, page->index);
42391 }
42392 spin_unlock(&cookie->stores_lock);
42393 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42394
42395 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42396
42397 - fscache_stat(&fscache_n_attr_changed_calls);
42398 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42399
42400 if (fscache_object_is_active(object)) {
42401 fscache_set_op_state(op, "CallFS");
42402 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42403
42404 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42405
42406 - fscache_stat(&fscache_n_attr_changed);
42407 + fscache_stat_unchecked(&fscache_n_attr_changed);
42408
42409 op = kzalloc(sizeof(*op), GFP_KERNEL);
42410 if (!op) {
42411 - fscache_stat(&fscache_n_attr_changed_nomem);
42412 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42413 _leave(" = -ENOMEM");
42414 return -ENOMEM;
42415 }
42416 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42417 if (fscache_submit_exclusive_op(object, op) < 0)
42418 goto nobufs;
42419 spin_unlock(&cookie->lock);
42420 - fscache_stat(&fscache_n_attr_changed_ok);
42421 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42422 fscache_put_operation(op);
42423 _leave(" = 0");
42424 return 0;
42425 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42426 nobufs:
42427 spin_unlock(&cookie->lock);
42428 kfree(op);
42429 - fscache_stat(&fscache_n_attr_changed_nobufs);
42430 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42431 _leave(" = %d", -ENOBUFS);
42432 return -ENOBUFS;
42433 }
42434 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42435 /* allocate a retrieval operation and attempt to submit it */
42436 op = kzalloc(sizeof(*op), GFP_NOIO);
42437 if (!op) {
42438 - fscache_stat(&fscache_n_retrievals_nomem);
42439 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42440 return NULL;
42441 }
42442
42443 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42444 return 0;
42445 }
42446
42447 - fscache_stat(&fscache_n_retrievals_wait);
42448 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42449
42450 jif = jiffies;
42451 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42452 fscache_wait_bit_interruptible,
42453 TASK_INTERRUPTIBLE) != 0) {
42454 - fscache_stat(&fscache_n_retrievals_intr);
42455 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42456 _leave(" = -ERESTARTSYS");
42457 return -ERESTARTSYS;
42458 }
42459 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42460 */
42461 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42462 struct fscache_retrieval *op,
42463 - atomic_t *stat_op_waits,
42464 - atomic_t *stat_object_dead)
42465 + atomic_unchecked_t *stat_op_waits,
42466 + atomic_unchecked_t *stat_object_dead)
42467 {
42468 int ret;
42469
42470 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42471 goto check_if_dead;
42472
42473 _debug(">>> WT");
42474 - fscache_stat(stat_op_waits);
42475 + fscache_stat_unchecked(stat_op_waits);
42476 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42477 fscache_wait_bit_interruptible,
42478 TASK_INTERRUPTIBLE) < 0) {
42479 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42480
42481 check_if_dead:
42482 if (unlikely(fscache_object_is_dead(object))) {
42483 - fscache_stat(stat_object_dead);
42484 + fscache_stat_unchecked(stat_object_dead);
42485 return -ENOBUFS;
42486 }
42487 return 0;
42488 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42489
42490 _enter("%p,%p,,,", cookie, page);
42491
42492 - fscache_stat(&fscache_n_retrievals);
42493 + fscache_stat_unchecked(&fscache_n_retrievals);
42494
42495 if (hlist_empty(&cookie->backing_objects))
42496 goto nobufs;
42497 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42498 goto nobufs_unlock;
42499 spin_unlock(&cookie->lock);
42500
42501 - fscache_stat(&fscache_n_retrieval_ops);
42502 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42503
42504 /* pin the netfs read context in case we need to do the actual netfs
42505 * read because we've encountered a cache read failure */
42506 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42507
42508 error:
42509 if (ret == -ENOMEM)
42510 - fscache_stat(&fscache_n_retrievals_nomem);
42511 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42512 else if (ret == -ERESTARTSYS)
42513 - fscache_stat(&fscache_n_retrievals_intr);
42514 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42515 else if (ret == -ENODATA)
42516 - fscache_stat(&fscache_n_retrievals_nodata);
42517 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42518 else if (ret < 0)
42519 - fscache_stat(&fscache_n_retrievals_nobufs);
42520 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42521 else
42522 - fscache_stat(&fscache_n_retrievals_ok);
42523 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42524
42525 fscache_put_retrieval(op);
42526 _leave(" = %d", ret);
42527 @@ -453,7 +453,7 @@ nobufs_unlock:
42528 spin_unlock(&cookie->lock);
42529 kfree(op);
42530 nobufs:
42531 - fscache_stat(&fscache_n_retrievals_nobufs);
42532 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42533 _leave(" = -ENOBUFS");
42534 return -ENOBUFS;
42535 }
42536 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42537
42538 _enter("%p,,%d,,,", cookie, *nr_pages);
42539
42540 - fscache_stat(&fscache_n_retrievals);
42541 + fscache_stat_unchecked(&fscache_n_retrievals);
42542
42543 if (hlist_empty(&cookie->backing_objects))
42544 goto nobufs;
42545 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42546 goto nobufs_unlock;
42547 spin_unlock(&cookie->lock);
42548
42549 - fscache_stat(&fscache_n_retrieval_ops);
42550 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42551
42552 /* pin the netfs read context in case we need to do the actual netfs
42553 * read because we've encountered a cache read failure */
42554 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42555
42556 error:
42557 if (ret == -ENOMEM)
42558 - fscache_stat(&fscache_n_retrievals_nomem);
42559 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42560 else if (ret == -ERESTARTSYS)
42561 - fscache_stat(&fscache_n_retrievals_intr);
42562 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42563 else if (ret == -ENODATA)
42564 - fscache_stat(&fscache_n_retrievals_nodata);
42565 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42566 else if (ret < 0)
42567 - fscache_stat(&fscache_n_retrievals_nobufs);
42568 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42569 else
42570 - fscache_stat(&fscache_n_retrievals_ok);
42571 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42572
42573 fscache_put_retrieval(op);
42574 _leave(" = %d", ret);
42575 @@ -570,7 +570,7 @@ nobufs_unlock:
42576 spin_unlock(&cookie->lock);
42577 kfree(op);
42578 nobufs:
42579 - fscache_stat(&fscache_n_retrievals_nobufs);
42580 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42581 _leave(" = -ENOBUFS");
42582 return -ENOBUFS;
42583 }
42584 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42585
42586 _enter("%p,%p,,,", cookie, page);
42587
42588 - fscache_stat(&fscache_n_allocs);
42589 + fscache_stat_unchecked(&fscache_n_allocs);
42590
42591 if (hlist_empty(&cookie->backing_objects))
42592 goto nobufs;
42593 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42594 goto nobufs_unlock;
42595 spin_unlock(&cookie->lock);
42596
42597 - fscache_stat(&fscache_n_alloc_ops);
42598 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42599
42600 ret = fscache_wait_for_retrieval_activation(
42601 object, op,
42602 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42603
42604 error:
42605 if (ret == -ERESTARTSYS)
42606 - fscache_stat(&fscache_n_allocs_intr);
42607 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42608 else if (ret < 0)
42609 - fscache_stat(&fscache_n_allocs_nobufs);
42610 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42611 else
42612 - fscache_stat(&fscache_n_allocs_ok);
42613 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42614
42615 fscache_put_retrieval(op);
42616 _leave(" = %d", ret);
42617 @@ -651,7 +651,7 @@ nobufs_unlock:
42618 spin_unlock(&cookie->lock);
42619 kfree(op);
42620 nobufs:
42621 - fscache_stat(&fscache_n_allocs_nobufs);
42622 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42623 _leave(" = -ENOBUFS");
42624 return -ENOBUFS;
42625 }
42626 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42627
42628 spin_lock(&cookie->stores_lock);
42629
42630 - fscache_stat(&fscache_n_store_calls);
42631 + fscache_stat_unchecked(&fscache_n_store_calls);
42632
42633 /* find a page to store */
42634 page = NULL;
42635 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42636 page = results[0];
42637 _debug("gang %d [%lx]", n, page->index);
42638 if (page->index > op->store_limit) {
42639 - fscache_stat(&fscache_n_store_pages_over_limit);
42640 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42641 goto superseded;
42642 }
42643
42644 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42645
42646 if (page) {
42647 fscache_set_op_state(&op->op, "Store");
42648 - fscache_stat(&fscache_n_store_pages);
42649 + fscache_stat_unchecked(&fscache_n_store_pages);
42650 fscache_stat(&fscache_n_cop_write_page);
42651 ret = object->cache->ops->write_page(op, page);
42652 fscache_stat_d(&fscache_n_cop_write_page);
42653 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42654 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42655 ASSERT(PageFsCache(page));
42656
42657 - fscache_stat(&fscache_n_stores);
42658 + fscache_stat_unchecked(&fscache_n_stores);
42659
42660 op = kzalloc(sizeof(*op), GFP_NOIO);
42661 if (!op)
42662 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42663 spin_unlock(&cookie->stores_lock);
42664 spin_unlock(&object->lock);
42665
42666 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42667 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42668 op->store_limit = object->store_limit;
42669
42670 if (fscache_submit_op(object, &op->op) < 0)
42671 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42672
42673 spin_unlock(&cookie->lock);
42674 radix_tree_preload_end();
42675 - fscache_stat(&fscache_n_store_ops);
42676 - fscache_stat(&fscache_n_stores_ok);
42677 + fscache_stat_unchecked(&fscache_n_store_ops);
42678 + fscache_stat_unchecked(&fscache_n_stores_ok);
42679
42680 /* the slow work queue now carries its own ref on the object */
42681 fscache_put_operation(&op->op);
42682 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42683 return 0;
42684
42685 already_queued:
42686 - fscache_stat(&fscache_n_stores_again);
42687 + fscache_stat_unchecked(&fscache_n_stores_again);
42688 already_pending:
42689 spin_unlock(&cookie->stores_lock);
42690 spin_unlock(&object->lock);
42691 spin_unlock(&cookie->lock);
42692 radix_tree_preload_end();
42693 kfree(op);
42694 - fscache_stat(&fscache_n_stores_ok);
42695 + fscache_stat_unchecked(&fscache_n_stores_ok);
42696 _leave(" = 0");
42697 return 0;
42698
42699 @@ -886,14 +886,14 @@ nobufs:
42700 spin_unlock(&cookie->lock);
42701 radix_tree_preload_end();
42702 kfree(op);
42703 - fscache_stat(&fscache_n_stores_nobufs);
42704 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
42705 _leave(" = -ENOBUFS");
42706 return -ENOBUFS;
42707
42708 nomem_free:
42709 kfree(op);
42710 nomem:
42711 - fscache_stat(&fscache_n_stores_oom);
42712 + fscache_stat_unchecked(&fscache_n_stores_oom);
42713 _leave(" = -ENOMEM");
42714 return -ENOMEM;
42715 }
42716 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42717 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42718 ASSERTCMP(page, !=, NULL);
42719
42720 - fscache_stat(&fscache_n_uncaches);
42721 + fscache_stat_unchecked(&fscache_n_uncaches);
42722
42723 /* cache withdrawal may beat us to it */
42724 if (!PageFsCache(page))
42725 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42726 unsigned long loop;
42727
42728 #ifdef CONFIG_FSCACHE_STATS
42729 - atomic_add(pagevec->nr, &fscache_n_marks);
42730 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42731 #endif
42732
42733 for (loop = 0; loop < pagevec->nr; loop++) {
42734 diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42735 --- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42736 +++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42737 @@ -18,95 +18,95 @@
42738 /*
42739 * operation counters
42740 */
42741 -atomic_t fscache_n_op_pend;
42742 -atomic_t fscache_n_op_run;
42743 -atomic_t fscache_n_op_enqueue;
42744 -atomic_t fscache_n_op_requeue;
42745 -atomic_t fscache_n_op_deferred_release;
42746 -atomic_t fscache_n_op_release;
42747 -atomic_t fscache_n_op_gc;
42748 -atomic_t fscache_n_op_cancelled;
42749 -atomic_t fscache_n_op_rejected;
42750 -
42751 -atomic_t fscache_n_attr_changed;
42752 -atomic_t fscache_n_attr_changed_ok;
42753 -atomic_t fscache_n_attr_changed_nobufs;
42754 -atomic_t fscache_n_attr_changed_nomem;
42755 -atomic_t fscache_n_attr_changed_calls;
42756 -
42757 -atomic_t fscache_n_allocs;
42758 -atomic_t fscache_n_allocs_ok;
42759 -atomic_t fscache_n_allocs_wait;
42760 -atomic_t fscache_n_allocs_nobufs;
42761 -atomic_t fscache_n_allocs_intr;
42762 -atomic_t fscache_n_allocs_object_dead;
42763 -atomic_t fscache_n_alloc_ops;
42764 -atomic_t fscache_n_alloc_op_waits;
42765 -
42766 -atomic_t fscache_n_retrievals;
42767 -atomic_t fscache_n_retrievals_ok;
42768 -atomic_t fscache_n_retrievals_wait;
42769 -atomic_t fscache_n_retrievals_nodata;
42770 -atomic_t fscache_n_retrievals_nobufs;
42771 -atomic_t fscache_n_retrievals_intr;
42772 -atomic_t fscache_n_retrievals_nomem;
42773 -atomic_t fscache_n_retrievals_object_dead;
42774 -atomic_t fscache_n_retrieval_ops;
42775 -atomic_t fscache_n_retrieval_op_waits;
42776 -
42777 -atomic_t fscache_n_stores;
42778 -atomic_t fscache_n_stores_ok;
42779 -atomic_t fscache_n_stores_again;
42780 -atomic_t fscache_n_stores_nobufs;
42781 -atomic_t fscache_n_stores_oom;
42782 -atomic_t fscache_n_store_ops;
42783 -atomic_t fscache_n_store_calls;
42784 -atomic_t fscache_n_store_pages;
42785 -atomic_t fscache_n_store_radix_deletes;
42786 -atomic_t fscache_n_store_pages_over_limit;
42787 -
42788 -atomic_t fscache_n_store_vmscan_not_storing;
42789 -atomic_t fscache_n_store_vmscan_gone;
42790 -atomic_t fscache_n_store_vmscan_busy;
42791 -atomic_t fscache_n_store_vmscan_cancelled;
42792 -
42793 -atomic_t fscache_n_marks;
42794 -atomic_t fscache_n_uncaches;
42795 -
42796 -atomic_t fscache_n_acquires;
42797 -atomic_t fscache_n_acquires_null;
42798 -atomic_t fscache_n_acquires_no_cache;
42799 -atomic_t fscache_n_acquires_ok;
42800 -atomic_t fscache_n_acquires_nobufs;
42801 -atomic_t fscache_n_acquires_oom;
42802 -
42803 -atomic_t fscache_n_updates;
42804 -atomic_t fscache_n_updates_null;
42805 -atomic_t fscache_n_updates_run;
42806 -
42807 -atomic_t fscache_n_relinquishes;
42808 -atomic_t fscache_n_relinquishes_null;
42809 -atomic_t fscache_n_relinquishes_waitcrt;
42810 -atomic_t fscache_n_relinquishes_retire;
42811 -
42812 -atomic_t fscache_n_cookie_index;
42813 -atomic_t fscache_n_cookie_data;
42814 -atomic_t fscache_n_cookie_special;
42815 -
42816 -atomic_t fscache_n_object_alloc;
42817 -atomic_t fscache_n_object_no_alloc;
42818 -atomic_t fscache_n_object_lookups;
42819 -atomic_t fscache_n_object_lookups_negative;
42820 -atomic_t fscache_n_object_lookups_positive;
42821 -atomic_t fscache_n_object_lookups_timed_out;
42822 -atomic_t fscache_n_object_created;
42823 -atomic_t fscache_n_object_avail;
42824 -atomic_t fscache_n_object_dead;
42825 -
42826 -atomic_t fscache_n_checkaux_none;
42827 -atomic_t fscache_n_checkaux_okay;
42828 -atomic_t fscache_n_checkaux_update;
42829 -atomic_t fscache_n_checkaux_obsolete;
42830 +atomic_unchecked_t fscache_n_op_pend;
42831 +atomic_unchecked_t fscache_n_op_run;
42832 +atomic_unchecked_t fscache_n_op_enqueue;
42833 +atomic_unchecked_t fscache_n_op_requeue;
42834 +atomic_unchecked_t fscache_n_op_deferred_release;
42835 +atomic_unchecked_t fscache_n_op_release;
42836 +atomic_unchecked_t fscache_n_op_gc;
42837 +atomic_unchecked_t fscache_n_op_cancelled;
42838 +atomic_unchecked_t fscache_n_op_rejected;
42839 +
42840 +atomic_unchecked_t fscache_n_attr_changed;
42841 +atomic_unchecked_t fscache_n_attr_changed_ok;
42842 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
42843 +atomic_unchecked_t fscache_n_attr_changed_nomem;
42844 +atomic_unchecked_t fscache_n_attr_changed_calls;
42845 +
42846 +atomic_unchecked_t fscache_n_allocs;
42847 +atomic_unchecked_t fscache_n_allocs_ok;
42848 +atomic_unchecked_t fscache_n_allocs_wait;
42849 +atomic_unchecked_t fscache_n_allocs_nobufs;
42850 +atomic_unchecked_t fscache_n_allocs_intr;
42851 +atomic_unchecked_t fscache_n_allocs_object_dead;
42852 +atomic_unchecked_t fscache_n_alloc_ops;
42853 +atomic_unchecked_t fscache_n_alloc_op_waits;
42854 +
42855 +atomic_unchecked_t fscache_n_retrievals;
42856 +atomic_unchecked_t fscache_n_retrievals_ok;
42857 +atomic_unchecked_t fscache_n_retrievals_wait;
42858 +atomic_unchecked_t fscache_n_retrievals_nodata;
42859 +atomic_unchecked_t fscache_n_retrievals_nobufs;
42860 +atomic_unchecked_t fscache_n_retrievals_intr;
42861 +atomic_unchecked_t fscache_n_retrievals_nomem;
42862 +atomic_unchecked_t fscache_n_retrievals_object_dead;
42863 +atomic_unchecked_t fscache_n_retrieval_ops;
42864 +atomic_unchecked_t fscache_n_retrieval_op_waits;
42865 +
42866 +atomic_unchecked_t fscache_n_stores;
42867 +atomic_unchecked_t fscache_n_stores_ok;
42868 +atomic_unchecked_t fscache_n_stores_again;
42869 +atomic_unchecked_t fscache_n_stores_nobufs;
42870 +atomic_unchecked_t fscache_n_stores_oom;
42871 +atomic_unchecked_t fscache_n_store_ops;
42872 +atomic_unchecked_t fscache_n_store_calls;
42873 +atomic_unchecked_t fscache_n_store_pages;
42874 +atomic_unchecked_t fscache_n_store_radix_deletes;
42875 +atomic_unchecked_t fscache_n_store_pages_over_limit;
42876 +
42877 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42878 +atomic_unchecked_t fscache_n_store_vmscan_gone;
42879 +atomic_unchecked_t fscache_n_store_vmscan_busy;
42880 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42881 +
42882 +atomic_unchecked_t fscache_n_marks;
42883 +atomic_unchecked_t fscache_n_uncaches;
42884 +
42885 +atomic_unchecked_t fscache_n_acquires;
42886 +atomic_unchecked_t fscache_n_acquires_null;
42887 +atomic_unchecked_t fscache_n_acquires_no_cache;
42888 +atomic_unchecked_t fscache_n_acquires_ok;
42889 +atomic_unchecked_t fscache_n_acquires_nobufs;
42890 +atomic_unchecked_t fscache_n_acquires_oom;
42891 +
42892 +atomic_unchecked_t fscache_n_updates;
42893 +atomic_unchecked_t fscache_n_updates_null;
42894 +atomic_unchecked_t fscache_n_updates_run;
42895 +
42896 +atomic_unchecked_t fscache_n_relinquishes;
42897 +atomic_unchecked_t fscache_n_relinquishes_null;
42898 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42899 +atomic_unchecked_t fscache_n_relinquishes_retire;
42900 +
42901 +atomic_unchecked_t fscache_n_cookie_index;
42902 +atomic_unchecked_t fscache_n_cookie_data;
42903 +atomic_unchecked_t fscache_n_cookie_special;
42904 +
42905 +atomic_unchecked_t fscache_n_object_alloc;
42906 +atomic_unchecked_t fscache_n_object_no_alloc;
42907 +atomic_unchecked_t fscache_n_object_lookups;
42908 +atomic_unchecked_t fscache_n_object_lookups_negative;
42909 +atomic_unchecked_t fscache_n_object_lookups_positive;
42910 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
42911 +atomic_unchecked_t fscache_n_object_created;
42912 +atomic_unchecked_t fscache_n_object_avail;
42913 +atomic_unchecked_t fscache_n_object_dead;
42914 +
42915 +atomic_unchecked_t fscache_n_checkaux_none;
42916 +atomic_unchecked_t fscache_n_checkaux_okay;
42917 +atomic_unchecked_t fscache_n_checkaux_update;
42918 +atomic_unchecked_t fscache_n_checkaux_obsolete;
42919
42920 atomic_t fscache_n_cop_alloc_object;
42921 atomic_t fscache_n_cop_lookup_object;
42922 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42923 seq_puts(m, "FS-Cache statistics\n");
42924
42925 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42926 - atomic_read(&fscache_n_cookie_index),
42927 - atomic_read(&fscache_n_cookie_data),
42928 - atomic_read(&fscache_n_cookie_special));
42929 + atomic_read_unchecked(&fscache_n_cookie_index),
42930 + atomic_read_unchecked(&fscache_n_cookie_data),
42931 + atomic_read_unchecked(&fscache_n_cookie_special));
42932
42933 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42934 - atomic_read(&fscache_n_object_alloc),
42935 - atomic_read(&fscache_n_object_no_alloc),
42936 - atomic_read(&fscache_n_object_avail),
42937 - atomic_read(&fscache_n_object_dead));
42938 + atomic_read_unchecked(&fscache_n_object_alloc),
42939 + atomic_read_unchecked(&fscache_n_object_no_alloc),
42940 + atomic_read_unchecked(&fscache_n_object_avail),
42941 + atomic_read_unchecked(&fscache_n_object_dead));
42942 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42943 - atomic_read(&fscache_n_checkaux_none),
42944 - atomic_read(&fscache_n_checkaux_okay),
42945 - atomic_read(&fscache_n_checkaux_update),
42946 - atomic_read(&fscache_n_checkaux_obsolete));
42947 + atomic_read_unchecked(&fscache_n_checkaux_none),
42948 + atomic_read_unchecked(&fscache_n_checkaux_okay),
42949 + atomic_read_unchecked(&fscache_n_checkaux_update),
42950 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42951
42952 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42953 - atomic_read(&fscache_n_marks),
42954 - atomic_read(&fscache_n_uncaches));
42955 + atomic_read_unchecked(&fscache_n_marks),
42956 + atomic_read_unchecked(&fscache_n_uncaches));
42957
42958 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42959 " oom=%u\n",
42960 - atomic_read(&fscache_n_acquires),
42961 - atomic_read(&fscache_n_acquires_null),
42962 - atomic_read(&fscache_n_acquires_no_cache),
42963 - atomic_read(&fscache_n_acquires_ok),
42964 - atomic_read(&fscache_n_acquires_nobufs),
42965 - atomic_read(&fscache_n_acquires_oom));
42966 + atomic_read_unchecked(&fscache_n_acquires),
42967 + atomic_read_unchecked(&fscache_n_acquires_null),
42968 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
42969 + atomic_read_unchecked(&fscache_n_acquires_ok),
42970 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
42971 + atomic_read_unchecked(&fscache_n_acquires_oom));
42972
42973 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42974 - atomic_read(&fscache_n_object_lookups),
42975 - atomic_read(&fscache_n_object_lookups_negative),
42976 - atomic_read(&fscache_n_object_lookups_positive),
42977 - atomic_read(&fscache_n_object_lookups_timed_out),
42978 - atomic_read(&fscache_n_object_created));
42979 + atomic_read_unchecked(&fscache_n_object_lookups),
42980 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
42981 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
42982 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42983 + atomic_read_unchecked(&fscache_n_object_created));
42984
42985 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42986 - atomic_read(&fscache_n_updates),
42987 - atomic_read(&fscache_n_updates_null),
42988 - atomic_read(&fscache_n_updates_run));
42989 + atomic_read_unchecked(&fscache_n_updates),
42990 + atomic_read_unchecked(&fscache_n_updates_null),
42991 + atomic_read_unchecked(&fscache_n_updates_run));
42992
42993 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42994 - atomic_read(&fscache_n_relinquishes),
42995 - atomic_read(&fscache_n_relinquishes_null),
42996 - atomic_read(&fscache_n_relinquishes_waitcrt),
42997 - atomic_read(&fscache_n_relinquishes_retire));
42998 + atomic_read_unchecked(&fscache_n_relinquishes),
42999 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43000 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43001 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43002
43003 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43004 - atomic_read(&fscache_n_attr_changed),
43005 - atomic_read(&fscache_n_attr_changed_ok),
43006 - atomic_read(&fscache_n_attr_changed_nobufs),
43007 - atomic_read(&fscache_n_attr_changed_nomem),
43008 - atomic_read(&fscache_n_attr_changed_calls));
43009 + atomic_read_unchecked(&fscache_n_attr_changed),
43010 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43011 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43012 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43013 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43014
43015 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43016 - atomic_read(&fscache_n_allocs),
43017 - atomic_read(&fscache_n_allocs_ok),
43018 - atomic_read(&fscache_n_allocs_wait),
43019 - atomic_read(&fscache_n_allocs_nobufs),
43020 - atomic_read(&fscache_n_allocs_intr));
43021 + atomic_read_unchecked(&fscache_n_allocs),
43022 + atomic_read_unchecked(&fscache_n_allocs_ok),
43023 + atomic_read_unchecked(&fscache_n_allocs_wait),
43024 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43025 + atomic_read_unchecked(&fscache_n_allocs_intr));
43026 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43027 - atomic_read(&fscache_n_alloc_ops),
43028 - atomic_read(&fscache_n_alloc_op_waits),
43029 - atomic_read(&fscache_n_allocs_object_dead));
43030 + atomic_read_unchecked(&fscache_n_alloc_ops),
43031 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43032 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43033
43034 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43035 " int=%u oom=%u\n",
43036 - atomic_read(&fscache_n_retrievals),
43037 - atomic_read(&fscache_n_retrievals_ok),
43038 - atomic_read(&fscache_n_retrievals_wait),
43039 - atomic_read(&fscache_n_retrievals_nodata),
43040 - atomic_read(&fscache_n_retrievals_nobufs),
43041 - atomic_read(&fscache_n_retrievals_intr),
43042 - atomic_read(&fscache_n_retrievals_nomem));
43043 + atomic_read_unchecked(&fscache_n_retrievals),
43044 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43045 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43046 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43047 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43048 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43049 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43050 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43051 - atomic_read(&fscache_n_retrieval_ops),
43052 - atomic_read(&fscache_n_retrieval_op_waits),
43053 - atomic_read(&fscache_n_retrievals_object_dead));
43054 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43055 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43056 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43057
43058 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43059 - atomic_read(&fscache_n_stores),
43060 - atomic_read(&fscache_n_stores_ok),
43061 - atomic_read(&fscache_n_stores_again),
43062 - atomic_read(&fscache_n_stores_nobufs),
43063 - atomic_read(&fscache_n_stores_oom));
43064 + atomic_read_unchecked(&fscache_n_stores),
43065 + atomic_read_unchecked(&fscache_n_stores_ok),
43066 + atomic_read_unchecked(&fscache_n_stores_again),
43067 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43068 + atomic_read_unchecked(&fscache_n_stores_oom));
43069 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43070 - atomic_read(&fscache_n_store_ops),
43071 - atomic_read(&fscache_n_store_calls),
43072 - atomic_read(&fscache_n_store_pages),
43073 - atomic_read(&fscache_n_store_radix_deletes),
43074 - atomic_read(&fscache_n_store_pages_over_limit));
43075 + atomic_read_unchecked(&fscache_n_store_ops),
43076 + atomic_read_unchecked(&fscache_n_store_calls),
43077 + atomic_read_unchecked(&fscache_n_store_pages),
43078 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43079 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43080
43081 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43082 - atomic_read(&fscache_n_store_vmscan_not_storing),
43083 - atomic_read(&fscache_n_store_vmscan_gone),
43084 - atomic_read(&fscache_n_store_vmscan_busy),
43085 - atomic_read(&fscache_n_store_vmscan_cancelled));
43086 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43087 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43088 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43089 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43090
43091 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43092 - atomic_read(&fscache_n_op_pend),
43093 - atomic_read(&fscache_n_op_run),
43094 - atomic_read(&fscache_n_op_enqueue),
43095 - atomic_read(&fscache_n_op_cancelled),
43096 - atomic_read(&fscache_n_op_rejected));
43097 + atomic_read_unchecked(&fscache_n_op_pend),
43098 + atomic_read_unchecked(&fscache_n_op_run),
43099 + atomic_read_unchecked(&fscache_n_op_enqueue),
43100 + atomic_read_unchecked(&fscache_n_op_cancelled),
43101 + atomic_read_unchecked(&fscache_n_op_rejected));
43102 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43103 - atomic_read(&fscache_n_op_deferred_release),
43104 - atomic_read(&fscache_n_op_release),
43105 - atomic_read(&fscache_n_op_gc));
43106 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43107 + atomic_read_unchecked(&fscache_n_op_release),
43108 + atomic_read_unchecked(&fscache_n_op_gc));
43109
43110 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43111 atomic_read(&fscache_n_cop_alloc_object),
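
The stats.c hunks above convert every FS-Cache statistics counter from atomic_t to atomic_unchecked_t, the PaX variant that is not subject to reference-counter overflow checking, while the fscache_n_cop_* operation counters stay as plain atomic_t. A rough userspace sketch of that split follows; checked_t, unchecked_t and the helpers are invented stand-ins, not kernel types.

#include <stdatomic.h>
#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Illustration only: a "checked" counter traps on overflow (a stand-in for
 * PaX refcount protection), an "unchecked" one is allowed to wrap, which is
 * harmless for pure statistics. */
typedef struct { atomic_int v; } checked_t;
typedef struct { atomic_int v; } unchecked_t;

static void checked_inc(checked_t *c)
{
        int old = atomic_fetch_add(&c->v, 1);
        assert(old != INT_MAX);     /* overflowing a refcount would be a bug */
}

static void unchecked_inc(unchecked_t *c)
{
        atomic_fetch_add(&c->v, 1); /* wrapping a statistic is acceptable */
}

int main(void)
{
        checked_t refs = { 1 };
        unchecked_t hits = { 0 };

        checked_inc(&refs);
        unchecked_inc(&hits);
        printf("refs=%d hits=%d\n", atomic_load(&refs.v), atomic_load(&hits.v));
        return 0;
}
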
43112 diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
43113 --- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43114 +++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43115 @@ -4,6 +4,7 @@
43116 #include <linux/path.h>
43117 #include <linux/slab.h>
43118 #include <linux/fs_struct.h>
43119 +#include <linux/grsecurity.h>
43120
43121 /*
43122 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43123 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43124 old_root = fs->root;
43125 fs->root = *path;
43126 path_get(path);
43127 + gr_set_chroot_entries(current, path);
43128 write_unlock(&fs->lock);
43129 if (old_root.dentry)
43130 path_put(&old_root);
43131 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43132 && fs->root.mnt == old_root->mnt) {
43133 path_get(new_root);
43134 fs->root = *new_root;
43135 + gr_set_chroot_entries(p, new_root);
43136 count++;
43137 }
43138 if (fs->pwd.dentry == old_root->dentry
43139 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43140 task_lock(tsk);
43141 write_lock(&fs->lock);
43142 tsk->fs = NULL;
43143 - kill = !--fs->users;
43144 + gr_clear_chroot_entries(tsk);
43145 + kill = !atomic_dec_return(&fs->users);
43146 write_unlock(&fs->lock);
43147 task_unlock(tsk);
43148 if (kill)
43149 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43150 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43151 /* We don't need to lock fs - think why ;-) */
43152 if (fs) {
43153 - fs->users = 1;
43154 + atomic_set(&fs->users, 1);
43155 fs->in_exec = 0;
43156 rwlock_init(&fs->lock);
43157 fs->umask = old->umask;
43158 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43159
43160 task_lock(current);
43161 write_lock(&fs->lock);
43162 - kill = !--fs->users;
43163 + kill = !atomic_dec_return(&fs->users);
43164 current->fs = new_fs;
43165 + gr_set_chroot_entries(current, &new_fs->root);
43166 write_unlock(&fs->lock);
43167 task_unlock(current);
43168
43169 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43170
43171 /* to be mentioned only in INIT_TASK */
43172 struct fs_struct init_fs = {
43173 - .users = 1,
43174 + .users = ATOMIC_INIT(1),
43175 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43176 .umask = 0022,
43177 };
43178 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43179 task_lock(current);
43180
43181 write_lock(&init_fs.lock);
43182 - init_fs.users++;
43183 + atomic_inc(&init_fs.users);
43184 write_unlock(&init_fs.lock);
43185
43186 write_lock(&fs->lock);
43187 current->fs = &init_fs;
43188 - kill = !--fs->users;
43189 + gr_set_chroot_entries(current, &current->fs->root);
43190 + kill = !atomic_dec_return(&fs->users);
43191 write_unlock(&fs->lock);
43192
43193 task_unlock(current);
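
The fs_struct.c hunks switch fs->users from a plain integer to an atomic_t (ATOMIC_INIT / atomic_inc / atomic_dec_return) and add grsecurity's chroot bookkeeping when a task's root changes. The drop-and-free pattern that the atomic conversion supports, sketched in userspace with invented names (fs_struct_like, fs_get, fs_put):

#include <stdatomic.h>
#include <stdlib.h>

struct fs_struct_like {
        atomic_int users;       /* was a plain int guarded only by the lock */
        /* ... root/pwd paths, umask, ... */
};

static struct fs_struct_like *fs_get(struct fs_struct_like *fs)
{
        atomic_fetch_add(&fs->users, 1);
        return fs;
}

static void fs_put(struct fs_struct_like *fs)
{
        /* the patch's atomic_dec_return() == 0 becomes fetch_sub() == 1 here */
        if (atomic_fetch_sub(&fs->users, 1) == 1)
                free(fs);
}

int main(void)
{
        struct fs_struct_like *fs = calloc(1, sizeof(*fs));
        if (!fs)
                return 1;
        atomic_init(&fs->users, 1);
        fs_get(fs);
        fs_put(fs);
        fs_put(fs);             /* last reference frees the object */
        return 0;
}
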
43194 diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
43195 --- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43196 +++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43197 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
43198 INIT_LIST_HEAD(&cuse_conntbl[i]);
43199
43200 /* inherit and extend fuse_dev_operations */
43201 - cuse_channel_fops = fuse_dev_operations;
43202 - cuse_channel_fops.owner = THIS_MODULE;
43203 - cuse_channel_fops.open = cuse_channel_open;
43204 - cuse_channel_fops.release = cuse_channel_release;
43205 + pax_open_kernel();
43206 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43207 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43208 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43209 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43210 + pax_close_kernel();
43211
43212 cuse_class = class_create(THIS_MODULE, "cuse");
43213 if (IS_ERR(cuse_class))
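
The cuse.c hunk stops assigning to cuse_channel_fops field by field and instead copies and patches it between pax_open_kernel() and pax_close_kernel(), which is what a write to an otherwise read-only (constified) ops table requires under PaX. A loose userspace analogy using mprotect(2) — not the kernel mechanism itself — is:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* The data normally sits in a read-only mapping; pax_open_kernel() /
 * pax_close_kernel() correspond to briefly making it writable around
 * the update. */
int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;

        strcpy(ops, "default ops");
        mprotect(ops, pagesz, PROT_READ);               /* "constify" the table */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        strcpy(ops, "cuse ops");                        /* patch the entries  */
        mprotect(ops, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("%s\n", ops);
        munmap(ops, pagesz);
        return 0;
}
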
43214 diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
43215 --- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43216 +++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43217 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43218 {
43219 struct fuse_notify_inval_entry_out outarg;
43220 int err = -EINVAL;
43221 - char buf[FUSE_NAME_MAX+1];
43222 + char *buf = NULL;
43223 struct qstr name;
43224
43225 if (size < sizeof(outarg))
43226 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43227 if (outarg.namelen > FUSE_NAME_MAX)
43228 goto err;
43229
43230 + err = -ENOMEM;
43231 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43232 + if (!buf)
43233 + goto err;
43234 +
43235 name.name = buf;
43236 name.len = outarg.namelen;
43237 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43238 @@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43239
43240 down_read(&fc->killsb);
43241 err = -ENOENT;
43242 - if (!fc->sb)
43243 - goto err_unlock;
43244 -
43245 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43246 -
43247 -err_unlock:
43248 + if (fc->sb)
43249 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43250 up_read(&fc->killsb);
43251 + kfree(buf);
43252 return err;
43253
43254 err:
43255 fuse_copy_finish(cs);
43256 + kfree(buf);
43257 return err;
43258 }
43259
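The dev.c hunk replaces the on-stack char buf[FUSE_NAME_MAX+1] in fuse_notify_inval_entry() with a kmalloc()ed buffer that is freed on every exit path, keeping roughly a kilobyte off the kernel stack. A plain-C sketch of the same shape, with NAME_MAX_GUESS and handle_name() invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_GUESS 1024     /* stand-in for FUSE_NAME_MAX */

static int handle_name(const char *src, size_t len)
{
        char *buf;

        if (len > NAME_MAX_GUESS)
                return -1;

        buf = malloc(NAME_MAX_GUESS + 1);   /* heap instead of the stack */
        if (!buf)
                return -1;

        memcpy(buf, src, len);
        buf[len] = '\0';
        printf("invalidate '%s'\n", buf);

        free(buf);              /* the patch adds kfree() on both exit paths */
        return 0;
}

int main(void)
{
        return handle_name("example", 7) ? 1 : 0;
}
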
43260 diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
43261 --- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43262 +++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43263 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43264 return link;
43265 }
43266
43267 -static void free_link(char *link)
43268 +static void free_link(const char *link)
43269 {
43270 if (!IS_ERR(link))
43271 free_page((unsigned long) link);
43272 diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
43273 --- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43274 +++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43275 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43276 unsigned int x;
43277 int error;
43278
43279 + pax_track_stack();
43280 +
43281 if (ndentry->d_inode) {
43282 nip = GFS2_I(ndentry->d_inode);
43283 if (ip == nip)
43284 diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
43285 --- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43286 +++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43287 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43288 return a->store ? a->store(sdp, buf, len) : len;
43289 }
43290
43291 -static struct sysfs_ops gfs2_attr_ops = {
43292 +static const struct sysfs_ops gfs2_attr_ops = {
43293 .show = gfs2_attr_show,
43294 .store = gfs2_attr_store,
43295 };
43296 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43297 return 0;
43298 }
43299
43300 -static struct kset_uevent_ops gfs2_uevent_ops = {
43301 +static const struct kset_uevent_ops gfs2_uevent_ops = {
43302 .uevent = gfs2_uevent,
43303 };
43304
43305 diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
43306 --- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43307 +++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43308 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43309 int err;
43310 u16 type;
43311
43312 + pax_track_stack();
43313 +
43314 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43315 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43316 if (err)
43317 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43318 int entry_size;
43319 int err;
43320
43321 + pax_track_stack();
43322 +
43323 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43324 sb = dir->i_sb;
43325 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43326 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43327 int entry_size, type;
43328 int err = 0;
43329
43330 + pax_track_stack();
43331 +
43332 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43333 dst_dir->i_ino, dst_name->name);
43334 sb = src_dir->i_sb;
43335 diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43336 --- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43337 +++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43338 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43339 struct hfsplus_readdir_data *rd;
43340 u16 type;
43341
43342 + pax_track_stack();
43343 +
43344 if (filp->f_pos >= inode->i_size)
43345 return 0;
43346
43347 diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43348 --- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43349 +++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43350 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43351 int res = 0;
43352 u16 type;
43353
43354 + pax_track_stack();
43355 +
43356 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43357
43358 HFSPLUS_I(inode).dev = 0;
43359 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43360 struct hfs_find_data fd;
43361 hfsplus_cat_entry entry;
43362
43363 + pax_track_stack();
43364 +
43365 if (HFSPLUS_IS_RSRC(inode))
43366 main_inode = HFSPLUS_I(inode).rsrc_inode;
43367
43368 diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43369 --- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43370 +++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43371 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43372 struct hfsplus_cat_file *file;
43373 int res;
43374
43375 + pax_track_stack();
43376 +
43377 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43378 return -EOPNOTSUPP;
43379
43380 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43381 struct hfsplus_cat_file *file;
43382 ssize_t res = 0;
43383
43384 + pax_track_stack();
43385 +
43386 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43387 return -EOPNOTSUPP;
43388
43389 diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43390 --- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43391 +++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43392 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43393 struct nls_table *nls = NULL;
43394 int err = -EINVAL;
43395
43396 + pax_track_stack();
43397 +
43398 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43399 if (!sbi)
43400 return -ENOMEM;
43401 diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43402 --- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43403 +++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43404 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43405 .kill_sb = kill_litter_super,
43406 };
43407
43408 -static struct vfsmount *hugetlbfs_vfsmount;
43409 +struct vfsmount *hugetlbfs_vfsmount;
43410
43411 static int can_do_hugetlb_shm(void)
43412 {
43413 diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43414 --- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43415 +++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43416 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43417 u64 phys, u64 len, u32 flags)
43418 {
43419 struct fiemap_extent extent;
43420 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
43421 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43422
43423 /* only count the extents */
43424 if (fieinfo->fi_extents_max == 0) {
43425 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43426
43427 fieinfo.fi_flags = fiemap.fm_flags;
43428 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43429 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43430 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43431
43432 if (fiemap.fm_extent_count != 0 &&
43433 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43434 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43435 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43436 fiemap.fm_flags = fieinfo.fi_flags;
43437 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43438 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43439 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43440 error = -EFAULT;
43441
43442 return error;
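
The ioctl.c hunk only adds __user (and one __force) annotations to the fiemap destination pointer so that sparse can tell user-space pointers apart from kernel pointers; the generated code does not change. A condensed view of how such annotations are typically defined and used — simplified for illustration, not copied from this tree — is:

/* Simplified from the kernel's compiler.h; outside a sparse run the
 * annotations compile away to nothing. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

struct fiemap_extent;

struct fieinfo_like {
        /* declaring the destination as a user pointer lets sparse flag any
         * direct dereference or unannotated cast */
        struct fiemap_extent __user *fi_extents_start;
};

static void set_dest(struct fieinfo_like *fi, unsigned long arg)
{
        fi->fi_extents_start = (struct fiemap_extent __user *)arg;
}

int main(void)
{
        struct fieinfo_like fi;
        set_dest(&fi, 0x1000);  /* illustrative address only */
        (void)fi;
        return 0;
}
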
43443 diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43444 --- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43445 +++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43446 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43447 tid_t this_tid;
43448 int result;
43449
43450 + pax_track_stack();
43451 +
43452 jbd_debug(1, "Start checkpoint\n");
43453
43454 /*
43455 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43456 --- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43457 +++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43458 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43459 int outpos = 0;
43460 int pos=0;
43461
43462 + pax_track_stack();
43463 +
43464 memset(positions,0,sizeof(positions));
43465
43466 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43467 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43468 int outpos = 0;
43469 int pos=0;
43470
43471 + pax_track_stack();
43472 +
43473 memset(positions,0,sizeof(positions));
43474
43475 while (outpos<destlen) {
43476 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43477 --- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43478 +++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43479 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43480 int ret;
43481 uint32_t mysrclen, mydstlen;
43482
43483 + pax_track_stack();
43484 +
43485 mysrclen = *sourcelen;
43486 mydstlen = *dstlen - 8;
43487
43488 diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43489 --- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43490 +++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43491 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43492 struct jffs2_unknown_node marker = {
43493 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43494 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43495 - .totlen = cpu_to_je32(c->cleanmarker_size)
43496 + .totlen = cpu_to_je32(c->cleanmarker_size),
43497 + .hdr_crc = cpu_to_je32(0)
43498 };
43499
43500 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43501 diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43502 --- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43503 +++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43504 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43505 {
43506 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43507 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43508 - .totlen = constant_cpu_to_je32(8)
43509 + .totlen = constant_cpu_to_je32(8),
43510 + .hdr_crc = constant_cpu_to_je32(0)
43511 };
43512
43513 /*
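
The erase.c and wbuf.c hunks spell out .hdr_crc = cpu_to_je32(0) in the cleanmarker initializers. With designated initializers the omitted member was already zero-initialized, so the object's value is unchanged; the zero CRC is simply made explicit. A small self-contained check of that C rule, with an invented unknown_node layout:

#include <assert.h>
#include <stdint.h>

struct unknown_node {
        uint16_t magic;
        uint16_t nodetype;
        uint32_t totlen;
        uint32_t hdr_crc;
};

int main(void)
{
        /* Members left out of a designated initializer are zero-initialized,
         * so writing .hdr_crc = 0 changes nothing at runtime; it only makes
         * the "CRC intentionally zero" choice visible in the source. */
        struct unknown_node implicit  = { .magic = 0x1985, .totlen = 8 };
        struct unknown_node explicit_ = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };

        assert(implicit.hdr_crc == explicit_.hdr_crc);
        return 0;
}
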
43514 diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43515 --- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43516 +++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43517 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43518
43519 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43520
43521 + pax_track_stack();
43522 +
43523 /* Phase.1 : Merge same xref */
43524 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43525 xref_tmphash[i] = NULL;
43526 diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43527 --- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43528 +++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43529 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43530
43531 jfs_inode_cachep =
43532 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43533 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43534 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43535 init_once);
43536 if (jfs_inode_cachep == NULL)
43537 return -ENOMEM;
43538 diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43539 --- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43540 +++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43541 @@ -86,7 +86,7 @@ config HAVE_AOUT
43542
43543 config BINFMT_AOUT
43544 tristate "Kernel support for a.out and ECOFF binaries"
43545 - depends on HAVE_AOUT
43546 + depends on HAVE_AOUT && BROKEN
43547 ---help---
43548 A.out (Assembler.OUTput) is a set of formats for libraries and
43549 executables used in the earliest versions of UNIX. Linux used
43550 diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43551 --- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43552 +++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43553 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43554
43555 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43556 struct dentry *next;
43557 + char d_name[sizeof(next->d_iname)];
43558 + const unsigned char *name;
43559 +
43560 next = list_entry(p, struct dentry, d_u.d_child);
43561 if (d_unhashed(next) || !next->d_inode)
43562 continue;
43563
43564 spin_unlock(&dcache_lock);
43565 - if (filldir(dirent, next->d_name.name,
43566 + name = next->d_name.name;
43567 + if (name == next->d_iname) {
43568 + memcpy(d_name, name, next->d_name.len);
43569 + name = d_name;
43570 + }
43571 + if (filldir(dirent, name,
43572 next->d_name.len, filp->f_pos,
43573 next->d_inode->i_ino,
43574 dt_type(next->d_inode)) < 0)
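
The libfs.c hunk copies a short, d_iname-embedded name into a local buffer while dcache_lock is still held, so the later filldir() call cannot see the name change underneath it. The copy-while-locked, call-out-unlocked pattern in a userspace sketch (lock, shared_name and emit() are stand-ins):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_name[32] = "inline-name";   /* stand-in for d_iname */

/* Callback invoked without the lock held, like filldir() in the patch. */
static void emit(const char *name, size_t len)
{
        printf("%.*s\n", (int)len, name);
}

static void read_dir_entry(void)
{
        char copy[sizeof(shared_name)];
        size_t len;

        pthread_mutex_lock(&lock);
        len = strlen(shared_name);
        memcpy(copy, shared_name, len + 1);     /* snapshot while protected */
        pthread_mutex_unlock(&lock);

        emit(copy, len);        /* safe even if shared_name changes now */
}

int main(void)
{
        read_dir_entry();
        return 0;
}
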
43575 diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43576 --- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43577 +++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43578 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43579 /*
43580 * Cookie counter for NLM requests
43581 */
43582 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43583 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43584
43585 void nlmclnt_next_cookie(struct nlm_cookie *c)
43586 {
43587 - u32 cookie = atomic_inc_return(&nlm_cookie);
43588 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43589
43590 memcpy(c->data, &cookie, 4);
43591 c->len=4;
43592 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43593 struct nlm_rqst reqst, *req;
43594 int status;
43595
43596 + pax_track_stack();
43597 +
43598 req = &reqst;
43599 memset(req, 0, sizeof(*req));
43600 locks_init_lock(&req->a_args.lock.fl);
43601 diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43602 --- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43603 +++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43604 @@ -43,7 +43,7 @@
43605
43606 static struct svc_program nlmsvc_program;
43607
43608 -struct nlmsvc_binding * nlmsvc_ops;
43609 +const struct nlmsvc_binding * nlmsvc_ops;
43610 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43611
43612 static DEFINE_MUTEX(nlmsvc_mutex);
43613 diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43614 --- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43615 +++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43616 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43617
43618 static struct kmem_cache *filelock_cache __read_mostly;
43619
43620 +static void locks_init_lock_always(struct file_lock *fl)
43621 +{
43622 + fl->fl_next = NULL;
43623 + fl->fl_fasync = NULL;
43624 + fl->fl_owner = NULL;
43625 + fl->fl_pid = 0;
43626 + fl->fl_nspid = NULL;
43627 + fl->fl_file = NULL;
43628 + fl->fl_flags = 0;
43629 + fl->fl_type = 0;
43630 + fl->fl_start = fl->fl_end = 0;
43631 +}
43632 +
43633 /* Allocate an empty lock structure. */
43634 static struct file_lock *locks_alloc_lock(void)
43635 {
43636 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43637 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43638 +
43639 + if (fl)
43640 + locks_init_lock_always(fl);
43641 +
43642 + return fl;
43643 }
43644
43645 void locks_release_private(struct file_lock *fl)
43646 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43647 INIT_LIST_HEAD(&fl->fl_link);
43648 INIT_LIST_HEAD(&fl->fl_block);
43649 init_waitqueue_head(&fl->fl_wait);
43650 - fl->fl_next = NULL;
43651 - fl->fl_fasync = NULL;
43652 - fl->fl_owner = NULL;
43653 - fl->fl_pid = 0;
43654 - fl->fl_nspid = NULL;
43655 - fl->fl_file = NULL;
43656 - fl->fl_flags = 0;
43657 - fl->fl_type = 0;
43658 - fl->fl_start = fl->fl_end = 0;
43659 fl->fl_ops = NULL;
43660 fl->fl_lmops = NULL;
43661 + locks_init_lock_always(fl);
43662 }
43663
43664 EXPORT_SYMBOL(locks_init_lock);
43665 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43666 return;
43667
43668 if (filp->f_op && filp->f_op->flock) {
43669 - struct file_lock fl = {
43670 + struct file_lock flock = {
43671 .fl_pid = current->tgid,
43672 .fl_file = filp,
43673 .fl_flags = FL_FLOCK,
43674 .fl_type = F_UNLCK,
43675 .fl_end = OFFSET_MAX,
43676 };
43677 - filp->f_op->flock(filp, F_SETLKW, &fl);
43678 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
43679 - fl.fl_ops->fl_release_private(&fl);
43680 + filp->f_op->flock(filp, F_SETLKW, &flock);
43681 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
43682 + flock.fl_ops->fl_release_private(&flock);
43683 }
43684
43685 lock_kernel();
43686 diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43687 --- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43688 +++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43689 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43690 if (!cache)
43691 goto fail;
43692 cache->c_name = name;
43693 - cache->c_op.free = NULL;
43694 + *(void **)&cache->c_op.free = NULL;
43695 if (cache_op)
43696 - cache->c_op.free = cache_op->free;
43697 + *(void **)&cache->c_op.free = cache_op->free;
43698 atomic_set(&cache->c_entry_count, 0);
43699 cache->c_bucket_bits = bucket_bits;
43700 #ifdef MB_CACHE_INDEXES_COUNT
43701 diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43702 --- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43703 +++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43704 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43705 return ret;
43706
43707 /*
43708 - * Read/write DACs are always overridable.
43709 - * Executable DACs are overridable if at least one exec bit is set.
43710 - */
43711 - if (!(mask & MAY_EXEC) || execute_ok(inode))
43712 - if (capable(CAP_DAC_OVERRIDE))
43713 - return 0;
43714 -
43715 - /*
43716 * Searching includes executable on directories, else just read.
43717 */
43718 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43719 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43720 if (capable(CAP_DAC_READ_SEARCH))
43721 return 0;
43722
43723 + /*
43724 + * Read/write DACs are always overridable.
43725 + * Executable DACs are overridable if at least one exec bit is set.
43726 + */
43727 + if (!(mask & MAY_EXEC) || execute_ok(inode))
43728 + if (capable(CAP_DAC_OVERRIDE))
43729 + return 0;
43730 +
43731 return -EACCES;
43732 }
43733
43734 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43735 if (!ret)
43736 goto ok;
43737
43738 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43739 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43740 + capable(CAP_DAC_OVERRIDE))
43741 goto ok;
43742
43743 return ret;
43744 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43745 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43746 error = PTR_ERR(cookie);
43747 if (!IS_ERR(cookie)) {
43748 - char *s = nd_get_link(nd);
43749 + const char *s = nd_get_link(nd);
43750 error = 0;
43751 if (s)
43752 error = __vfs_follow_link(nd, s);
43753 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43754 err = security_inode_follow_link(path->dentry, nd);
43755 if (err)
43756 goto loop;
43757 +
43758 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43759 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43760 + err = -EACCES;
43761 + goto loop;
43762 + }
43763 +
43764 current->link_count++;
43765 current->total_link_count++;
43766 nd->depth++;
43767 @@ -1016,11 +1024,18 @@ return_reval:
43768 break;
43769 }
43770 return_base:
43771 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43772 + path_put(&nd->path);
43773 + return -ENOENT;
43774 + }
43775 return 0;
43776 out_dput:
43777 path_put_conditional(&next, nd);
43778 break;
43779 }
43780 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43781 + err = -ENOENT;
43782 +
43783 path_put(&nd->path);
43784 return_err:
43785 return err;
43786 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43787 int retval = path_init(dfd, name, flags, nd);
43788 if (!retval)
43789 retval = path_walk(name, nd);
43790 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43791 - nd->path.dentry->d_inode))
43792 - audit_inode(name, nd->path.dentry);
43793 +
43794 + if (likely(!retval)) {
43795 + if (nd->path.dentry && nd->path.dentry->d_inode) {
43796 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43797 + retval = -ENOENT;
43798 + if (!audit_dummy_context())
43799 + audit_inode(name, nd->path.dentry);
43800 + }
43801 + }
43802 if (nd->root.mnt) {
43803 path_put(&nd->root);
43804 nd->root.mnt = NULL;
43805 }
43806 +
43807 return retval;
43808 }
43809
43810 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43811 if (error)
43812 goto err_out;
43813
43814 +
43815 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43816 + error = -EPERM;
43817 + goto err_out;
43818 + }
43819 + if (gr_handle_rawio(inode)) {
43820 + error = -EPERM;
43821 + goto err_out;
43822 + }
43823 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43824 + error = -EACCES;
43825 + goto err_out;
43826 + }
43827 +
43828 if (flag & O_TRUNC) {
43829 error = get_write_access(inode);
43830 if (error)
43831 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43832 int error;
43833 struct dentry *dir = nd->path.dentry;
43834
43835 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43836 + error = -EACCES;
43837 + goto out_unlock;
43838 + }
43839 +
43840 if (!IS_POSIXACL(dir->d_inode))
43841 mode &= ~current_umask();
43842 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43843 if (error)
43844 goto out_unlock;
43845 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43846 + if (!error)
43847 + gr_handle_create(path->dentry, nd->path.mnt);
43848 out_unlock:
43849 mutex_unlock(&dir->d_inode->i_mutex);
43850 dput(nd->path.dentry);
43851 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43852 &nd, flag);
43853 if (error)
43854 return ERR_PTR(error);
43855 +
43856 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43857 + error = -EPERM;
43858 + goto exit;
43859 + }
43860 +
43861 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43862 + error = -EPERM;
43863 + goto exit;
43864 + }
43865 +
43866 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43867 + error = -EACCES;
43868 + goto exit;
43869 + }
43870 +
43871 goto ok;
43872 }
43873
43874 @@ -1795,6 +1854,14 @@ do_last:
43875 /*
43876 * It already exists.
43877 */
43878 +
43879 + /* only check if O_CREAT is specified, all other checks need
43880 + to go into may_open */
43881 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43882 + error = -EACCES;
43883 + goto exit_mutex_unlock;
43884 + }
43885 +
43886 mutex_unlock(&dir->d_inode->i_mutex);
43887 audit_inode(pathname, path.dentry);
43888
43889 @@ -1887,6 +1954,13 @@ do_link:
43890 error = security_inode_follow_link(path.dentry, &nd);
43891 if (error)
43892 goto exit_dput;
43893 +
43894 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43895 + path.dentry, nd.path.mnt)) {
43896 + error = -EACCES;
43897 + goto exit_dput;
43898 + }
43899 +
43900 error = __do_follow_link(&path, &nd);
43901 if (error) {
43902 /* Does someone understand code flow here? Or it is only
43903 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43904 error = may_mknod(mode);
43905 if (error)
43906 goto out_dput;
43907 +
43908 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43909 + error = -EPERM;
43910 + goto out_dput;
43911 + }
43912 +
43913 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43914 + error = -EACCES;
43915 + goto out_dput;
43916 + }
43917 +
43918 error = mnt_want_write(nd.path.mnt);
43919 if (error)
43920 goto out_dput;
43921 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43922 }
43923 out_drop_write:
43924 mnt_drop_write(nd.path.mnt);
43925 +
43926 + if (!error)
43927 + gr_handle_create(dentry, nd.path.mnt);
43928 out_dput:
43929 dput(dentry);
43930 out_unlock:
43931 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43932 if (IS_ERR(dentry))
43933 goto out_unlock;
43934
43935 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43936 + error = -EACCES;
43937 + goto out_dput;
43938 + }
43939 +
43940 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43941 mode &= ~current_umask();
43942 error = mnt_want_write(nd.path.mnt);
43943 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43944 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43945 out_drop_write:
43946 mnt_drop_write(nd.path.mnt);
43947 +
43948 + if (!error)
43949 + gr_handle_create(dentry, nd.path.mnt);
43950 +
43951 out_dput:
43952 dput(dentry);
43953 out_unlock:
43954 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43955 char * name;
43956 struct dentry *dentry;
43957 struct nameidata nd;
43958 + ino_t saved_ino = 0;
43959 + dev_t saved_dev = 0;
43960
43961 error = user_path_parent(dfd, pathname, &nd, &name);
43962 if (error)
43963 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43964 error = PTR_ERR(dentry);
43965 if (IS_ERR(dentry))
43966 goto exit2;
43967 +
43968 + if (dentry->d_inode != NULL) {
43969 + if (dentry->d_inode->i_nlink <= 1) {
43970 + saved_ino = dentry->d_inode->i_ino;
43971 + saved_dev = gr_get_dev_from_dentry(dentry);
43972 + }
43973 +
43974 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43975 + error = -EACCES;
43976 + goto exit3;
43977 + }
43978 + }
43979 +
43980 error = mnt_want_write(nd.path.mnt);
43981 if (error)
43982 goto exit3;
43983 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43984 if (error)
43985 goto exit4;
43986 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43987 + if (!error && (saved_dev || saved_ino))
43988 + gr_handle_delete(saved_ino, saved_dev);
43989 exit4:
43990 mnt_drop_write(nd.path.mnt);
43991 exit3:
43992 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43993 struct dentry *dentry;
43994 struct nameidata nd;
43995 struct inode *inode = NULL;
43996 + ino_t saved_ino = 0;
43997 + dev_t saved_dev = 0;
43998
43999 error = user_path_parent(dfd, pathname, &nd, &name);
44000 if (error)
44001 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
44002 if (nd.last.name[nd.last.len])
44003 goto slashes;
44004 inode = dentry->d_inode;
44005 - if (inode)
44006 + if (inode) {
44007 + if (inode->i_nlink <= 1) {
44008 + saved_ino = inode->i_ino;
44009 + saved_dev = gr_get_dev_from_dentry(dentry);
44010 + }
44011 +
44012 atomic_inc(&inode->i_count);
44013 +
44014 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44015 + error = -EACCES;
44016 + goto exit2;
44017 + }
44018 + }
44019 error = mnt_want_write(nd.path.mnt);
44020 if (error)
44021 goto exit2;
44022 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
44023 if (error)
44024 goto exit3;
44025 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44026 + if (!error && (saved_ino || saved_dev))
44027 + gr_handle_delete(saved_ino, saved_dev);
44028 exit3:
44029 mnt_drop_write(nd.path.mnt);
44030 exit2:
44031 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44032 if (IS_ERR(dentry))
44033 goto out_unlock;
44034
44035 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44036 + error = -EACCES;
44037 + goto out_dput;
44038 + }
44039 +
44040 error = mnt_want_write(nd.path.mnt);
44041 if (error)
44042 goto out_dput;
44043 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44044 if (error)
44045 goto out_drop_write;
44046 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44047 + if (!error)
44048 + gr_handle_create(dentry, nd.path.mnt);
44049 out_drop_write:
44050 mnt_drop_write(nd.path.mnt);
44051 out_dput:
44052 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44053 error = PTR_ERR(new_dentry);
44054 if (IS_ERR(new_dentry))
44055 goto out_unlock;
44056 +
44057 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44058 + old_path.dentry->d_inode,
44059 + old_path.dentry->d_inode->i_mode, to)) {
44060 + error = -EACCES;
44061 + goto out_dput;
44062 + }
44063 +
44064 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44065 + old_path.dentry, old_path.mnt, to)) {
44066 + error = -EACCES;
44067 + goto out_dput;
44068 + }
44069 +
44070 error = mnt_want_write(nd.path.mnt);
44071 if (error)
44072 goto out_dput;
44073 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44074 if (error)
44075 goto out_drop_write;
44076 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44077 + if (!error)
44078 + gr_handle_create(new_dentry, nd.path.mnt);
44079 out_drop_write:
44080 mnt_drop_write(nd.path.mnt);
44081 out_dput:
44082 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44083 char *to;
44084 int error;
44085
44086 + pax_track_stack();
44087 +
44088 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44089 if (error)
44090 goto exit;
44091 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44092 if (new_dentry == trap)
44093 goto exit5;
44094
44095 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44096 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44097 + to);
44098 + if (error)
44099 + goto exit5;
44100 +
44101 error = mnt_want_write(oldnd.path.mnt);
44102 if (error)
44103 goto exit5;
44104 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44105 goto exit6;
44106 error = vfs_rename(old_dir->d_inode, old_dentry,
44107 new_dir->d_inode, new_dentry);
44108 + if (!error)
44109 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44110 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44111 exit6:
44112 mnt_drop_write(oldnd.path.mnt);
44113 exit5:
44114 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44115
44116 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44117 {
44118 + char tmpbuf[64];
44119 + const char *newlink;
44120 int len;
44121
44122 len = PTR_ERR(link);
44123 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44124 len = strlen(link);
44125 if (len > (unsigned) buflen)
44126 len = buflen;
44127 - if (copy_to_user(buffer, link, len))
44128 +
44129 + if (len < sizeof(tmpbuf)) {
44130 + memcpy(tmpbuf, link, len);
44131 + newlink = tmpbuf;
44132 + } else
44133 + newlink = link;
44134 +
44135 + if (copy_to_user(buffer, newlink, len))
44136 len = -EFAULT;
44137 out:
44138 return len;
44139 diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
44140 --- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44141 +++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44142 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44143 if (!(sb->s_flags & MS_RDONLY))
44144 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44145 up_write(&sb->s_umount);
44146 +
44147 + gr_log_remount(mnt->mnt_devname, retval);
44148 +
44149 return retval;
44150 }
44151
44152 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44153 security_sb_umount_busy(mnt);
44154 up_write(&namespace_sem);
44155 release_mounts(&umount_list);
44156 +
44157 + gr_log_unmount(mnt->mnt_devname, retval);
44158 +
44159 return retval;
44160 }
44161
44162 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44163 if (retval)
44164 goto dput_out;
44165
44166 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44167 + retval = -EPERM;
44168 + goto dput_out;
44169 + }
44170 +
44171 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44172 + retval = -EPERM;
44173 + goto dput_out;
44174 + }
44175 +
44176 if (flags & MS_REMOUNT)
44177 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44178 data_page);
44179 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44180 dev_name, data_page);
44181 dput_out:
44182 path_put(&path);
44183 +
44184 + gr_log_mount(dev_name, dir_name, retval);
44185 +
44186 return retval;
44187 }
44188
44189 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44190 goto out1;
44191 }
44192
44193 + if (gr_handle_chroot_pivot()) {
44194 + error = -EPERM;
44195 + path_put(&old);
44196 + goto out1;
44197 + }
44198 +
44199 read_lock(&current->fs->lock);
44200 root = current->fs->root;
44201 path_get(&current->fs->root);
44202 diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
44203 --- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44204 +++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44205 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44206 int res, val = 0, len;
44207 __u8 __name[NCP_MAXPATHLEN + 1];
44208
44209 + pax_track_stack();
44210 +
44211 parent = dget_parent(dentry);
44212 dir = parent->d_inode;
44213
44214 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44215 int error, res, len;
44216 __u8 __name[NCP_MAXPATHLEN + 1];
44217
44218 + pax_track_stack();
44219 +
44220 lock_kernel();
44221 error = -EIO;
44222 if (!ncp_conn_valid(server))
44223 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44224 int error, result, len;
44225 int opmode;
44226 __u8 __name[NCP_MAXPATHLEN + 1];
44227 -
44228 +
44229 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44230 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44231
44232 + pax_track_stack();
44233 +
44234 error = -EIO;
44235 lock_kernel();
44236 if (!ncp_conn_valid(server))
44237 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44238 int error, len;
44239 __u8 __name[NCP_MAXPATHLEN + 1];
44240
44241 + pax_track_stack();
44242 +
44243 DPRINTK("ncp_mkdir: making %s/%s\n",
44244 dentry->d_parent->d_name.name, dentry->d_name.name);
44245
44246 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44247 if (!ncp_conn_valid(server))
44248 goto out;
44249
44250 + pax_track_stack();
44251 +
44252 ncp_age_dentry(server, dentry);
44253 len = sizeof(__name);
44254 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44255 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44256 int old_len, new_len;
44257 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44258
44259 + pax_track_stack();
44260 +
44261 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44262 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44263 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44264 diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
44265 --- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44266 +++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44267 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44268 #endif
44269 struct ncp_entry_info finfo;
44270
44271 + pax_track_stack();
44272 +
44273 data.wdog_pid = NULL;
44274 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44275 if (!server)
44276 diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
44277 --- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44278 +++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44279 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44280 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44281 nfsi->attrtimeo_timestamp = jiffies;
44282
44283 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44284 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44285 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44286 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44287 else
44288 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44289 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44290 }
44291
44292 -static atomic_long_t nfs_attr_generation_counter;
44293 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44294
44295 static unsigned long nfs_read_attr_generation_counter(void)
44296 {
44297 - return atomic_long_read(&nfs_attr_generation_counter);
44298 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44299 }
44300
44301 unsigned long nfs_inc_attr_generation_counter(void)
44302 {
44303 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44304 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44305 }
44306
44307 void nfs_fattr_init(struct nfs_fattr *fattr)
44308 diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
44309 --- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44310 +++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44311 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44312 fput(filp);
44313 }
44314
44315 -static struct nlmsvc_binding nfsd_nlm_ops = {
44316 +static const struct nlmsvc_binding nfsd_nlm_ops = {
44317 .fopen = nlm_fopen, /* open file for locking */
44318 .fclose = nlm_fclose, /* close file */
44319 };
44320 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44321 --- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44322 +++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44323 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44324 unsigned int cmd;
44325 int err;
44326
44327 + pax_track_stack();
44328 +
44329 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44330 (long long) lock->lk_offset,
44331 (long long) lock->lk_length);
44332 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44333 --- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44334 +++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44335 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44336 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44337 u32 minorversion = resp->cstate.minorversion;
44338
44339 + pax_track_stack();
44340 +
44341 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44342 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44343 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44344 diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44345 --- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44346 +++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44347 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44348 } else {
44349 oldfs = get_fs();
44350 set_fs(KERNEL_DS);
44351 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44352 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44353 set_fs(oldfs);
44354 }
44355
44356 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44357
44358 /* Write the data. */
44359 oldfs = get_fs(); set_fs(KERNEL_DS);
44360 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44361 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44362 set_fs(oldfs);
44363 if (host_err < 0)
44364 goto out_nfserr;
44365 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44366 */
44367
44368 oldfs = get_fs(); set_fs(KERNEL_DS);
44369 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44370 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44371 set_fs(oldfs);
44372
44373 if (host_err < 0)
44374 diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44375 --- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44376 +++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44377 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44378 unsigned int cmd, void __user *argp)
44379 {
44380 struct nilfs_argv argv[5];
44381 - const static size_t argsz[5] = {
44382 + static const size_t argsz[5] = {
44383 sizeof(struct nilfs_vdesc),
44384 sizeof(struct nilfs_period),
44385 sizeof(__u64),
44386 diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44387 --- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44388 +++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44389 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44390 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44391 }
44392
44393 -static struct fsnotify_ops dnotify_fsnotify_ops = {
44394 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
44395 .handle_event = dnotify_handle_event,
44396 .should_send_event = dnotify_should_send_event,
44397 .free_group_priv = NULL,
44398 diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44399 --- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44400 +++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44401 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44402 * get set to 0 so it will never get 'freed'
44403 */
44404 static struct fsnotify_event q_overflow_event;
44405 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44406 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44407
44408 /**
44409 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44410 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44411 */
44412 u32 fsnotify_get_cookie(void)
44413 {
44414 - return atomic_inc_return(&fsnotify_sync_cookie);
44415 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44416 }
44417 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44418
44419 diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44420 --- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44421 +++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44422 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
44423 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44424 ~(s64)(ndir->itype.index.block_size - 1)));
44425 /* Bounds checks. */
44426 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44427 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44428 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44429 "inode 0x%lx or driver bug.", vdir->i_ino);
44430 goto err_out;
44431 diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44432 --- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44433 +++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44434 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44435 #endif /* NTFS_RW */
44436 };
44437
44438 -const struct file_operations ntfs_empty_file_ops = {};
44439 +const struct file_operations ntfs_empty_file_ops __read_only;
44440
44441 -const struct inode_operations ntfs_empty_inode_ops = {};
44442 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44443 diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44444 --- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44445 +++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44446 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44447 return mlog_mask_store(mlog_attr->mask, buf, count);
44448 }
44449
44450 -static struct sysfs_ops mlog_attr_ops = {
44451 +static const struct sysfs_ops mlog_attr_ops = {
44452 .show = mlog_show,
44453 .store = mlog_store,
44454 };
44455 diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44456 --- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44457 +++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44458 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44459 goto bail;
44460 }
44461
44462 - atomic_inc(&osb->alloc_stats.moves);
44463 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44464
44465 status = 0;
44466 bail:
44467 diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44468 --- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44469 +++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44470 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44471 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44472 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44473
44474 + pax_track_stack();
44475 +
44476 /* At some point it might be nice to break this function up a
44477 * bit. */
44478
44479 diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44480 --- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44481 +++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44482 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
44483
44484 struct ocfs2_alloc_stats
44485 {
44486 - atomic_t moves;
44487 - atomic_t local_data;
44488 - atomic_t bitmap_data;
44489 - atomic_t bg_allocs;
44490 - atomic_t bg_extends;
44491 + atomic_unchecked_t moves;
44492 + atomic_unchecked_t local_data;
44493 + atomic_unchecked_t bitmap_data;
44494 + atomic_unchecked_t bg_allocs;
44495 + atomic_unchecked_t bg_extends;
44496 };
44497
44498 enum ocfs2_local_alloc_state
44499 diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44500 --- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44501 +++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44502 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44503 mlog_errno(status);
44504 goto bail;
44505 }
44506 - atomic_inc(&osb->alloc_stats.bg_extends);
44507 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44508
44509 /* You should never ask for this much metadata */
44510 BUG_ON(bits_wanted >
44511 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44512 mlog_errno(status);
44513 goto bail;
44514 }
44515 - atomic_inc(&osb->alloc_stats.bg_allocs);
44516 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44517
44518 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44519 ac->ac_bits_given += (*num_bits);
44520 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44521 mlog_errno(status);
44522 goto bail;
44523 }
44524 - atomic_inc(&osb->alloc_stats.bg_allocs);
44525 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44526
44527 BUG_ON(num_bits != 1);
44528
44529 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44530 cluster_start,
44531 num_clusters);
44532 if (!status)
44533 - atomic_inc(&osb->alloc_stats.local_data);
44534 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44535 } else {
44536 if (min_clusters > (osb->bitmap_cpg - 1)) {
44537 /* The only paths asking for contiguousness
44538 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44539 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44540 bg_blkno,
44541 bg_bit_off);
44542 - atomic_inc(&osb->alloc_stats.bitmap_data);
44543 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44544 }
44545 }
44546 if (status < 0) {
44547 diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44548 --- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44549 +++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44550 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44551 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44552 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44553 "Stats",
44554 - atomic_read(&osb->alloc_stats.bitmap_data),
44555 - atomic_read(&osb->alloc_stats.local_data),
44556 - atomic_read(&osb->alloc_stats.bg_allocs),
44557 - atomic_read(&osb->alloc_stats.moves),
44558 - atomic_read(&osb->alloc_stats.bg_extends));
44559 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44560 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44561 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44562 + atomic_read_unchecked(&osb->alloc_stats.moves),
44563 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44564
44565 out += snprintf(buf + out, len - out,
44566 "%10s => State: %u Descriptor: %llu Size: %u bits "
44567 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44568 spin_lock_init(&osb->osb_xattr_lock);
44569 ocfs2_init_inode_steal_slot(osb);
44570
44571 - atomic_set(&osb->alloc_stats.moves, 0);
44572 - atomic_set(&osb->alloc_stats.local_data, 0);
44573 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44574 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44575 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44576 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44577 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44578 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44579 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44580 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44581
44582 /* Copy the blockcheck stats from the superblock probe */
44583 osb->osb_ecc_stats = *stats;
44584 diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44585 --- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44586 +++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44587 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44588 error = locks_verify_truncate(inode, NULL, length);
44589 if (!error)
44590 error = security_path_truncate(&path, length, 0);
44591 +
44592 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44593 + error = -EACCES;
44594 +
44595 if (!error) {
44596 vfs_dq_init(inode);
44597 error = do_truncate(path.dentry, length, 0, NULL);
44598 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44599 if (__mnt_is_readonly(path.mnt))
44600 res = -EROFS;
44601
44602 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44603 + res = -EACCES;
44604 +
44605 out_path_release:
44606 path_put(&path);
44607 out:
44608 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44609 if (error)
44610 goto dput_and_out;
44611
44612 + gr_log_chdir(path.dentry, path.mnt);
44613 +
44614 set_fs_pwd(current->fs, &path);
44615
44616 dput_and_out:
44617 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44618 goto out_putf;
44619
44620 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44621 +
44622 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44623 + error = -EPERM;
44624 +
44625 + if (!error)
44626 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44627 +
44628 if (!error)
44629 set_fs_pwd(current->fs, &file->f_path);
44630 out_putf:
44631 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44632 if (!capable(CAP_SYS_CHROOT))
44633 goto dput_and_out;
44634
44635 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44636 + goto dput_and_out;
44637 +
44638 + if (gr_handle_chroot_caps(&path)) {
44639 + error = -ENOMEM;
44640 + goto dput_and_out;
44641 + }
44642 +
44643 set_fs_root(current->fs, &path);
44644 +
44645 + gr_handle_chroot_chdir(&path);
44646 +
44647 error = 0;
44648 dput_and_out:
44649 path_put(&path);
44650 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44651 err = mnt_want_write_file(file);
44652 if (err)
44653 goto out_putf;
44654 +
44655 mutex_lock(&inode->i_mutex);
44656 +
44657 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44658 + err = -EACCES;
44659 + goto out_unlock;
44660 + }
44661 +
44662 if (mode == (mode_t) -1)
44663 mode = inode->i_mode;
44664 +
44665 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44666 + err = -EPERM;
44667 + goto out_unlock;
44668 + }
44669 +
44670 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44671 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44672 err = notify_change(dentry, &newattrs);
44673 +
44674 +out_unlock:
44675 mutex_unlock(&inode->i_mutex);
44676 mnt_drop_write(file->f_path.mnt);
44677 out_putf:
44678 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44679 error = mnt_want_write(path.mnt);
44680 if (error)
44681 goto dput_and_out;
44682 +
44683 mutex_lock(&inode->i_mutex);
44684 +
44685 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44686 + error = -EACCES;
44687 + goto out_unlock;
44688 + }
44689 +
44690 if (mode == (mode_t) -1)
44691 mode = inode->i_mode;
44692 +
44693 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44694 + error = -EACCES;
44695 + goto out_unlock;
44696 + }
44697 +
44698 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44699 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44700 error = notify_change(path.dentry, &newattrs);
44701 +
44702 +out_unlock:
44703 mutex_unlock(&inode->i_mutex);
44704 mnt_drop_write(path.mnt);
44705 dput_and_out:
44706 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44707 return sys_fchmodat(AT_FDCWD, filename, mode);
44708 }
44709
44710 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44711 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44712 {
44713 struct inode *inode = dentry->d_inode;
44714 int error;
44715 struct iattr newattrs;
44716
44717 + if (!gr_acl_handle_chown(dentry, mnt))
44718 + return -EACCES;
44719 +
44720 newattrs.ia_valid = ATTR_CTIME;
44721 if (user != (uid_t) -1) {
44722 newattrs.ia_valid |= ATTR_UID;
44723 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44724 error = mnt_want_write(path.mnt);
44725 if (error)
44726 goto out_release;
44727 - error = chown_common(path.dentry, user, group);
44728 + error = chown_common(path.dentry, user, group, path.mnt);
44729 mnt_drop_write(path.mnt);
44730 out_release:
44731 path_put(&path);
44732 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44733 error = mnt_want_write(path.mnt);
44734 if (error)
44735 goto out_release;
44736 - error = chown_common(path.dentry, user, group);
44737 + error = chown_common(path.dentry, user, group, path.mnt);
44738 mnt_drop_write(path.mnt);
44739 out_release:
44740 path_put(&path);
44741 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44742 error = mnt_want_write(path.mnt);
44743 if (error)
44744 goto out_release;
44745 - error = chown_common(path.dentry, user, group);
44746 + error = chown_common(path.dentry, user, group, path.mnt);
44747 mnt_drop_write(path.mnt);
44748 out_release:
44749 path_put(&path);
44750 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44751 goto out_fput;
44752 dentry = file->f_path.dentry;
44753 audit_inode(NULL, dentry);
44754 - error = chown_common(dentry, user, group);
44755 + error = chown_common(dentry, user, group, file->f_path.mnt);
44756 mnt_drop_write(file->f_path.mnt);
44757 out_fput:
44758 fput(file);
44759 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44760 if (!IS_ERR(tmp)) {
44761 fd = get_unused_fd_flags(flags);
44762 if (fd >= 0) {
44763 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44764 + struct file *f;
44765 + /* don't allow to be set by userland */
44766 + flags &= ~FMODE_GREXEC;
44767 + f = do_filp_open(dfd, tmp, flags, mode, 0);
44768 if (IS_ERR(f)) {
44769 put_unused_fd(fd);
44770 fd = PTR_ERR(f);
44771 diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44772 --- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44773 +++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44774 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44775 ldm_error ("A VBLK claims to have %d parts.", num);
44776 return false;
44777 }
44778 +
44779 if (rec >= num) {
44780 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44781 return false;
44782 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44783 goto found;
44784 }
44785
44786 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44787 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44788 if (!f) {
44789 ldm_crit ("Out of memory.");
44790 return false;
44791 diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44792 --- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44793 +++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44794 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44795 return 0; /* not a MacOS disk */
44796 }
44797 blocks_in_map = be32_to_cpu(part->map_count);
44798 + printk(" [mac]");
44799 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44800 put_dev_sector(sect);
44801 return 0;
44802 }
44803 - printk(" [mac]");
44804 for (slot = 1; slot <= blocks_in_map; ++slot) {
44805 int pos = slot * secsize;
44806 put_dev_sector(sect);
44807 diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44808 --- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44809 +++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44810 @@ -401,9 +401,9 @@ redo:
44811 }
44812 if (bufs) /* More to do? */
44813 continue;
44814 - if (!pipe->writers)
44815 + if (!atomic_read(&pipe->writers))
44816 break;
44817 - if (!pipe->waiting_writers) {
44818 + if (!atomic_read(&pipe->waiting_writers)) {
44819 /* syscall merging: Usually we must not sleep
44820 * if O_NONBLOCK is set, or if we got some data.
44821 * But if a writer sleeps in kernel space, then
44822 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44823 mutex_lock(&inode->i_mutex);
44824 pipe = inode->i_pipe;
44825
44826 - if (!pipe->readers) {
44827 + if (!atomic_read(&pipe->readers)) {
44828 send_sig(SIGPIPE, current, 0);
44829 ret = -EPIPE;
44830 goto out;
44831 @@ -511,7 +511,7 @@ redo1:
44832 for (;;) {
44833 int bufs;
44834
44835 - if (!pipe->readers) {
44836 + if (!atomic_read(&pipe->readers)) {
44837 send_sig(SIGPIPE, current, 0);
44838 if (!ret)
44839 ret = -EPIPE;
44840 @@ -597,9 +597,9 @@ redo2:
44841 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44842 do_wakeup = 0;
44843 }
44844 - pipe->waiting_writers++;
44845 + atomic_inc(&pipe->waiting_writers);
44846 pipe_wait(pipe);
44847 - pipe->waiting_writers--;
44848 + atomic_dec(&pipe->waiting_writers);
44849 }
44850 out:
44851 mutex_unlock(&inode->i_mutex);
44852 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44853 mask = 0;
44854 if (filp->f_mode & FMODE_READ) {
44855 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44856 - if (!pipe->writers && filp->f_version != pipe->w_counter)
44857 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44858 mask |= POLLHUP;
44859 }
44860
44861 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44862 * Most Unices do not set POLLERR for FIFOs but on Linux they
44863 * behave exactly like pipes for poll().
44864 */
44865 - if (!pipe->readers)
44866 + if (!atomic_read(&pipe->readers))
44867 mask |= POLLERR;
44868 }
44869
44870 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44871
44872 mutex_lock(&inode->i_mutex);
44873 pipe = inode->i_pipe;
44874 - pipe->readers -= decr;
44875 - pipe->writers -= decw;
44876 + atomic_sub(decr, &pipe->readers);
44877 + atomic_sub(decw, &pipe->writers);
44878
44879 - if (!pipe->readers && !pipe->writers) {
44880 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44881 free_pipe_info(inode);
44882 } else {
44883 wake_up_interruptible_sync(&pipe->wait);
44884 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44885
44886 if (inode->i_pipe) {
44887 ret = 0;
44888 - inode->i_pipe->readers++;
44889 + atomic_inc(&inode->i_pipe->readers);
44890 }
44891
44892 mutex_unlock(&inode->i_mutex);
44893 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44894
44895 if (inode->i_pipe) {
44896 ret = 0;
44897 - inode->i_pipe->writers++;
44898 + atomic_inc(&inode->i_pipe->writers);
44899 }
44900
44901 mutex_unlock(&inode->i_mutex);
44902 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44903 if (inode->i_pipe) {
44904 ret = 0;
44905 if (filp->f_mode & FMODE_READ)
44906 - inode->i_pipe->readers++;
44907 + atomic_inc(&inode->i_pipe->readers);
44908 if (filp->f_mode & FMODE_WRITE)
44909 - inode->i_pipe->writers++;
44910 + atomic_inc(&inode->i_pipe->writers);
44911 }
44912
44913 mutex_unlock(&inode->i_mutex);
44914 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44915 inode->i_pipe = NULL;
44916 }
44917
44918 -static struct vfsmount *pipe_mnt __read_mostly;
44919 +struct vfsmount *pipe_mnt __read_mostly;
44920 static int pipefs_delete_dentry(struct dentry *dentry)
44921 {
44922 /*
44923 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44924 goto fail_iput;
44925 inode->i_pipe = pipe;
44926
44927 - pipe->readers = pipe->writers = 1;
44928 + atomic_set(&pipe->readers, 1);
44929 + atomic_set(&pipe->writers, 1);
44930 inode->i_fop = &rdwr_pipefifo_fops;
44931
44932 /*
44933 diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44934 --- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44935 +++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44936 @@ -60,6 +60,7 @@
44937 #include <linux/tty.h>
44938 #include <linux/string.h>
44939 #include <linux/mman.h>
44940 +#include <linux/grsecurity.h>
44941 #include <linux/proc_fs.h>
44942 #include <linux/ioport.h>
44943 #include <linux/uaccess.h>
44944 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
44945 p->nivcsw);
44946 }
44947
44948 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44949 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
44950 +{
44951 + if (p->mm)
44952 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44953 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44954 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44955 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44956 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44957 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44958 + else
44959 + seq_printf(m, "PaX:\t-----\n");
44960 +}
44961 +#endif
44962 +
44963 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44964 struct pid *pid, struct task_struct *task)
44965 {
44966 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44967 task_cap(m, task);
44968 cpuset_task_status_allowed(m, task);
44969 task_context_switch_counts(m, task);
44970 +
44971 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44972 + task_pax(m, task);
44973 +#endif
44974 +
44975 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44976 + task_grsec_rbac(m, task);
44977 +#endif
44978 +
44979 return 0;
44980 }
44981
44982 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44983 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44984 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44985 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44986 +#endif
44987 +
44988 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44989 struct pid *pid, struct task_struct *task, int whole)
44990 {
44991 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44992 cputime_t cutime, cstime, utime, stime;
44993 cputime_t cgtime, gtime;
44994 unsigned long rsslim = 0;
44995 - char tcomm[sizeof(task->comm)];
44996 + char tcomm[sizeof(task->comm)] = { 0 };
44997 unsigned long flags;
44998
44999 + pax_track_stack();
45000 +
45001 state = *get_task_state(task);
45002 vsize = eip = esp = 0;
45003 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45004 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
45005 gtime = task_gtime(task);
45006 }
45007
45008 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45009 + if (PAX_RAND_FLAGS(mm)) {
45010 + eip = 0;
45011 + esp = 0;
45012 + wchan = 0;
45013 + }
45014 +#endif
45015 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45016 + wchan = 0;
45017 + eip =0;
45018 + esp =0;
45019 +#endif
45020 +
45021 /* scale priority and nice values from timeslices to -20..20 */
45022 /* to make it look like a "normal" Unix priority/nice value */
45023 priority = task_prio(task);
45024 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
45025 vsize,
45026 mm ? get_mm_rss(mm) : 0,
45027 rsslim,
45028 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45029 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45030 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45031 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45032 +#else
45033 mm ? (permitted ? mm->start_code : 1) : 0,
45034 mm ? (permitted ? mm->end_code : 1) : 0,
45035 (permitted && mm) ? mm->start_stack : 0,
45036 +#endif
45037 esp,
45038 eip,
45039 /* The signal information here is obsolete.
45040 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
45041
45042 return 0;
45043 }
45044 +
45045 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45046 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45047 +{
45048 + u32 curr_ip = 0;
45049 + unsigned long flags;
45050 +
45051 + if (lock_task_sighand(task, &flags)) {
45052 + curr_ip = task->signal->curr_ip;
45053 + unlock_task_sighand(task, &flags);
45054 + }
45055 +
45056 + return sprintf(buffer, "%pI4\n", &curr_ip);
45057 +}
45058 +#endif
45059 diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
45060 --- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
45061 +++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
45062 @@ -102,6 +102,22 @@ struct pid_entry {
45063 union proc_op op;
45064 };
45065
45066 +struct getdents_callback {
45067 + struct linux_dirent __user * current_dir;
45068 + struct linux_dirent __user * previous;
45069 + struct file * file;
45070 + int count;
45071 + int error;
45072 +};
45073 +
45074 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45075 + loff_t offset, u64 ino, unsigned int d_type)
45076 +{
45077 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45078 + buf->error = -EINVAL;
45079 + return 0;
45080 +}
45081 +
45082 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45083 .name = (NAME), \
45084 .len = sizeof(NAME) - 1, \
45085 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45086 if (task == current)
45087 return 0;
45088
45089 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45090 + return -EPERM;
45091 +
45092 /*
45093 * If current is actively ptrace'ing, and would also be
45094 * permitted to freshly attach with ptrace now, permit it.
45095 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45096 if (!mm->arg_end)
45097 goto out_mm; /* Shh! No looking before we're done */
45098
45099 + if (gr_acl_handle_procpidmem(task))
45100 + goto out_mm;
45101 +
45102 len = mm->arg_end - mm->arg_start;
45103
45104 if (len > PAGE_SIZE)
45105 @@ -287,12 +309,28 @@ out:
45106 return res;
45107 }
45108
45109 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45110 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45111 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45112 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45113 +#endif
45114 +
45115 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45116 {
45117 int res = 0;
45118 struct mm_struct *mm = get_task_mm(task);
45119 if (mm) {
45120 unsigned int nwords = 0;
45121 +
45122 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45123 + /* allow if we're currently ptracing this task */
45124 + if (PAX_RAND_FLAGS(mm) &&
45125 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45126 + mmput(mm);
45127 + return res;
45128 + }
45129 +#endif
45130 +
45131 do {
45132 nwords += 2;
45133 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45134 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45135 }
45136
45137
45138 -#ifdef CONFIG_KALLSYMS
45139 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45140 /*
45141 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45142 * Returns the resolved symbol. If that fails, simply return the address.
45143 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45144 }
45145 #endif /* CONFIG_KALLSYMS */
45146
45147 -#ifdef CONFIG_STACKTRACE
45148 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45149
45150 #define MAX_STACK_TRACE_DEPTH 64
45151
45152 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45153 return count;
45154 }
45155
45156 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45157 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45158 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45159 {
45160 long nr;
45161 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45162 /************************************************************************/
45163
45164 /* permission checks */
45165 -static int proc_fd_access_allowed(struct inode *inode)
45166 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45167 {
45168 struct task_struct *task;
45169 int allowed = 0;
45170 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45171 */
45172 task = get_proc_task(inode);
45173 if (task) {
45174 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45175 + if (log)
45176 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45177 + else
45178 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45179 put_task_struct(task);
45180 }
45181 return allowed;
45182 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45183 if (!task)
45184 goto out_no_task;
45185
45186 + if (gr_acl_handle_procpidmem(task))
45187 + goto out;
45188 +
45189 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45190 goto out;
45191
45192 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45193 path_put(&nd->path);
45194
45195 /* Are we allowed to snoop on the tasks file descriptors? */
45196 - if (!proc_fd_access_allowed(inode))
45197 + if (!proc_fd_access_allowed(inode,0))
45198 goto out;
45199
45200 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45201 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45202 struct path path;
45203
45204 /* Are we allowed to snoop on the tasks file descriptors? */
45205 - if (!proc_fd_access_allowed(inode))
45206 - goto out;
45207 + /* logging this is needed for learning on chromium to work properly,
45208 + but we don't want to flood the logs from 'ps' which does a readlink
45209 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45210 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45211 + */
45212 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45213 + if (!proc_fd_access_allowed(inode,0))
45214 + goto out;
45215 + } else {
45216 + if (!proc_fd_access_allowed(inode,1))
45217 + goto out;
45218 + }
45219
45220 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45221 if (error)
45222 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45223 rcu_read_lock();
45224 cred = __task_cred(task);
45225 inode->i_uid = cred->euid;
45226 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45227 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45228 +#else
45229 inode->i_gid = cred->egid;
45230 +#endif
45231 rcu_read_unlock();
45232 }
45233 security_task_to_inode(task, inode);
45234 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45235 struct inode *inode = dentry->d_inode;
45236 struct task_struct *task;
45237 const struct cred *cred;
45238 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45239 + const struct cred *tmpcred = current_cred();
45240 +#endif
45241
45242 generic_fillattr(inode, stat);
45243
45244 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45245 stat->uid = 0;
45246 stat->gid = 0;
45247 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45248 +
45249 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45250 + rcu_read_unlock();
45251 + return -ENOENT;
45252 + }
45253 +
45254 if (task) {
45255 + cred = __task_cred(task);
45256 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45257 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45258 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45259 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45260 +#endif
45261 + ) {
45262 +#endif
45263 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45264 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45265 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45266 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45267 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45268 +#endif
45269 task_dumpable(task)) {
45270 - cred = __task_cred(task);
45271 stat->uid = cred->euid;
45272 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45273 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45274 +#else
45275 stat->gid = cred->egid;
45276 +#endif
45277 }
45278 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45279 + } else {
45280 + rcu_read_unlock();
45281 + return -ENOENT;
45282 + }
45283 +#endif
45284 }
45285 rcu_read_unlock();
45286 return 0;
45287 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45288
45289 if (task) {
45290 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45291 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45292 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45293 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45294 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45295 +#endif
45296 task_dumpable(task)) {
45297 rcu_read_lock();
45298 cred = __task_cred(task);
45299 inode->i_uid = cred->euid;
45300 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45301 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45302 +#else
45303 inode->i_gid = cred->egid;
45304 +#endif
45305 rcu_read_unlock();
45306 } else {
45307 inode->i_uid = 0;
45308 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45309 int fd = proc_fd(inode);
45310
45311 if (task) {
45312 - files = get_files_struct(task);
45313 + if (!gr_acl_handle_procpidmem(task))
45314 + files = get_files_struct(task);
45315 put_task_struct(task);
45316 }
45317 if (files) {
45318 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
45319 static int proc_fd_permission(struct inode *inode, int mask)
45320 {
45321 int rv;
45322 + struct task_struct *task;
45323
45324 rv = generic_permission(inode, mask, NULL);
45325 - if (rv == 0)
45326 - return 0;
45327 +
45328 if (task_pid(current) == proc_pid(inode))
45329 rv = 0;
45330 +
45331 + task = get_proc_task(inode);
45332 + if (task == NULL)
45333 + return rv;
45334 +
45335 + if (gr_acl_handle_procpidmem(task))
45336 + rv = -EACCES;
45337 +
45338 + put_task_struct(task);
45339 +
45340 return rv;
45341 }
45342
45343 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45344 if (!task)
45345 goto out_no_task;
45346
45347 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45348 + goto out;
45349 +
45350 /*
45351 * Yes, it does not scale. And it should not. Don't add
45352 * new entries into /proc/<tgid>/ without very good reasons.
45353 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45354 if (!task)
45355 goto out_no_task;
45356
45357 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45358 + goto out;
45359 +
45360 ret = 0;
45361 i = filp->f_pos;
45362 switch (i) {
45363 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45364 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45365 void *cookie)
45366 {
45367 - char *s = nd_get_link(nd);
45368 + const char *s = nd_get_link(nd);
45369 if (!IS_ERR(s))
45370 __putname(s);
45371 }
45372 @@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45373 #ifdef CONFIG_SCHED_DEBUG
45374 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45375 #endif
45376 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45377 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45378 INF("syscall", S_IRUSR, proc_pid_syscall),
45379 #endif
45380 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45381 @@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45382 #ifdef CONFIG_SECURITY
45383 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45384 #endif
45385 -#ifdef CONFIG_KALLSYMS
45386 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45387 INF("wchan", S_IRUGO, proc_pid_wchan),
45388 #endif
45389 -#ifdef CONFIG_STACKTRACE
45390 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45391 ONE("stack", S_IRUSR, proc_pid_stack),
45392 #endif
45393 #ifdef CONFIG_SCHEDSTATS
45394 @@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45395 #ifdef CONFIG_TASK_IO_ACCOUNTING
45396 INF("io", S_IRUSR, proc_tgid_io_accounting),
45397 #endif
45398 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45399 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45400 +#endif
45401 };
45402
45403 static int proc_tgid_base_readdir(struct file * filp,
45404 @@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45405 if (!inode)
45406 goto out;
45407
45408 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45409 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45410 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45411 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45412 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45413 +#else
45414 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45415 +#endif
45416 inode->i_op = &proc_tgid_base_inode_operations;
45417 inode->i_fop = &proc_tgid_base_operations;
45418 inode->i_flags|=S_IMMUTABLE;
45419 @@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45420 if (!task)
45421 goto out;
45422
45423 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45424 + goto out_put_task;
45425 +
45426 result = proc_pid_instantiate(dir, dentry, task, NULL);
45427 +out_put_task:
45428 put_task_struct(task);
45429 out:
45430 return result;
45431 @@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45432 {
45433 unsigned int nr;
45434 struct task_struct *reaper;
45435 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45436 + const struct cred *tmpcred = current_cred();
45437 + const struct cred *itercred;
45438 +#endif
45439 + filldir_t __filldir = filldir;
45440 struct tgid_iter iter;
45441 struct pid_namespace *ns;
45442
45443 @@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45444 for (iter = next_tgid(ns, iter);
45445 iter.task;
45446 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45447 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45448 + rcu_read_lock();
45449 + itercred = __task_cred(iter.task);
45450 +#endif
45451 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45452 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45453 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45454 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45455 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45456 +#endif
45457 + )
45458 +#endif
45459 + )
45460 + __filldir = &gr_fake_filldir;
45461 + else
45462 + __filldir = filldir;
45463 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45464 + rcu_read_unlock();
45465 +#endif
45466 filp->f_pos = iter.tgid + TGID_OFFSET;
45467 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45468 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45469 put_task_struct(iter.task);
45470 goto out;
45471 }
45472 @@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45473 #ifdef CONFIG_SCHED_DEBUG
45474 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45475 #endif
45476 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45477 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45478 INF("syscall", S_IRUSR, proc_pid_syscall),
45479 #endif
45480 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45481 @@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45482 #ifdef CONFIG_SECURITY
45483 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45484 #endif
45485 -#ifdef CONFIG_KALLSYMS
45486 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45487 INF("wchan", S_IRUGO, proc_pid_wchan),
45488 #endif
45489 -#ifdef CONFIG_STACKTRACE
45490 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45491 ONE("stack", S_IRUSR, proc_pid_stack),
45492 #endif
45493 #ifdef CONFIG_SCHEDSTATS
45494 diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45495 --- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45496 +++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45497 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45498
45499 static int __init proc_cmdline_init(void)
45500 {
45501 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45502 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45503 +#else
45504 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45505 +#endif
45506 return 0;
45507 }
45508 module_init(proc_cmdline_init);
45509 diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45510 --- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45511 +++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45512 @@ -64,7 +64,11 @@ static const struct file_operations proc
45513
45514 static int __init proc_devices_init(void)
45515 {
45516 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45517 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45518 +#else
45519 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45520 +#endif
45521 return 0;
45522 }
45523 module_init(proc_devices_init);
45524 diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45525 --- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45526 +++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45527 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45528 if (de->mode) {
45529 inode->i_mode = de->mode;
45530 inode->i_uid = de->uid;
45531 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45532 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45533 +#else
45534 inode->i_gid = de->gid;
45535 +#endif
45536 }
45537 if (de->size)
45538 inode->i_size = de->size;
45539 diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45540 --- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45541 +++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45542 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45543 struct pid *pid, struct task_struct *task);
45544 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45545 struct pid *pid, struct task_struct *task);
45546 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45547 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45548 +#endif
45549 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45550
45551 extern const struct file_operations proc_maps_operations;
45552 diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45553 --- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45554 +++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45555 @@ -30,12 +30,12 @@ config PROC_FS
45556
45557 config PROC_KCORE
45558 bool "/proc/kcore support" if !ARM
45559 - depends on PROC_FS && MMU
45560 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45561
45562 config PROC_VMCORE
45563 bool "/proc/vmcore support (EXPERIMENTAL)"
45564 - depends on PROC_FS && CRASH_DUMP
45565 - default y
45566 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45567 + default n
45568 help
45569 Exports the dump image of crashed kernel in ELF format.
45570
45571 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45572 limited in memory.
45573
45574 config PROC_PAGE_MONITOR
45575 - default y
45576 - depends on PROC_FS && MMU
45577 + default n
45578 + depends on PROC_FS && MMU && !GRKERNSEC
45579 bool "Enable /proc page monitoring" if EMBEDDED
45580 help
45581 Various /proc files exist to monitor process memory utilization:
45582 diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45583 --- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45584 +++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45585 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45586 off_t offset = 0;
45587 struct kcore_list *m;
45588
45589 + pax_track_stack();
45590 +
45591 /* setup ELF header */
45592 elf = (struct elfhdr *) bufp;
45593 bufp += sizeof(struct elfhdr);
45594 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45595 * the addresses in the elf_phdr on our list.
45596 */
45597 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45598 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45599 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45600 + if (tsz > buflen)
45601 tsz = buflen;
45602 -
45603 +
45604 while (buflen) {
45605 struct kcore_list *m;
45606
45607 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45608 kfree(elf_buf);
45609 } else {
45610 if (kern_addr_valid(start)) {
45611 - unsigned long n;
45612 + char *elf_buf;
45613 + mm_segment_t oldfs;
45614
45615 - n = copy_to_user(buffer, (char *)start, tsz);
45616 - /*
45617 - * We cannot distingush between fault on source
45618 - * and fault on destination. When this happens
45619 - * we clear too and hope it will trigger the
45620 - * EFAULT again.
45621 - */
45622 - if (n) {
45623 - if (clear_user(buffer + tsz - n,
45624 - n))
45625 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45626 + if (!elf_buf)
45627 + return -ENOMEM;
45628 + oldfs = get_fs();
45629 + set_fs(KERNEL_DS);
45630 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45631 + set_fs(oldfs);
45632 + if (copy_to_user(buffer, elf_buf, tsz)) {
45633 + kfree(elf_buf);
45634 return -EFAULT;
45635 + }
45636 }
45637 + set_fs(oldfs);
45638 + kfree(elf_buf);
45639 } else {
45640 if (clear_user(buffer, tsz))
45641 return -EFAULT;
45642 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45643
45644 static int open_kcore(struct inode *inode, struct file *filp)
45645 {
45646 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45647 + return -EPERM;
45648 +#endif
45649 if (!capable(CAP_SYS_RAWIO))
45650 return -EPERM;
45651 if (kcore_need_update)
45652 diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45653 --- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45654 +++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45655 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45656 unsigned long pages[NR_LRU_LISTS];
45657 int lru;
45658
45659 + pax_track_stack();
45660 +
45661 /*
45662 * display in kilobytes.
45663 */
45664 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45665 vmi.used >> 10,
45666 vmi.largest_chunk >> 10
45667 #ifdef CONFIG_MEMORY_FAILURE
45668 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45669 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45670 #endif
45671 );
45672
45673 diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45674 --- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45675 +++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45676 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45677 if (len < 1)
45678 len = 1;
45679 seq_printf(m, "%*c", len, ' ');
45680 - seq_path(m, &file->f_path, "");
45681 + seq_path(m, &file->f_path, "\n\\");
45682 }
45683
45684 seq_putc(m, '\n');
45685 diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45686 --- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45687 +++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45688 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45689 struct task_struct *task;
45690 struct nsproxy *ns;
45691 struct net *net = NULL;
45692 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45693 + const struct cred *cred = current_cred();
45694 +#endif
45695 +
45696 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45697 + if (cred->fsuid)
45698 + return net;
45699 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45700 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45701 + return net;
45702 +#endif
45703
45704 rcu_read_lock();
45705 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45706 diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45707 --- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45708 +++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45709 @@ -7,6 +7,8 @@
45710 #include <linux/security.h>
45711 #include "internal.h"
45712
45713 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45714 +
45715 static const struct dentry_operations proc_sys_dentry_operations;
45716 static const struct file_operations proc_sys_file_operations;
45717 static const struct inode_operations proc_sys_inode_operations;
45718 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45719 if (!p)
45720 goto out;
45721
45722 + if (gr_handle_sysctl(p, MAY_EXEC))
45723 + goto out;
45724 +
45725 err = ERR_PTR(-ENOMEM);
45726 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45727 if (h)
45728 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45729 if (*pos < file->f_pos)
45730 continue;
45731
45732 + if (gr_handle_sysctl(table, 0))
45733 + continue;
45734 +
45735 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45736 if (res)
45737 return res;
45738 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45739 if (IS_ERR(head))
45740 return PTR_ERR(head);
45741
45742 + if (table && gr_handle_sysctl(table, MAY_EXEC))
45743 + return -ENOENT;
45744 +
45745 generic_fillattr(inode, stat);
45746 if (table)
45747 stat->mode = (stat->mode & S_IFMT) | table->mode;
45748 diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45749 --- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45750 +++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45751 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
45752 #ifdef CONFIG_PROC_DEVICETREE
45753 proc_device_tree_init();
45754 #endif
45755 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45756 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45757 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45758 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45759 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45760 +#endif
45761 +#else
45762 proc_mkdir("bus", NULL);
45763 +#endif
45764 proc_sys_init();
45765 }
45766
45767 diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45768 --- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45769 +++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45770 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45771 "VmStk:\t%8lu kB\n"
45772 "VmExe:\t%8lu kB\n"
45773 "VmLib:\t%8lu kB\n"
45774 - "VmPTE:\t%8lu kB\n",
45775 - hiwater_vm << (PAGE_SHIFT-10),
45776 + "VmPTE:\t%8lu kB\n"
45777 +
45778 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45779 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45780 +#endif
45781 +
45782 + ,hiwater_vm << (PAGE_SHIFT-10),
45783 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45784 mm->locked_vm << (PAGE_SHIFT-10),
45785 hiwater_rss << (PAGE_SHIFT-10),
45786 total_rss << (PAGE_SHIFT-10),
45787 data << (PAGE_SHIFT-10),
45788 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45789 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45790 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45791 +
45792 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45793 + , mm->context.user_cs_base, mm->context.user_cs_limit
45794 +#endif
45795 +
45796 + );
45797 }
45798
45799 unsigned long task_vsize(struct mm_struct *mm)
45800 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45801 struct proc_maps_private *priv = m->private;
45802 struct vm_area_struct *vma = v;
45803
45804 - vma_stop(priv, vma);
45805 + if (!IS_ERR(vma))
45806 + vma_stop(priv, vma);
45807 if (priv->task)
45808 put_task_struct(priv->task);
45809 }
45810 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45811 return ret;
45812 }
45813
45814 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45815 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45816 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45817 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45818 +#endif
45819 +
45820 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45821 {
45822 struct mm_struct *mm = vma->vm_mm;
45823 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45824 int flags = vma->vm_flags;
45825 unsigned long ino = 0;
45826 unsigned long long pgoff = 0;
45827 - unsigned long start;
45828 dev_t dev = 0;
45829 int len;
45830
45831 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45832 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45833 }
45834
45835 - /* We don't show the stack guard page in /proc/maps */
45836 - start = vma->vm_start;
45837 - if (vma->vm_flags & VM_GROWSDOWN)
45838 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45839 - start += PAGE_SIZE;
45840 -
45841 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45842 - start,
45843 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45844 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45845 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45846 +#else
45847 + vma->vm_start,
45848 vma->vm_end,
45849 +#endif
45850 flags & VM_READ ? 'r' : '-',
45851 flags & VM_WRITE ? 'w' : '-',
45852 flags & VM_EXEC ? 'x' : '-',
45853 flags & VM_MAYSHARE ? 's' : 'p',
45854 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45855 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45856 +#else
45857 pgoff,
45858 +#endif
45859 MAJOR(dev), MINOR(dev), ino, &len);
45860
45861 /*
45862 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45863 */
45864 if (file) {
45865 pad_len_spaces(m, len);
45866 - seq_path(m, &file->f_path, "\n");
45867 + seq_path(m, &file->f_path, "\n\\");
45868 } else {
45869 const char *name = arch_vma_name(vma);
45870 if (!name) {
45871 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45872 if (vma->vm_start <= mm->brk &&
45873 vma->vm_end >= mm->start_brk) {
45874 name = "[heap]";
45875 - } else if (vma->vm_start <= mm->start_stack &&
45876 - vma->vm_end >= mm->start_stack) {
45877 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45878 + (vma->vm_start <= mm->start_stack &&
45879 + vma->vm_end >= mm->start_stack)) {
45880 name = "[stack]";
45881 }
45882 } else {
45883 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45884 };
45885
45886 memset(&mss, 0, sizeof mss);
45887 - mss.vma = vma;
45888 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45889 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45890 +
45891 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45892 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45893 +#endif
45894 + mss.vma = vma;
45895 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45896 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45897 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45898 + }
45899 +#endif
45900
45901 show_map_vma(m, vma);
45902
45903 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45904 "Swap: %8lu kB\n"
45905 "KernelPageSize: %8lu kB\n"
45906 "MMUPageSize: %8lu kB\n",
45907 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45908 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45909 +#else
45910 (vma->vm_end - vma->vm_start) >> 10,
45911 +#endif
45912 mss.resident >> 10,
45913 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45914 mss.shared_clean >> 10,
45915 diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45916 --- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45917 +++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45918 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45919 else
45920 bytes += kobjsize(mm);
45921
45922 - if (current->fs && current->fs->users > 1)
45923 + if (current->fs && atomic_read(&current->fs->users) > 1)
45924 sbytes += kobjsize(current->fs);
45925 else
45926 bytes += kobjsize(current->fs);
45927 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45928 if (len < 1)
45929 len = 1;
45930 seq_printf(m, "%*c", len, ' ');
45931 - seq_path(m, &file->f_path, "");
45932 + seq_path(m, &file->f_path, "\n\\");
45933 }
45934
45935 seq_putc(m, '\n');
45936 diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45937 --- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45938 +++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45939 @@ -16,6 +16,7 @@
45940 #include <linux/security.h>
45941 #include <linux/syscalls.h>
45942 #include <linux/unistd.h>
45943 +#include <linux/namei.h>
45944
45945 #include <asm/uaccess.h>
45946
45947 @@ -67,6 +68,7 @@ struct old_linux_dirent {
45948
45949 struct readdir_callback {
45950 struct old_linux_dirent __user * dirent;
45951 + struct file * file;
45952 int result;
45953 };
45954
45955 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45956 buf->result = -EOVERFLOW;
45957 return -EOVERFLOW;
45958 }
45959 +
45960 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45961 + return 0;
45962 +
45963 buf->result++;
45964 dirent = buf->dirent;
45965 if (!access_ok(VERIFY_WRITE, dirent,
45966 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45967
45968 buf.result = 0;
45969 buf.dirent = dirent;
45970 + buf.file = file;
45971
45972 error = vfs_readdir(file, fillonedir, &buf);
45973 if (buf.result)
45974 @@ -142,6 +149,7 @@ struct linux_dirent {
45975 struct getdents_callback {
45976 struct linux_dirent __user * current_dir;
45977 struct linux_dirent __user * previous;
45978 + struct file * file;
45979 int count;
45980 int error;
45981 };
45982 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45983 buf->error = -EOVERFLOW;
45984 return -EOVERFLOW;
45985 }
45986 +
45987 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45988 + return 0;
45989 +
45990 dirent = buf->previous;
45991 if (dirent) {
45992 if (__put_user(offset, &dirent->d_off))
45993 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45994 buf.previous = NULL;
45995 buf.count = count;
45996 buf.error = 0;
45997 + buf.file = file;
45998
45999 error = vfs_readdir(file, filldir, &buf);
46000 if (error >= 0)
46001 @@ -228,6 +241,7 @@ out:
46002 struct getdents_callback64 {
46003 struct linux_dirent64 __user * current_dir;
46004 struct linux_dirent64 __user * previous;
46005 + struct file *file;
46006 int count;
46007 int error;
46008 };
46009 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
46010 buf->error = -EINVAL; /* only used if we fail.. */
46011 if (reclen > buf->count)
46012 return -EINVAL;
46013 +
46014 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46015 + return 0;
46016 +
46017 dirent = buf->previous;
46018 if (dirent) {
46019 if (__put_user(offset, &dirent->d_off))
46020 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46021
46022 buf.current_dir = dirent;
46023 buf.previous = NULL;
46024 + buf.file = file;
46025 buf.count = count;
46026 buf.error = 0;
46027
46028 diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
46029 --- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
46030 +++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
46031 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46032 struct reiserfs_dir_entry de;
46033 int ret = 0;
46034
46035 + pax_track_stack();
46036 +
46037 reiserfs_write_lock(inode->i_sb);
46038
46039 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46040 diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
46041 --- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
46042 +++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
46043 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
46044 return;
46045 }
46046
46047 - atomic_inc(&(fs_generation(tb->tb_sb)));
46048 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46049 do_balance_starts(tb);
46050
46051 /* balance leaf returns 0 except if combining L R and S into
46052 diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
46053 --- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
46054 +++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
46055 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
46056 vi->vi_index, vi->vi_type, vi->vi_ih);
46057 }
46058
46059 -static struct item_operations stat_data_ops = {
46060 +static const struct item_operations stat_data_ops = {
46061 .bytes_number = sd_bytes_number,
46062 .decrement_key = sd_decrement_key,
46063 .is_left_mergeable = sd_is_left_mergeable,
46064 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46065 vi->vi_index, vi->vi_type, vi->vi_ih);
46066 }
46067
46068 -static struct item_operations direct_ops = {
46069 +static const struct item_operations direct_ops = {
46070 .bytes_number = direct_bytes_number,
46071 .decrement_key = direct_decrement_key,
46072 .is_left_mergeable = direct_is_left_mergeable,
46073 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46074 vi->vi_index, vi->vi_type, vi->vi_ih);
46075 }
46076
46077 -static struct item_operations indirect_ops = {
46078 +static const struct item_operations indirect_ops = {
46079 .bytes_number = indirect_bytes_number,
46080 .decrement_key = indirect_decrement_key,
46081 .is_left_mergeable = indirect_is_left_mergeable,
46082 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46083 printk("\n");
46084 }
46085
46086 -static struct item_operations direntry_ops = {
46087 +static const struct item_operations direntry_ops = {
46088 .bytes_number = direntry_bytes_number,
46089 .decrement_key = direntry_decrement_key,
46090 .is_left_mergeable = direntry_is_left_mergeable,
46091 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46092 "Invalid item type observed, run fsck ASAP");
46093 }
46094
46095 -static struct item_operations errcatch_ops = {
46096 +static const struct item_operations errcatch_ops = {
46097 errcatch_bytes_number,
46098 errcatch_decrement_key,
46099 errcatch_is_left_mergeable,
46100 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46101 #error Item types must use disk-format assigned values.
46102 #endif
46103
46104 -struct item_operations *item_ops[TYPE_ANY + 1] = {
46105 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46106 &stat_data_ops,
46107 &indirect_ops,
46108 &direct_ops,
46109 diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
46110 --- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46111 +++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46112 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46113 struct buffer_head *bh;
46114 int i, j;
46115
46116 + pax_track_stack();
46117 +
46118 bh = __getblk(dev, block, bufsize);
46119 if (buffer_uptodate(bh))
46120 return (bh);
46121 diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
46122 --- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46123 +++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46124 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46125 unsigned long savelink = 1;
46126 struct timespec ctime;
46127
46128 + pax_track_stack();
46129 +
46130 /* three balancings: (1) old name removal, (2) new name insertion
46131 and (3) maybe "save" link insertion
46132 stat data updates: (1) old directory,
46133 diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
46134 --- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46135 +++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46136 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46137 "SMALL_TAILS " : "NO_TAILS ",
46138 replay_only(sb) ? "REPLAY_ONLY " : "",
46139 convert_reiserfs(sb) ? "CONV " : "",
46140 - atomic_read(&r->s_generation_counter),
46141 + atomic_read_unchecked(&r->s_generation_counter),
46142 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46143 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46144 SF(s_good_search_by_key_reada), SF(s_bmaps),
46145 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46146 struct journal_params *jp = &rs->s_v1.s_journal;
46147 char b[BDEVNAME_SIZE];
46148
46149 + pax_track_stack();
46150 +
46151 seq_printf(m, /* on-disk fields */
46152 "jp_journal_1st_block: \t%i\n"
46153 "jp_journal_dev: \t%s[%x]\n"
46154 diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
46155 --- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46156 +++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46157 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46158 int iter = 0;
46159 #endif
46160
46161 + pax_track_stack();
46162 +
46163 BUG_ON(!th->t_trans_id);
46164
46165 init_tb_struct(th, &s_del_balance, sb, path,
46166 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46167 int retval;
46168 int quota_cut_bytes = 0;
46169
46170 + pax_track_stack();
46171 +
46172 BUG_ON(!th->t_trans_id);
46173
46174 le_key2cpu_key(&cpu_key, key);
46175 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46176 int quota_cut_bytes;
46177 loff_t tail_pos = 0;
46178
46179 + pax_track_stack();
46180 +
46181 BUG_ON(!th->t_trans_id);
46182
46183 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46184 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46185 int retval;
46186 int fs_gen;
46187
46188 + pax_track_stack();
46189 +
46190 BUG_ON(!th->t_trans_id);
46191
46192 fs_gen = get_generation(inode->i_sb);
46193 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46194 int fs_gen = 0;
46195 int quota_bytes = 0;
46196
46197 + pax_track_stack();
46198 +
46199 BUG_ON(!th->t_trans_id);
46200
46201 if (inode) { /* Do we count quotas for item? */
46202 diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
46203 --- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46204 +++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46205 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46206 {.option_name = NULL}
46207 };
46208
46209 + pax_track_stack();
46210 +
46211 *blocks = 0;
46212 if (!options || !*options)
46213 /* use default configuration: create tails, journaling on, no
46214 diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
46215 --- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46216 +++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46217 @@ -20,6 +20,7 @@
46218 #include <linux/module.h>
46219 #include <linux/slab.h>
46220 #include <linux/poll.h>
46221 +#include <linux/security.h>
46222 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46223 #include <linux/file.h>
46224 #include <linux/fdtable.h>
46225 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46226 int retval, i, timed_out = 0;
46227 unsigned long slack = 0;
46228
46229 + pax_track_stack();
46230 +
46231 rcu_read_lock();
46232 retval = max_select_fd(n, fds);
46233 rcu_read_unlock();
46234 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46235 /* Allocate small arguments on the stack to save memory and be faster */
46236 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46237
46238 + pax_track_stack();
46239 +
46240 ret = -EINVAL;
46241 if (n < 0)
46242 goto out_nofds;
46243 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46244 struct poll_list *walk = head;
46245 unsigned long todo = nfds;
46246
46247 + pax_track_stack();
46248 +
46249 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46250 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46251 return -EINVAL;
46252
46253 diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
46254 --- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46255 +++ linux-2.6.32.45/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46256 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46257 return 0;
46258 }
46259 if (!m->buf) {
46260 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46261 + m->size = PAGE_SIZE;
46262 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46263 if (!m->buf)
46264 return -ENOMEM;
46265 }
46266 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46267 Eoverflow:
46268 m->op->stop(m, p);
46269 kfree(m->buf);
46270 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46271 + m->size <<= 1;
46272 + m->buf = kmalloc(m->size, GFP_KERNEL);
46273 return !m->buf ? -ENOMEM : -EAGAIN;
46274 }
46275
46276 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46277 m->version = file->f_version;
46278 /* grab buffer if we didn't have one */
46279 if (!m->buf) {
46280 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46281 + m->size = PAGE_SIZE;
46282 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46283 if (!m->buf)
46284 goto Enomem;
46285 }
46286 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46287 goto Fill;
46288 m->op->stop(m, p);
46289 kfree(m->buf);
46290 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46291 + m->size <<= 1;
46292 + m->buf = kmalloc(m->size, GFP_KERNEL);
46293 if (!m->buf)
46294 goto Enomem;
46295 m->count = 0;
46296 @@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46297 int res = -ENOMEM;
46298
46299 if (op) {
46300 - op->start = single_start;
46301 - op->next = single_next;
46302 - op->stop = single_stop;
46303 - op->show = show;
46304 + *(void **)&op->start = single_start;
46305 + *(void **)&op->next = single_next;
46306 + *(void **)&op->stop = single_stop;
46307 + *(void **)&op->show = show;
46308 res = seq_open(file, op);
46309 if (!res)
46310 ((struct seq_file *)file->private_data)->private = data;
46311 diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
46312 --- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46313 +++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46314 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46315
46316 out:
46317 if (server->local_nls != NULL && server->remote_nls != NULL)
46318 - server->ops->convert = convert_cp;
46319 + *(void **)&server->ops->convert = convert_cp;
46320 else
46321 - server->ops->convert = convert_memcpy;
46322 + *(void **)&server->ops->convert = convert_memcpy;
46323
46324 smb_unlock_server(server);
46325 return n;
46326 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46327
46328 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46329 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46330 - server->ops->getattr = smb_proc_getattr_core;
46331 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
46332 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46333 - server->ops->getattr = smb_proc_getattr_ff;
46334 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46335 }
46336
46337 /* Decode server capabilities */
46338 @@ -3439,7 +3439,7 @@ out:
46339 static void
46340 install_ops(struct smb_ops *dst, struct smb_ops *src)
46341 {
46342 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46343 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46344 }
46345
46346 /* < LANMAN2 */
46347 diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46348 --- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46349 +++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46350 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46351
46352 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46353 {
46354 - char *s = nd_get_link(nd);
46355 + const char *s = nd_get_link(nd);
46356 if (!IS_ERR(s))
46357 __putname(s);
46358 }
46359 diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46360 --- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46361 +++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46362 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46363 pipe_lock(pipe);
46364
46365 for (;;) {
46366 - if (!pipe->readers) {
46367 + if (!atomic_read(&pipe->readers)) {
46368 send_sig(SIGPIPE, current, 0);
46369 if (!ret)
46370 ret = -EPIPE;
46371 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46372 do_wakeup = 0;
46373 }
46374
46375 - pipe->waiting_writers++;
46376 + atomic_inc(&pipe->waiting_writers);
46377 pipe_wait(pipe);
46378 - pipe->waiting_writers--;
46379 + atomic_dec(&pipe->waiting_writers);
46380 }
46381
46382 pipe_unlock(pipe);
46383 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46384 .spd_release = spd_release_page,
46385 };
46386
46387 + pax_track_stack();
46388 +
46389 index = *ppos >> PAGE_CACHE_SHIFT;
46390 loff = *ppos & ~PAGE_CACHE_MASK;
46391 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46392 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46393 old_fs = get_fs();
46394 set_fs(get_ds());
46395 /* The cast to a user pointer is valid due to the set_fs() */
46396 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46397 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46398 set_fs(old_fs);
46399
46400 return res;
46401 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46402 old_fs = get_fs();
46403 set_fs(get_ds());
46404 /* The cast to a user pointer is valid due to the set_fs() */
46405 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46406 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46407 set_fs(old_fs);
46408
46409 return res;
46410 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46411 .spd_release = spd_release_page,
46412 };
46413
46414 + pax_track_stack();
46415 +
46416 index = *ppos >> PAGE_CACHE_SHIFT;
46417 offset = *ppos & ~PAGE_CACHE_MASK;
46418 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46419 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46420 goto err;
46421
46422 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46423 - vec[i].iov_base = (void __user *) page_address(page);
46424 + vec[i].iov_base = (__force void __user *) page_address(page);
46425 vec[i].iov_len = this_len;
46426 pages[i] = page;
46427 spd.nr_pages++;
46428 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46429 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46430 {
46431 while (!pipe->nrbufs) {
46432 - if (!pipe->writers)
46433 + if (!atomic_read(&pipe->writers))
46434 return 0;
46435
46436 - if (!pipe->waiting_writers && sd->num_spliced)
46437 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46438 return 0;
46439
46440 if (sd->flags & SPLICE_F_NONBLOCK)
46441 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46442 * out of the pipe right after the splice_to_pipe(). So set
46443 * PIPE_READERS appropriately.
46444 */
46445 - pipe->readers = 1;
46446 + atomic_set(&pipe->readers, 1);
46447
46448 current->splice_pipe = pipe;
46449 }
46450 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46451 .spd_release = spd_release_page,
46452 };
46453
46454 + pax_track_stack();
46455 +
46456 pipe = pipe_info(file->f_path.dentry->d_inode);
46457 if (!pipe)
46458 return -EBADF;
46459 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46460 ret = -ERESTARTSYS;
46461 break;
46462 }
46463 - if (!pipe->writers)
46464 + if (!atomic_read(&pipe->writers))
46465 break;
46466 - if (!pipe->waiting_writers) {
46467 + if (!atomic_read(&pipe->waiting_writers)) {
46468 if (flags & SPLICE_F_NONBLOCK) {
46469 ret = -EAGAIN;
46470 break;
46471 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46472 pipe_lock(pipe);
46473
46474 while (pipe->nrbufs >= PIPE_BUFFERS) {
46475 - if (!pipe->readers) {
46476 + if (!atomic_read(&pipe->readers)) {
46477 send_sig(SIGPIPE, current, 0);
46478 ret = -EPIPE;
46479 break;
46480 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46481 ret = -ERESTARTSYS;
46482 break;
46483 }
46484 - pipe->waiting_writers++;
46485 + atomic_inc(&pipe->waiting_writers);
46486 pipe_wait(pipe);
46487 - pipe->waiting_writers--;
46488 + atomic_dec(&pipe->waiting_writers);
46489 }
46490
46491 pipe_unlock(pipe);
46492 @@ -1785,14 +1791,14 @@ retry:
46493 pipe_double_lock(ipipe, opipe);
46494
46495 do {
46496 - if (!opipe->readers) {
46497 + if (!atomic_read(&opipe->readers)) {
46498 send_sig(SIGPIPE, current, 0);
46499 if (!ret)
46500 ret = -EPIPE;
46501 break;
46502 }
46503
46504 - if (!ipipe->nrbufs && !ipipe->writers)
46505 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46506 break;
46507
46508 /*
46509 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46510 pipe_double_lock(ipipe, opipe);
46511
46512 do {
46513 - if (!opipe->readers) {
46514 + if (!atomic_read(&opipe->readers)) {
46515 send_sig(SIGPIPE, current, 0);
46516 if (!ret)
46517 ret = -EPIPE;
46518 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46519 * return EAGAIN if we have the potential of some data in the
46520 * future, otherwise just return 0
46521 */
46522 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46523 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46524 ret = -EAGAIN;
46525
46526 pipe_unlock(ipipe);
46527 diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46528 --- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46529 +++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46530 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46531
46532 struct sysfs_open_dirent {
46533 atomic_t refcnt;
46534 - atomic_t event;
46535 + atomic_unchecked_t event;
46536 wait_queue_head_t poll;
46537 struct list_head buffers; /* goes through sysfs_buffer.list */
46538 };
46539 @@ -53,7 +53,7 @@ struct sysfs_buffer {
46540 size_t count;
46541 loff_t pos;
46542 char * page;
46543 - struct sysfs_ops * ops;
46544 + const struct sysfs_ops * ops;
46545 struct mutex mutex;
46546 int needs_read_fill;
46547 int event;
46548 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46549 {
46550 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46551 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46552 - struct sysfs_ops * ops = buffer->ops;
46553 + const struct sysfs_ops * ops = buffer->ops;
46554 int ret = 0;
46555 ssize_t count;
46556
46557 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46558 if (!sysfs_get_active_two(attr_sd))
46559 return -ENODEV;
46560
46561 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46562 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46563 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46564
46565 sysfs_put_active_two(attr_sd);
46566 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46567 {
46568 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46569 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46570 - struct sysfs_ops * ops = buffer->ops;
46571 + const struct sysfs_ops * ops = buffer->ops;
46572 int rc;
46573
46574 /* need attr_sd for attr and ops, its parent for kobj */
46575 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46576 return -ENOMEM;
46577
46578 atomic_set(&new_od->refcnt, 0);
46579 - atomic_set(&new_od->event, 1);
46580 + atomic_set_unchecked(&new_od->event, 1);
46581 init_waitqueue_head(&new_od->poll);
46582 INIT_LIST_HEAD(&new_od->buffers);
46583 goto retry;
46584 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46585 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46586 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46587 struct sysfs_buffer *buffer;
46588 - struct sysfs_ops *ops;
46589 + const struct sysfs_ops *ops;
46590 int error = -EACCES;
46591 char *p;
46592
46593 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46594
46595 sysfs_put_active_two(attr_sd);
46596
46597 - if (buffer->event != atomic_read(&od->event))
46598 + if (buffer->event != atomic_read_unchecked(&od->event))
46599 goto trigger;
46600
46601 return DEFAULT_POLLMASK;
46602 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46603
46604 od = sd->s_attr.open;
46605 if (od) {
46606 - atomic_inc(&od->event);
46607 + atomic_inc_unchecked(&od->event);
46608 wake_up_interruptible(&od->poll);
46609 }
46610
46611 diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46612 --- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46613 +++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46614 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46615 .s_name = "",
46616 .s_count = ATOMIC_INIT(1),
46617 .s_flags = SYSFS_DIR,
46618 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46619 + .s_mode = S_IFDIR | S_IRWXU,
46620 +#else
46621 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46622 +#endif
46623 .s_ino = 1,
46624 };
46625
46626 diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46627 --- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46628 +++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46629 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46630
46631 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46632 {
46633 - char *page = nd_get_link(nd);
46634 + const char *page = nd_get_link(nd);
46635 if (!IS_ERR(page))
46636 free_page((unsigned long)page);
46637 }
46638 diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46639 --- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46640 +++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46641 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46642
46643 mutex_lock(&sbi->s_alloc_mutex);
46644 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46645 - if (bloc->logicalBlockNum < 0 ||
46646 - (bloc->logicalBlockNum + count) >
46647 - partmap->s_partition_len) {
46648 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46649 udf_debug("%d < %d || %d + %d > %d\n",
46650 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46651 count, partmap->s_partition_len);
46652 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46653
46654 mutex_lock(&sbi->s_alloc_mutex);
46655 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46656 - if (bloc->logicalBlockNum < 0 ||
46657 - (bloc->logicalBlockNum + count) >
46658 - partmap->s_partition_len) {
46659 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46660 udf_debug("%d < %d || %d + %d > %d\n",
46661 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46662 partmap->s_partition_len);
46663 diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46664 --- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46665 +++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46666 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46667 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46668 int lastblock = 0;
46669
46670 + pax_track_stack();
46671 +
46672 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46673 prev_epos.block = iinfo->i_location;
46674 prev_epos.bh = NULL;
46675 diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46676 --- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46677 +++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46678 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46679
46680 u8 udf_tag_checksum(const struct tag *t)
46681 {
46682 - u8 *data = (u8 *)t;
46683 + const u8 *data = (const u8 *)t;
46684 u8 checksum = 0;
46685 int i;
46686 for (i = 0; i < sizeof(struct tag); ++i)
46687 diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46688 --- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46689 +++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46690 @@ -1,6 +1,7 @@
46691 #include <linux/compiler.h>
46692 #include <linux/file.h>
46693 #include <linux/fs.h>
46694 +#include <linux/security.h>
46695 #include <linux/linkage.h>
46696 #include <linux/mount.h>
46697 #include <linux/namei.h>
46698 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46699 goto mnt_drop_write_and_out;
46700 }
46701 }
46702 +
46703 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46704 + error = -EACCES;
46705 + goto mnt_drop_write_and_out;
46706 + }
46707 +
46708 mutex_lock(&inode->i_mutex);
46709 error = notify_change(path->dentry, &newattrs);
46710 mutex_unlock(&inode->i_mutex);
46711 diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46712 --- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46713 +++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46714 @@ -17,8 +17,8 @@
46715 struct posix_acl *
46716 posix_acl_from_xattr(const void *value, size_t size)
46717 {
46718 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46719 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46720 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46721 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46722 int count;
46723 struct posix_acl *acl;
46724 struct posix_acl_entry *acl_e;
46725 diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46726 --- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46727 +++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46728 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46729 * Extended attribute SET operations
46730 */
46731 static long
46732 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46733 +setxattr(struct path *path, const char __user *name, const void __user *value,
46734 size_t size, int flags)
46735 {
46736 int error;
46737 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46738 return PTR_ERR(kvalue);
46739 }
46740
46741 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46742 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46743 + error = -EACCES;
46744 + goto out;
46745 + }
46746 +
46747 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46748 +out:
46749 kfree(kvalue);
46750 return error;
46751 }
46752 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46753 return error;
46754 error = mnt_want_write(path.mnt);
46755 if (!error) {
46756 - error = setxattr(path.dentry, name, value, size, flags);
46757 + error = setxattr(&path, name, value, size, flags);
46758 mnt_drop_write(path.mnt);
46759 }
46760 path_put(&path);
46761 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46762 return error;
46763 error = mnt_want_write(path.mnt);
46764 if (!error) {
46765 - error = setxattr(path.dentry, name, value, size, flags);
46766 + error = setxattr(&path, name, value, size, flags);
46767 mnt_drop_write(path.mnt);
46768 }
46769 path_put(&path);
46770 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46771 const void __user *,value, size_t, size, int, flags)
46772 {
46773 struct file *f;
46774 - struct dentry *dentry;
46775 int error = -EBADF;
46776
46777 f = fget(fd);
46778 if (!f)
46779 return error;
46780 - dentry = f->f_path.dentry;
46781 - audit_inode(NULL, dentry);
46782 + audit_inode(NULL, f->f_path.dentry);
46783 error = mnt_want_write_file(f);
46784 if (!error) {
46785 - error = setxattr(dentry, name, value, size, flags);
46786 + error = setxattr(&f->f_path, name, value, size, flags);
46787 mnt_drop_write(f->f_path.mnt);
46788 }
46789 fput(f);
46790 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46791 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46792 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46793 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46794 xfs_fsop_geom_t fsgeo;
46795 int error;
46796
46797 + memset(&fsgeo, 0, sizeof(fsgeo));
46798 error = xfs_fs_geometry(mp, &fsgeo, 3);
46799 if (error)
46800 return -error;
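The one-line memset() added above is the classic fix for a partially initialised stack structure that is later copied out to user space: any field or padding byte the fill routine does not touch would otherwise carry stale kernel stack contents. A minimal user-space sketch of the same discipline (struct geometry and fill_geometry are made up for illustration):

#include <string.h>
#include <stdio.h>

struct geometry {               /* illustrative layout, not the xfs structure */
        unsigned int blocksize;
        unsigned int blockcount;
        char         pad[8];    /* never written by fill_geometry() */
};

static void fill_geometry(struct geometry *g)
{
        g->blocksize  = 4096;
        g->blockcount = 1024;
        /* pad[] intentionally left untouched */
}

int main(void)
{
        struct geometry g;

        memset(&g, 0, sizeof(g));          /* zero first, so untouched bytes cannot leak */
        fill_geometry(&g);
        fwrite(&g, sizeof(g), 1, stdout);  /* stands in for copy_to_user() */
        return 0;
}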
46801 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46802 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46803 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46804 @@ -134,7 +134,7 @@ xfs_find_handle(
46805 }
46806
46807 error = -EFAULT;
46808 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46809 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46810 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46811 goto out_put;
46812
46813 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46814 if (IS_ERR(dentry))
46815 return PTR_ERR(dentry);
46816
46817 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46818 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46819 if (!kbuf)
46820 goto out_dput;
46821
46822 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46823 xfs_mount_t *mp,
46824 void __user *arg)
46825 {
46826 - xfs_fsop_geom_t fsgeo;
46827 + xfs_fsop_geom_t fsgeo;
46828 int error;
46829
46830 error = xfs_fs_geometry(mp, &fsgeo, 3);
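Two small hardening patterns appear in the xfs_ioctl.c hunks: the handle copy is capped at sizeof(handle) so a caller-supplied hsize can never read past the stack object, and the attrlist buffer is allocated zeroed (kzalloc), presumably so stale heap bytes in any uncopied portion cannot reach user space. A hedged user-space sketch of the bounds check (struct handle and copy_out are placeholders, not the xfs types):

#include <string.h>
#include <errno.h>

struct handle { unsigned char bytes[24]; };     /* placeholder, not the xfs handle */

/* refuse to copy more than the object we actually have */
static int copy_out(void *dst, const struct handle *h, size_t requested)
{
        if (requested > sizeof(*h))
                return -EFAULT;                 /* mirrors the added hsize check */
        memcpy(dst, h, requested);              /* stands in for copy_to_user() */
        return 0;
}

int main(void)
{
        struct handle h = { { 0 } };
        unsigned char out[64];

        return copy_out(out, &h, sizeof(h) + 1) == -EFAULT ? 0 : 1;
}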
46831 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46832 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46833 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46834 @@ -468,7 +468,7 @@ xfs_vn_put_link(
46835 struct nameidata *nd,
46836 void *p)
46837 {
46838 - char *s = nd_get_link(nd);
46839 + const char *s = nd_get_link(nd);
46840
46841 if (!IS_ERR(s))
46842 kfree(s);
46843 diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46844 --- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46845 +++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46846 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46847 int nmap,
46848 int ret_nmap);
46849 #else
46850 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46851 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46852 #endif /* DEBUG */
46853
46854 #if defined(XFS_RW_TRACE)
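Replacing the empty debug macro body with do {} while (0) is the standard idiom for making a no-op macro behave like a real statement: with a truly empty expansion, a forgotten semicolon can silently pull the next statement under the preceding if, and some compilers warn about the empty body that remains when the semicolon is present. The do/while form requires the trailing semicolon and always expands to one well-formed statement. A small sketch:

#include <stdio.h>

#ifdef DEBUG
#define validate(x) printf("validate %d\n", (x))
#else
/* an empty "#define validate(x)" here would turn
 *     if (cond) validate(x)      <- missing ';' goes unnoticed
 *         do_work();
 * into "if (cond) do_work();", silently making do_work() conditional.
 * The do/while(0) body forces the ';' and stays a single statement. */
#define validate(x) do { } while (0)
#endif

static void do_work(void) { puts("working"); }

int main(void)
{
        int cond = 0;

        if (cond)
                validate(1);
        do_work();              /* always runs, as intended */
        return 0;
}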
46855 diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46856 --- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46857 +++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46858 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46859 }
46860
46861 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46862 - if (filldir(dirent, sfep->name, sfep->namelen,
46863 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46864 + char name[sfep->namelen];
46865 + memcpy(name, sfep->name, sfep->namelen);
46866 + if (filldir(dirent, name, sfep->namelen,
46867 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46868 + *offset = off & 0x7fffffff;
46869 + return 0;
46870 + }
46871 + } else if (filldir(dirent, sfep->name, sfep->namelen,
46872 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46873 *offset = off & 0x7fffffff;
46874 return 0;
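The apparent intent of the xfs_dir2_sf.c hunk (the patch carries no changelog here, so this is a reading, not a statement of the author's rationale) is that when the short-form entries live in the inode's inline data area, the name is first copied into a temporary stack buffer so filldir() never receives a pointer into that inline region. The copy-before-callback pattern itself is easy to show in isolation; emit_name, emit_from_record and the 64-byte cap below are illustrative, not xfs limits:

#include <string.h>
#include <stdio.h>

/* stand-in for filldir(): only ever sees the temporary buffer */
static int emit_name(const char *name, size_t len)
{
        return fwrite(name, 1, len, stdout) != len;
}

static int emit_from_record(const char *record, size_t name_off, size_t name_len)
{
        char tmp[64];                               /* illustrative cap on the name length */

        if (name_len > sizeof(tmp))
                return -1;
        memcpy(tmp, record + name_off, name_len);   /* detach the name from its backing record */
        return emit_name(tmp, name_len);
}

int main(void)
{
        const char record[] = "?hello-rest-of-record";

        return emit_from_record(record, 1, 5);      /* prints "hello" */
}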
46875 diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46876 --- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46877 +++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46878 @@ -0,0 +1,105 @@
46879 +#include <linux/kernel.h>
46880 +#include <linux/mm.h>
46881 +#include <linux/slab.h>
46882 +#include <linux/vmalloc.h>
46883 +#include <linux/gracl.h>
46884 +#include <linux/grsecurity.h>
46885 +
46886 +static unsigned long alloc_stack_next = 1;
46887 +static unsigned long alloc_stack_size = 1;
46888 +static void **alloc_stack;
46889 +
46890 +static __inline__ int
46891 +alloc_pop(void)
46892 +{
46893 + if (alloc_stack_next == 1)
46894 + return 0;
46895 +
46896 + kfree(alloc_stack[alloc_stack_next - 2]);
46897 +
46898 + alloc_stack_next--;
46899 +
46900 + return 1;
46901 +}
46902 +
46903 +static __inline__ int
46904 +alloc_push(void *buf)
46905 +{
46906 + if (alloc_stack_next >= alloc_stack_size)
46907 + return 1;
46908 +
46909 + alloc_stack[alloc_stack_next - 1] = buf;
46910 +
46911 + alloc_stack_next++;
46912 +
46913 + return 0;
46914 +}
46915 +
46916 +void *
46917 +acl_alloc(unsigned long len)
46918 +{
46919 + void *ret = NULL;
46920 +
46921 + if (!len || len > PAGE_SIZE)
46922 + goto out;
46923 +
46924 + ret = kmalloc(len, GFP_KERNEL);
46925 +
46926 + if (ret) {
46927 + if (alloc_push(ret)) {
46928 + kfree(ret);
46929 + ret = NULL;
46930 + }
46931 + }
46932 +
46933 +out:
46934 + return ret;
46935 +}
46936 +
46937 +void *
46938 +acl_alloc_num(unsigned long num, unsigned long len)
46939 +{
46940 + if (!len || (num > (PAGE_SIZE / len)))
46941 + return NULL;
46942 +
46943 + return acl_alloc(num * len);
46944 +}
46945 +
46946 +void
46947 +acl_free_all(void)
46948 +{
46949 + if (gr_acl_is_enabled() || !alloc_stack)
46950 + return;
46951 +
46952 + while (alloc_pop()) ;
46953 +
46954 + if (alloc_stack) {
46955 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46956 + kfree(alloc_stack);
46957 + else
46958 + vfree(alloc_stack);
46959 + }
46960 +
46961 + alloc_stack = NULL;
46962 + alloc_stack_size = 1;
46963 + alloc_stack_next = 1;
46964 +
46965 + return;
46966 +}
46967 +
46968 +int
46969 +acl_alloc_stack_init(unsigned long size)
46970 +{
46971 + if ((size * sizeof (void *)) <= PAGE_SIZE)
46972 + alloc_stack =
46973 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46974 + else
46975 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
46976 +
46977 + alloc_stack_size = size;
46978 +
46979 + if (!alloc_stack)
46980 + return 0;
46981 + else
46982 + return 1;
46983 +}
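gracl_alloc.c above implements a small arena-style helper: every acl_alloc() result is also pushed onto a pre-sized pointer stack so acl_free_all() can release an entire loaded policy in one sweep, without tracking individual owners. A user-space analogue of the same bookkeeping, using plain malloc/free in place of kmalloc/vmalloc (arena_init, arena_alloc and arena_free_all are illustrative names):

#include <stdlib.h>
#include <string.h>

static void **stack;
static unsigned long stack_size, stack_next;

int arena_init(unsigned long nptrs)
{
        stack = calloc(nptrs, sizeof(void *));
        stack_size = nptrs;
        stack_next = 0;
        return stack != NULL;
}

void *arena_alloc(size_t len)
{
        void *p;

        if (!stack || stack_next >= stack_size || !len)
                return NULL;                    /* fixed capacity, like alloc_push() failing */
        p = malloc(len);
        if (p)
                stack[stack_next++] = p;        /* remember it for the bulk free */
        return p;
}

void arena_free_all(void)
{
        while (stack_next)
                free(stack[--stack_next]);      /* the alloc_pop() loop, in miniature */
        free(stack);
        stack = NULL;
        stack_size = 0;
}

int main(void)
{
        if (!arena_init(8))
                return 1;
        char *name = arena_alloc(16);
        if (name)
                strcpy(name, "role:admin");
        arena_free_all();                       /* everything released in one call */
        return 0;
}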
46984 diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46985 --- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46986 +++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46987 @@ -0,0 +1,4082 @@
46988 +#include <linux/kernel.h>
46989 +#include <linux/module.h>
46990 +#include <linux/sched.h>
46991 +#include <linux/mm.h>
46992 +#include <linux/file.h>
46993 +#include <linux/fs.h>
46994 +#include <linux/namei.h>
46995 +#include <linux/mount.h>
46996 +#include <linux/tty.h>
46997 +#include <linux/proc_fs.h>
46998 +#include <linux/smp_lock.h>
46999 +#include <linux/slab.h>
47000 +#include <linux/vmalloc.h>
47001 +#include <linux/types.h>
47002 +#include <linux/sysctl.h>
47003 +#include <linux/netdevice.h>
47004 +#include <linux/ptrace.h>
47005 +#include <linux/gracl.h>
47006 +#include <linux/gralloc.h>
47007 +#include <linux/grsecurity.h>
47008 +#include <linux/grinternal.h>
47009 +#include <linux/pid_namespace.h>
47010 +#include <linux/fdtable.h>
47011 +#include <linux/percpu.h>
47012 +
47013 +#include <asm/uaccess.h>
47014 +#include <asm/errno.h>
47015 +#include <asm/mman.h>
47016 +
47017 +static struct acl_role_db acl_role_set;
47018 +static struct name_db name_set;
47019 +static struct inodev_db inodev_set;
47020 +
47021 +/* for keeping track of userspace pointers used for subjects, so we
47022 + can share references in the kernel as well
47023 +*/
47024 +
47025 +static struct dentry *real_root;
47026 +static struct vfsmount *real_root_mnt;
47027 +
47028 +static struct acl_subj_map_db subj_map_set;
47029 +
47030 +static struct acl_role_label *default_role;
47031 +
47032 +static struct acl_role_label *role_list;
47033 +
47034 +static u16 acl_sp_role_value;
47035 +
47036 +extern char *gr_shared_page[4];
47037 +static DEFINE_MUTEX(gr_dev_mutex);
47038 +DEFINE_RWLOCK(gr_inode_lock);
47039 +
47040 +struct gr_arg *gr_usermode;
47041 +
47042 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
47043 +
47044 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47045 +extern void gr_clear_learn_entries(void);
47046 +
47047 +#ifdef CONFIG_GRKERNSEC_RESLOG
47048 +extern void gr_log_resource(const struct task_struct *task,
47049 + const int res, const unsigned long wanted, const int gt);
47050 +#endif
47051 +
47052 +unsigned char *gr_system_salt;
47053 +unsigned char *gr_system_sum;
47054 +
47055 +static struct sprole_pw **acl_special_roles = NULL;
47056 +static __u16 num_sprole_pws = 0;
47057 +
47058 +static struct acl_role_label *kernel_role = NULL;
47059 +
47060 +static unsigned int gr_auth_attempts = 0;
47061 +static unsigned long gr_auth_expires = 0UL;
47062 +
47063 +#ifdef CONFIG_NET
47064 +extern struct vfsmount *sock_mnt;
47065 +#endif
47066 +extern struct vfsmount *pipe_mnt;
47067 +extern struct vfsmount *shm_mnt;
47068 +#ifdef CONFIG_HUGETLBFS
47069 +extern struct vfsmount *hugetlbfs_vfsmount;
47070 +#endif
47071 +
47072 +static struct acl_object_label *fakefs_obj_rw;
47073 +static struct acl_object_label *fakefs_obj_rwx;
47074 +
47075 +extern int gr_init_uidset(void);
47076 +extern void gr_free_uidset(void);
47077 +extern void gr_remove_uid(uid_t uid);
47078 +extern int gr_find_uid(uid_t uid);
47079 +
47080 +__inline__ int
47081 +gr_acl_is_enabled(void)
47082 +{
47083 + return (gr_status & GR_READY);
47084 +}
47085 +
47086 +#ifdef CONFIG_BTRFS_FS
47087 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47088 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47089 +#endif
47090 +
47091 +static inline dev_t __get_dev(const struct dentry *dentry)
47092 +{
47093 +#ifdef CONFIG_BTRFS_FS
47094 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47095 + return get_btrfs_dev_from_inode(dentry->d_inode);
47096 + else
47097 +#endif
47098 + return dentry->d_inode->i_sb->s_dev;
47099 +}
47100 +
47101 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47102 +{
47103 + return __get_dev(dentry);
47104 +}
47105 +
47106 +static char gr_task_roletype_to_char(struct task_struct *task)
47107 +{
47108 + switch (task->role->roletype &
47109 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47110 + GR_ROLE_SPECIAL)) {
47111 + case GR_ROLE_DEFAULT:
47112 + return 'D';
47113 + case GR_ROLE_USER:
47114 + return 'U';
47115 + case GR_ROLE_GROUP:
47116 + return 'G';
47117 + case GR_ROLE_SPECIAL:
47118 + return 'S';
47119 + }
47120 +
47121 + return 'X';
47122 +}
47123 +
47124 +char gr_roletype_to_char(void)
47125 +{
47126 + return gr_task_roletype_to_char(current);
47127 +}
47128 +
47129 +__inline__ int
47130 +gr_acl_tpe_check(void)
47131 +{
47132 + if (unlikely(!(gr_status & GR_READY)))
47133 + return 0;
47134 + if (current->role->roletype & GR_ROLE_TPE)
47135 + return 1;
47136 + else
47137 + return 0;
47138 +}
47139 +
47140 +int
47141 +gr_handle_rawio(const struct inode *inode)
47142 +{
47143 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47144 + if (inode && S_ISBLK(inode->i_mode) &&
47145 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47146 + !capable(CAP_SYS_RAWIO))
47147 + return 1;
47148 +#endif
47149 + return 0;
47150 +}
47151 +
47152 +static int
47153 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47154 +{
47155 + if (likely(lena != lenb))
47156 + return 0;
47157 +
47158 + return !memcmp(a, b, lena);
47159 +}
47160 +
47161 +/* this must be called with vfsmount_lock and dcache_lock held */
47162 +
47163 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47164 + struct dentry *root, struct vfsmount *rootmnt,
47165 + char *buffer, int buflen)
47166 +{
47167 + char * end = buffer+buflen;
47168 + char * retval;
47169 + int namelen;
47170 +
47171 + *--end = '\0';
47172 + buflen--;
47173 +
47174 + if (buflen < 1)
47175 + goto Elong;
47176 + /* Get '/' right */
47177 + retval = end-1;
47178 + *retval = '/';
47179 +
47180 + for (;;) {
47181 + struct dentry * parent;
47182 +
47183 + if (dentry == root && vfsmnt == rootmnt)
47184 + break;
47185 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47186 + /* Global root? */
47187 + if (vfsmnt->mnt_parent == vfsmnt)
47188 + goto global_root;
47189 + dentry = vfsmnt->mnt_mountpoint;
47190 + vfsmnt = vfsmnt->mnt_parent;
47191 + continue;
47192 + }
47193 + parent = dentry->d_parent;
47194 + prefetch(parent);
47195 + namelen = dentry->d_name.len;
47196 + buflen -= namelen + 1;
47197 + if (buflen < 0)
47198 + goto Elong;
47199 + end -= namelen;
47200 + memcpy(end, dentry->d_name.name, namelen);
47201 + *--end = '/';
47202 + retval = end;
47203 + dentry = parent;
47204 + }
47205 +
47206 +out:
47207 + return retval;
47208 +
47209 +global_root:
47210 + namelen = dentry->d_name.len;
47211 + buflen -= namelen;
47212 + if (buflen < 0)
47213 + goto Elong;
47214 + retval -= namelen-1; /* hit the slash */
47215 + memcpy(retval, dentry->d_name.name, namelen);
47216 + goto out;
47217 +Elong:
47218 + retval = ERR_PTR(-ENAMETOOLONG);
47219 + goto out;
47220 +}
47221 +
47222 +static char *
47223 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47224 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47225 +{
47226 + char *retval;
47227 +
47228 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47229 + if (unlikely(IS_ERR(retval)))
47230 + retval = strcpy(buf, "<path too long>");
47231 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47232 + retval[1] = '\0';
47233 +
47234 + return retval;
47235 +}
47236 +
47237 +static char *
47238 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47239 + char *buf, int buflen)
47240 +{
47241 + char *res;
47242 +
47243 + /* we can use real_root, real_root_mnt, because this is only called
47244 + by the RBAC system */
47245 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47246 +
47247 + return res;
47248 +}
47249 +
47250 +static char *
47251 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47252 + char *buf, int buflen)
47253 +{
47254 + char *res;
47255 + struct dentry *root;
47256 + struct vfsmount *rootmnt;
47257 + struct task_struct *reaper = &init_task;
47258 +
47259 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47260 + read_lock(&reaper->fs->lock);
47261 + root = dget(reaper->fs->root.dentry);
47262 + rootmnt = mntget(reaper->fs->root.mnt);
47263 + read_unlock(&reaper->fs->lock);
47264 +
47265 + spin_lock(&dcache_lock);
47266 + spin_lock(&vfsmount_lock);
47267 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47268 + spin_unlock(&vfsmount_lock);
47269 + spin_unlock(&dcache_lock);
47270 +
47271 + dput(root);
47272 + mntput(rootmnt);
47273 + return res;
47274 +}
47275 +
47276 +static char *
47277 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47278 +{
47279 + char *ret;
47280 + spin_lock(&dcache_lock);
47281 + spin_lock(&vfsmount_lock);
47282 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47283 + PAGE_SIZE);
47284 + spin_unlock(&vfsmount_lock);
47285 + spin_unlock(&dcache_lock);
47286 + return ret;
47287 +}
47288 +
47289 +char *
47290 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47291 +{
47292 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47293 + PAGE_SIZE);
47294 +}
47295 +
47296 +char *
47297 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47298 +{
47299 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47300 + PAGE_SIZE);
47301 +}
47302 +
47303 +char *
47304 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47305 +{
47306 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47307 + PAGE_SIZE);
47308 +}
47309 +
47310 +char *
47311 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47312 +{
47313 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47314 + PAGE_SIZE);
47315 +}
47316 +
47317 +char *
47318 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47319 +{
47320 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47321 + PAGE_SIZE);
47322 +}
47323 +
47324 +__inline__ __u32
47325 +to_gr_audit(const __u32 reqmode)
47326 +{
47327 + /* masks off auditable permission flags, then shifts them to create
47328 + auditing flags, and adds the special case of append auditing if
47329 + we're requesting write */
47330 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47331 +}
47332 +
47333 +struct acl_subject_label *
47334 +lookup_subject_map(const struct acl_subject_label *userp)
47335 +{
47336 + unsigned int index = shash(userp, subj_map_set.s_size);
47337 + struct subject_map *match;
47338 +
47339 + match = subj_map_set.s_hash[index];
47340 +
47341 + while (match && match->user != userp)
47342 + match = match->next;
47343 +
47344 + if (match != NULL)
47345 + return match->kernel;
47346 + else
47347 + return NULL;
47348 +}
47349 +
47350 +static void
47351 +insert_subj_map_entry(struct subject_map *subjmap)
47352 +{
47353 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47354 + struct subject_map **curr;
47355 +
47356 + subjmap->prev = NULL;
47357 +
47358 + curr = &subj_map_set.s_hash[index];
47359 + if (*curr != NULL)
47360 + (*curr)->prev = subjmap;
47361 +
47362 + subjmap->next = *curr;
47363 + *curr = subjmap;
47364 +
47365 + return;
47366 +}
47367 +
47368 +static struct acl_role_label *
47369 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47370 + const gid_t gid)
47371 +{
47372 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47373 + struct acl_role_label *match;
47374 + struct role_allowed_ip *ipp;
47375 + unsigned int x;
47376 + u32 curr_ip = task->signal->curr_ip;
47377 +
47378 + task->signal->saved_ip = curr_ip;
47379 +
47380 + match = acl_role_set.r_hash[index];
47381 +
47382 + while (match) {
47383 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47384 + for (x = 0; x < match->domain_child_num; x++) {
47385 + if (match->domain_children[x] == uid)
47386 + goto found;
47387 + }
47388 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47389 + break;
47390 + match = match->next;
47391 + }
47392 +found:
47393 + if (match == NULL) {
47394 + try_group:
47395 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47396 + match = acl_role_set.r_hash[index];
47397 +
47398 + while (match) {
47399 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47400 + for (x = 0; x < match->domain_child_num; x++) {
47401 + if (match->domain_children[x] == gid)
47402 + goto found2;
47403 + }
47404 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47405 + break;
47406 + match = match->next;
47407 + }
47408 +found2:
47409 + if (match == NULL)
47410 + match = default_role;
47411 + if (match->allowed_ips == NULL)
47412 + return match;
47413 + else {
47414 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47415 + if (likely
47416 + ((ntohl(curr_ip) & ipp->netmask) ==
47417 + (ntohl(ipp->addr) & ipp->netmask)))
47418 + return match;
47419 + }
47420 + match = default_role;
47421 + }
47422 + } else if (match->allowed_ips == NULL) {
47423 + return match;
47424 + } else {
47425 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47426 + if (likely
47427 + ((ntohl(curr_ip) & ipp->netmask) ==
47428 + (ntohl(ipp->addr) & ipp->netmask)))
47429 + return match;
47430 + }
47431 + goto try_group;
47432 + }
47433 +
47434 + return match;
47435 +}
47436 +
47437 +struct acl_subject_label *
47438 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47439 + const struct acl_role_label *role)
47440 +{
47441 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47442 + struct acl_subject_label *match;
47443 +
47444 + match = role->subj_hash[index];
47445 +
47446 + while (match && (match->inode != ino || match->device != dev ||
47447 + (match->mode & GR_DELETED))) {
47448 + match = match->next;
47449 + }
47450 +
47451 + if (match && !(match->mode & GR_DELETED))
47452 + return match;
47453 + else
47454 + return NULL;
47455 +}
47456 +
47457 +struct acl_subject_label *
47458 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47459 + const struct acl_role_label *role)
47460 +{
47461 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47462 + struct acl_subject_label *match;
47463 +
47464 + match = role->subj_hash[index];
47465 +
47466 + while (match && (match->inode != ino || match->device != dev ||
47467 + !(match->mode & GR_DELETED))) {
47468 + match = match->next;
47469 + }
47470 +
47471 + if (match && (match->mode & GR_DELETED))
47472 + return match;
47473 + else
47474 + return NULL;
47475 +}
47476 +
47477 +static struct acl_object_label *
47478 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47479 + const struct acl_subject_label *subj)
47480 +{
47481 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47482 + struct acl_object_label *match;
47483 +
47484 + match = subj->obj_hash[index];
47485 +
47486 + while (match && (match->inode != ino || match->device != dev ||
47487 + (match->mode & GR_DELETED))) {
47488 + match = match->next;
47489 + }
47490 +
47491 + if (match && !(match->mode & GR_DELETED))
47492 + return match;
47493 + else
47494 + return NULL;
47495 +}
47496 +
47497 +static struct acl_object_label *
47498 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47499 + const struct acl_subject_label *subj)
47500 +{
47501 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47502 + struct acl_object_label *match;
47503 +
47504 + match = subj->obj_hash[index];
47505 +
47506 + while (match && (match->inode != ino || match->device != dev ||
47507 + !(match->mode & GR_DELETED))) {
47508 + match = match->next;
47509 + }
47510 +
47511 + if (match && (match->mode & GR_DELETED))
47512 + return match;
47513 +
47514 + match = subj->obj_hash[index];
47515 +
47516 + while (match && (match->inode != ino || match->device != dev ||
47517 + (match->mode & GR_DELETED))) {
47518 + match = match->next;
47519 + }
47520 +
47521 + if (match && !(match->mode & GR_DELETED))
47522 + return match;
47523 + else
47524 + return NULL;
47525 +}
47526 +
47527 +static struct name_entry *
47528 +lookup_name_entry(const char *name)
47529 +{
47530 + unsigned int len = strlen(name);
47531 + unsigned int key = full_name_hash(name, len);
47532 + unsigned int index = key % name_set.n_size;
47533 + struct name_entry *match;
47534 +
47535 + match = name_set.n_hash[index];
47536 +
47537 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47538 + match = match->next;
47539 +
47540 + return match;
47541 +}
47542 +
47543 +static struct name_entry *
47544 +lookup_name_entry_create(const char *name)
47545 +{
47546 + unsigned int len = strlen(name);
47547 + unsigned int key = full_name_hash(name, len);
47548 + unsigned int index = key % name_set.n_size;
47549 + struct name_entry *match;
47550 +
47551 + match = name_set.n_hash[index];
47552 +
47553 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47554 + !match->deleted))
47555 + match = match->next;
47556 +
47557 + if (match && match->deleted)
47558 + return match;
47559 +
47560 + match = name_set.n_hash[index];
47561 +
47562 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47563 + match->deleted))
47564 + match = match->next;
47565 +
47566 + if (match && !match->deleted)
47567 + return match;
47568 + else
47569 + return NULL;
47570 +}
47571 +
47572 +static struct inodev_entry *
47573 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47574 +{
47575 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47576 + struct inodev_entry *match;
47577 +
47578 + match = inodev_set.i_hash[index];
47579 +
47580 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47581 + match = match->next;
47582 +
47583 + return match;
47584 +}
47585 +
47586 +static void
47587 +insert_inodev_entry(struct inodev_entry *entry)
47588 +{
47589 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47590 + inodev_set.i_size);
47591 + struct inodev_entry **curr;
47592 +
47593 + entry->prev = NULL;
47594 +
47595 + curr = &inodev_set.i_hash[index];
47596 + if (*curr != NULL)
47597 + (*curr)->prev = entry;
47598 +
47599 + entry->next = *curr;
47600 + *curr = entry;
47601 +
47602 + return;
47603 +}
47604 +
47605 +static void
47606 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47607 +{
47608 + unsigned int index =
47609 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47610 + struct acl_role_label **curr;
47611 + struct acl_role_label *tmp;
47612 +
47613 + curr = &acl_role_set.r_hash[index];
47614 +
47615 + /* if role was already inserted due to domains and already has
47616 + a role in the same bucket as it attached, then we need to
47617 + combine these two buckets
47618 + */
47619 + if (role->next) {
47620 + tmp = role->next;
47621 + while (tmp->next)
47622 + tmp = tmp->next;
47623 + tmp->next = *curr;
47624 + } else
47625 + role->next = *curr;
47626 + *curr = role;
47627 +
47628 + return;
47629 +}
47630 +
47631 +static void
47632 +insert_acl_role_label(struct acl_role_label *role)
47633 +{
47634 + int i;
47635 +
47636 + if (role_list == NULL) {
47637 + role_list = role;
47638 + role->prev = NULL;
47639 + } else {
47640 + role->prev = role_list;
47641 + role_list = role;
47642 + }
47643 +
47644 + /* used for hash chains */
47645 + role->next = NULL;
47646 +
47647 + if (role->roletype & GR_ROLE_DOMAIN) {
47648 + for (i = 0; i < role->domain_child_num; i++)
47649 + __insert_acl_role_label(role, role->domain_children[i]);
47650 + } else
47651 + __insert_acl_role_label(role, role->uidgid);
47652 +}
47653 +
47654 +static int
47655 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47656 +{
47657 + struct name_entry **curr, *nentry;
47658 + struct inodev_entry *ientry;
47659 + unsigned int len = strlen(name);
47660 + unsigned int key = full_name_hash(name, len);
47661 + unsigned int index = key % name_set.n_size;
47662 +
47663 + curr = &name_set.n_hash[index];
47664 +
47665 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47666 + curr = &((*curr)->next);
47667 +
47668 + if (*curr != NULL)
47669 + return 1;
47670 +
47671 + nentry = acl_alloc(sizeof (struct name_entry));
47672 + if (nentry == NULL)
47673 + return 0;
47674 + ientry = acl_alloc(sizeof (struct inodev_entry));
47675 + if (ientry == NULL)
47676 + return 0;
47677 + ientry->nentry = nentry;
47678 +
47679 + nentry->key = key;
47680 + nentry->name = name;
47681 + nentry->inode = inode;
47682 + nentry->device = device;
47683 + nentry->len = len;
47684 + nentry->deleted = deleted;
47685 +
47686 + nentry->prev = NULL;
47687 + curr = &name_set.n_hash[index];
47688 + if (*curr != NULL)
47689 + (*curr)->prev = nentry;
47690 + nentry->next = *curr;
47691 + *curr = nentry;
47692 +
47693 + /* insert us into the table searchable by inode/dev */
47694 + insert_inodev_entry(ientry);
47695 +
47696 + return 1;
47697 +}
47698 +
47699 +static void
47700 +insert_acl_obj_label(struct acl_object_label *obj,
47701 + struct acl_subject_label *subj)
47702 +{
47703 + unsigned int index =
47704 + fhash(obj->inode, obj->device, subj->obj_hash_size);
47705 + struct acl_object_label **curr;
47706 +
47707 +
47708 + obj->prev = NULL;
47709 +
47710 + curr = &subj->obj_hash[index];
47711 + if (*curr != NULL)
47712 + (*curr)->prev = obj;
47713 +
47714 + obj->next = *curr;
47715 + *curr = obj;
47716 +
47717 + return;
47718 +}
47719 +
47720 +static void
47721 +insert_acl_subj_label(struct acl_subject_label *obj,
47722 + struct acl_role_label *role)
47723 +{
47724 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47725 + struct acl_subject_label **curr;
47726 +
47727 + obj->prev = NULL;
47728 +
47729 + curr = &role->subj_hash[index];
47730 + if (*curr != NULL)
47731 + (*curr)->prev = obj;
47732 +
47733 + obj->next = *curr;
47734 + *curr = obj;
47735 +
47736 + return;
47737 +}
47738 +
47739 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47740 +
47741 +static void *
47742 +create_table(__u32 * len, int elementsize)
47743 +{
47744 + unsigned int table_sizes[] = {
47745 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47746 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47747 + 4194301, 8388593, 16777213, 33554393, 67108859
47748 + };
47749 + void *newtable = NULL;
47750 + unsigned int pwr = 0;
47751 +
47752 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47753 + table_sizes[pwr] <= *len)
47754 + pwr++;
47755 +
47756 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47757 + return newtable;
47758 +
47759 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47760 + newtable =
47761 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47762 + else
47763 + newtable = vmalloc(table_sizes[pwr] * elementsize);
47764 +
47765 + *len = table_sizes[pwr];
47766 +
47767 + return newtable;
47768 +}
47769 +
47770 +static int
47771 +init_variables(const struct gr_arg *arg)
47772 +{
47773 + struct task_struct *reaper = &init_task;
47774 + unsigned int stacksize;
47775 +
47776 + subj_map_set.s_size = arg->role_db.num_subjects;
47777 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47778 + name_set.n_size = arg->role_db.num_objects;
47779 + inodev_set.i_size = arg->role_db.num_objects;
47780 +
47781 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
47782 + !name_set.n_size || !inodev_set.i_size)
47783 + return 1;
47784 +
47785 + if (!gr_init_uidset())
47786 + return 1;
47787 +
47788 + /* set up the stack that holds allocation info */
47789 +
47790 + stacksize = arg->role_db.num_pointers + 5;
47791 +
47792 + if (!acl_alloc_stack_init(stacksize))
47793 + return 1;
47794 +
47795 + /* grab reference for the real root dentry and vfsmount */
47796 + read_lock(&reaper->fs->lock);
47797 + real_root = dget(reaper->fs->root.dentry);
47798 + real_root_mnt = mntget(reaper->fs->root.mnt);
47799 + read_unlock(&reaper->fs->lock);
47800 +
47801 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47802 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47803 +#endif
47804 +
47805 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47806 + if (fakefs_obj_rw == NULL)
47807 + return 1;
47808 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47809 +
47810 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47811 + if (fakefs_obj_rwx == NULL)
47812 + return 1;
47813 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47814 +
47815 + subj_map_set.s_hash =
47816 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47817 + acl_role_set.r_hash =
47818 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47819 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47820 + inodev_set.i_hash =
47821 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47822 +
47823 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47824 + !name_set.n_hash || !inodev_set.i_hash)
47825 + return 1;
47826 +
47827 + memset(subj_map_set.s_hash, 0,
47828 + sizeof(struct subject_map *) * subj_map_set.s_size);
47829 + memset(acl_role_set.r_hash, 0,
47830 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
47831 + memset(name_set.n_hash, 0,
47832 + sizeof (struct name_entry *) * name_set.n_size);
47833 + memset(inodev_set.i_hash, 0,
47834 + sizeof (struct inodev_entry *) * inodev_set.i_size);
47835 +
47836 + return 0;
47837 +}
47838 +
47839 +/* free information not needed after startup
47840 + currently contains user->kernel pointer mappings for subjects
47841 +*/
47842 +
47843 +static void
47844 +free_init_variables(void)
47845 +{
47846 + __u32 i;
47847 +
47848 + if (subj_map_set.s_hash) {
47849 + for (i = 0; i < subj_map_set.s_size; i++) {
47850 + if (subj_map_set.s_hash[i]) {
47851 + kfree(subj_map_set.s_hash[i]);
47852 + subj_map_set.s_hash[i] = NULL;
47853 + }
47854 + }
47855 +
47856 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47857 + PAGE_SIZE)
47858 + kfree(subj_map_set.s_hash);
47859 + else
47860 + vfree(subj_map_set.s_hash);
47861 + }
47862 +
47863 + return;
47864 +}
47865 +
47866 +static void
47867 +free_variables(void)
47868 +{
47869 + struct acl_subject_label *s;
47870 + struct acl_role_label *r;
47871 + struct task_struct *task, *task2;
47872 + unsigned int x;
47873 +
47874 + gr_clear_learn_entries();
47875 +
47876 + read_lock(&tasklist_lock);
47877 + do_each_thread(task2, task) {
47878 + task->acl_sp_role = 0;
47879 + task->acl_role_id = 0;
47880 + task->acl = NULL;
47881 + task->role = NULL;
47882 + } while_each_thread(task2, task);
47883 + read_unlock(&tasklist_lock);
47884 +
47885 + /* release the reference to the real root dentry and vfsmount */
47886 + if (real_root)
47887 + dput(real_root);
47888 + real_root = NULL;
47889 + if (real_root_mnt)
47890 + mntput(real_root_mnt);
47891 + real_root_mnt = NULL;
47892 +
47893 + /* free all object hash tables */
47894 +
47895 + FOR_EACH_ROLE_START(r)
47896 + if (r->subj_hash == NULL)
47897 + goto next_role;
47898 + FOR_EACH_SUBJECT_START(r, s, x)
47899 + if (s->obj_hash == NULL)
47900 + break;
47901 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47902 + kfree(s->obj_hash);
47903 + else
47904 + vfree(s->obj_hash);
47905 + FOR_EACH_SUBJECT_END(s, x)
47906 + FOR_EACH_NESTED_SUBJECT_START(r, s)
47907 + if (s->obj_hash == NULL)
47908 + break;
47909 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47910 + kfree(s->obj_hash);
47911 + else
47912 + vfree(s->obj_hash);
47913 + FOR_EACH_NESTED_SUBJECT_END(s)
47914 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47915 + kfree(r->subj_hash);
47916 + else
47917 + vfree(r->subj_hash);
47918 + r->subj_hash = NULL;
47919 +next_role:
47920 + FOR_EACH_ROLE_END(r)
47921 +
47922 + acl_free_all();
47923 +
47924 + if (acl_role_set.r_hash) {
47925 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47926 + PAGE_SIZE)
47927 + kfree(acl_role_set.r_hash);
47928 + else
47929 + vfree(acl_role_set.r_hash);
47930 + }
47931 + if (name_set.n_hash) {
47932 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
47933 + PAGE_SIZE)
47934 + kfree(name_set.n_hash);
47935 + else
47936 + vfree(name_set.n_hash);
47937 + }
47938 +
47939 + if (inodev_set.i_hash) {
47940 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47941 + PAGE_SIZE)
47942 + kfree(inodev_set.i_hash);
47943 + else
47944 + vfree(inodev_set.i_hash);
47945 + }
47946 +
47947 + gr_free_uidset();
47948 +
47949 + memset(&name_set, 0, sizeof (struct name_db));
47950 + memset(&inodev_set, 0, sizeof (struct inodev_db));
47951 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47952 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47953 +
47954 + default_role = NULL;
47955 + role_list = NULL;
47956 +
47957 + return;
47958 +}
47959 +
47960 +static __u32
47961 +count_user_objs(struct acl_object_label *userp)
47962 +{
47963 + struct acl_object_label o_tmp;
47964 + __u32 num = 0;
47965 +
47966 + while (userp) {
47967 + if (copy_from_user(&o_tmp, userp,
47968 + sizeof (struct acl_object_label)))
47969 + break;
47970 +
47971 + userp = o_tmp.prev;
47972 + num++;
47973 + }
47974 +
47975 + return num;
47976 +}
47977 +
47978 +static struct acl_subject_label *
47979 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47980 +
47981 +static int
47982 +copy_user_glob(struct acl_object_label *obj)
47983 +{
47984 + struct acl_object_label *g_tmp, **guser;
47985 + unsigned int len;
47986 + char *tmp;
47987 +
47988 + if (obj->globbed == NULL)
47989 + return 0;
47990 +
47991 + guser = &obj->globbed;
47992 + while (*guser) {
47993 + g_tmp = (struct acl_object_label *)
47994 + acl_alloc(sizeof (struct acl_object_label));
47995 + if (g_tmp == NULL)
47996 + return -ENOMEM;
47997 +
47998 + if (copy_from_user(g_tmp, *guser,
47999 + sizeof (struct acl_object_label)))
48000 + return -EFAULT;
48001 +
48002 + len = strnlen_user(g_tmp->filename, PATH_MAX);
48003 +
48004 + if (!len || len >= PATH_MAX)
48005 + return -EINVAL;
48006 +
48007 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48008 + return -ENOMEM;
48009 +
48010 + if (copy_from_user(tmp, g_tmp->filename, len))
48011 + return -EFAULT;
48012 + tmp[len-1] = '\0';
48013 + g_tmp->filename = tmp;
48014 +
48015 + *guser = g_tmp;
48016 + guser = &(g_tmp->next);
48017 + }
48018 +
48019 + return 0;
48020 +}
48021 +
48022 +static int
48023 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48024 + struct acl_role_label *role)
48025 +{
48026 + struct acl_object_label *o_tmp;
48027 + unsigned int len;
48028 + int ret;
48029 + char *tmp;
48030 +
48031 + while (userp) {
48032 + if ((o_tmp = (struct acl_object_label *)
48033 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
48034 + return -ENOMEM;
48035 +
48036 + if (copy_from_user(o_tmp, userp,
48037 + sizeof (struct acl_object_label)))
48038 + return -EFAULT;
48039 +
48040 + userp = o_tmp->prev;
48041 +
48042 + len = strnlen_user(o_tmp->filename, PATH_MAX);
48043 +
48044 + if (!len || len >= PATH_MAX)
48045 + return -EINVAL;
48046 +
48047 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48048 + return -ENOMEM;
48049 +
48050 + if (copy_from_user(tmp, o_tmp->filename, len))
48051 + return -EFAULT;
48052 + tmp[len-1] = '\0';
48053 + o_tmp->filename = tmp;
48054 +
48055 + insert_acl_obj_label(o_tmp, subj);
48056 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48057 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48058 + return -ENOMEM;
48059 +
48060 + ret = copy_user_glob(o_tmp);
48061 + if (ret)
48062 + return ret;
48063 +
48064 + if (o_tmp->nested) {
48065 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48066 + if (IS_ERR(o_tmp->nested))
48067 + return PTR_ERR(o_tmp->nested);
48068 +
48069 + /* insert into nested subject list */
48070 + o_tmp->nested->next = role->hash->first;
48071 + role->hash->first = o_tmp->nested;
48072 + }
48073 + }
48074 +
48075 + return 0;
48076 +}
48077 +
48078 +static __u32
48079 +count_user_subjs(struct acl_subject_label *userp)
48080 +{
48081 + struct acl_subject_label s_tmp;
48082 + __u32 num = 0;
48083 +
48084 + while (userp) {
48085 + if (copy_from_user(&s_tmp, userp,
48086 + sizeof (struct acl_subject_label)))
48087 + break;
48088 +
48089 + userp = s_tmp.prev;
48090 + /* do not count nested subjects against this count, since
48091 + they are not included in the hash table, but are
48092 + attached to objects. We have already counted
48093 + the subjects in userspace for the allocation
48094 + stack
48095 + */
48096 + if (!(s_tmp.mode & GR_NESTED))
48097 + num++;
48098 + }
48099 +
48100 + return num;
48101 +}
48102 +
48103 +static int
48104 +copy_user_allowedips(struct acl_role_label *rolep)
48105 +{
48106 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48107 +
48108 + ruserip = rolep->allowed_ips;
48109 +
48110 + while (ruserip) {
48111 + rlast = rtmp;
48112 +
48113 + if ((rtmp = (struct role_allowed_ip *)
48114 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48115 + return -ENOMEM;
48116 +
48117 + if (copy_from_user(rtmp, ruserip,
48118 + sizeof (struct role_allowed_ip)))
48119 + return -EFAULT;
48120 +
48121 + ruserip = rtmp->prev;
48122 +
48123 + if (!rlast) {
48124 + rtmp->prev = NULL;
48125 + rolep->allowed_ips = rtmp;
48126 + } else {
48127 + rlast->next = rtmp;
48128 + rtmp->prev = rlast;
48129 + }
48130 +
48131 + if (!ruserip)
48132 + rtmp->next = NULL;
48133 + }
48134 +
48135 + return 0;
48136 +}
48137 +
48138 +static int
48139 +copy_user_transitions(struct acl_role_label *rolep)
48140 +{
48141 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
48142 +
48143 + unsigned int len;
48144 + char *tmp;
48145 +
48146 + rusertp = rolep->transitions;
48147 +
48148 + while (rusertp) {
48149 + rlast = rtmp;
48150 +
48151 + if ((rtmp = (struct role_transition *)
48152 + acl_alloc(sizeof (struct role_transition))) == NULL)
48153 + return -ENOMEM;
48154 +
48155 + if (copy_from_user(rtmp, rusertp,
48156 + sizeof (struct role_transition)))
48157 + return -EFAULT;
48158 +
48159 + rusertp = rtmp->prev;
48160 +
48161 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48162 +
48163 + if (!len || len >= GR_SPROLE_LEN)
48164 + return -EINVAL;
48165 +
48166 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48167 + return -ENOMEM;
48168 +
48169 + if (copy_from_user(tmp, rtmp->rolename, len))
48170 + return -EFAULT;
48171 + tmp[len-1] = '\0';
48172 + rtmp->rolename = tmp;
48173 +
48174 + if (!rlast) {
48175 + rtmp->prev = NULL;
48176 + rolep->transitions = rtmp;
48177 + } else {
48178 + rlast->next = rtmp;
48179 + rtmp->prev = rlast;
48180 + }
48181 +
48182 + if (!rusertp)
48183 + rtmp->next = NULL;
48184 + }
48185 +
48186 + return 0;
48187 +}
48188 +
48189 +static struct acl_subject_label *
48190 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48191 +{
48192 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48193 + unsigned int len;
48194 + char *tmp;
48195 + __u32 num_objs;
48196 + struct acl_ip_label **i_tmp, *i_utmp2;
48197 + struct gr_hash_struct ghash;
48198 + struct subject_map *subjmap;
48199 + unsigned int i_num;
48200 + int err;
48201 +
48202 + s_tmp = lookup_subject_map(userp);
48203 +
48204 + /* we've already copied this subject into the kernel, just return
48205 + the reference to it, and don't copy it over again
48206 + */
48207 + if (s_tmp)
48208 + return(s_tmp);
48209 +
48210 + if ((s_tmp = (struct acl_subject_label *)
48211 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48212 + return ERR_PTR(-ENOMEM);
48213 +
48214 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48215 + if (subjmap == NULL)
48216 + return ERR_PTR(-ENOMEM);
48217 +
48218 + subjmap->user = userp;
48219 + subjmap->kernel = s_tmp;
48220 + insert_subj_map_entry(subjmap);
48221 +
48222 + if (copy_from_user(s_tmp, userp,
48223 + sizeof (struct acl_subject_label)))
48224 + return ERR_PTR(-EFAULT);
48225 +
48226 + len = strnlen_user(s_tmp->filename, PATH_MAX);
48227 +
48228 + if (!len || len >= PATH_MAX)
48229 + return ERR_PTR(-EINVAL);
48230 +
48231 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48232 + return ERR_PTR(-ENOMEM);
48233 +
48234 + if (copy_from_user(tmp, s_tmp->filename, len))
48235 + return ERR_PTR(-EFAULT);
48236 + tmp[len-1] = '\0';
48237 + s_tmp->filename = tmp;
48238 +
48239 + if (!strcmp(s_tmp->filename, "/"))
48240 + role->root_label = s_tmp;
48241 +
48242 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48243 + return ERR_PTR(-EFAULT);
48244 +
48245 + /* copy user and group transition tables */
48246 +
48247 + if (s_tmp->user_trans_num) {
48248 + uid_t *uidlist;
48249 +
48250 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48251 + if (uidlist == NULL)
48252 + return ERR_PTR(-ENOMEM);
48253 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48254 + return ERR_PTR(-EFAULT);
48255 +
48256 + s_tmp->user_transitions = uidlist;
48257 + }
48258 +
48259 + if (s_tmp->group_trans_num) {
48260 + gid_t *gidlist;
48261 +
48262 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48263 + if (gidlist == NULL)
48264 + return ERR_PTR(-ENOMEM);
48265 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48266 + return ERR_PTR(-EFAULT);
48267 +
48268 + s_tmp->group_transitions = gidlist;
48269 + }
48270 +
48271 + /* set up object hash table */
48272 + num_objs = count_user_objs(ghash.first);
48273 +
48274 + s_tmp->obj_hash_size = num_objs;
48275 + s_tmp->obj_hash =
48276 + (struct acl_object_label **)
48277 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48278 +
48279 + if (!s_tmp->obj_hash)
48280 + return ERR_PTR(-ENOMEM);
48281 +
48282 + memset(s_tmp->obj_hash, 0,
48283 + s_tmp->obj_hash_size *
48284 + sizeof (struct acl_object_label *));
48285 +
48286 + /* add in objects */
48287 + err = copy_user_objs(ghash.first, s_tmp, role);
48288 +
48289 + if (err)
48290 + return ERR_PTR(err);
48291 +
48292 + /* set pointer for parent subject */
48293 + if (s_tmp->parent_subject) {
48294 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48295 +
48296 + if (IS_ERR(s_tmp2))
48297 + return s_tmp2;
48298 +
48299 + s_tmp->parent_subject = s_tmp2;
48300 + }
48301 +
48302 + /* add in ip acls */
48303 +
48304 + if (!s_tmp->ip_num) {
48305 + s_tmp->ips = NULL;
48306 + goto insert;
48307 + }
48308 +
48309 + i_tmp =
48310 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48311 + sizeof (struct acl_ip_label *));
48312 +
48313 + if (!i_tmp)
48314 + return ERR_PTR(-ENOMEM);
48315 +
48316 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48317 + *(i_tmp + i_num) =
48318 + (struct acl_ip_label *)
48319 + acl_alloc(sizeof (struct acl_ip_label));
48320 + if (!*(i_tmp + i_num))
48321 + return ERR_PTR(-ENOMEM);
48322 +
48323 + if (copy_from_user
48324 + (&i_utmp2, s_tmp->ips + i_num,
48325 + sizeof (struct acl_ip_label *)))
48326 + return ERR_PTR(-EFAULT);
48327 +
48328 + if (copy_from_user
48329 + (*(i_tmp + i_num), i_utmp2,
48330 + sizeof (struct acl_ip_label)))
48331 + return ERR_PTR(-EFAULT);
48332 +
48333 + if ((*(i_tmp + i_num))->iface == NULL)
48334 + continue;
48335 +
48336 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48337 + if (!len || len >= IFNAMSIZ)
48338 + return ERR_PTR(-EINVAL);
48339 + tmp = acl_alloc(len);
48340 + if (tmp == NULL)
48341 + return ERR_PTR(-ENOMEM);
48342 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48343 + return ERR_PTR(-EFAULT);
48344 + (*(i_tmp + i_num))->iface = tmp;
48345 + }
48346 +
48347 + s_tmp->ips = i_tmp;
48348 +
48349 +insert:
48350 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48351 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48352 + return ERR_PTR(-ENOMEM);
48353 +
48354 + return s_tmp;
48355 +}
48356 +
48357 +static int
48358 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48359 +{
48360 + struct acl_subject_label s_pre;
48361 + struct acl_subject_label * ret;
48362 + int err;
48363 +
48364 + while (userp) {
48365 + if (copy_from_user(&s_pre, userp,
48366 + sizeof (struct acl_subject_label)))
48367 + return -EFAULT;
48368 +
48369 + /* do not add nested subjects here, add
48370 + while parsing objects
48371 + */
48372 +
48373 + if (s_pre.mode & GR_NESTED) {
48374 + userp = s_pre.prev;
48375 + continue;
48376 + }
48377 +
48378 + ret = do_copy_user_subj(userp, role);
48379 +
48380 + err = PTR_ERR(ret);
48381 + if (IS_ERR(ret))
48382 + return err;
48383 +
48384 + insert_acl_subj_label(ret, role);
48385 +
48386 + userp = s_pre.prev;
48387 + }
48388 +
48389 + return 0;
48390 +}
48391 +
48392 +static int
48393 +copy_user_acl(struct gr_arg *arg)
48394 +{
48395 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48396 + struct sprole_pw *sptmp;
48397 + struct gr_hash_struct *ghash;
48398 + uid_t *domainlist;
48399 + unsigned int r_num;
48400 + unsigned int len;
48401 + char *tmp;
48402 + int err = 0;
48403 + __u16 i;
48404 + __u32 num_subjs;
48405 +
48406 + /* we need a default and kernel role */
48407 + if (arg->role_db.num_roles < 2)
48408 + return -EINVAL;
48409 +
48410 + /* copy special role authentication info from userspace */
48411 +
48412 + num_sprole_pws = arg->num_sprole_pws;
48413 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48414 +
48415 + if (!acl_special_roles) {
48416 + err = -ENOMEM;
48417 + goto cleanup;
48418 + }
48419 +
48420 + for (i = 0; i < num_sprole_pws; i++) {
48421 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48422 + if (!sptmp) {
48423 + err = -ENOMEM;
48424 + goto cleanup;
48425 + }
48426 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48427 + sizeof (struct sprole_pw))) {
48428 + err = -EFAULT;
48429 + goto cleanup;
48430 + }
48431 +
48432 + len =
48433 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48434 +
48435 + if (!len || len >= GR_SPROLE_LEN) {
48436 + err = -EINVAL;
48437 + goto cleanup;
48438 + }
48439 +
48440 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48441 + err = -ENOMEM;
48442 + goto cleanup;
48443 + }
48444 +
48445 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48446 + err = -EFAULT;
48447 + goto cleanup;
48448 + }
48449 + tmp[len-1] = '\0';
48450 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48451 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48452 +#endif
48453 + sptmp->rolename = tmp;
48454 + acl_special_roles[i] = sptmp;
48455 + }
48456 +
48457 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48458 +
48459 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48460 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48461 +
48462 + if (!r_tmp) {
48463 + err = -ENOMEM;
48464 + goto cleanup;
48465 + }
48466 +
48467 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48468 + sizeof (struct acl_role_label *))) {
48469 + err = -EFAULT;
48470 + goto cleanup;
48471 + }
48472 +
48473 + if (copy_from_user(r_tmp, r_utmp2,
48474 + sizeof (struct acl_role_label))) {
48475 + err = -EFAULT;
48476 + goto cleanup;
48477 + }
48478 +
48479 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48480 +
48481 + if (!len || len >= PATH_MAX) {
48482 + err = -EINVAL;
48483 + goto cleanup;
48484 + }
48485 +
48486 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48487 + err = -ENOMEM;
48488 + goto cleanup;
48489 + }
48490 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48491 + err = -EFAULT;
48492 + goto cleanup;
48493 + }
48494 + tmp[len-1] = '\0';
48495 + r_tmp->rolename = tmp;
48496 +
48497 + if (!strcmp(r_tmp->rolename, "default")
48498 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48499 + default_role = r_tmp;
48500 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48501 + kernel_role = r_tmp;
48502 + }
48503 +
48504 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48505 + err = -ENOMEM;
48506 + goto cleanup;
48507 + }
48508 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48509 + err = -EFAULT;
48510 + goto cleanup;
48511 + }
48512 +
48513 + r_tmp->hash = ghash;
48514 +
48515 + num_subjs = count_user_subjs(r_tmp->hash->first);
48516 +
48517 + r_tmp->subj_hash_size = num_subjs;
48518 + r_tmp->subj_hash =
48519 + (struct acl_subject_label **)
48520 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48521 +
48522 + if (!r_tmp->subj_hash) {
48523 + err = -ENOMEM;
48524 + goto cleanup;
48525 + }
48526 +
48527 + err = copy_user_allowedips(r_tmp);
48528 + if (err)
48529 + goto cleanup;
48530 +
48531 + /* copy domain info */
48532 + if (r_tmp->domain_children != NULL) {
48533 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48534 + if (domainlist == NULL) {
48535 + err = -ENOMEM;
48536 + goto cleanup;
48537 + }
48538 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48539 + err = -EFAULT;
48540 + goto cleanup;
48541 + }
48542 + r_tmp->domain_children = domainlist;
48543 + }
48544 +
48545 + err = copy_user_transitions(r_tmp);
48546 + if (err)
48547 + goto cleanup;
48548 +
48549 + memset(r_tmp->subj_hash, 0,
48550 + r_tmp->subj_hash_size *
48551 + sizeof (struct acl_subject_label *));
48552 +
48553 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48554 +
48555 + if (err)
48556 + goto cleanup;
48557 +
48558 + /* set nested subject list to null */
48559 + r_tmp->hash->first = NULL;
48560 +
48561 + insert_acl_role_label(r_tmp);
48562 + }
48563 +
48564 + goto return_err;
48565 + cleanup:
48566 + free_variables();
48567 + return_err:
48568 + return err;
48569 +
48570 +}
48571 +
48572 +static int
48573 +gracl_init(struct gr_arg *args)
48574 +{
48575 + int error = 0;
48576 +
48577 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48578 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48579 +
48580 + if (init_variables(args)) {
48581 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48582 + error = -ENOMEM;
48583 + free_variables();
48584 + goto out;
48585 + }
48586 +
48587 + error = copy_user_acl(args);
48588 + free_init_variables();
48589 + if (error) {
48590 + free_variables();
48591 + goto out;
48592 + }
48593 +
48594 + if ((error = gr_set_acls(0))) {
48595 + free_variables();
48596 + goto out;
48597 + }
48598 +
48599 + pax_open_kernel();
48600 + gr_status |= GR_READY;
48601 + pax_close_kernel();
48602 +
48603 + out:
48604 + return error;
48605 +}
48606 +
48607 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
48608 +
48609 +static int
48610 +glob_match(const char *p, const char *n)
48611 +{
48612 + char c;
48613 +
48614 + while ((c = *p++) != '\0') {
48615 + switch (c) {
48616 + case '?':
48617 + if (*n == '\0')
48618 + return 1;
48619 + else if (*n == '/')
48620 + return 1;
48621 + break;
48622 + case '\\':
48623 + if (*n != c)
48624 + return 1;
48625 + break;
48626 + case '*':
48627 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
48628 + if (*n == '/')
48629 + return 1;
48630 + else if (c == '?') {
48631 + if (*n == '\0')
48632 + return 1;
48633 + else
48634 + ++n;
48635 + }
48636 + }
48637 + if (c == '\0') {
48638 + return 0;
48639 + } else {
48640 + const char *endp;
48641 +
48642 + if ((endp = strchr(n, '/')) == NULL)
48643 + endp = n + strlen(n);
48644 +
48645 + if (c == '[') {
48646 + for (--p; n < endp; ++n)
48647 + if (!glob_match(p, n))
48648 + return 0;
48649 + } else if (c == '/') {
48650 + while (*n != '\0' && *n != '/')
48651 + ++n;
48652 + if (*n == '/' && !glob_match(p, n + 1))
48653 + return 0;
48654 + } else {
48655 + for (--p; n < endp; ++n)
48656 + if (*n == c && !glob_match(p, n))
48657 + return 0;
48658 + }
48659 +
48660 + return 1;
48661 + }
48662 + case '[':
48663 + {
48664 + int not;
48665 + char cold;
48666 +
48667 + if (*n == '\0' || *n == '/')
48668 + return 1;
48669 +
48670 + not = (*p == '!' || *p == '^');
48671 + if (not)
48672 + ++p;
48673 +
48674 + c = *p++;
48675 + for (;;) {
48676 + unsigned char fn = (unsigned char)*n;
48677 +
48678 + if (c == '\0')
48679 + return 1;
48680 + else {
48681 + if (c == fn)
48682 + goto matched;
48683 + cold = c;
48684 + c = *p++;
48685 +
48686 + if (c == '-' && *p != ']') {
48687 + unsigned char cend = *p++;
48688 +
48689 + if (cend == '\0')
48690 + return 1;
48691 +
48692 + if (cold <= fn && fn <= cend)
48693 + goto matched;
48694 +
48695 + c = *p++;
48696 + }
48697 + }
48698 +
48699 + if (c == ']')
48700 + break;
48701 + }
48702 + if (!not)
48703 + return 1;
48704 + break;
48705 + matched:
48706 + while (c != ']') {
48707 + if (c == '\0')
48708 + return 1;
48709 +
48710 + c = *p++;
48711 + }
48712 + if (not)
48713 + return 1;
48714 + }
48715 + break;
48716 + default:
48717 + if (c != *n)
48718 + return 1;
48719 + }
48720 +
48721 + ++n;
48722 + }
48723 +
48724 + if (*n == '\0')
48725 + return 0;
48726 +
48727 + if (*n == '/')
48728 + return 0;
48729 +
48730 + return 1;
48731 +}
48732 +
48733 +static struct acl_object_label *
48734 +chk_glob_label(struct acl_object_label *globbed,
48735 + struct dentry *dentry, struct vfsmount *mnt, char **path)
48736 +{
48737 + struct acl_object_label *tmp;
48738 +
48739 + if (*path == NULL)
48740 + *path = gr_to_filename_nolock(dentry, mnt);
48741 +
48742 + tmp = globbed;
48743 +
48744 + while (tmp) {
48745 + if (!glob_match(tmp->filename, *path))
48746 + return tmp;
48747 + tmp = tmp->next;
48748 + }
48749 +
48750 + return NULL;
48751 +}
48752 +
48753 +static struct acl_object_label *
48754 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48755 + const ino_t curr_ino, const dev_t curr_dev,
48756 + const struct acl_subject_label *subj, char **path, const int checkglob)
48757 +{
48758 + struct acl_subject_label *tmpsubj;
48759 + struct acl_object_label *retval;
48760 + struct acl_object_label *retval2;
48761 +
48762 + tmpsubj = (struct acl_subject_label *) subj;
48763 + read_lock(&gr_inode_lock);
48764 + do {
48765 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48766 + if (retval) {
48767 + if (checkglob && retval->globbed) {
48768 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48769 + (struct vfsmount *)orig_mnt, path);
48770 + if (retval2)
48771 + retval = retval2;
48772 + }
48773 + break;
48774 + }
48775 + } while ((tmpsubj = tmpsubj->parent_subject));
48776 + read_unlock(&gr_inode_lock);
48777 +
48778 + return retval;
48779 +}
48780 +
48781 +static __inline__ struct acl_object_label *
48782 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48783 + const struct dentry *curr_dentry,
48784 + const struct acl_subject_label *subj, char **path, const int checkglob)
48785 +{
48786 + int newglob = checkglob;
48787 +
48788 +	/* if we aren't yet checking a subdirectory of the original path, don't do glob checking,
48789 +	   as we don't want a / * rule to match instead of the / object.
48790 +	   don't do this for create lookups that call this function, though, since they look up
48791 +	   the parent and thus need globbing checks on all paths
48792 +	*/
48793 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48794 + newglob = GR_NO_GLOB;
48795 +
48796 + return __full_lookup(orig_dentry, orig_mnt,
48797 + curr_dentry->d_inode->i_ino,
48798 + __get_dev(curr_dentry), subj, path, newglob);
48799 +}
48800 +
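+/* __chk_obj_label(): resolve the object label governing a dentry/vfsmount
+ * pair.  Unlinked shmem/hugetlbfs inodes, pipe and socket inodes, and
+ * private inodes are handed a fake object label; everything else is looked
+ * up by walking from the dentry toward the namespace root, crossing
+ * mountpoints on the way, with the real root's label as the final fallback.
+ */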
48801 +static struct acl_object_label *
48802 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48803 + const struct acl_subject_label *subj, char *path, const int checkglob)
48804 +{
48805 + struct dentry *dentry = (struct dentry *) l_dentry;
48806 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48807 + struct acl_object_label *retval;
48808 +
48809 + spin_lock(&dcache_lock);
48810 + spin_lock(&vfsmount_lock);
48811 +
48812 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48813 +#ifdef CONFIG_NET
48814 + mnt == sock_mnt ||
48815 +#endif
48816 +#ifdef CONFIG_HUGETLBFS
48817 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48818 +#endif
48819 + /* ignore Eric Biederman */
48820 + IS_PRIVATE(l_dentry->d_inode))) {
48821 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48822 + goto out;
48823 + }
48824 +
48825 + for (;;) {
48826 + if (dentry == real_root && mnt == real_root_mnt)
48827 + break;
48828 +
48829 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48830 + if (mnt->mnt_parent == mnt)
48831 + break;
48832 +
48833 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48834 + if (retval != NULL)
48835 + goto out;
48836 +
48837 + dentry = mnt->mnt_mountpoint;
48838 + mnt = mnt->mnt_parent;
48839 + continue;
48840 + }
48841 +
48842 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48843 + if (retval != NULL)
48844 + goto out;
48845 +
48846 + dentry = dentry->d_parent;
48847 + }
48848 +
48849 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48850 +
48851 + if (retval == NULL)
48852 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48853 +out:
48854 + spin_unlock(&vfsmount_lock);
48855 + spin_unlock(&dcache_lock);
48856 +
48857 + BUG_ON(retval == NULL);
48858 +
48859 + return retval;
48860 +}
48861 +
48862 +static __inline__ struct acl_object_label *
48863 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48864 + const struct acl_subject_label *subj)
48865 +{
48866 + char *path = NULL;
48867 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48868 +}
48869 +
48870 +static __inline__ struct acl_object_label *
48871 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48872 + const struct acl_subject_label *subj)
48873 +{
48874 + char *path = NULL;
48875 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48876 +}
48877 +
48878 +static __inline__ struct acl_object_label *
48879 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48880 + const struct acl_subject_label *subj, char *path)
48881 +{
48882 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48883 +}
48884 +
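+/* chk_subj_label(): same walk toward the namespace root as __chk_obj_label(),
+ * but it resolves the subject label the given role assigns to the path,
+ * falling back to the real root's subject when nothing more specific matches.
+ */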
48885 +static struct acl_subject_label *
48886 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48887 + const struct acl_role_label *role)
48888 +{
48889 + struct dentry *dentry = (struct dentry *) l_dentry;
48890 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48891 + struct acl_subject_label *retval;
48892 +
48893 + spin_lock(&dcache_lock);
48894 + spin_lock(&vfsmount_lock);
48895 +
48896 + for (;;) {
48897 + if (dentry == real_root && mnt == real_root_mnt)
48898 + break;
48899 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48900 + if (mnt->mnt_parent == mnt)
48901 + break;
48902 +
48903 + read_lock(&gr_inode_lock);
48904 + retval =
48905 + lookup_acl_subj_label(dentry->d_inode->i_ino,
48906 + __get_dev(dentry), role);
48907 + read_unlock(&gr_inode_lock);
48908 + if (retval != NULL)
48909 + goto out;
48910 +
48911 + dentry = mnt->mnt_mountpoint;
48912 + mnt = mnt->mnt_parent;
48913 + continue;
48914 + }
48915 +
48916 + read_lock(&gr_inode_lock);
48917 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48918 + __get_dev(dentry), role);
48919 + read_unlock(&gr_inode_lock);
48920 + if (retval != NULL)
48921 + goto out;
48922 +
48923 + dentry = dentry->d_parent;
48924 + }
48925 +
48926 + read_lock(&gr_inode_lock);
48927 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48928 + __get_dev(dentry), role);
48929 + read_unlock(&gr_inode_lock);
48930 +
48931 + if (unlikely(retval == NULL)) {
48932 + read_lock(&gr_inode_lock);
48933 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48934 + __get_dev(real_root), role);
48935 + read_unlock(&gr_inode_lock);
48936 + }
48937 +out:
48938 + spin_unlock(&vfsmount_lock);
48939 + spin_unlock(&dcache_lock);
48940 +
48941 + BUG_ON(retval == NULL);
48942 +
48943 + return retval;
48944 +}
48945 +
48946 +static void
48947 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48948 +{
48949 + struct task_struct *task = current;
48950 + const struct cred *cred = current_cred();
48951 +
48952 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48953 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48954 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48955 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48956 +
48957 + return;
48958 +}
48959 +
48960 +static void
48961 +gr_log_learn_sysctl(const char *path, const __u32 mode)
48962 +{
48963 + struct task_struct *task = current;
48964 + const struct cred *cred = current_cred();
48965 +
48966 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48967 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48968 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48969 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48970 +
48971 + return;
48972 +}
48973 +
48974 +static void
48975 +gr_log_learn_id_change(const char type, const unsigned int real,
48976 + const unsigned int effective, const unsigned int fs)
48977 +{
48978 + struct task_struct *task = current;
48979 + const struct cred *cred = current_cred();
48980 +
48981 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48982 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48983 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48984 + type, real, effective, fs, &task->signal->saved_ip);
48985 +
48986 + return;
48987 +}
48988 +
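+/* gr_check_link(): policy check for hard link creation.  The access granted
+ * through the new path (as computed by gr_check_create()) must already be
+ * held on the link target, and setuid/setgid targets additionally require
+ * GR_SETID on the new path; otherwise the link is refused (or learned).
+ */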
48989 +__u32
48990 +gr_check_link(const struct dentry * new_dentry,
48991 + const struct dentry * parent_dentry,
48992 + const struct vfsmount * parent_mnt,
48993 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48994 +{
48995 + struct acl_object_label *obj;
48996 + __u32 oldmode, newmode;
48997 + __u32 needmode;
48998 +
48999 + if (unlikely(!(gr_status & GR_READY)))
49000 + return (GR_CREATE | GR_LINK);
49001 +
49002 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49003 + oldmode = obj->mode;
49004 +
49005 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49006 + oldmode |= (GR_CREATE | GR_LINK);
49007 +
49008 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
49009 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49010 + needmode |= GR_SETID | GR_AUDIT_SETID;
49011 +
49012 + newmode =
49013 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
49014 + oldmode | needmode);
49015 +
49016 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
49017 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
49018 + GR_INHERIT | GR_AUDIT_INHERIT);
49019 +
49020 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
49021 + goto bad;
49022 +
49023 + if ((oldmode & needmode) != needmode)
49024 + goto bad;
49025 +
49026 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49027 + if ((newmode & needmode) != needmode)
49028 + goto bad;
49029 +
49030 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49031 + return newmode;
49032 +bad:
49033 + needmode = oldmode;
49034 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49035 + needmode |= GR_SETID;
49036 +
49037 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49038 + gr_log_learn(old_dentry, old_mnt, needmode);
49039 + return (GR_CREATE | GR_LINK);
49040 + } else if (newmode & GR_SUPPRESS)
49041 + return GR_SUPPRESS;
49042 + else
49043 + return 0;
49044 +}
49045 +
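+/* gr_search_file(): the central access check for an existing file.  Returns
+ * the subset of the requested mode permitted by the current subject's object
+ * label.  It also handles the GR_INIT_TRANSFER role hand-off to init for
+ * persistent roles and, in learn mode, grants and logs the requested access.
+ */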
49046 +__u32
49047 +gr_search_file(const struct dentry * dentry, const __u32 mode,
49048 + const struct vfsmount * mnt)
49049 +{
49050 + __u32 retval = mode;
49051 + struct acl_subject_label *curracl;
49052 + struct acl_object_label *currobj;
49053 +
49054 + if (unlikely(!(gr_status & GR_READY)))
49055 + return (mode & ~GR_AUDITS);
49056 +
49057 + curracl = current->acl;
49058 +
49059 + currobj = chk_obj_label(dentry, mnt, curracl);
49060 + retval = currobj->mode & mode;
49061 +
49062 + /* if we're opening a specified transfer file for writing
49063 + (e.g. /dev/initctl), then transfer our role to init
49064 + */
49065 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49066 + current->role->roletype & GR_ROLE_PERSIST)) {
49067 + struct task_struct *task = init_pid_ns.child_reaper;
49068 +
49069 + if (task->role != current->role) {
49070 + task->acl_sp_role = 0;
49071 + task->acl_role_id = current->acl_role_id;
49072 + task->role = current->role;
49073 + rcu_read_lock();
49074 + read_lock(&grsec_exec_file_lock);
49075 + gr_apply_subject_to_task(task);
49076 + read_unlock(&grsec_exec_file_lock);
49077 + rcu_read_unlock();
49078 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49079 + }
49080 + }
49081 +
49082 + if (unlikely
49083 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49084 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49085 + __u32 new_mode = mode;
49086 +
49087 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49088 +
49089 + retval = new_mode;
49090 +
49091 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49092 + new_mode |= GR_INHERIT;
49093 +
49094 + if (!(mode & GR_NOLEARN))
49095 + gr_log_learn(dentry, mnt, new_mode);
49096 + }
49097 +
49098 + return retval;
49099 +}
49100 +
49101 +__u32
49102 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49103 + const struct vfsmount * mnt, const __u32 mode)
49104 +{
49105 + struct name_entry *match;
49106 + struct acl_object_label *matchpo;
49107 + struct acl_subject_label *curracl;
49108 + char *path;
49109 + __u32 retval;
49110 +
49111 + if (unlikely(!(gr_status & GR_READY)))
49112 + return (mode & ~GR_AUDITS);
49113 +
49114 + preempt_disable();
49115 + path = gr_to_filename_rbac(new_dentry, mnt);
49116 + match = lookup_name_entry_create(path);
49117 +
49118 + if (!match)
49119 + goto check_parent;
49120 +
49121 + curracl = current->acl;
49122 +
49123 + read_lock(&gr_inode_lock);
49124 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49125 + read_unlock(&gr_inode_lock);
49126 +
49127 + if (matchpo) {
49128 + if ((matchpo->mode & mode) !=
49129 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
49130 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49131 + __u32 new_mode = mode;
49132 +
49133 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49134 +
49135 + gr_log_learn(new_dentry, mnt, new_mode);
49136 +
49137 + preempt_enable();
49138 + return new_mode;
49139 + }
49140 + preempt_enable();
49141 + return (matchpo->mode & mode);
49142 + }
49143 +
49144 + check_parent:
49145 + curracl = current->acl;
49146 +
49147 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49148 + retval = matchpo->mode & mode;
49149 +
49150 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49151 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49152 + __u32 new_mode = mode;
49153 +
49154 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49155 +
49156 + gr_log_learn(new_dentry, mnt, new_mode);
49157 + preempt_enable();
49158 + return new_mode;
49159 + }
49160 +
49161 + preempt_enable();
49162 + return retval;
49163 +}
49164 +
49165 +int
49166 +gr_check_hidden_task(const struct task_struct *task)
49167 +{
49168 + if (unlikely(!(gr_status & GR_READY)))
49169 + return 0;
49170 +
49171 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49172 + return 1;
49173 +
49174 + return 0;
49175 +}
49176 +
49177 +int
49178 +gr_check_protected_task(const struct task_struct *task)
49179 +{
49180 + if (unlikely(!(gr_status & GR_READY) || !task))
49181 + return 0;
49182 +
49183 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49184 + task->acl != current->acl)
49185 + return 1;
49186 +
49187 + return 0;
49188 +}
49189 +
49190 +int
49191 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49192 +{
49193 + struct task_struct *p;
49194 + int ret = 0;
49195 +
49196 + if (unlikely(!(gr_status & GR_READY) || !pid))
49197 + return ret;
49198 +
49199 + read_lock(&tasklist_lock);
49200 + do_each_pid_task(pid, type, p) {
49201 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49202 + p->acl != current->acl) {
49203 + ret = 1;
49204 + goto out;
49205 + }
49206 + } while_each_pid_task(pid, type, p);
49207 +out:
49208 + read_unlock(&tasklist_lock);
49209 +
49210 + return ret;
49211 +}
49212 +
49213 +void
49214 +gr_copy_label(struct task_struct *tsk)
49215 +{
49216 + tsk->signal->used_accept = 0;
49217 + tsk->acl_sp_role = 0;
49218 + tsk->acl_role_id = current->acl_role_id;
49219 + tsk->acl = current->acl;
49220 + tsk->role = current->role;
49221 + tsk->signal->curr_ip = current->signal->curr_ip;
49222 + tsk->signal->saved_ip = current->signal->saved_ip;
49223 + if (current->exec_file)
49224 + get_file(current->exec_file);
49225 + tsk->exec_file = current->exec_file;
49226 + tsk->is_writable = current->is_writable;
49227 + if (unlikely(current->signal->used_accept)) {
49228 + current->signal->curr_ip = 0;
49229 + current->signal->saved_ip = 0;
49230 + }
49231 +
49232 + return;
49233 +}
49234 +
49235 +static void
49236 +gr_set_proc_res(struct task_struct *task)
49237 +{
49238 + struct acl_subject_label *proc;
49239 + unsigned short i;
49240 +
49241 + proc = task->acl;
49242 +
49243 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49244 + return;
49245 +
49246 + for (i = 0; i < RLIM_NLIMITS; i++) {
49247 + if (!(proc->resmask & (1 << i)))
49248 + continue;
49249 +
49250 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49251 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49252 + }
49253 +
49254 + return;
49255 +}
49256 +
49257 +extern int __gr_process_user_ban(struct user_struct *user);
49258 +
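+/* gr_check_user_change(): called for setuid-family id changes.  Returns
+ * non-zero (deny) if the target user is banned or if the subject's uid
+ * transition list (allow- or deny-typed) does not permit the requested
+ * real/effective/fs uids; an id of -1 means "unchanged" and always passes.
+ */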
49259 +int
49260 +gr_check_user_change(int real, int effective, int fs)
49261 +{
49262 + unsigned int i;
49263 + __u16 num;
49264 + uid_t *uidlist;
49265 + int curuid;
49266 + int realok = 0;
49267 + int effectiveok = 0;
49268 + int fsok = 0;
49269 +
49270 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49271 + struct user_struct *user;
49272 +
49273 + if (real == -1)
49274 + goto skipit;
49275 +
49276 + user = find_user(real);
49277 + if (user == NULL)
49278 + goto skipit;
49279 +
49280 + if (__gr_process_user_ban(user)) {
49281 + /* for find_user */
49282 + free_uid(user);
49283 + return 1;
49284 + }
49285 +
49286 + /* for find_user */
49287 + free_uid(user);
49288 +
49289 +skipit:
49290 +#endif
49291 +
49292 + if (unlikely(!(gr_status & GR_READY)))
49293 + return 0;
49294 +
49295 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49296 + gr_log_learn_id_change('u', real, effective, fs);
49297 +
49298 + num = current->acl->user_trans_num;
49299 + uidlist = current->acl->user_transitions;
49300 +
49301 + if (uidlist == NULL)
49302 + return 0;
49303 +
49304 + if (real == -1)
49305 + realok = 1;
49306 + if (effective == -1)
49307 + effectiveok = 1;
49308 + if (fs == -1)
49309 + fsok = 1;
49310 +
49311 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
49312 + for (i = 0; i < num; i++) {
49313 + curuid = (int)uidlist[i];
49314 + if (real == curuid)
49315 + realok = 1;
49316 + if (effective == curuid)
49317 + effectiveok = 1;
49318 + if (fs == curuid)
49319 + fsok = 1;
49320 + }
49321 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
49322 + for (i = 0; i < num; i++) {
49323 + curuid = (int)uidlist[i];
49324 + if (real == curuid)
49325 + break;
49326 + if (effective == curuid)
49327 + break;
49328 + if (fs == curuid)
49329 + break;
49330 + }
49331 + /* not in deny list */
49332 + if (i == num) {
49333 + realok = 1;
49334 + effectiveok = 1;
49335 + fsok = 1;
49336 + }
49337 + }
49338 +
49339 + if (realok && effectiveok && fsok)
49340 + return 0;
49341 + else {
49342 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49343 + return 1;
49344 + }
49345 +}
49346 +
49347 +int
49348 +gr_check_group_change(int real, int effective, int fs)
49349 +{
49350 + unsigned int i;
49351 + __u16 num;
49352 + gid_t *gidlist;
49353 + int curgid;
49354 + int realok = 0;
49355 + int effectiveok = 0;
49356 + int fsok = 0;
49357 +
49358 + if (unlikely(!(gr_status & GR_READY)))
49359 + return 0;
49360 +
49361 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49362 + gr_log_learn_id_change('g', real, effective, fs);
49363 +
49364 + num = current->acl->group_trans_num;
49365 + gidlist = current->acl->group_transitions;
49366 +
49367 + if (gidlist == NULL)
49368 + return 0;
49369 +
49370 + if (real == -1)
49371 + realok = 1;
49372 + if (effective == -1)
49373 + effectiveok = 1;
49374 + if (fs == -1)
49375 + fsok = 1;
49376 +
49377 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49378 + for (i = 0; i < num; i++) {
49379 + curgid = (int)gidlist[i];
49380 + if (real == curgid)
49381 + realok = 1;
49382 + if (effective == curgid)
49383 + effectiveok = 1;
49384 + if (fs == curgid)
49385 + fsok = 1;
49386 + }
49387 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49388 + for (i = 0; i < num; i++) {
49389 + curgid = (int)gidlist[i];
49390 + if (real == curgid)
49391 + break;
49392 + if (effective == curgid)
49393 + break;
49394 + if (fs == curgid)
49395 + break;
49396 + }
49397 + /* not in deny list */
49398 + if (i == num) {
49399 + realok = 1;
49400 + effectiveok = 1;
49401 + fsok = 1;
49402 + }
49403 + }
49404 +
49405 + if (realok && effectiveok && fsok)
49406 + return 0;
49407 + else {
49408 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49409 + return 1;
49410 + }
49411 +}
49412 +
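+/* gr_set_role_label(): re-evaluate a task's role and subject after a
+ * uid/gid change.  Kernel threads keep the kernel role; for everything else
+ * the role is looked up for the new ids and the subject is resolved against
+ * the task's executable, taking care not to drop an inherited subject when
+ * the role is unchanged.
+ */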
49413 +void
49414 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49415 +{
49416 + struct acl_role_label *role = task->role;
49417 + struct acl_subject_label *subj = NULL;
49418 + struct acl_object_label *obj;
49419 + struct file *filp;
49420 +
49421 + if (unlikely(!(gr_status & GR_READY)))
49422 + return;
49423 +
49424 + filp = task->exec_file;
49425 +
49426 + /* kernel process, we'll give them the kernel role */
49427 + if (unlikely(!filp)) {
49428 + task->role = kernel_role;
49429 + task->acl = kernel_role->root_label;
49430 + return;
49431 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49432 + role = lookup_acl_role_label(task, uid, gid);
49433 +
49434 + /* perform subject lookup in possibly new role
49435 + we can use this result below in the case where role == task->role
49436 + */
49437 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49438 +
49439 +	/* if we changed uid/gid but ended up with the same role
49440 +	   and are using inheritance, don't lose the inherited subject.
49441 +	   if the current subject differs from what a normal lookup
49442 +	   would produce, we arrived here via inheritance, so don't
49443 +	   lose that subject
49444 +	*/
49445 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49446 + (subj == task->acl)))
49447 + task->acl = subj;
49448 +
49449 + task->role = role;
49450 +
49451 + task->is_writable = 0;
49452 +
49453 + /* ignore additional mmap checks for processes that are writable
49454 + by the default ACL */
49455 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49456 + if (unlikely(obj->mode & GR_WRITE))
49457 + task->is_writable = 1;
49458 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49459 + if (unlikely(obj->mode & GR_WRITE))
49460 + task->is_writable = 1;
49461 +
49462 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49463 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49464 +#endif
49465 +
49466 + gr_set_proc_res(task);
49467 +
49468 + return;
49469 +}
49470 +
49471 +int
49472 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49473 + const int unsafe_share)
49474 +{
49475 + struct task_struct *task = current;
49476 + struct acl_subject_label *newacl;
49477 + struct acl_object_label *obj;
49478 + __u32 retmode;
49479 +
49480 + if (unlikely(!(gr_status & GR_READY)))
49481 + return 0;
49482 +
49483 + newacl = chk_subj_label(dentry, mnt, task->role);
49484 +
49485 + task_lock(task);
49486 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49487 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49488 + !(task->role->roletype & GR_ROLE_GOD) &&
49489 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49490 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49491 + task_unlock(task);
49492 + if (unsafe_share)
49493 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49494 + else
49495 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49496 + return -EACCES;
49497 + }
49498 + task_unlock(task);
49499 +
49500 + obj = chk_obj_label(dentry, mnt, task->acl);
49501 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49502 +
49503 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49504 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49505 + if (obj->nested)
49506 + task->acl = obj->nested;
49507 + else
49508 + task->acl = newacl;
49509 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49510 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49511 +
49512 + task->is_writable = 0;
49513 +
49514 + /* ignore additional mmap checks for processes that are writable
49515 + by the default ACL */
49516 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49517 + if (unlikely(obj->mode & GR_WRITE))
49518 + task->is_writable = 1;
49519 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49520 + if (unlikely(obj->mode & GR_WRITE))
49521 + task->is_writable = 1;
49522 +
49523 + gr_set_proc_res(task);
49524 +
49525 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49526 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49527 +#endif
49528 + return 0;
49529 +}
49530 +
49531 +/* always called with valid inodev ptr */
49532 +static void
49533 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49534 +{
49535 + struct acl_object_label *matchpo;
49536 + struct acl_subject_label *matchps;
49537 + struct acl_subject_label *subj;
49538 + struct acl_role_label *role;
49539 + unsigned int x;
49540 +
49541 + FOR_EACH_ROLE_START(role)
49542 + FOR_EACH_SUBJECT_START(role, subj, x)
49543 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49544 + matchpo->mode |= GR_DELETED;
49545 + FOR_EACH_SUBJECT_END(subj,x)
49546 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49547 + if (subj->inode == ino && subj->device == dev)
49548 + subj->mode |= GR_DELETED;
49549 + FOR_EACH_NESTED_SUBJECT_END(subj)
49550 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49551 + matchps->mode |= GR_DELETED;
49552 + FOR_EACH_ROLE_END(role)
49553 +
49554 + inodev->nentry->deleted = 1;
49555 +
49556 + return;
49557 +}
49558 +
49559 +void
49560 +gr_handle_delete(const ino_t ino, const dev_t dev)
49561 +{
49562 + struct inodev_entry *inodev;
49563 +
49564 + if (unlikely(!(gr_status & GR_READY)))
49565 + return;
49566 +
49567 + write_lock(&gr_inode_lock);
49568 + inodev = lookup_inodev_entry(ino, dev);
49569 + if (inodev != NULL)
49570 + do_handle_delete(inodev, ino, dev);
49571 + write_unlock(&gr_inode_lock);
49572 +
49573 + return;
49574 +}
49575 +
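+/* The update_* helpers below re-key a previously deleted object label,
+ * subject label or inodev entry from its old inode/device pair to that of a
+ * newly created file: the entry is unhashed, its inode/device rewritten and
+ * its deleted state cleared, then it is re-inserted into the proper hash.
+ */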
49576 +static void
49577 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49578 + const ino_t newinode, const dev_t newdevice,
49579 + struct acl_subject_label *subj)
49580 +{
49581 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49582 + struct acl_object_label *match;
49583 +
49584 + match = subj->obj_hash[index];
49585 +
49586 + while (match && (match->inode != oldinode ||
49587 + match->device != olddevice ||
49588 + !(match->mode & GR_DELETED)))
49589 + match = match->next;
49590 +
49591 + if (match && (match->inode == oldinode)
49592 + && (match->device == olddevice)
49593 + && (match->mode & GR_DELETED)) {
49594 + if (match->prev == NULL) {
49595 + subj->obj_hash[index] = match->next;
49596 + if (match->next != NULL)
49597 + match->next->prev = NULL;
49598 + } else {
49599 + match->prev->next = match->next;
49600 + if (match->next != NULL)
49601 + match->next->prev = match->prev;
49602 + }
49603 + match->prev = NULL;
49604 + match->next = NULL;
49605 + match->inode = newinode;
49606 + match->device = newdevice;
49607 + match->mode &= ~GR_DELETED;
49608 +
49609 + insert_acl_obj_label(match, subj);
49610 + }
49611 +
49612 + return;
49613 +}
49614 +
49615 +static void
49616 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49617 + const ino_t newinode, const dev_t newdevice,
49618 + struct acl_role_label *role)
49619 +{
49620 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49621 + struct acl_subject_label *match;
49622 +
49623 + match = role->subj_hash[index];
49624 +
49625 + while (match && (match->inode != oldinode ||
49626 + match->device != olddevice ||
49627 + !(match->mode & GR_DELETED)))
49628 + match = match->next;
49629 +
49630 + if (match && (match->inode == oldinode)
49631 + && (match->device == olddevice)
49632 + && (match->mode & GR_DELETED)) {
49633 + if (match->prev == NULL) {
49634 + role->subj_hash[index] = match->next;
49635 + if (match->next != NULL)
49636 + match->next->prev = NULL;
49637 + } else {
49638 + match->prev->next = match->next;
49639 + if (match->next != NULL)
49640 + match->next->prev = match->prev;
49641 + }
49642 + match->prev = NULL;
49643 + match->next = NULL;
49644 + match->inode = newinode;
49645 + match->device = newdevice;
49646 + match->mode &= ~GR_DELETED;
49647 +
49648 + insert_acl_subj_label(match, role);
49649 + }
49650 +
49651 + return;
49652 +}
49653 +
49654 +static void
49655 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49656 + const ino_t newinode, const dev_t newdevice)
49657 +{
49658 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49659 + struct inodev_entry *match;
49660 +
49661 + match = inodev_set.i_hash[index];
49662 +
49663 + while (match && (match->nentry->inode != oldinode ||
49664 + match->nentry->device != olddevice || !match->nentry->deleted))
49665 + match = match->next;
49666 +
49667 + if (match && (match->nentry->inode == oldinode)
49668 + && (match->nentry->device == olddevice) &&
49669 + match->nentry->deleted) {
49670 + if (match->prev == NULL) {
49671 + inodev_set.i_hash[index] = match->next;
49672 + if (match->next != NULL)
49673 + match->next->prev = NULL;
49674 + } else {
49675 + match->prev->next = match->next;
49676 + if (match->next != NULL)
49677 + match->next->prev = match->prev;
49678 + }
49679 + match->prev = NULL;
49680 + match->next = NULL;
49681 + match->nentry->inode = newinode;
49682 + match->nentry->device = newdevice;
49683 + match->nentry->deleted = 0;
49684 +
49685 + insert_inodev_entry(match);
49686 + }
49687 +
49688 + return;
49689 +}
49690 +
49691 +static void
49692 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49693 + const struct vfsmount *mnt)
49694 +{
49695 + struct acl_subject_label *subj;
49696 + struct acl_role_label *role;
49697 + unsigned int x;
49698 + ino_t inode = dentry->d_inode->i_ino;
49699 + dev_t dev = __get_dev(dentry);
49700 +
49701 + FOR_EACH_ROLE_START(role)
49702 + update_acl_subj_label(matchn->inode, matchn->device,
49703 + inode, dev, role);
49704 +
49705 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49706 +		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
49707 + subj->inode = inode;
49708 + subj->device = dev;
49709 + }
49710 + FOR_EACH_NESTED_SUBJECT_END(subj)
49711 + FOR_EACH_SUBJECT_START(role, subj, x)
49712 + update_acl_obj_label(matchn->inode, matchn->device,
49713 + inode, dev, subj);
49714 + FOR_EACH_SUBJECT_END(subj,x)
49715 + FOR_EACH_ROLE_END(role)
49716 +
49717 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49718 +
49719 + return;
49720 +}
49721 +
49722 +void
49723 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49724 +{
49725 + struct name_entry *matchn;
49726 +
49727 + if (unlikely(!(gr_status & GR_READY)))
49728 + return;
49729 +
49730 + preempt_disable();
49731 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49732 +
49733 + if (unlikely((unsigned long)matchn)) {
49734 + write_lock(&gr_inode_lock);
49735 + do_handle_create(matchn, dentry, mnt);
49736 + write_unlock(&gr_inode_lock);
49737 + }
49738 + preempt_enable();
49739 +
49740 + return;
49741 +}
49742 +
49743 +void
49744 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49745 + struct dentry *old_dentry,
49746 + struct dentry *new_dentry,
49747 + struct vfsmount *mnt, const __u8 replace)
49748 +{
49749 + struct name_entry *matchn;
49750 + struct inodev_entry *inodev;
49751 + ino_t oldinode = old_dentry->d_inode->i_ino;
49752 + dev_t olddev = __get_dev(old_dentry);
49753 +
49754 +	/* vfs_rename swaps the name and parent link for old_dentry and
49755 +	   new_dentry.
49756 +	   at this point, old_dentry has the new name, parent link, and inode
49757 +	   for the renamed file.
49758 +	   if a file is being replaced by the rename, new_dentry has the inode
49759 +	   and name of the replaced file.
49760 +	*/
49761 +
49762 + if (unlikely(!(gr_status & GR_READY)))
49763 + return;
49764 +
49765 + preempt_disable();
49766 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49767 +
49768 + /* we wouldn't have to check d_inode if it weren't for
49769 + NFS silly-renaming
49770 + */
49771 +
49772 + write_lock(&gr_inode_lock);
49773 + if (unlikely(replace && new_dentry->d_inode)) {
49774 + ino_t newinode = new_dentry->d_inode->i_ino;
49775 + dev_t newdev = __get_dev(new_dentry);
49776 + inodev = lookup_inodev_entry(newinode, newdev);
49777 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49778 + do_handle_delete(inodev, newinode, newdev);
49779 + }
49780 +
49781 + inodev = lookup_inodev_entry(oldinode, olddev);
49782 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49783 + do_handle_delete(inodev, oldinode, olddev);
49784 +
49785 + if (unlikely((unsigned long)matchn))
49786 + do_handle_create(matchn, old_dentry, mnt);
49787 +
49788 + write_unlock(&gr_inode_lock);
49789 + preempt_enable();
49790 +
49791 + return;
49792 +}
49793 +
49794 +static int
49795 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49796 + unsigned char **sum)
49797 +{
49798 + struct acl_role_label *r;
49799 + struct role_allowed_ip *ipp;
49800 + struct role_transition *trans;
49801 + unsigned int i;
49802 + int found = 0;
49803 + u32 curr_ip = current->signal->curr_ip;
49804 +
49805 + current->signal->saved_ip = curr_ip;
49806 +
49807 + /* check transition table */
49808 +
49809 + for (trans = current->role->transitions; trans; trans = trans->next) {
49810 + if (!strcmp(rolename, trans->rolename)) {
49811 + found = 1;
49812 + break;
49813 + }
49814 + }
49815 +
49816 + if (!found)
49817 + return 0;
49818 +
49819 + /* handle special roles that do not require authentication
49820 + and check ip */
49821 +
49822 + FOR_EACH_ROLE_START(r)
49823 + if (!strcmp(rolename, r->rolename) &&
49824 + (r->roletype & GR_ROLE_SPECIAL)) {
49825 + found = 0;
49826 + if (r->allowed_ips != NULL) {
49827 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49828 + if ((ntohl(curr_ip) & ipp->netmask) ==
49829 + (ntohl(ipp->addr) & ipp->netmask))
49830 + found = 1;
49831 + }
49832 + } else
49833 + found = 2;
49834 + if (!found)
49835 + return 0;
49836 +
49837 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49838 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49839 + *salt = NULL;
49840 + *sum = NULL;
49841 + return 1;
49842 + }
49843 + }
49844 + FOR_EACH_ROLE_END(r)
49845 +
49846 + for (i = 0; i < num_sprole_pws; i++) {
49847 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49848 + *salt = acl_special_roles[i]->salt;
49849 + *sum = acl_special_roles[i]->sum;
49850 + return 1;
49851 + }
49852 + }
49853 +
49854 + return 0;
49855 +}
49856 +
49857 +static void
49858 +assign_special_role(char *rolename)
49859 +{
49860 + struct acl_object_label *obj;
49861 + struct acl_role_label *r;
49862 + struct acl_role_label *assigned = NULL;
49863 + struct task_struct *tsk;
49864 + struct file *filp;
49865 +
49866 + FOR_EACH_ROLE_START(r)
49867 + if (!strcmp(rolename, r->rolename) &&
49868 + (r->roletype & GR_ROLE_SPECIAL)) {
49869 + assigned = r;
49870 + break;
49871 + }
49872 + FOR_EACH_ROLE_END(r)
49873 +
49874 + if (!assigned)
49875 + return;
49876 +
49877 + read_lock(&tasklist_lock);
49878 + read_lock(&grsec_exec_file_lock);
49879 +
49880 + tsk = current->real_parent;
49881 + if (tsk == NULL)
49882 + goto out_unlock;
49883 +
49884 + filp = tsk->exec_file;
49885 + if (filp == NULL)
49886 + goto out_unlock;
49887 +
49888 + tsk->is_writable = 0;
49889 +
49890 + tsk->acl_sp_role = 1;
49891 + tsk->acl_role_id = ++acl_sp_role_value;
49892 + tsk->role = assigned;
49893 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49894 +
49895 + /* ignore additional mmap checks for processes that are writable
49896 + by the default ACL */
49897 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49898 + if (unlikely(obj->mode & GR_WRITE))
49899 + tsk->is_writable = 1;
49900 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49901 + if (unlikely(obj->mode & GR_WRITE))
49902 + tsk->is_writable = 1;
49903 +
49904 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49905 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49906 +#endif
49907 +
49908 +out_unlock:
49909 + read_unlock(&grsec_exec_file_lock);
49910 + read_unlock(&tasklist_lock);
49911 + return;
49912 +}
49913 +
49914 +int gr_check_secure_terminal(struct task_struct *task)
49915 +{
49916 + struct task_struct *p, *p2, *p3;
49917 + struct files_struct *files;
49918 + struct fdtable *fdt;
49919 + struct file *our_file = NULL, *file;
49920 + int i;
49921 +
49922 + if (task->signal->tty == NULL)
49923 + return 1;
49924 +
49925 + files = get_files_struct(task);
49926 + if (files != NULL) {
49927 + rcu_read_lock();
49928 + fdt = files_fdtable(files);
49929 + for (i=0; i < fdt->max_fds; i++) {
49930 + file = fcheck_files(files, i);
49931 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49932 + get_file(file);
49933 + our_file = file;
49934 + }
49935 + }
49936 + rcu_read_unlock();
49937 + put_files_struct(files);
49938 + }
49939 +
49940 + if (our_file == NULL)
49941 + return 1;
49942 +
49943 + read_lock(&tasklist_lock);
49944 + do_each_thread(p2, p) {
49945 + files = get_files_struct(p);
49946 + if (files == NULL ||
49947 + (p->signal && p->signal->tty == task->signal->tty)) {
49948 + if (files != NULL)
49949 + put_files_struct(files);
49950 + continue;
49951 + }
49952 + rcu_read_lock();
49953 + fdt = files_fdtable(files);
49954 + for (i=0; i < fdt->max_fds; i++) {
49955 + file = fcheck_files(files, i);
49956 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49957 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49958 + p3 = task;
49959 + while (p3->pid > 0) {
49960 + if (p3 == p)
49961 + break;
49962 + p3 = p3->real_parent;
49963 + }
49964 + if (p3 == p)
49965 + break;
49966 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49967 + gr_handle_alertkill(p);
49968 + rcu_read_unlock();
49969 + put_files_struct(files);
49970 + read_unlock(&tasklist_lock);
49971 + fput(our_file);
49972 + return 0;
49973 + }
49974 + }
49975 + rcu_read_unlock();
49976 + put_files_struct(files);
49977 + } while_each_thread(p2, p);
49978 + read_unlock(&tasklist_lock);
49979 +
49980 + fput(our_file);
49981 + return 1;
49982 +}
49983 +
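+/* write_grsec_handler(): write() handler for the grsecurity control device
+ * used by the gradm userland tool.  After size/version validation of the
+ * copied-in gr_arg it dispatches on the requested mode: status query,
+ * enable, reload, shutdown, segvmod (clearing crash records) and special
+ * role (un)authentication, all subject to password checks and the
+ * authentication lockout counters.
+ */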
49984 +ssize_t
49985 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49986 +{
49987 + struct gr_arg_wrapper uwrap;
49988 + unsigned char *sprole_salt = NULL;
49989 + unsigned char *sprole_sum = NULL;
49990 + int error = sizeof (struct gr_arg_wrapper);
49991 + int error2 = 0;
49992 +
49993 + mutex_lock(&gr_dev_mutex);
49994 +
49995 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49996 + error = -EPERM;
49997 + goto out;
49998 + }
49999 +
50000 + if (count != sizeof (struct gr_arg_wrapper)) {
50001 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50002 + error = -EINVAL;
50003 + goto out;
50004 + }
50005 +
50006 +
50007 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50008 + gr_auth_expires = 0;
50009 + gr_auth_attempts = 0;
50010 + }
50011 +
50012 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50013 + error = -EFAULT;
50014 + goto out;
50015 + }
50016 +
50017 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50018 + error = -EINVAL;
50019 + goto out;
50020 + }
50021 +
50022 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50023 + error = -EFAULT;
50024 + goto out;
50025 + }
50026 +
50027 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50028 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50029 + time_after(gr_auth_expires, get_seconds())) {
50030 + error = -EBUSY;
50031 + goto out;
50032 + }
50033 +
50034 +	/* if a non-root user is trying to do anything other than use a special role,
50035 +	   do not attempt authentication and do not count the attempt towards
50036 +	   authentication lockout
50037 +	 */
50038 +
50039 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50040 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50041 + current_uid()) {
50042 + error = -EPERM;
50043 + goto out;
50044 + }
50045 +
50046 + /* ensure pw and special role name are null terminated */
50047 +
50048 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50049 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50050 +
50051 +	/* Okay.
50052 +	 * We have enough of the argument structure (we have yet to
50053 +	 * copy_from_user the tables themselves).  Copy the tables
50054 +	 * only if we need them, i.e. for loading operations. */
50055 +
50056 + switch (gr_usermode->mode) {
50057 + case GR_STATUS:
50058 + if (gr_status & GR_READY) {
50059 + error = 1;
50060 + if (!gr_check_secure_terminal(current))
50061 + error = 3;
50062 + } else
50063 + error = 2;
50064 + goto out;
50065 + case GR_SHUTDOWN:
50066 + if ((gr_status & GR_READY)
50067 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50068 + pax_open_kernel();
50069 + gr_status &= ~GR_READY;
50070 + pax_close_kernel();
50071 +
50072 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50073 + free_variables();
50074 + memset(gr_usermode, 0, sizeof (struct gr_arg));
50075 + memset(gr_system_salt, 0, GR_SALT_LEN);
50076 + memset(gr_system_sum, 0, GR_SHA_LEN);
50077 + } else if (gr_status & GR_READY) {
50078 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50079 + error = -EPERM;
50080 + } else {
50081 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50082 + error = -EAGAIN;
50083 + }
50084 + break;
50085 + case GR_ENABLE:
50086 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50087 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50088 + else {
50089 + if (gr_status & GR_READY)
50090 + error = -EAGAIN;
50091 + else
50092 + error = error2;
50093 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50094 + }
50095 + break;
50096 + case GR_RELOAD:
50097 + if (!(gr_status & GR_READY)) {
50098 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50099 + error = -EAGAIN;
50100 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50101 + lock_kernel();
50102 +
50103 + pax_open_kernel();
50104 + gr_status &= ~GR_READY;
50105 + pax_close_kernel();
50106 +
50107 + free_variables();
50108 + if (!(error2 = gracl_init(gr_usermode))) {
50109 + unlock_kernel();
50110 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50111 + } else {
50112 + unlock_kernel();
50113 + error = error2;
50114 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50115 + }
50116 + } else {
50117 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50118 + error = -EPERM;
50119 + }
50120 + break;
50121 + case GR_SEGVMOD:
50122 + if (unlikely(!(gr_status & GR_READY))) {
50123 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50124 + error = -EAGAIN;
50125 + break;
50126 + }
50127 +
50128 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50129 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50130 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50131 + struct acl_subject_label *segvacl;
50132 + segvacl =
50133 + lookup_acl_subj_label(gr_usermode->segv_inode,
50134 + gr_usermode->segv_device,
50135 + current->role);
50136 + if (segvacl) {
50137 + segvacl->crashes = 0;
50138 + segvacl->expires = 0;
50139 + }
50140 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50141 + gr_remove_uid(gr_usermode->segv_uid);
50142 + }
50143 + } else {
50144 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50145 + error = -EPERM;
50146 + }
50147 + break;
50148 + case GR_SPROLE:
50149 + case GR_SPROLEPAM:
50150 + if (unlikely(!(gr_status & GR_READY))) {
50151 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50152 + error = -EAGAIN;
50153 + break;
50154 + }
50155 +
50156 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50157 + current->role->expires = 0;
50158 + current->role->auth_attempts = 0;
50159 + }
50160 +
50161 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50162 + time_after(current->role->expires, get_seconds())) {
50163 + error = -EBUSY;
50164 + goto out;
50165 + }
50166 +
50167 + if (lookup_special_role_auth
50168 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50169 + && ((!sprole_salt && !sprole_sum)
50170 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50171 + char *p = "";
50172 + assign_special_role(gr_usermode->sp_role);
50173 + read_lock(&tasklist_lock);
50174 + if (current->real_parent)
50175 + p = current->real_parent->role->rolename;
50176 + read_unlock(&tasklist_lock);
50177 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50178 + p, acl_sp_role_value);
50179 + } else {
50180 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50181 + error = -EPERM;
50182 + if(!(current->role->auth_attempts++))
50183 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50184 +
50185 + goto out;
50186 + }
50187 + break;
50188 + case GR_UNSPROLE:
50189 + if (unlikely(!(gr_status & GR_READY))) {
50190 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50191 + error = -EAGAIN;
50192 + break;
50193 + }
50194 +
50195 + if (current->role->roletype & GR_ROLE_SPECIAL) {
50196 + char *p = "";
50197 + int i = 0;
50198 +
50199 + read_lock(&tasklist_lock);
50200 + if (current->real_parent) {
50201 + p = current->real_parent->role->rolename;
50202 + i = current->real_parent->acl_role_id;
50203 + }
50204 + read_unlock(&tasklist_lock);
50205 +
50206 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50207 + gr_set_acls(1);
50208 + } else {
50209 + error = -EPERM;
50210 + goto out;
50211 + }
50212 + break;
50213 + default:
50214 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50215 + error = -EINVAL;
50216 + break;
50217 + }
50218 +
50219 + if (error != -EPERM)
50220 + goto out;
50221 +
50222 + if(!(gr_auth_attempts++))
50223 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50224 +
50225 + out:
50226 + mutex_unlock(&gr_dev_mutex);
50227 + return error;
50228 +}
50229 +
50230 +/* must be called with
50231 + rcu_read_lock();
50232 + read_lock(&tasklist_lock);
50233 + read_lock(&grsec_exec_file_lock);
50234 +*/
50235 +int gr_apply_subject_to_task(struct task_struct *task)
50236 +{
50237 + struct acl_object_label *obj;
50238 + char *tmpname;
50239 + struct acl_subject_label *tmpsubj;
50240 + struct file *filp;
50241 + struct name_entry *nmatch;
50242 +
50243 + filp = task->exec_file;
50244 + if (filp == NULL)
50245 + return 0;
50246 +
50247 +	/* the following applies the correct subject to binaries
50248 +	   that were already running when the RBAC system was
50249 +	   enabled and that have been replaced or deleted since
50250 +	   their execution
50251 +	   -----
50252 +	   when the RBAC system starts, the inode/dev taken
50253 +	   from exec_file may be one the RBAC system
50254 +	   is unaware of; it only knows the inode/dev
50255 +	   of the file currently present on disk, or the
50256 +	   absence of it.
50257 +	*/
50258 + preempt_disable();
50259 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50260 +
50261 + nmatch = lookup_name_entry(tmpname);
50262 + preempt_enable();
50263 + tmpsubj = NULL;
50264 + if (nmatch) {
50265 + if (nmatch->deleted)
50266 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50267 + else
50268 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50269 + if (tmpsubj != NULL)
50270 + task->acl = tmpsubj;
50271 + }
50272 + if (tmpsubj == NULL)
50273 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50274 + task->role);
50275 + if (task->acl) {
50276 + task->is_writable = 0;
50277 + /* ignore additional mmap checks for processes that are writable
50278 + by the default ACL */
50279 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50280 + if (unlikely(obj->mode & GR_WRITE))
50281 + task->is_writable = 1;
50282 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50283 + if (unlikely(obj->mode & GR_WRITE))
50284 + task->is_writable = 1;
50285 +
50286 + gr_set_proc_res(task);
50287 +
50288 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50289 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50290 +#endif
50291 + } else {
50292 + return 1;
50293 + }
50294 +
50295 + return 0;
50296 +}
50297 +
50298 +int
50299 +gr_set_acls(const int type)
50300 +{
50301 + struct task_struct *task, *task2;
50302 + struct acl_role_label *role = current->role;
50303 + __u16 acl_role_id = current->acl_role_id;
50304 + const struct cred *cred;
50305 + int ret;
50306 +
50307 + rcu_read_lock();
50308 + read_lock(&tasklist_lock);
50309 + read_lock(&grsec_exec_file_lock);
50310 + do_each_thread(task2, task) {
50311 + /* check to see if we're called from the exit handler,
50312 + if so, only replace ACLs that have inherited the admin
50313 + ACL */
50314 +
50315 + if (type && (task->role != role ||
50316 + task->acl_role_id != acl_role_id))
50317 + continue;
50318 +
50319 + task->acl_role_id = 0;
50320 + task->acl_sp_role = 0;
50321 +
50322 + if (task->exec_file) {
50323 + cred = __task_cred(task);
50324 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50325 +
50326 + ret = gr_apply_subject_to_task(task);
50327 + if (ret) {
50328 + read_unlock(&grsec_exec_file_lock);
50329 + read_unlock(&tasklist_lock);
50330 + rcu_read_unlock();
50331 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50332 + return ret;
50333 + }
50334 + } else {
50335 + // it's a kernel process
50336 + task->role = kernel_role;
50337 + task->acl = kernel_role->root_label;
50338 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50339 + task->acl->mode &= ~GR_PROCFIND;
50340 +#endif
50341 + }
50342 + } while_each_thread(task2, task);
50343 + read_unlock(&grsec_exec_file_lock);
50344 + read_unlock(&tasklist_lock);
50345 + rcu_read_unlock();
50346 +
50347 + return 0;
50348 +}
50349 +
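+/* gr_learn_resource(): called from the kernel's resource-limit checks with
+ * the amount a task wants.  The request may be logged via gr_log_resource()
+ * (CONFIG_GRKERNSEC_RESLOG); for subjects in learn mode that cover the
+ * resource, once the wanted amount reaches the current soft limit the stored
+ * soft limit (and the hard limit, if surpassed) is raised to the wanted
+ * value plus a per-resource bump and a learning record is emitted.
+ */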
50350 +void
50351 +gr_learn_resource(const struct task_struct *task,
50352 + const int res, const unsigned long wanted, const int gt)
50353 +{
50354 + struct acl_subject_label *acl;
50355 + const struct cred *cred;
50356 +
50357 + if (unlikely((gr_status & GR_READY) &&
50358 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50359 + goto skip_reslog;
50360 +
50361 +#ifdef CONFIG_GRKERNSEC_RESLOG
50362 + gr_log_resource(task, res, wanted, gt);
50363 +#endif
50364 + skip_reslog:
50365 +
50366 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50367 + return;
50368 +
50369 + acl = task->acl;
50370 +
50371 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50372 + !(acl->resmask & (1 << (unsigned short) res))))
50373 + return;
50374 +
50375 + if (wanted >= acl->res[res].rlim_cur) {
50376 + unsigned long res_add;
50377 +
50378 + res_add = wanted;
50379 + switch (res) {
50380 + case RLIMIT_CPU:
50381 + res_add += GR_RLIM_CPU_BUMP;
50382 + break;
50383 + case RLIMIT_FSIZE:
50384 + res_add += GR_RLIM_FSIZE_BUMP;
50385 + break;
50386 + case RLIMIT_DATA:
50387 + res_add += GR_RLIM_DATA_BUMP;
50388 + break;
50389 + case RLIMIT_STACK:
50390 + res_add += GR_RLIM_STACK_BUMP;
50391 + break;
50392 + case RLIMIT_CORE:
50393 + res_add += GR_RLIM_CORE_BUMP;
50394 + break;
50395 + case RLIMIT_RSS:
50396 + res_add += GR_RLIM_RSS_BUMP;
50397 + break;
50398 + case RLIMIT_NPROC:
50399 + res_add += GR_RLIM_NPROC_BUMP;
50400 + break;
50401 + case RLIMIT_NOFILE:
50402 + res_add += GR_RLIM_NOFILE_BUMP;
50403 + break;
50404 + case RLIMIT_MEMLOCK:
50405 + res_add += GR_RLIM_MEMLOCK_BUMP;
50406 + break;
50407 + case RLIMIT_AS:
50408 + res_add += GR_RLIM_AS_BUMP;
50409 + break;
50410 + case RLIMIT_LOCKS:
50411 + res_add += GR_RLIM_LOCKS_BUMP;
50412 + break;
50413 + case RLIMIT_SIGPENDING:
50414 + res_add += GR_RLIM_SIGPENDING_BUMP;
50415 + break;
50416 + case RLIMIT_MSGQUEUE:
50417 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50418 + break;
50419 + case RLIMIT_NICE:
50420 + res_add += GR_RLIM_NICE_BUMP;
50421 + break;
50422 + case RLIMIT_RTPRIO:
50423 + res_add += GR_RLIM_RTPRIO_BUMP;
50424 + break;
50425 + case RLIMIT_RTTIME:
50426 + res_add += GR_RLIM_RTTIME_BUMP;
50427 + break;
50428 + }
50429 +
50430 + acl->res[res].rlim_cur = res_add;
50431 +
50432 + if (wanted > acl->res[res].rlim_max)
50433 + acl->res[res].rlim_max = res_add;
50434 +
50435 + /* only log the subject filename, since resource logging is supported for
50436 + single-subject learning only */
50437 + rcu_read_lock();
50438 + cred = __task_cred(task);
50439 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50440 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50441 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50442 + "", (unsigned long) res, &task->signal->saved_ip);
50443 + rcu_read_unlock();
50444 + }
50445 +
50446 + return;
50447 +}
50448 +
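+/* pax_set_initial_flags(): with CONFIG_PAX_HAVE_ACL_FLAGS, translate the
+ * per-subject GR_PAX_{DISABLE,ENABLE}_* policy flags into the task's
+ * MF_PAX_* flags at exec time, letting the RBAC policy override the PaX
+ * defaults for the binary being executed.
+ */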
50449 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50450 +void
50451 +pax_set_initial_flags(struct linux_binprm *bprm)
50452 +{
50453 + struct task_struct *task = current;
50454 + struct acl_subject_label *proc;
50455 + unsigned long flags;
50456 +
50457 + if (unlikely(!(gr_status & GR_READY)))
50458 + return;
50459 +
50460 + flags = pax_get_flags(task);
50461 +
50462 + proc = task->acl;
50463 +
50464 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50465 + flags &= ~MF_PAX_PAGEEXEC;
50466 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50467 + flags &= ~MF_PAX_SEGMEXEC;
50468 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50469 + flags &= ~MF_PAX_RANDMMAP;
50470 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50471 + flags &= ~MF_PAX_EMUTRAMP;
50472 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50473 + flags &= ~MF_PAX_MPROTECT;
50474 +
50475 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50476 + flags |= MF_PAX_PAGEEXEC;
50477 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50478 + flags |= MF_PAX_SEGMEXEC;
50479 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50480 + flags |= MF_PAX_RANDMMAP;
50481 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50482 + flags |= MF_PAX_EMUTRAMP;
50483 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50484 + flags |= MF_PAX_MPROTECT;
50485 +
50486 + pax_set_flags(task, flags);
50487 +
50488 + return;
50489 +}
50490 +#endif
50491 +
50492 +#ifdef CONFIG_SYSCTL
50493 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50494 + system to save 35kb of memory */
50495 +
50496 +/* we modify the passed in filename, but adjust it back before returning */
50497 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50498 +{
50499 + struct name_entry *nmatch;
50500 + char *p, *lastp = NULL;
50501 + struct acl_object_label *obj = NULL, *tmp;
50502 + struct acl_subject_label *tmpsubj;
50503 + char c = '\0';
50504 +
50505 + read_lock(&gr_inode_lock);
50506 +
50507 + p = name + len - 1;
50508 + do {
50509 + nmatch = lookup_name_entry(name);
50510 + if (lastp != NULL)
50511 + *lastp = c;
50512 +
50513 + if (nmatch == NULL)
50514 + goto next_component;
50515 + tmpsubj = current->acl;
50516 + do {
50517 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50518 + if (obj != NULL) {
50519 + tmp = obj->globbed;
50520 + while (tmp) {
50521 + if (!glob_match(tmp->filename, name)) {
50522 + obj = tmp;
50523 + goto found_obj;
50524 + }
50525 + tmp = tmp->next;
50526 + }
50527 + goto found_obj;
50528 + }
50529 + } while ((tmpsubj = tmpsubj->parent_subject));
50530 +next_component:
50531 + /* end case */
50532 + if (p == name)
50533 + break;
50534 +
50535 + while (*p != '/')
50536 + p--;
50537 + if (p == name)
50538 + lastp = p + 1;
50539 + else {
50540 + lastp = p;
50541 + p--;
50542 + }
50543 + c = *lastp;
50544 + *lastp = '\0';
50545 + } while (1);
50546 +found_obj:
50547 + read_unlock(&gr_inode_lock);
50548 + /* obj returned will always be non-null */
50549 + return obj;
50550 +}
50551 +
50552 +/* returns 0 when allowing, non-zero on error
50553 + op of 0 is used for readdir, so we don't log the names of hidden files
50554 +*/
50555 +__u32
50556 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50557 +{
50558 + ctl_table *tmp;
50559 + const char *proc_sys = "/proc/sys";
50560 + char *path;
50561 + struct acl_object_label *obj;
50562 + unsigned short len = 0, pos = 0, depth = 0, i;
50563 + __u32 err = 0;
50564 + __u32 mode = 0;
50565 +
50566 + if (unlikely(!(gr_status & GR_READY)))
50567 + return 0;
50568 +
50569 +	/* for now, ignore operations on non-sysctl entries if it's not a
50570 +	   readdir */
50571 + if (table->child != NULL && op != 0)
50572 + return 0;
50573 +
50574 + mode |= GR_FIND;
50575 + /* it's only a read if it's an entry, read on dirs is for readdir */
50576 + if (op & MAY_READ)
50577 + mode |= GR_READ;
50578 + if (op & MAY_WRITE)
50579 + mode |= GR_WRITE;
50580 +
50581 + preempt_disable();
50582 +
50583 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50584 +
50585 + /* it's only a read/write if it's an actual entry, not a dir
50586 + (which are opened for readdir)
50587 + */
50588 +
50589 + /* convert the requested sysctl entry into a pathname */
50590 +
50591 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50592 + len += strlen(tmp->procname);
50593 + len++;
50594 + depth++;
50595 + }
50596 +
50597 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50598 + /* deny */
50599 + goto out;
50600 + }
50601 +
50602 + memset(path, 0, PAGE_SIZE);
50603 +
50604 + memcpy(path, proc_sys, strlen(proc_sys));
50605 +
50606 + pos += strlen(proc_sys);
50607 +
50608 + for (; depth > 0; depth--) {
50609 + path[pos] = '/';
50610 + pos++;
50611 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50612 + if (depth == i) {
50613 + memcpy(path + pos, tmp->procname,
50614 + strlen(tmp->procname));
50615 + pos += strlen(tmp->procname);
50616 + }
50617 + i++;
50618 + }
50619 + }
50620 +
50621 + obj = gr_lookup_by_name(path, pos);
50622 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50623 +
50624 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50625 + ((err & mode) != mode))) {
50626 + __u32 new_mode = mode;
50627 +
50628 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50629 +
50630 + err = 0;
50631 + gr_log_learn_sysctl(path, new_mode);
50632 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50633 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50634 + err = -ENOENT;
50635 + } else if (!(err & GR_FIND)) {
50636 + err = -ENOENT;
50637 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50638 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50639 + path, (mode & GR_READ) ? " reading" : "",
50640 + (mode & GR_WRITE) ? " writing" : "");
50641 + err = -EACCES;
50642 + } else if ((err & mode) != mode) {
50643 + err = -EACCES;
50644 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50645 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50646 + path, (mode & GR_READ) ? " reading" : "",
50647 + (mode & GR_WRITE) ? " writing" : "");
50648 + err = 0;
50649 + } else
50650 + err = 0;
50651 +
50652 + out:
50653 + preempt_enable();
50654 +
50655 + return err;
50656 +}
50657 +#endif
50658 +
50659 +int
50660 +gr_handle_proc_ptrace(struct task_struct *task)
50661 +{
50662 + struct file *filp;
50663 + struct task_struct *tmp = task;
50664 + struct task_struct *curtemp = current;
50665 + __u32 retmode;
50666 +
50667 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50668 + if (unlikely(!(gr_status & GR_READY)))
50669 + return 0;
50670 +#endif
50671 +
50672 + read_lock(&tasklist_lock);
50673 + read_lock(&grsec_exec_file_lock);
50674 + filp = task->exec_file;
50675 +
50676 + while (tmp->pid > 0) {
50677 + if (tmp == curtemp)
50678 + break;
50679 + tmp = tmp->real_parent;
50680 + }
50681 +
50682 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50683 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50684 + read_unlock(&grsec_exec_file_lock);
50685 + read_unlock(&tasklist_lock);
50686 + return 1;
50687 + }
50688 +
50689 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50690 + if (!(gr_status & GR_READY)) {
50691 + read_unlock(&grsec_exec_file_lock);
50692 + read_unlock(&tasklist_lock);
50693 + return 0;
50694 + }
50695 +#endif
50696 +
50697 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50698 + read_unlock(&grsec_exec_file_lock);
50699 + read_unlock(&tasklist_lock);
50700 +
50701 + if (retmode & GR_NOPTRACE)
50702 + return 1;
50703 +
50704 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50705 + && (current->acl != task->acl || (current->acl != current->role->root_label
50706 + && current->pid != task->pid)))
50707 + return 1;
50708 +
50709 + return 0;
50710 +}
50711 +
50712 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50713 +{
50714 + if (unlikely(!(gr_status & GR_READY)))
50715 + return;
50716 +
50717 + if (!(current->role->roletype & GR_ROLE_GOD))
50718 + return;
50719 +
50720 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50721 + p->role->rolename, gr_task_roletype_to_char(p),
50722 + p->acl->filename);
50723 +}
50724 +
50725 +int
50726 +gr_handle_ptrace(struct task_struct *task, const long request)
50727 +{
50728 + struct task_struct *tmp = task;
50729 + struct task_struct *curtemp = current;
50730 + __u32 retmode;
50731 +
50732 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50733 + if (unlikely(!(gr_status & GR_READY)))
50734 + return 0;
50735 +#endif
50736 +
50737 + read_lock(&tasklist_lock);
50738 + while (tmp->pid > 0) {
50739 + if (tmp == curtemp)
50740 + break;
50741 + tmp = tmp->real_parent;
50742 + }
50743 +
50744 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50745 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50746 + read_unlock(&tasklist_lock);
50747 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50748 + return 1;
50749 + }
50750 + read_unlock(&tasklist_lock);
50751 +
50752 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50753 + if (!(gr_status & GR_READY))
50754 + return 0;
50755 +#endif
50756 +
50757 + read_lock(&grsec_exec_file_lock);
50758 + if (unlikely(!task->exec_file)) {
50759 + read_unlock(&grsec_exec_file_lock);
50760 + return 0;
50761 + }
50762 +
50763 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50764 + read_unlock(&grsec_exec_file_lock);
50765 +
50766 + if (retmode & GR_NOPTRACE) {
50767 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50768 + return 1;
50769 + }
50770 +
50771 + if (retmode & GR_PTRACERD) {
50772 + switch (request) {
50773 + case PTRACE_POKETEXT:
50774 + case PTRACE_POKEDATA:
50775 + case PTRACE_POKEUSR:
50776 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50777 + case PTRACE_SETREGS:
50778 + case PTRACE_SETFPREGS:
50779 +#endif
50780 +#ifdef CONFIG_X86
50781 + case PTRACE_SETFPXREGS:
50782 +#endif
50783 +#ifdef CONFIG_ALTIVEC
50784 + case PTRACE_SETVRREGS:
50785 +#endif
50786 + return 1;
50787 + default:
50788 + return 0;
50789 + }
50790 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
50791 + !(current->role->roletype & GR_ROLE_GOD) &&
50792 + (current->acl != task->acl)) {
50793 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50794 + return 1;
50795 + }
50796 +
50797 + return 0;
50798 +}
50799 +
50800 +static int is_writable_mmap(const struct file *filp)
50801 +{
50802 + struct task_struct *task = current;
50803 + struct acl_object_label *obj, *obj2;
50804 +
50805 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50806 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50807 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50808 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50809 + task->role->root_label);
50810 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50811 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50812 + return 1;
50813 + }
50814 + }
50815 + return 0;
50816 +}
50817 +
50818 +int
50819 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50820 +{
50821 + __u32 mode;
50822 +
50823 + if (unlikely(!file || !(prot & PROT_EXEC)))
50824 + return 1;
50825 +
50826 + if (is_writable_mmap(file))
50827 + return 0;
50828 +
50829 + mode =
50830 + gr_search_file(file->f_path.dentry,
50831 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50832 + file->f_path.mnt);
50833 +
50834 + if (!gr_tpe_allow(file))
50835 + return 0;
50836 +
50837 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50838 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50839 + return 0;
50840 + } else if (unlikely(!(mode & GR_EXEC))) {
50841 + return 0;
50842 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50843 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50844 + return 1;
50845 + }
50846 +
50847 + return 1;
50848 +}
50849 +
50850 +int
50851 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50852 +{
50853 + __u32 mode;
50854 +
50855 + if (unlikely(!file || !(prot & PROT_EXEC)))
50856 + return 1;
50857 +
50858 + if (is_writable_mmap(file))
50859 + return 0;
50860 +
50861 + mode =
50862 + gr_search_file(file->f_path.dentry,
50863 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50864 + file->f_path.mnt);
50865 +
50866 + if (!gr_tpe_allow(file))
50867 + return 0;
50868 +
50869 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50870 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50871 + return 0;
50872 + } else if (unlikely(!(mode & GR_EXEC))) {
50873 + return 0;
50874 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50875 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50876 + return 1;
50877 + }
50878 +
50879 + return 1;
50880 +}
50881 +
50882 +void
50883 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50884 +{
50885 + unsigned long runtime;
50886 + unsigned long cputime;
50887 + unsigned int wday, cday;
50888 + __u8 whr, chr;
50889 + __u8 wmin, cmin;
50890 + __u8 wsec, csec;
50891 + struct timespec timeval;
50892 +
50893 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50894 + !(task->acl->mode & GR_PROCACCT)))
50895 + return;
50896 +
50897 + do_posix_clock_monotonic_gettime(&timeval);
50898 + runtime = timeval.tv_sec - task->start_time.tv_sec;
50899 + wday = runtime / (3600 * 24);
50900 + runtime -= wday * (3600 * 24);
50901 + whr = runtime / 3600;
50902 + runtime -= whr * 3600;
50903 + wmin = runtime / 60;
50904 + runtime -= wmin * 60;
50905 + wsec = runtime;
50906 +
50907 + cputime = (task->utime + task->stime) / HZ;
50908 + cday = cputime / (3600 * 24);
50909 + cputime -= cday * (3600 * 24);
50910 + chr = cputime / 3600;
50911 + cputime -= chr * 3600;
50912 + cmin = cputime / 60;
50913 + cputime -= cmin * 60;
50914 + csec = cputime;
50915 +
50916 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50917 +
50918 + return;
50919 +}
50920 +
50921 +void gr_set_kernel_label(struct task_struct *task)
50922 +{
50923 + if (gr_status & GR_READY) {
50924 + task->role = kernel_role;
50925 + task->acl = kernel_role->root_label;
50926 + }
50927 + return;
50928 +}
50929 +
50930 +#ifdef CONFIG_TASKSTATS
50931 +int gr_is_taskstats_denied(int pid)
50932 +{
50933 + struct task_struct *task;
50934 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50935 + const struct cred *cred;
50936 +#endif
50937 + int ret = 0;
50938 +
50939 + /* restrict taskstats viewing to un-chrooted root users
50940 + who have the 'view' subject flag if the RBAC system is enabled
50941 + */
50942 +
50943 + rcu_read_lock();
50944 + read_lock(&tasklist_lock);
50945 + task = find_task_by_vpid(pid);
50946 + if (task) {
50947 +#ifdef CONFIG_GRKERNSEC_CHROOT
50948 + if (proc_is_chrooted(task))
50949 + ret = -EACCES;
50950 +#endif
50951 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50952 + cred = __task_cred(task);
50953 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50954 + if (cred->uid != 0)
50955 + ret = -EACCES;
50956 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50957 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50958 + ret = -EACCES;
50959 +#endif
50960 +#endif
50961 + if (gr_status & GR_READY) {
50962 + if (!(task->acl->mode & GR_VIEW))
50963 + ret = -EACCES;
50964 + }
50965 + } else
50966 + ret = -ENOENT;
50967 +
50968 + read_unlock(&tasklist_lock);
50969 + rcu_read_unlock();
50970 +
50971 + return ret;
50972 +}
50973 +#endif
50974 +
50975 +/* AUXV entries are filled via a descendant of search_binary_handler
50976 + after we've already applied the subject for the target
50977 +*/
50978 +int gr_acl_enable_at_secure(void)
50979 +{
50980 + if (unlikely(!(gr_status & GR_READY)))
50981 + return 0;
50982 +
50983 + if (current->acl->mode & GR_ATSECURE)
50984 + return 1;
50985 +
50986 + return 0;
50987 +}
50988 +
50989 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50990 +{
50991 + struct task_struct *task = current;
50992 + struct dentry *dentry = file->f_path.dentry;
50993 + struct vfsmount *mnt = file->f_path.mnt;
50994 + struct acl_object_label *obj, *tmp;
50995 + struct acl_subject_label *subj;
50996 + unsigned int bufsize;
50997 + int is_not_root;
50998 + char *path;
50999 + dev_t dev = __get_dev(dentry);
51000 +
51001 + if (unlikely(!(gr_status & GR_READY)))
51002 + return 1;
51003 +
51004 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51005 + return 1;
51006 +
51007 + /* ignore Eric Biederman */
51008 + if (IS_PRIVATE(dentry->d_inode))
51009 + return 1;
51010 +
51011 + subj = task->acl;
51012 + do {
51013 + obj = lookup_acl_obj_label(ino, dev, subj);
51014 + if (obj != NULL)
51015 + return (obj->mode & GR_FIND) ? 1 : 0;
51016 + } while ((subj = subj->parent_subject));
51017 +
51018 + /* this is purely an optimization since we're looking for an object
51019 + for the directory we're doing a readdir on
51020 + if it's possible for any globbed object to match the entry we're
51021 + filling into the directory, then the object we find here will be
51022 + an anchor point with attached globbed objects
51023 + */
51024 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51025 + if (obj->globbed == NULL)
51026 + return (obj->mode & GR_FIND) ? 1 : 0;
51027 +
51028 + is_not_root = ((obj->filename[0] == '/') &&
51029 + (obj->filename[1] == '\0')) ? 0 : 1;
51030 + bufsize = PAGE_SIZE - namelen - is_not_root;
51031 +
51032 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
51033 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51034 + return 1;
51035 +
51036 + preempt_disable();
51037 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51038 + bufsize);
51039 +
51040 + bufsize = strlen(path);
51041 +
51042 + /* if base is "/", don't append an additional slash */
51043 + if (is_not_root)
51044 + *(path + bufsize) = '/';
51045 + memcpy(path + bufsize + is_not_root, name, namelen);
51046 + *(path + bufsize + namelen + is_not_root) = '\0';
51047 +
51048 + tmp = obj->globbed;
51049 + while (tmp) {
51050 + if (!glob_match(tmp->filename, path)) {
51051 + preempt_enable();
51052 + return (tmp->mode & GR_FIND) ? 1 : 0;
51053 + }
51054 + tmp = tmp->next;
51055 + }
51056 + preempt_enable();
51057 + return (obj->mode & GR_FIND) ? 1 : 0;
51058 +}
51059 +
51060 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51061 +EXPORT_SYMBOL(gr_acl_is_enabled);
51062 +#endif
51063 +EXPORT_SYMBOL(gr_learn_resource);
51064 +EXPORT_SYMBOL(gr_set_kernel_label);
51065 +#ifdef CONFIG_SECURITY
51066 +EXPORT_SYMBOL(gr_check_user_change);
51067 +EXPORT_SYMBOL(gr_check_group_change);
51068 +#endif
51069 +
51070 diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
51071 --- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51072 +++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51073 @@ -0,0 +1,138 @@
51074 +#include <linux/kernel.h>
51075 +#include <linux/module.h>
51076 +#include <linux/sched.h>
51077 +#include <linux/gracl.h>
51078 +#include <linux/grsecurity.h>
51079 +#include <linux/grinternal.h>
51080 +
51081 +static const char *captab_log[] = {
51082 + "CAP_CHOWN",
51083 + "CAP_DAC_OVERRIDE",
51084 + "CAP_DAC_READ_SEARCH",
51085 + "CAP_FOWNER",
51086 + "CAP_FSETID",
51087 + "CAP_KILL",
51088 + "CAP_SETGID",
51089 + "CAP_SETUID",
51090 + "CAP_SETPCAP",
51091 + "CAP_LINUX_IMMUTABLE",
51092 + "CAP_NET_BIND_SERVICE",
51093 + "CAP_NET_BROADCAST",
51094 + "CAP_NET_ADMIN",
51095 + "CAP_NET_RAW",
51096 + "CAP_IPC_LOCK",
51097 + "CAP_IPC_OWNER",
51098 + "CAP_SYS_MODULE",
51099 + "CAP_SYS_RAWIO",
51100 + "CAP_SYS_CHROOT",
51101 + "CAP_SYS_PTRACE",
51102 + "CAP_SYS_PACCT",
51103 + "CAP_SYS_ADMIN",
51104 + "CAP_SYS_BOOT",
51105 + "CAP_SYS_NICE",
51106 + "CAP_SYS_RESOURCE",
51107 + "CAP_SYS_TIME",
51108 + "CAP_SYS_TTY_CONFIG",
51109 + "CAP_MKNOD",
51110 + "CAP_LEASE",
51111 + "CAP_AUDIT_WRITE",
51112 + "CAP_AUDIT_CONTROL",
51113 + "CAP_SETFCAP",
51114 + "CAP_MAC_OVERRIDE",
51115 + "CAP_MAC_ADMIN"
51116 +};
51117 +
51118 +EXPORT_SYMBOL(gr_is_capable);
51119 +EXPORT_SYMBOL(gr_is_capable_nolog);
51120 +
51121 +int
51122 +gr_is_capable(const int cap)
51123 +{
51124 + struct task_struct *task = current;
51125 + const struct cred *cred = current_cred();
51126 + struct acl_subject_label *curracl;
51127 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51128 + kernel_cap_t cap_audit = __cap_empty_set;
51129 +
51130 + if (!gr_acl_is_enabled())
51131 + return 1;
51132 +
51133 + curracl = task->acl;
51134 +
51135 + cap_drop = curracl->cap_lower;
51136 + cap_mask = curracl->cap_mask;
51137 + cap_audit = curracl->cap_invert_audit;
51138 +
51139 + while ((curracl = curracl->parent_subject)) {
51140 + /* if the cap isn't specified in the current computed mask but is specified in the
51141 + current level subject, and is lowered in the current level subject, then add
51142 + it to the set of dropped capabilities
51143 + otherwise, add the current level subject's mask to the current computed mask
51144 + */
51145 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51146 + cap_raise(cap_mask, cap);
51147 + if (cap_raised(curracl->cap_lower, cap))
51148 + cap_raise(cap_drop, cap);
51149 + if (cap_raised(curracl->cap_invert_audit, cap))
51150 + cap_raise(cap_audit, cap);
51151 + }
51152 + }
51153 +
51154 + if (!cap_raised(cap_drop, cap)) {
51155 + if (cap_raised(cap_audit, cap))
51156 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51157 + return 1;
51158 + }
51159 +
51160 + curracl = task->acl;
51161 +
51162 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51163 + && cap_raised(cred->cap_effective, cap)) {
51164 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51165 + task->role->roletype, cred->uid,
51166 + cred->gid, task->exec_file ?
51167 + gr_to_filename(task->exec_file->f_path.dentry,
51168 + task->exec_file->f_path.mnt) : curracl->filename,
51169 + curracl->filename, 0UL,
51170 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51171 + return 1;
51172 + }
51173 +
51174 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51175 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51176 + return 0;
51177 +}
51178 +
51179 +int
51180 +gr_is_capable_nolog(const int cap)
51181 +{
51182 + struct acl_subject_label *curracl;
51183 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51184 +
51185 + if (!gr_acl_is_enabled())
51186 + return 1;
51187 +
51188 + curracl = current->acl;
51189 +
51190 + cap_drop = curracl->cap_lower;
51191 + cap_mask = curracl->cap_mask;
51192 +
51193 + while ((curracl = curracl->parent_subject)) {
51194 + /* if the cap isn't specified in the current computed mask but is specified in the
51195 + current level subject, and is lowered in the current level subject, then add
51196 + it to the set of dropped capabilities
51197 + otherwise, add the current level subject's mask to the current computed mask
51198 + */
51199 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51200 + cap_raise(cap_mask, cap);
51201 + if (cap_raised(curracl->cap_lower, cap))
51202 + cap_raise(cap_drop, cap);
51203 + }
51204 + }
51205 +
51206 + if (!cap_raised(cap_drop, cap))
51207 + return 1;
51208 +
51209 + return 0;
51210 +}
51211 +
51212 diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
51213 --- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51214 +++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51215 @@ -0,0 +1,431 @@
51216 +#include <linux/kernel.h>
51217 +#include <linux/sched.h>
51218 +#include <linux/types.h>
51219 +#include <linux/fs.h>
51220 +#include <linux/file.h>
51221 +#include <linux/stat.h>
51222 +#include <linux/grsecurity.h>
51223 +#include <linux/grinternal.h>
51224 +#include <linux/gracl.h>
51225 +
51226 +__u32
51227 +gr_acl_handle_hidden_file(const struct dentry * dentry,
51228 + const struct vfsmount * mnt)
51229 +{
51230 + __u32 mode;
51231 +
51232 + if (unlikely(!dentry->d_inode))
51233 + return GR_FIND;
51234 +
51235 + mode =
51236 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51237 +
51238 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51239 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51240 + return mode;
51241 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51242 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51243 + return 0;
51244 + } else if (unlikely(!(mode & GR_FIND)))
51245 + return 0;
51246 +
51247 + return GR_FIND;
51248 +}
51249 +
51250 +__u32
51251 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51252 + const int fmode)
51253 +{
51254 + __u32 reqmode = GR_FIND;
51255 + __u32 mode;
51256 +
51257 + if (unlikely(!dentry->d_inode))
51258 + return reqmode;
51259 +
51260 + if (unlikely(fmode & O_APPEND))
51261 + reqmode |= GR_APPEND;
51262 + else if (unlikely(fmode & FMODE_WRITE))
51263 + reqmode |= GR_WRITE;
51264 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51265 + reqmode |= GR_READ;
51266 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51267 + reqmode &= ~GR_READ;
51268 + mode =
51269 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51270 + mnt);
51271 +
51272 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51273 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51274 + reqmode & GR_READ ? " reading" : "",
51275 + reqmode & GR_WRITE ? " writing" : reqmode &
51276 + GR_APPEND ? " appending" : "");
51277 + return reqmode;
51278 + } else
51279 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51280 + {
51281 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51282 + reqmode & GR_READ ? " reading" : "",
51283 + reqmode & GR_WRITE ? " writing" : reqmode &
51284 + GR_APPEND ? " appending" : "");
51285 + return 0;
51286 + } else if (unlikely((mode & reqmode) != reqmode))
51287 + return 0;
51288 +
51289 + return reqmode;
51290 +}
51291 +
51292 +__u32
51293 +gr_acl_handle_creat(const struct dentry * dentry,
51294 + const struct dentry * p_dentry,
51295 + const struct vfsmount * p_mnt, const int fmode,
51296 + const int imode)
51297 +{
51298 + __u32 reqmode = GR_WRITE | GR_CREATE;
51299 + __u32 mode;
51300 +
51301 + if (unlikely(fmode & O_APPEND))
51302 + reqmode |= GR_APPEND;
51303 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51304 + reqmode |= GR_READ;
51305 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51306 + reqmode |= GR_SETID;
51307 +
51308 + mode =
51309 + gr_check_create(dentry, p_dentry, p_mnt,
51310 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51311 +
51312 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51313 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51314 + reqmode & GR_READ ? " reading" : "",
51315 + reqmode & GR_WRITE ? " writing" : reqmode &
51316 + GR_APPEND ? " appending" : "");
51317 + return reqmode;
51318 + } else
51319 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51320 + {
51321 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51322 + reqmode & GR_READ ? " reading" : "",
51323 + reqmode & GR_WRITE ? " writing" : reqmode &
51324 + GR_APPEND ? " appending" : "");
51325 + return 0;
51326 + } else if (unlikely((mode & reqmode) != reqmode))
51327 + return 0;
51328 +
51329 + return reqmode;
51330 +}
51331 +
51332 +__u32
51333 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51334 + const int fmode)
51335 +{
51336 + __u32 mode, reqmode = GR_FIND;
51337 +
51338 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51339 + reqmode |= GR_EXEC;
51340 + if (fmode & S_IWOTH)
51341 + reqmode |= GR_WRITE;
51342 + if (fmode & S_IROTH)
51343 + reqmode |= GR_READ;
51344 +
51345 + mode =
51346 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51347 + mnt);
51348 +
51349 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51350 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51351 + reqmode & GR_READ ? " reading" : "",
51352 + reqmode & GR_WRITE ? " writing" : "",
51353 + reqmode & GR_EXEC ? " executing" : "");
51354 + return reqmode;
51355 + } else
51356 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51357 + {
51358 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51359 + reqmode & GR_READ ? " reading" : "",
51360 + reqmode & GR_WRITE ? " writing" : "",
51361 + reqmode & GR_EXEC ? " executing" : "");
51362 + return 0;
51363 + } else if (unlikely((mode & reqmode) != reqmode))
51364 + return 0;
51365 +
51366 + return reqmode;
51367 +}
51368 +
51369 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51370 +{
51371 + __u32 mode;
51372 +
51373 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51374 +
51375 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51376 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51377 + return mode;
51378 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51379 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51380 + return 0;
51381 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51382 + return 0;
51383 +
51384 + return (reqmode);
51385 +}
51386 +
51387 +__u32
51388 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51389 +{
51390 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51391 +}
51392 +
51393 +__u32
51394 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51395 +{
51396 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51397 +}
51398 +
51399 +__u32
51400 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51401 +{
51402 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51403 +}
51404 +
51405 +__u32
51406 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51407 +{
51408 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51409 +}
51410 +
51411 +__u32
51412 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51413 + mode_t mode)
51414 +{
51415 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51416 + return 1;
51417 +
51418 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51419 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51420 + GR_FCHMOD_ACL_MSG);
51421 + } else {
51422 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51423 + }
51424 +}
51425 +
51426 +__u32
51427 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51428 + mode_t mode)
51429 +{
51430 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51431 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51432 + GR_CHMOD_ACL_MSG);
51433 + } else {
51434 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51435 + }
51436 +}
51437 +
51438 +__u32
51439 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51440 +{
51441 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51442 +}
51443 +
51444 +__u32
51445 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51446 +{
51447 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51448 +}
51449 +
51450 +__u32
51451 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51452 +{
51453 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51454 +}
51455 +
51456 +__u32
51457 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51458 +{
51459 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51460 + GR_UNIXCONNECT_ACL_MSG);
51461 +}
51462 +
51463 +/* hardlinks require at minimum create permission,
51464 + any additional privilege required is based on the
51465 + privilege of the file being linked to
51466 +*/
51467 +__u32
51468 +gr_acl_handle_link(const struct dentry * new_dentry,
51469 + const struct dentry * parent_dentry,
51470 + const struct vfsmount * parent_mnt,
51471 + const struct dentry * old_dentry,
51472 + const struct vfsmount * old_mnt, const char *to)
51473 +{
51474 + __u32 mode;
51475 + __u32 needmode = GR_CREATE | GR_LINK;
51476 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51477 +
51478 + mode =
51479 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51480 + old_mnt);
51481 +
51482 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51483 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51484 + return mode;
51485 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51486 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51487 + return 0;
51488 + } else if (unlikely((mode & needmode) != needmode))
51489 + return 0;
51490 +
51491 + return 1;
51492 +}
51493 +
51494 +__u32
51495 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51496 + const struct dentry * parent_dentry,
51497 + const struct vfsmount * parent_mnt, const char *from)
51498 +{
51499 + __u32 needmode = GR_WRITE | GR_CREATE;
51500 + __u32 mode;
51501 +
51502 + mode =
51503 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51504 + GR_CREATE | GR_AUDIT_CREATE |
51505 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51506 +
51507 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51508 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51509 + return mode;
51510 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51511 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51512 + return 0;
51513 + } else if (unlikely((mode & needmode) != needmode))
51514 + return 0;
51515 +
51516 + return (GR_WRITE | GR_CREATE);
51517 +}
51518 +
51519 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51520 +{
51521 + __u32 mode;
51522 +
51523 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51524 +
51525 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51526 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51527 + return mode;
51528 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51529 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51530 + return 0;
51531 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51532 + return 0;
51533 +
51534 + return (reqmode);
51535 +}
51536 +
51537 +__u32
51538 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51539 + const struct dentry * parent_dentry,
51540 + const struct vfsmount * parent_mnt,
51541 + const int mode)
51542 +{
51543 + __u32 reqmode = GR_WRITE | GR_CREATE;
51544 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51545 + reqmode |= GR_SETID;
51546 +
51547 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51548 + reqmode, GR_MKNOD_ACL_MSG);
51549 +}
51550 +
51551 +__u32
51552 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51553 + const struct dentry *parent_dentry,
51554 + const struct vfsmount *parent_mnt)
51555 +{
51556 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51557 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51558 +}
51559 +
51560 +#define RENAME_CHECK_SUCCESS(old, new) \
51561 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51562 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51563 +
51564 +int
51565 +gr_acl_handle_rename(struct dentry *new_dentry,
51566 + struct dentry *parent_dentry,
51567 + const struct vfsmount *parent_mnt,
51568 + struct dentry *old_dentry,
51569 + struct inode *old_parent_inode,
51570 + struct vfsmount *old_mnt, const char *newname)
51571 +{
51572 + __u32 comp1, comp2;
51573 + int error = 0;
51574 +
51575 + if (unlikely(!gr_acl_is_enabled()))
51576 + return 0;
51577 +
51578 + if (!new_dentry->d_inode) {
51579 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51580 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51581 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51582 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51583 + GR_DELETE | GR_AUDIT_DELETE |
51584 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51585 + GR_SUPPRESS, old_mnt);
51586 + } else {
51587 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51588 + GR_CREATE | GR_DELETE |
51589 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51590 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51591 + GR_SUPPRESS, parent_mnt);
51592 + comp2 =
51593 + gr_search_file(old_dentry,
51594 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51595 + GR_DELETE | GR_AUDIT_DELETE |
51596 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51597 + }
51598 +
51599 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51600 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51601 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51602 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51603 + && !(comp2 & GR_SUPPRESS)) {
51604 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51605 + error = -EACCES;
51606 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51607 + error = -EACCES;
51608 +
51609 + return error;
51610 +}
51611 +
51612 +void
51613 +gr_acl_handle_exit(void)
51614 +{
51615 + u16 id;
51616 + char *rolename;
51617 + struct file *exec_file;
51618 +
51619 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51620 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51621 + id = current->acl_role_id;
51622 + rolename = current->role->rolename;
51623 + gr_set_acls(1);
51624 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51625 + }
51626 +
51627 + write_lock(&grsec_exec_file_lock);
51628 + exec_file = current->exec_file;
51629 + current->exec_file = NULL;
51630 + write_unlock(&grsec_exec_file_lock);
51631 +
51632 + if (exec_file)
51633 + fput(exec_file);
51634 +}
51635 +
51636 +int
51637 +gr_acl_handle_procpidmem(const struct task_struct *task)
51638 +{
51639 + if (unlikely(!gr_acl_is_enabled()))
51640 + return 0;
51641 +
51642 + if (task != current && task->acl->mode & GR_PROTPROCFD)
51643 + return -EACCES;
51644 +
51645 + return 0;
51646 +}
51647 diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51648 --- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51649 +++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51650 @@ -0,0 +1,382 @@
51651 +#include <linux/kernel.h>
51652 +#include <asm/uaccess.h>
51653 +#include <asm/errno.h>
51654 +#include <net/sock.h>
51655 +#include <linux/file.h>
51656 +#include <linux/fs.h>
51657 +#include <linux/net.h>
51658 +#include <linux/in.h>
51659 +#include <linux/skbuff.h>
51660 +#include <linux/ip.h>
51661 +#include <linux/udp.h>
51662 +#include <linux/smp_lock.h>
51663 +#include <linux/types.h>
51664 +#include <linux/sched.h>
51665 +#include <linux/netdevice.h>
51666 +#include <linux/inetdevice.h>
51667 +#include <linux/gracl.h>
51668 +#include <linux/grsecurity.h>
51669 +#include <linux/grinternal.h>
51670 +
51671 +#define GR_BIND 0x01
51672 +#define GR_CONNECT 0x02
51673 +#define GR_INVERT 0x04
51674 +#define GR_BINDOVERRIDE 0x08
51675 +#define GR_CONNECTOVERRIDE 0x10
51676 +#define GR_SOCK_FAMILY 0x20
51677 +
51678 +static const char * gr_protocols[IPPROTO_MAX] = {
51679 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51680 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51681 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51682 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51683 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51684 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51685 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51686 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51687 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51688 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51689 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51690 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51691 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51692 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51693 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51694 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51695 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51696 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51697 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51698 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51699 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51700 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51701 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51702 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51703 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51704 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51705 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51706 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51707 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51708 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51709 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51710 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51711 + };
51712 +
51713 +static const char * gr_socktypes[SOCK_MAX] = {
51714 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51715 + "unknown:7", "unknown:8", "unknown:9", "packet"
51716 + };
51717 +
51718 +static const char * gr_sockfamilies[AF_MAX+1] = {
51719 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51720 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51721 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51722 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51723 + };
51724 +
51725 +const char *
51726 +gr_proto_to_name(unsigned char proto)
51727 +{
51728 + return gr_protocols[proto];
51729 +}
51730 +
51731 +const char *
51732 +gr_socktype_to_name(unsigned char type)
51733 +{
51734 + return gr_socktypes[type];
51735 +}
51736 +
51737 +const char *
51738 +gr_sockfamily_to_name(unsigned char family)
51739 +{
51740 + return gr_sockfamilies[family];
51741 +}
51742 +
51743 +int
51744 +gr_search_socket(const int domain, const int type, const int protocol)
51745 +{
51746 + struct acl_subject_label *curr;
51747 + const struct cred *cred = current_cred();
51748 +
51749 + if (unlikely(!gr_acl_is_enabled()))
51750 + goto exit;
51751 +
51752 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
51753 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51754 + goto exit; // let the kernel handle it
51755 +
51756 + curr = current->acl;
51757 +
51758 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51759 + /* the family is allowed, if this is PF_INET allow it only if
51760 + the extra sock type/protocol checks pass */
51761 + if (domain == PF_INET)
51762 + goto inet_check;
51763 + goto exit;
51764 + } else {
51765 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51766 + __u32 fakeip = 0;
51767 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51768 + current->role->roletype, cred->uid,
51769 + cred->gid, current->exec_file ?
51770 + gr_to_filename(current->exec_file->f_path.dentry,
51771 + current->exec_file->f_path.mnt) :
51772 + curr->filename, curr->filename,
51773 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51774 + &current->signal->saved_ip);
51775 + goto exit;
51776 + }
51777 + goto exit_fail;
51778 + }
51779 +
51780 +inet_check:
51781 + /* the rest of this checking is for IPv4 only */
51782 + if (!curr->ips)
51783 + goto exit;
51784 +
51785 + if ((curr->ip_type & (1 << type)) &&
51786 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51787 + goto exit;
51788 +
51789 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51790 + /* we don't place acls on raw sockets, and sometimes
51791 + dgram/ip sockets are opened for ioctl and not
51792 + bind/connect, so we'll fake a bind learn log */
51793 + if (type == SOCK_RAW || type == SOCK_PACKET) {
51794 + __u32 fakeip = 0;
51795 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51796 + current->role->roletype, cred->uid,
51797 + cred->gid, current->exec_file ?
51798 + gr_to_filename(current->exec_file->f_path.dentry,
51799 + current->exec_file->f_path.mnt) :
51800 + curr->filename, curr->filename,
51801 + &fakeip, 0, type,
51802 + protocol, GR_CONNECT, &current->signal->saved_ip);
51803 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51804 + __u32 fakeip = 0;
51805 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51806 + current->role->roletype, cred->uid,
51807 + cred->gid, current->exec_file ?
51808 + gr_to_filename(current->exec_file->f_path.dentry,
51809 + current->exec_file->f_path.mnt) :
51810 + curr->filename, curr->filename,
51811 + &fakeip, 0, type,
51812 + protocol, GR_BIND, &current->signal->saved_ip);
51813 + }
51814 + /* we'll log when they use connect or bind */
51815 + goto exit;
51816 + }
51817 +
51818 +exit_fail:
51819 + if (domain == PF_INET)
51820 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51821 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
51822 + else
51823 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51824 + gr_socktype_to_name(type), protocol);
51825 +
51826 + return 0;
51827 +exit:
51828 + return 1;
51829 +}
51830 +
51831 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51832 +{
51833 + if ((ip->mode & mode) &&
51834 + (ip_port >= ip->low) &&
51835 + (ip_port <= ip->high) &&
51836 + ((ntohl(ip_addr) & our_netmask) ==
51837 + (ntohl(our_addr) & our_netmask))
51838 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51839 + && (ip->type & (1 << type))) {
51840 + if (ip->mode & GR_INVERT)
51841 + return 2; // specifically denied
51842 + else
51843 + return 1; // allowed
51844 + }
51845 +
51846 + return 0; // not specifically allowed, may continue parsing
51847 +}
51848 +
51849 +static int
51850 +gr_search_connectbind(const int full_mode, struct sock *sk,
51851 + struct sockaddr_in *addr, const int type)
51852 +{
51853 + char iface[IFNAMSIZ] = {0};
51854 + struct acl_subject_label *curr;
51855 + struct acl_ip_label *ip;
51856 + struct inet_sock *isk;
51857 + struct net_device *dev;
51858 + struct in_device *idev;
51859 + unsigned long i;
51860 + int ret;
51861 + int mode = full_mode & (GR_BIND | GR_CONNECT);
51862 + __u32 ip_addr = 0;
51863 + __u32 our_addr;
51864 + __u32 our_netmask;
51865 + char *p;
51866 + __u16 ip_port = 0;
51867 + const struct cred *cred = current_cred();
51868 +
51869 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51870 + return 0;
51871 +
51872 + curr = current->acl;
51873 + isk = inet_sk(sk);
51874 +
51875 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51876 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51877 + addr->sin_addr.s_addr = curr->inaddr_any_override;
51878 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51879 + struct sockaddr_in saddr;
51880 + int err;
51881 +
51882 + saddr.sin_family = AF_INET;
51883 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
51884 + saddr.sin_port = isk->sport;
51885 +
51886 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51887 + if (err)
51888 + return err;
51889 +
51890 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51891 + if (err)
51892 + return err;
51893 + }
51894 +
51895 + if (!curr->ips)
51896 + return 0;
51897 +
51898 + ip_addr = addr->sin_addr.s_addr;
51899 + ip_port = ntohs(addr->sin_port);
51900 +
51901 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51902 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51903 + current->role->roletype, cred->uid,
51904 + cred->gid, current->exec_file ?
51905 + gr_to_filename(current->exec_file->f_path.dentry,
51906 + current->exec_file->f_path.mnt) :
51907 + curr->filename, curr->filename,
51908 + &ip_addr, ip_port, type,
51909 + sk->sk_protocol, mode, &current->signal->saved_ip);
51910 + return 0;
51911 + }
51912 +
51913 + for (i = 0; i < curr->ip_num; i++) {
51914 + ip = *(curr->ips + i);
51915 + if (ip->iface != NULL) {
51916 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
51917 + p = strchr(iface, ':');
51918 + if (p != NULL)
51919 + *p = '\0';
51920 + dev = dev_get_by_name(sock_net(sk), iface);
51921 + if (dev == NULL)
51922 + continue;
51923 + idev = in_dev_get(dev);
51924 + if (idev == NULL) {
51925 + dev_put(dev);
51926 + continue;
51927 + }
51928 + rcu_read_lock();
51929 + for_ifa(idev) {
51930 + if (!strcmp(ip->iface, ifa->ifa_label)) {
51931 + our_addr = ifa->ifa_address;
51932 + our_netmask = 0xffffffff;
51933 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51934 + if (ret == 1) {
51935 + rcu_read_unlock();
51936 + in_dev_put(idev);
51937 + dev_put(dev);
51938 + return 0;
51939 + } else if (ret == 2) {
51940 + rcu_read_unlock();
51941 + in_dev_put(idev);
51942 + dev_put(dev);
51943 + goto denied;
51944 + }
51945 + }
51946 + } endfor_ifa(idev);
51947 + rcu_read_unlock();
51948 + in_dev_put(idev);
51949 + dev_put(dev);
51950 + } else {
51951 + our_addr = ip->addr;
51952 + our_netmask = ip->netmask;
51953 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51954 + if (ret == 1)
51955 + return 0;
51956 + else if (ret == 2)
51957 + goto denied;
51958 + }
51959 + }
51960 +
51961 +denied:
51962 + if (mode == GR_BIND)
51963 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51964 + else if (mode == GR_CONNECT)
51965 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51966 +
51967 + return -EACCES;
51968 +}
51969 +
51970 +int
51971 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51972 +{
51973 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51974 +}
51975 +
51976 +int
51977 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51978 +{
51979 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51980 +}
51981 +
51982 +int gr_search_listen(struct socket *sock)
51983 +{
51984 + struct sock *sk = sock->sk;
51985 + struct sockaddr_in addr;
51986 +
51987 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51988 + addr.sin_port = inet_sk(sk)->sport;
51989 +
51990 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51991 +}
51992 +
51993 +int gr_search_accept(struct socket *sock)
51994 +{
51995 + struct sock *sk = sock->sk;
51996 + struct sockaddr_in addr;
51997 +
51998 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51999 + addr.sin_port = inet_sk(sk)->sport;
52000 +
52001 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52002 +}
52003 +
52004 +int
52005 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52006 +{
52007 + if (addr)
52008 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52009 + else {
52010 + struct sockaddr_in sin;
52011 + const struct inet_sock *inet = inet_sk(sk);
52012 +
52013 + sin.sin_addr.s_addr = inet->daddr;
52014 + sin.sin_port = inet->dport;
52015 +
52016 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52017 + }
52018 +}
52019 +
52020 +int
52021 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52022 +{
52023 + struct sockaddr_in sin;
52024 +
52025 + if (unlikely(skb->len < sizeof (struct udphdr)))
52026 + return 0; // skip this packet
52027 +
52028 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52029 + sin.sin_port = udp_hdr(skb)->source;
52030 +
52031 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52032 +}
52033 diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
52034 --- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52035 +++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
52036 @@ -0,0 +1,208 @@
52037 +#include <linux/kernel.h>
52038 +#include <linux/mm.h>
52039 +#include <linux/sched.h>
52040 +#include <linux/poll.h>
52041 +#include <linux/smp_lock.h>
52042 +#include <linux/string.h>
52043 +#include <linux/file.h>
52044 +#include <linux/types.h>
52045 +#include <linux/vmalloc.h>
52046 +#include <linux/grinternal.h>
52047 +
52048 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52049 + size_t count, loff_t *ppos);
52050 +extern int gr_acl_is_enabled(void);
52051 +
52052 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52053 +static int gr_learn_attached;
52054 +
52055 +/* use a 512k buffer */
52056 +#define LEARN_BUFFER_SIZE (512 * 1024)
52057 +
52058 +static DEFINE_SPINLOCK(gr_learn_lock);
52059 +static DEFINE_MUTEX(gr_learn_user_mutex);
52060 +
52061 +/* we need to maintain two buffers, so that the kernel context of grlearn
52062 + uses a semaphore around the userspace copying, and the other kernel contexts
52063 + use a spinlock when copying into the buffer, since they cannot sleep
52064 +*/
52065 +static char *learn_buffer;
52066 +static char *learn_buffer_user;
52067 +static int learn_buffer_len;
52068 +static int learn_buffer_user_len;
52069 +
52070 +static ssize_t
52071 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52072 +{
52073 + DECLARE_WAITQUEUE(wait, current);
52074 + ssize_t retval = 0;
52075 +
52076 + add_wait_queue(&learn_wait, &wait);
52077 + set_current_state(TASK_INTERRUPTIBLE);
52078 + do {
52079 + mutex_lock(&gr_learn_user_mutex);
52080 + spin_lock(&gr_learn_lock);
52081 + if (learn_buffer_len)
52082 + break;
52083 + spin_unlock(&gr_learn_lock);
52084 + mutex_unlock(&gr_learn_user_mutex);
52085 + if (file->f_flags & O_NONBLOCK) {
52086 + retval = -EAGAIN;
52087 + goto out;
52088 + }
52089 + if (signal_pending(current)) {
52090 + retval = -ERESTARTSYS;
52091 + goto out;
52092 + }
52093 +
52094 + schedule();
52095 + } while (1);
52096 +
52097 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52098 + learn_buffer_user_len = learn_buffer_len;
52099 + retval = learn_buffer_len;
52100 + learn_buffer_len = 0;
52101 +
52102 + spin_unlock(&gr_learn_lock);
52103 +
52104 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52105 + retval = -EFAULT;
52106 +
52107 + mutex_unlock(&gr_learn_user_mutex);
52108 +out:
52109 + set_current_state(TASK_RUNNING);
52110 + remove_wait_queue(&learn_wait, &wait);
52111 + return retval;
52112 +}
52113 +
52114 +static unsigned int
52115 +poll_learn(struct file * file, poll_table * wait)
52116 +{
52117 + poll_wait(file, &learn_wait, wait);
52118 +
52119 + if (learn_buffer_len)
52120 + return (POLLIN | POLLRDNORM);
52121 +
52122 + return 0;
52123 +}
52124 +
52125 +void
52126 +gr_clear_learn_entries(void)
52127 +{
52128 + char *tmp;
52129 +
52130 + mutex_lock(&gr_learn_user_mutex);
52131 + spin_lock(&gr_learn_lock);
52132 + tmp = learn_buffer;
52133 + learn_buffer = NULL;
52134 + spin_unlock(&gr_learn_lock);
52135 + if (tmp)
52136 + vfree(tmp);
52137 + if (learn_buffer_user != NULL) {
52138 + vfree(learn_buffer_user);
52139 + learn_buffer_user = NULL;
52140 + }
52141 + learn_buffer_len = 0;
52142 + mutex_unlock(&gr_learn_user_mutex);
52143 +
52144 + return;
52145 +}
52146 +
52147 +void
52148 +gr_add_learn_entry(const char *fmt, ...)
52149 +{
52150 + va_list args;
52151 + unsigned int len;
52152 +
52153 + if (!gr_learn_attached)
52154 + return;
52155 +
52156 + spin_lock(&gr_learn_lock);
52157 +
52158 + /* leave a gap at the end so we know when it's "full" but don't have to
52159 + compute the exact length of the string we're trying to append
52160 + */
52161 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52162 + spin_unlock(&gr_learn_lock);
52163 + wake_up_interruptible(&learn_wait);
52164 + return;
52165 + }
52166 + if (learn_buffer == NULL) {
52167 + spin_unlock(&gr_learn_lock);
52168 + return;
52169 + }
52170 +
52171 + va_start(args, fmt);
52172 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52173 + va_end(args);
52174 +
52175 + learn_buffer_len += len + 1;
52176 +
52177 + spin_unlock(&gr_learn_lock);
52178 + wake_up_interruptible(&learn_wait);
52179 +
52180 + return;
52181 +}
52182 +
52183 +static int
52184 +open_learn(struct inode *inode, struct file *file)
52185 +{
52186 + if (file->f_mode & FMODE_READ && gr_learn_attached)
52187 + return -EBUSY;
52188 + if (file->f_mode & FMODE_READ) {
52189 + int retval = 0;
52190 + mutex_lock(&gr_learn_user_mutex);
52191 + if (learn_buffer == NULL)
52192 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52193 + if (learn_buffer_user == NULL)
52194 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52195 + if (learn_buffer == NULL) {
52196 + retval = -ENOMEM;
52197 + goto out_error;
52198 + }
52199 + if (learn_buffer_user == NULL) {
52200 + retval = -ENOMEM;
52201 + goto out_error;
52202 + }
52203 + learn_buffer_len = 0;
52204 + learn_buffer_user_len = 0;
52205 + gr_learn_attached = 1;
52206 +out_error:
52207 + mutex_unlock(&gr_learn_user_mutex);
52208 + return retval;
52209 + }
52210 + return 0;
52211 +}
52212 +
52213 +static int
52214 +close_learn(struct inode *inode, struct file *file)
52215 +{
52216 + if (file->f_mode & FMODE_READ) {
52217 + char *tmp = NULL;
52218 + mutex_lock(&gr_learn_user_mutex);
52219 + spin_lock(&gr_learn_lock);
52220 + tmp = learn_buffer;
52221 + learn_buffer = NULL;
52222 + spin_unlock(&gr_learn_lock);
52223 + if (tmp)
52224 + vfree(tmp);
52225 + if (learn_buffer_user != NULL) {
52226 + vfree(learn_buffer_user);
52227 + learn_buffer_user = NULL;
52228 + }
52229 + learn_buffer_len = 0;
52230 + learn_buffer_user_len = 0;
52231 + gr_learn_attached = 0;
52232 + mutex_unlock(&gr_learn_user_mutex);
52233 + }
52234 +
52235 + return 0;
52236 +}
52237 +
52238 +const struct file_operations grsec_fops = {
52239 + .read = read_learn,
52240 + .write = write_grsec_handler,
52241 + .open = open_learn,
52242 + .release = close_learn,
52243 + .poll = poll_learn,
52244 +};
52245 diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
52246 --- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52247 +++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52248 @@ -0,0 +1,67 @@
52249 +#include <linux/kernel.h>
52250 +#include <linux/sched.h>
52251 +#include <linux/gracl.h>
52252 +#include <linux/grinternal.h>
52253 +
52254 +static const char *restab_log[] = {
52255 + [RLIMIT_CPU] = "RLIMIT_CPU",
52256 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52257 + [RLIMIT_DATA] = "RLIMIT_DATA",
52258 + [RLIMIT_STACK] = "RLIMIT_STACK",
52259 + [RLIMIT_CORE] = "RLIMIT_CORE",
52260 + [RLIMIT_RSS] = "RLIMIT_RSS",
52261 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
52262 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52263 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52264 + [RLIMIT_AS] = "RLIMIT_AS",
52265 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52266 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52267 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52268 + [RLIMIT_NICE] = "RLIMIT_NICE",
52269 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52270 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52271 + [GR_CRASH_RES] = "RLIMIT_CRASH"
52272 +};
52273 +
52274 +void
52275 +gr_log_resource(const struct task_struct *task,
52276 + const int res, const unsigned long wanted, const int gt)
52277 +{
52278 + const struct cred *cred;
52279 + unsigned long rlim;
52280 +
52281 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
52282 + return;
52283 +
52284 + // not yet supported resource
52285 + if (unlikely(!restab_log[res]))
52286 + return;
52287 +
52288 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52289 + rlim = task->signal->rlim[res].rlim_max;
52290 + else
52291 + rlim = task->signal->rlim[res].rlim_cur;
52292 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52293 + return;
52294 +
52295 + rcu_read_lock();
52296 + cred = __task_cred(task);
52297 +
52298 + if (res == RLIMIT_NPROC &&
52299 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52300 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52301 + goto out_rcu_unlock;
52302 + else if (res == RLIMIT_MEMLOCK &&
52303 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52304 + goto out_rcu_unlock;
52305 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52306 + goto out_rcu_unlock;
52307 + rcu_read_unlock();
52308 +
52309 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52310 +
52311 + return;
52312 +out_rcu_unlock:
52313 + rcu_read_unlock();
52314 + return;
52315 +}
52316 diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
52317 --- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52318 +++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52319 @@ -0,0 +1,284 @@
52320 +#include <linux/kernel.h>
52321 +#include <linux/mm.h>
52322 +#include <asm/uaccess.h>
52323 +#include <asm/errno.h>
52324 +#include <asm/mman.h>
52325 +#include <net/sock.h>
52326 +#include <linux/file.h>
52327 +#include <linux/fs.h>
52328 +#include <linux/net.h>
52329 +#include <linux/in.h>
52330 +#include <linux/smp_lock.h>
52331 +#include <linux/slab.h>
52332 +#include <linux/types.h>
52333 +#include <linux/sched.h>
52334 +#include <linux/timer.h>
52335 +#include <linux/gracl.h>
52336 +#include <linux/grsecurity.h>
52337 +#include <linux/grinternal.h>
52338 +
52339 +static struct crash_uid *uid_set;
52340 +static unsigned short uid_used;
52341 +static DEFINE_SPINLOCK(gr_uid_lock);
52342 +extern rwlock_t gr_inode_lock;
52343 +extern struct acl_subject_label *
52344 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52345 + struct acl_role_label *role);
52346 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52347 +
52348 +int
52349 +gr_init_uidset(void)
52350 +{
52351 + uid_set =
52352 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52353 + uid_used = 0;
52354 +
52355 + return uid_set ? 1 : 0;
52356 +}
52357 +
52358 +void
52359 +gr_free_uidset(void)
52360 +{
52361 + if (uid_set)
52362 + kfree(uid_set);
52363 +
52364 + return;
52365 +}
52366 +
52367 +int
52368 +gr_find_uid(const uid_t uid)
52369 +{
52370 + struct crash_uid *tmp = uid_set;
52371 + uid_t buid;
52372 + int low = 0, high = uid_used - 1, mid;
52373 +
52374 + while (high >= low) {
52375 + mid = (low + high) >> 1;
52376 + buid = tmp[mid].uid;
52377 + if (buid == uid)
52378 + return mid;
52379 + if (buid > uid)
52380 + high = mid - 1;
52381 + if (buid < uid)
52382 + low = mid + 1;
52383 + }
52384 +
52385 + return -1;
52386 +}
52387 +
52388 +static __inline__ void
52389 +gr_insertsort(void)
52390 +{
52391 + unsigned short i, j;
52392 + struct crash_uid index;
52393 +
52394 + for (i = 1; i < uid_used; i++) {
52395 + index = uid_set[i];
52396 + j = i;
52397 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52398 + uid_set[j] = uid_set[j - 1];
52399 + j--;
52400 + }
52401 + uid_set[j] = index;
52402 + }
52403 +
52404 + return;
52405 +}
52406 +
52407 +static __inline__ void
52408 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52409 +{
52410 + int loc;
52411 +
52412 + if (uid_used == GR_UIDTABLE_MAX)
52413 + return;
52414 +
52415 + loc = gr_find_uid(uid);
52416 +
52417 + if (loc >= 0) {
52418 + uid_set[loc].expires = expires;
52419 + return;
52420 + }
52421 +
52422 + uid_set[uid_used].uid = uid;
52423 + uid_set[uid_used].expires = expires;
52424 + uid_used++;
52425 +
52426 + gr_insertsort();
52427 +
52428 + return;
52429 +}
52430 +
52431 +void
52432 +gr_remove_uid(const unsigned short loc)
52433 +{
52434 + unsigned short i;
52435 +
52436 + for (i = loc + 1; i < uid_used; i++)
52437 + uid_set[i - 1] = uid_set[i];
52438 +
52439 + uid_used--;
52440 +
52441 + return;
52442 +}
52443 +
52444 +int
52445 +gr_check_crash_uid(const uid_t uid)
52446 +{
52447 + int loc;
52448 + int ret = 0;
52449 +
52450 + if (unlikely(!gr_acl_is_enabled()))
52451 + return 0;
52452 +
52453 + spin_lock(&gr_uid_lock);
52454 + loc = gr_find_uid(uid);
52455 +
52456 + if (loc < 0)
52457 + goto out_unlock;
52458 +
52459 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52460 + gr_remove_uid(loc);
52461 + else
52462 + ret = 1;
52463 +
52464 +out_unlock:
52465 + spin_unlock(&gr_uid_lock);
52466 + return ret;
52467 +}
52468 +
52469 +static __inline__ int
52470 +proc_is_setxid(const struct cred *cred)
52471 +{
52472 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52473 + cred->uid != cred->fsuid)
52474 + return 1;
52475 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52476 + cred->gid != cred->fsgid)
52477 + return 1;
52478 +
52479 + return 0;
52480 +}
52481 +
52482 +void
52483 +gr_handle_crash(struct task_struct *task, const int sig)
52484 +{
52485 + struct acl_subject_label *curr;
52486 + struct acl_subject_label *curr2;
52487 + struct task_struct *tsk, *tsk2;
52488 + const struct cred *cred;
52489 + const struct cred *cred2;
52490 +
52491 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52492 + return;
52493 +
52494 + if (unlikely(!gr_acl_is_enabled()))
52495 + return;
52496 +
52497 + curr = task->acl;
52498 +
52499 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52500 + return;
52501 +
52502 + if (time_before_eq(curr->expires, get_seconds())) {
52503 + curr->expires = 0;
52504 + curr->crashes = 0;
52505 + }
52506 +
52507 + curr->crashes++;
52508 +
52509 + if (!curr->expires)
52510 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52511 +
52512 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52513 + time_after(curr->expires, get_seconds())) {
52514 + rcu_read_lock();
52515 + cred = __task_cred(task);
52516 + if (cred->uid && proc_is_setxid(cred)) {
52517 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52518 + spin_lock(&gr_uid_lock);
52519 + gr_insert_uid(cred->uid, curr->expires);
52520 + spin_unlock(&gr_uid_lock);
52521 + curr->expires = 0;
52522 + curr->crashes = 0;
52523 + read_lock(&tasklist_lock);
52524 + do_each_thread(tsk2, tsk) {
52525 + cred2 = __task_cred(tsk);
52526 + if (tsk != task && cred2->uid == cred->uid)
52527 + gr_fake_force_sig(SIGKILL, tsk);
52528 + } while_each_thread(tsk2, tsk);
52529 + read_unlock(&tasklist_lock);
52530 + } else {
52531 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52532 + read_lock(&tasklist_lock);
52533 + do_each_thread(tsk2, tsk) {
52534 + if (likely(tsk != task)) {
52535 + curr2 = tsk->acl;
52536 +
52537 + if (curr2->device == curr->device &&
52538 + curr2->inode == curr->inode)
52539 + gr_fake_force_sig(SIGKILL, tsk);
52540 + }
52541 + } while_each_thread(tsk2, tsk);
52542 + read_unlock(&tasklist_lock);
52543 + }
52544 + rcu_read_unlock();
52545 + }
52546 +
52547 + return;
52548 +}
52549 +
52550 +int
52551 +gr_check_crash_exec(const struct file *filp)
52552 +{
52553 + struct acl_subject_label *curr;
52554 +
52555 + if (unlikely(!gr_acl_is_enabled()))
52556 + return 0;
52557 +
52558 + read_lock(&gr_inode_lock);
52559 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52560 + filp->f_path.dentry->d_inode->i_sb->s_dev,
52561 + current->role);
52562 + read_unlock(&gr_inode_lock);
52563 +
52564 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52565 + (!curr->crashes && !curr->expires))
52566 + return 0;
52567 +
52568 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52569 + time_after(curr->expires, get_seconds()))
52570 + return 1;
52571 + else if (time_before_eq(curr->expires, get_seconds())) {
52572 + curr->crashes = 0;
52573 + curr->expires = 0;
52574 + }
52575 +
52576 + return 0;
52577 +}
52578 +
52579 +void
52580 +gr_handle_alertkill(struct task_struct *task)
52581 +{
52582 + struct acl_subject_label *curracl;
52583 + __u32 curr_ip;
52584 + struct task_struct *p, *p2;
52585 +
52586 + if (unlikely(!gr_acl_is_enabled()))
52587 + return;
52588 +
52589 + curracl = task->acl;
52590 + curr_ip = task->signal->curr_ip;
52591 +
52592 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52593 + read_lock(&tasklist_lock);
52594 + do_each_thread(p2, p) {
52595 + if (p->signal->curr_ip == curr_ip)
52596 + gr_fake_force_sig(SIGKILL, p);
52597 + } while_each_thread(p2, p);
52598 + read_unlock(&tasklist_lock);
52599 + } else if (curracl->mode & GR_KILLPROC)
52600 + gr_fake_force_sig(SIGKILL, task);
52601 +
52602 + return;
52603 +}
52604 diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52605 --- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52606 +++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52607 @@ -0,0 +1,40 @@
52608 +#include <linux/kernel.h>
52609 +#include <linux/mm.h>
52610 +#include <linux/sched.h>
52611 +#include <linux/file.h>
52612 +#include <linux/ipc.h>
52613 +#include <linux/gracl.h>
52614 +#include <linux/grsecurity.h>
52615 +#include <linux/grinternal.h>
52616 +
52617 +int
52618 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52619 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52620 +{
52621 + struct task_struct *task;
52622 +
52623 + if (!gr_acl_is_enabled())
52624 + return 1;
52625 +
52626 + rcu_read_lock();
52627 + read_lock(&tasklist_lock);
52628 +
52629 + task = find_task_by_vpid(shm_cprid);
52630 +
52631 + if (unlikely(!task))
52632 + task = find_task_by_vpid(shm_lapid);
52633 +
52634 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52635 + (task->pid == shm_lapid)) &&
52636 + (task->acl->mode & GR_PROTSHM) &&
52637 + (task->acl != current->acl))) {
52638 + read_unlock(&tasklist_lock);
52639 + rcu_read_unlock();
52640 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52641 + return 0;
52642 + }
52643 + read_unlock(&tasklist_lock);
52644 + rcu_read_unlock();
52645 +
52646 + return 1;
52647 +}
52648 diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52649 --- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52650 +++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52651 @@ -0,0 +1,19 @@
52652 +#include <linux/kernel.h>
52653 +#include <linux/sched.h>
52654 +#include <linux/fs.h>
52655 +#include <linux/file.h>
52656 +#include <linux/grsecurity.h>
52657 +#include <linux/grinternal.h>
52658 +
52659 +void
52660 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52661 +{
52662 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52663 + if ((grsec_enable_chdir && grsec_enable_group &&
52664 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52665 + !grsec_enable_group)) {
52666 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52667 + }
52668 +#endif
52669 + return;
52670 +}
52671 diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52672 --- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52673 +++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52674 @@ -0,0 +1,384 @@
52675 +#include <linux/kernel.h>
52676 +#include <linux/module.h>
52677 +#include <linux/sched.h>
52678 +#include <linux/file.h>
52679 +#include <linux/fs.h>
52680 +#include <linux/mount.h>
52681 +#include <linux/types.h>
52682 +#include <linux/pid_namespace.h>
52683 +#include <linux/grsecurity.h>
52684 +#include <linux/grinternal.h>
52685 +
52686 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52687 +{
52688 +#ifdef CONFIG_GRKERNSEC
52689 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52690 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52691 + task->gr_is_chrooted = 1;
52692 + else
52693 + task->gr_is_chrooted = 0;
52694 +
52695 + task->gr_chroot_dentry = path->dentry;
52696 +#endif
52697 + return;
52698 +}
52699 +
52700 +void gr_clear_chroot_entries(struct task_struct *task)
52701 +{
52702 +#ifdef CONFIG_GRKERNSEC
52703 + task->gr_is_chrooted = 0;
52704 + task->gr_chroot_dentry = NULL;
52705 +#endif
52706 + return;
52707 +}
52708 +
52709 +int
52710 +gr_handle_chroot_unix(const pid_t pid)
52711 +{
52712 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52713 + struct task_struct *p;
52714 +
52715 + if (unlikely(!grsec_enable_chroot_unix))
52716 + return 1;
52717 +
52718 + if (likely(!proc_is_chrooted(current)))
52719 + return 1;
52720 +
52721 + rcu_read_lock();
52722 + read_lock(&tasklist_lock);
52723 +
52724 + p = find_task_by_vpid_unrestricted(pid);
52725 + if (unlikely(p && !have_same_root(current, p))) {
52726 + read_unlock(&tasklist_lock);
52727 + rcu_read_unlock();
52728 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52729 + return 0;
52730 + }
52731 + read_unlock(&tasklist_lock);
52732 + rcu_read_unlock();
52733 +#endif
52734 + return 1;
52735 +}
52736 +
52737 +int
52738 +gr_handle_chroot_nice(void)
52739 +{
52740 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52741 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52742 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52743 + return -EPERM;
52744 + }
52745 +#endif
52746 + return 0;
52747 +}
52748 +
52749 +int
52750 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52751 +{
52752 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52753 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52754 + && proc_is_chrooted(current)) {
52755 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52756 + return -EACCES;
52757 + }
52758 +#endif
52759 + return 0;
52760 +}
52761 +
52762 +int
52763 +gr_handle_chroot_rawio(const struct inode *inode)
52764 +{
52765 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52766 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52767 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52768 + return 1;
52769 +#endif
52770 + return 0;
52771 +}
52772 +
52773 +int
52774 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52775 +{
52776 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52777 + struct task_struct *p;
52778 + int ret = 0;
52779 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52780 + return ret;
52781 +
52782 + read_lock(&tasklist_lock);
52783 + do_each_pid_task(pid, type, p) {
52784 + if (!have_same_root(current, p)) {
52785 + ret = 1;
52786 + goto out;
52787 + }
52788 + } while_each_pid_task(pid, type, p);
52789 +out:
52790 + read_unlock(&tasklist_lock);
52791 + return ret;
52792 +#endif
52793 + return 0;
52794 +}
52795 +
52796 +int
52797 +gr_pid_is_chrooted(struct task_struct *p)
52798 +{
52799 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52800 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52801 + return 0;
52802 +
52803 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52804 + !have_same_root(current, p)) {
52805 + return 1;
52806 + }
52807 +#endif
52808 + return 0;
52809 +}
52810 +
52811 +EXPORT_SYMBOL(gr_pid_is_chrooted);
52812 +
52813 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52814 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52815 +{
52816 + struct dentry *dentry = (struct dentry *)u_dentry;
52817 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52818 + struct dentry *realroot;
52819 + struct vfsmount *realrootmnt;
52820 + struct dentry *currentroot;
52821 + struct vfsmount *currentmnt;
52822 + struct task_struct *reaper = &init_task;
52823 + int ret = 1;
52824 +
52825 + read_lock(&reaper->fs->lock);
52826 + realrootmnt = mntget(reaper->fs->root.mnt);
52827 + realroot = dget(reaper->fs->root.dentry);
52828 + read_unlock(&reaper->fs->lock);
52829 +
52830 + read_lock(&current->fs->lock);
52831 + currentmnt = mntget(current->fs->root.mnt);
52832 + currentroot = dget(current->fs->root.dentry);
52833 + read_unlock(&current->fs->lock);
52834 +
52835 + spin_lock(&dcache_lock);
52836 + for (;;) {
52837 + if (unlikely((dentry == realroot && mnt == realrootmnt)
52838 + || (dentry == currentroot && mnt == currentmnt)))
52839 + break;
52840 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52841 + if (mnt->mnt_parent == mnt)
52842 + break;
52843 + dentry = mnt->mnt_mountpoint;
52844 + mnt = mnt->mnt_parent;
52845 + continue;
52846 + }
52847 + dentry = dentry->d_parent;
52848 + }
52849 + spin_unlock(&dcache_lock);
52850 +
52851 + dput(currentroot);
52852 + mntput(currentmnt);
52853 +
52854 + /* access is outside of chroot */
52855 + if (dentry == realroot && mnt == realrootmnt)
52856 + ret = 0;
52857 +
52858 + dput(realroot);
52859 + mntput(realrootmnt);
52860 + return ret;
52861 +}
52862 +#endif
52863 +
52864 +int
52865 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52866 +{
52867 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52868 + if (!grsec_enable_chroot_fchdir)
52869 + return 1;
52870 +
52871 + if (!proc_is_chrooted(current))
52872 + return 1;
52873 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52874 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52875 + return 0;
52876 + }
52877 +#endif
52878 + return 1;
52879 +}
52880 +
52881 +int
52882 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52883 + const time_t shm_createtime)
52884 +{
52885 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52886 + struct task_struct *p;
52887 + time_t starttime;
52888 +
52889 + if (unlikely(!grsec_enable_chroot_shmat))
52890 + return 1;
52891 +
52892 + if (likely(!proc_is_chrooted(current)))
52893 + return 1;
52894 +
52895 + rcu_read_lock();
52896 + read_lock(&tasklist_lock);
52897 +
52898 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52899 + starttime = p->start_time.tv_sec;
52900 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52901 + if (have_same_root(current, p)) {
52902 + goto allow;
52903 + } else {
52904 + read_unlock(&tasklist_lock);
52905 + rcu_read_unlock();
52906 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52907 + return 0;
52908 + }
52909 + }
52910 + /* creator exited, pid reuse, fall through to next check */
52911 + }
52912 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52913 + if (unlikely(!have_same_root(current, p))) {
52914 + read_unlock(&tasklist_lock);
52915 + rcu_read_unlock();
52916 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52917 + return 0;
52918 + }
52919 + }
52920 +
52921 +allow:
52922 + read_unlock(&tasklist_lock);
52923 + rcu_read_unlock();
52924 +#endif
52925 + return 1;
52926 +}
52927 +
52928 +void
52929 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52930 +{
52931 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52932 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52933 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52934 +#endif
52935 + return;
52936 +}
52937 +
52938 +int
52939 +gr_handle_chroot_mknod(const struct dentry *dentry,
52940 + const struct vfsmount *mnt, const int mode)
52941 +{
52942 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52943 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52944 + proc_is_chrooted(current)) {
52945 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52946 + return -EPERM;
52947 + }
52948 +#endif
52949 + return 0;
52950 +}
52951 +
52952 +int
52953 +gr_handle_chroot_mount(const struct dentry *dentry,
52954 + const struct vfsmount *mnt, const char *dev_name)
52955 +{
52956 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52957 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52958 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52959 + return -EPERM;
52960 + }
52961 +#endif
52962 + return 0;
52963 +}
52964 +
52965 +int
52966 +gr_handle_chroot_pivot(void)
52967 +{
52968 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52969 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52970 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52971 + return -EPERM;
52972 + }
52973 +#endif
52974 + return 0;
52975 +}
52976 +
52977 +int
52978 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52979 +{
52980 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52981 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52982 + !gr_is_outside_chroot(dentry, mnt)) {
52983 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52984 + return -EPERM;
52985 + }
52986 +#endif
52987 + return 0;
52988 +}
52989 +
52990 +int
52991 +gr_handle_chroot_caps(struct path *path)
52992 +{
52993 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52994 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52995 + (init_task.fs->root.dentry != path->dentry) &&
52996 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52997 +
52998 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52999 + const struct cred *old = current_cred();
53000 + struct cred *new = prepare_creds();
53001 + if (new == NULL)
53002 + return 1;
53003 +
53004 + new->cap_permitted = cap_drop(old->cap_permitted,
53005 + chroot_caps);
53006 + new->cap_inheritable = cap_drop(old->cap_inheritable,
53007 + chroot_caps);
53008 + new->cap_effective = cap_drop(old->cap_effective,
53009 + chroot_caps);
53010 +
53011 + commit_creds(new);
53012 +
53013 + return 0;
53014 + }
53015 +#endif
53016 + return 0;
53017 +}
53018 +
53019 +int
53020 +gr_handle_chroot_sysctl(const int op)
53021 +{
53022 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53023 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
53024 + && (op & MAY_WRITE))
53025 + return -EACCES;
53026 +#endif
53027 + return 0;
53028 +}
53029 +
53030 +void
53031 +gr_handle_chroot_chdir(struct path *path)
53032 +{
53033 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53034 + if (grsec_enable_chroot_chdir)
53035 + set_fs_pwd(current->fs, path);
53036 +#endif
53037 + return;
53038 +}
53039 +
53040 +int
53041 +gr_handle_chroot_chmod(const struct dentry *dentry,
53042 + const struct vfsmount *mnt, const int mode)
53043 +{
53044 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53045 + /* allow chmod +s on directories, but not on files */
53046 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53047 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53048 + proc_is_chrooted(current)) {
53049 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53050 + return -EPERM;
53051 + }
53052 +#endif
53053 + return 0;
53054 +}
53055 +
53056 +#ifdef CONFIG_SECURITY
53057 +EXPORT_SYMBOL(gr_handle_chroot_caps);
53058 +#endif
53059 diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
53060 --- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53061 +++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
53062 @@ -0,0 +1,447 @@
53063 +#include <linux/kernel.h>
53064 +#include <linux/module.h>
53065 +#include <linux/sched.h>
53066 +#include <linux/file.h>
53067 +#include <linux/fs.h>
53068 +#include <linux/kdev_t.h>
53069 +#include <linux/net.h>
53070 +#include <linux/in.h>
53071 +#include <linux/ip.h>
53072 +#include <linux/skbuff.h>
53073 +#include <linux/sysctl.h>
53074 +
53075 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53076 +void
53077 +pax_set_initial_flags(struct linux_binprm *bprm)
53078 +{
53079 + return;
53080 +}
53081 +#endif
53082 +
53083 +#ifdef CONFIG_SYSCTL
53084 +__u32
53085 +gr_handle_sysctl(const struct ctl_table * table, const int op)
53086 +{
53087 + return 0;
53088 +}
53089 +#endif
53090 +
53091 +#ifdef CONFIG_TASKSTATS
53092 +int gr_is_taskstats_denied(int pid)
53093 +{
53094 + return 0;
53095 +}
53096 +#endif
53097 +
53098 +int
53099 +gr_acl_is_enabled(void)
53100 +{
53101 + return 0;
53102 +}
53103 +
53104 +int
53105 +gr_handle_rawio(const struct inode *inode)
53106 +{
53107 + return 0;
53108 +}
53109 +
53110 +void
53111 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53112 +{
53113 + return;
53114 +}
53115 +
53116 +int
53117 +gr_handle_ptrace(struct task_struct *task, const long request)
53118 +{
53119 + return 0;
53120 +}
53121 +
53122 +int
53123 +gr_handle_proc_ptrace(struct task_struct *task)
53124 +{
53125 + return 0;
53126 +}
53127 +
53128 +void
53129 +gr_learn_resource(const struct task_struct *task,
53130 + const int res, const unsigned long wanted, const int gt)
53131 +{
53132 + return;
53133 +}
53134 +
53135 +int
53136 +gr_set_acls(const int type)
53137 +{
53138 + return 0;
53139 +}
53140 +
53141 +int
53142 +gr_check_hidden_task(const struct task_struct *tsk)
53143 +{
53144 + return 0;
53145 +}
53146 +
53147 +int
53148 +gr_check_protected_task(const struct task_struct *task)
53149 +{
53150 + return 0;
53151 +}
53152 +
53153 +int
53154 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53155 +{
53156 + return 0;
53157 +}
53158 +
53159 +void
53160 +gr_copy_label(struct task_struct *tsk)
53161 +{
53162 + return;
53163 +}
53164 +
53165 +void
53166 +gr_set_pax_flags(struct task_struct *task)
53167 +{
53168 + return;
53169 +}
53170 +
53171 +int
53172 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53173 + const int unsafe_share)
53174 +{
53175 + return 0;
53176 +}
53177 +
53178 +void
53179 +gr_handle_delete(const ino_t ino, const dev_t dev)
53180 +{
53181 + return;
53182 +}
53183 +
53184 +void
53185 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53186 +{
53187 + return;
53188 +}
53189 +
53190 +void
53191 +gr_handle_crash(struct task_struct *task, const int sig)
53192 +{
53193 + return;
53194 +}
53195 +
53196 +int
53197 +gr_check_crash_exec(const struct file *filp)
53198 +{
53199 + return 0;
53200 +}
53201 +
53202 +int
53203 +gr_check_crash_uid(const uid_t uid)
53204 +{
53205 + return 0;
53206 +}
53207 +
53208 +void
53209 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53210 + struct dentry *old_dentry,
53211 + struct dentry *new_dentry,
53212 + struct vfsmount *mnt, const __u8 replace)
53213 +{
53214 + return;
53215 +}
53216 +
53217 +int
53218 +gr_search_socket(const int family, const int type, const int protocol)
53219 +{
53220 + return 1;
53221 +}
53222 +
53223 +int
53224 +gr_search_connectbind(const int mode, const struct socket *sock,
53225 + const struct sockaddr_in *addr)
53226 +{
53227 + return 0;
53228 +}
53229 +
53230 +int
53231 +gr_is_capable(const int cap)
53232 +{
53233 + return 1;
53234 +}
53235 +
53236 +int
53237 +gr_is_capable_nolog(const int cap)
53238 +{
53239 + return 1;
53240 +}
53241 +
53242 +void
53243 +gr_handle_alertkill(struct task_struct *task)
53244 +{
53245 + return;
53246 +}
53247 +
53248 +__u32
53249 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53250 +{
53251 + return 1;
53252 +}
53253 +
53254 +__u32
53255 +gr_acl_handle_hidden_file(const struct dentry * dentry,
53256 + const struct vfsmount * mnt)
53257 +{
53258 + return 1;
53259 +}
53260 +
53261 +__u32
53262 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53263 + const int fmode)
53264 +{
53265 + return 1;
53266 +}
53267 +
53268 +__u32
53269 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53270 +{
53271 + return 1;
53272 +}
53273 +
53274 +__u32
53275 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53276 +{
53277 + return 1;
53278 +}
53279 +
53280 +int
53281 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53282 + unsigned int *vm_flags)
53283 +{
53284 + return 1;
53285 +}
53286 +
53287 +__u32
53288 +gr_acl_handle_truncate(const struct dentry * dentry,
53289 + const struct vfsmount * mnt)
53290 +{
53291 + return 1;
53292 +}
53293 +
53294 +__u32
53295 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53296 +{
53297 + return 1;
53298 +}
53299 +
53300 +__u32
53301 +gr_acl_handle_access(const struct dentry * dentry,
53302 + const struct vfsmount * mnt, const int fmode)
53303 +{
53304 + return 1;
53305 +}
53306 +
53307 +__u32
53308 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53309 + mode_t mode)
53310 +{
53311 + return 1;
53312 +}
53313 +
53314 +__u32
53315 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53316 + mode_t mode)
53317 +{
53318 + return 1;
53319 +}
53320 +
53321 +__u32
53322 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53323 +{
53324 + return 1;
53325 +}
53326 +
53327 +__u32
53328 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53329 +{
53330 + return 1;
53331 +}
53332 +
53333 +void
53334 +grsecurity_init(void)
53335 +{
53336 + return;
53337 +}
53338 +
53339 +__u32
53340 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53341 + const struct dentry * parent_dentry,
53342 + const struct vfsmount * parent_mnt,
53343 + const int mode)
53344 +{
53345 + return 1;
53346 +}
53347 +
53348 +__u32
53349 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53350 + const struct dentry * parent_dentry,
53351 + const struct vfsmount * parent_mnt)
53352 +{
53353 + return 1;
53354 +}
53355 +
53356 +__u32
53357 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53358 + const struct dentry * parent_dentry,
53359 + const struct vfsmount * parent_mnt, const char *from)
53360 +{
53361 + return 1;
53362 +}
53363 +
53364 +__u32
53365 +gr_acl_handle_link(const struct dentry * new_dentry,
53366 + const struct dentry * parent_dentry,
53367 + const struct vfsmount * parent_mnt,
53368 + const struct dentry * old_dentry,
53369 + const struct vfsmount * old_mnt, const char *to)
53370 +{
53371 + return 1;
53372 +}
53373 +
53374 +int
53375 +gr_acl_handle_rename(const struct dentry *new_dentry,
53376 + const struct dentry *parent_dentry,
53377 + const struct vfsmount *parent_mnt,
53378 + const struct dentry *old_dentry,
53379 + const struct inode *old_parent_inode,
53380 + const struct vfsmount *old_mnt, const char *newname)
53381 +{
53382 + return 0;
53383 +}
53384 +
53385 +int
53386 +gr_acl_handle_filldir(const struct file *file, const char *name,
53387 + const int namelen, const ino_t ino)
53388 +{
53389 + return 1;
53390 +}
53391 +
53392 +int
53393 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53394 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53395 +{
53396 + return 1;
53397 +}
53398 +
53399 +int
53400 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53401 +{
53402 + return 0;
53403 +}
53404 +
53405 +int
53406 +gr_search_accept(const struct socket *sock)
53407 +{
53408 + return 0;
53409 +}
53410 +
53411 +int
53412 +gr_search_listen(const struct socket *sock)
53413 +{
53414 + return 0;
53415 +}
53416 +
53417 +int
53418 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53419 +{
53420 + return 0;
53421 +}
53422 +
53423 +__u32
53424 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53425 +{
53426 + return 1;
53427 +}
53428 +
53429 +__u32
53430 +gr_acl_handle_creat(const struct dentry * dentry,
53431 + const struct dentry * p_dentry,
53432 + const struct vfsmount * p_mnt, const int fmode,
53433 + const int imode)
53434 +{
53435 + return 1;
53436 +}
53437 +
53438 +void
53439 +gr_acl_handle_exit(void)
53440 +{
53441 + return;
53442 +}
53443 +
53444 +int
53445 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53446 +{
53447 + return 1;
53448 +}
53449 +
53450 +void
53451 +gr_set_role_label(const uid_t uid, const gid_t gid)
53452 +{
53453 + return;
53454 +}
53455 +
53456 +int
53457 +gr_acl_handle_procpidmem(const struct task_struct *task)
53458 +{
53459 + return 0;
53460 +}
53461 +
53462 +int
53463 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53464 +{
53465 + return 0;
53466 +}
53467 +
53468 +int
53469 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53470 +{
53471 + return 0;
53472 +}
53473 +
53474 +void
53475 +gr_set_kernel_label(struct task_struct *task)
53476 +{
53477 + return;
53478 +}
53479 +
53480 +int
53481 +gr_check_user_change(int real, int effective, int fs)
53482 +{
53483 + return 0;
53484 +}
53485 +
53486 +int
53487 +gr_check_group_change(int real, int effective, int fs)
53488 +{
53489 + return 0;
53490 +}
53491 +
53492 +int gr_acl_enable_at_secure(void)
53493 +{
53494 + return 0;
53495 +}
53496 +
53497 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53498 +{
53499 + return dentry->d_inode->i_sb->s_dev;
53500 +}
53501 +
53502 +EXPORT_SYMBOL(gr_is_capable);
53503 +EXPORT_SYMBOL(gr_is_capable_nolog);
53504 +EXPORT_SYMBOL(gr_learn_resource);
53505 +EXPORT_SYMBOL(gr_set_kernel_label);
53506 +#ifdef CONFIG_SECURITY
53507 +EXPORT_SYMBOL(gr_check_user_change);
53508 +EXPORT_SYMBOL(gr_check_group_change);
53509 +#endif
53510 diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53511 --- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53512 +++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53513 @@ -0,0 +1,132 @@
53514 +#include <linux/kernel.h>
53515 +#include <linux/sched.h>
53516 +#include <linux/file.h>
53517 +#include <linux/binfmts.h>
53518 +#include <linux/smp_lock.h>
53519 +#include <linux/fs.h>
53520 +#include <linux/types.h>
53521 +#include <linux/grdefs.h>
53522 +#include <linux/grinternal.h>
53523 +#include <linux/capability.h>
53524 +#include <linux/compat.h>
53525 +
53526 +#include <asm/uaccess.h>
53527 +
53528 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53529 +static char gr_exec_arg_buf[132];
53530 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53531 +#endif
53532 +
53533 +void
53534 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53535 +{
53536 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53537 + char *grarg = gr_exec_arg_buf;
53538 + unsigned int i, x, execlen = 0;
53539 + char c;
53540 +
53541 + if (!((grsec_enable_execlog && grsec_enable_group &&
53542 + in_group_p(grsec_audit_gid))
53543 + || (grsec_enable_execlog && !grsec_enable_group)))
53544 + return;
53545 +
53546 + mutex_lock(&gr_exec_arg_mutex);
53547 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53548 +
53549 + if (unlikely(argv == NULL))
53550 + goto log;
53551 +
53552 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53553 + const char __user *p;
53554 + unsigned int len;
53555 +
53556 + if (copy_from_user(&p, argv + i, sizeof(p)))
53557 + goto log;
53558 + if (!p)
53559 + goto log;
53560 + len = strnlen_user(p, 128 - execlen);
53561 + if (len > 128 - execlen)
53562 + len = 128 - execlen;
53563 + else if (len > 0)
53564 + len--;
53565 + if (copy_from_user(grarg + execlen, p, len))
53566 + goto log;
53567 +
53568 + /* rewrite unprintable characters */
53569 + for (x = 0; x < len; x++) {
53570 + c = *(grarg + execlen + x);
53571 + if (c < 32 || c > 126)
53572 + *(grarg + execlen + x) = ' ';
53573 + }
53574 +
53575 + execlen += len;
53576 + *(grarg + execlen) = ' ';
53577 + *(grarg + execlen + 1) = '\0';
53578 + execlen++;
53579 + }
53580 +
53581 + log:
53582 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53583 + bprm->file->f_path.mnt, grarg);
53584 + mutex_unlock(&gr_exec_arg_mutex);
53585 +#endif
53586 + return;
53587 +}
53588 +
53589 +#ifdef CONFIG_COMPAT
53590 +void
53591 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53592 +{
53593 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53594 + char *grarg = gr_exec_arg_buf;
53595 + unsigned int i, x, execlen = 0;
53596 + char c;
53597 +
53598 + if (!((grsec_enable_execlog && grsec_enable_group &&
53599 + in_group_p(grsec_audit_gid))
53600 + || (grsec_enable_execlog && !grsec_enable_group)))
53601 + return;
53602 +
53603 + mutex_lock(&gr_exec_arg_mutex);
53604 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53605 +
53606 + if (unlikely(argv == NULL))
53607 + goto log;
53608 +
53609 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53610 + compat_uptr_t p;
53611 + unsigned int len;
53612 +
53613 + if (get_user(p, argv + i))
53614 + goto log;
53615 + len = strnlen_user(compat_ptr(p), 128 - execlen);
53616 + if (len > 128 - execlen)
53617 + len = 128 - execlen;
53618 + else if (len > 0)
53619 + len--;
53620 + else
53621 + goto log;
53622 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53623 + goto log;
53624 +
53625 + /* rewrite unprintable characters */
53626 + for (x = 0; x < len; x++) {
53627 + c = *(grarg + execlen + x);
53628 + if (c < 32 || c > 126)
53629 + *(grarg + execlen + x) = ' ';
53630 + }
53631 +
53632 + execlen += len;
53633 + *(grarg + execlen) = ' ';
53634 + *(grarg + execlen + 1) = '\0';
53635 + execlen++;
53636 + }
53637 +
53638 + log:
53639 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53640 + bprm->file->f_path.mnt, grarg);
53641 + mutex_unlock(&gr_exec_arg_mutex);
53642 +#endif
53643 + return;
53644 +}
53645 +#endif
53646 diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53647 --- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53648 +++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53649 @@ -0,0 +1,24 @@
53650 +#include <linux/kernel.h>
53651 +#include <linux/sched.h>
53652 +#include <linux/fs.h>
53653 +#include <linux/file.h>
53654 +#include <linux/grinternal.h>
53655 +
53656 +int
53657 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53658 + const struct dentry *dir, const int flag, const int acc_mode)
53659 +{
53660 +#ifdef CONFIG_GRKERNSEC_FIFO
53661 + const struct cred *cred = current_cred();
53662 +
53663 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53664 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53665 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53666 + (cred->fsuid != dentry->d_inode->i_uid)) {
53667 + if (!inode_permission(dentry->d_inode, acc_mode))
53668 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53669 + return -EACCES;
53670 + }
53671 +#endif
53672 + return 0;
53673 +}
53674 diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53675 --- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53676 +++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53677 @@ -0,0 +1,23 @@
53678 +#include <linux/kernel.h>
53679 +#include <linux/sched.h>
53680 +#include <linux/grsecurity.h>
53681 +#include <linux/grinternal.h>
53682 +#include <linux/errno.h>
53683 +
53684 +void
53685 +gr_log_forkfail(const int retval)
53686 +{
53687 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53688 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53689 + switch (retval) {
53690 + case -EAGAIN:
53691 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53692 + break;
53693 + case -ENOMEM:
53694 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53695 + break;
53696 + }
53697 + }
53698 +#endif
53699 + return;
53700 +}
53701 diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53702 --- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53703 +++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53704 @@ -0,0 +1,270 @@
53705 +#include <linux/kernel.h>
53706 +#include <linux/sched.h>
53707 +#include <linux/mm.h>
53708 +#include <linux/smp_lock.h>
53709 +#include <linux/gracl.h>
53710 +#include <linux/slab.h>
53711 +#include <linux/vmalloc.h>
53712 +#include <linux/percpu.h>
53713 +#include <linux/module.h>
53714 +
53715 +int grsec_enable_brute;
53716 +int grsec_enable_link;
53717 +int grsec_enable_dmesg;
53718 +int grsec_enable_harden_ptrace;
53719 +int grsec_enable_fifo;
53720 +int grsec_enable_execlog;
53721 +int grsec_enable_signal;
53722 +int grsec_enable_forkfail;
53723 +int grsec_enable_audit_ptrace;
53724 +int grsec_enable_time;
53725 +int grsec_enable_audit_textrel;
53726 +int grsec_enable_group;
53727 +int grsec_audit_gid;
53728 +int grsec_enable_chdir;
53729 +int grsec_enable_mount;
53730 +int grsec_enable_rofs;
53731 +int grsec_enable_chroot_findtask;
53732 +int grsec_enable_chroot_mount;
53733 +int grsec_enable_chroot_shmat;
53734 +int grsec_enable_chroot_fchdir;
53735 +int grsec_enable_chroot_double;
53736 +int grsec_enable_chroot_pivot;
53737 +int grsec_enable_chroot_chdir;
53738 +int grsec_enable_chroot_chmod;
53739 +int grsec_enable_chroot_mknod;
53740 +int grsec_enable_chroot_nice;
53741 +int grsec_enable_chroot_execlog;
53742 +int grsec_enable_chroot_caps;
53743 +int grsec_enable_chroot_sysctl;
53744 +int grsec_enable_chroot_unix;
53745 +int grsec_enable_tpe;
53746 +int grsec_tpe_gid;
53747 +int grsec_enable_blackhole;
53748 +#ifdef CONFIG_IPV6_MODULE
53749 +EXPORT_SYMBOL(grsec_enable_blackhole);
53750 +#endif
53751 +int grsec_lastack_retries;
53752 +int grsec_enable_tpe_all;
53753 +int grsec_enable_tpe_invert;
53754 +int grsec_enable_socket_all;
53755 +int grsec_socket_all_gid;
53756 +int grsec_enable_socket_client;
53757 +int grsec_socket_client_gid;
53758 +int grsec_enable_socket_server;
53759 +int grsec_socket_server_gid;
53760 +int grsec_resource_logging;
53761 +int grsec_disable_privio;
53762 +int grsec_enable_log_rwxmaps;
53763 +int grsec_lock;
53764 +
53765 +DEFINE_SPINLOCK(grsec_alert_lock);
53766 +unsigned long grsec_alert_wtime = 0;
53767 +unsigned long grsec_alert_fyet = 0;
53768 +
53769 +DEFINE_SPINLOCK(grsec_audit_lock);
53770 +
53771 +DEFINE_RWLOCK(grsec_exec_file_lock);
53772 +
53773 +char *gr_shared_page[4];
53774 +
53775 +char *gr_alert_log_fmt;
53776 +char *gr_audit_log_fmt;
53777 +char *gr_alert_log_buf;
53778 +char *gr_audit_log_buf;
53779 +
53780 +extern struct gr_arg *gr_usermode;
53781 +extern unsigned char *gr_system_salt;
53782 +extern unsigned char *gr_system_sum;
53783 +
53784 +void __init
53785 +grsecurity_init(void)
53786 +{
53787 + int j;
53788 + /* create the per-cpu shared pages */
53789 +
53790 +#ifdef CONFIG_X86
53791 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53792 +#endif
53793 +
53794 + for (j = 0; j < 4; j++) {
53795 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53796 + if (gr_shared_page[j] == NULL) {
53797 + panic("Unable to allocate grsecurity shared page");
53798 + return;
53799 + }
53800 + }
53801 +
53802 + /* allocate log buffers */
53803 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53804 + if (!gr_alert_log_fmt) {
53805 + panic("Unable to allocate grsecurity alert log format buffer");
53806 + return;
53807 + }
53808 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53809 + if (!gr_audit_log_fmt) {
53810 + panic("Unable to allocate grsecurity audit log format buffer");
53811 + return;
53812 + }
53813 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53814 + if (!gr_alert_log_buf) {
53815 + panic("Unable to allocate grsecurity alert log buffer");
53816 + return;
53817 + }
53818 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53819 + if (!gr_audit_log_buf) {
53820 + panic("Unable to allocate grsecurity audit log buffer");
53821 + return;
53822 + }
53823 +
53824 + /* allocate memory for authentication structure */
53825 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53826 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53827 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53828 +
53829 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53830 + panic("Unable to allocate grsecurity authentication structure");
53831 + return;
53832 + }
53833 +
53834 +
53835 +#ifdef CONFIG_GRKERNSEC_IO
53836 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53837 + grsec_disable_privio = 1;
53838 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53839 + grsec_disable_privio = 1;
53840 +#else
53841 + grsec_disable_privio = 0;
53842 +#endif
53843 +#endif
53844 +
53845 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53846 + /* for backward compatibility, tpe_invert always defaults to on if
53847 + enabled in the kernel
53848 + */
53849 + grsec_enable_tpe_invert = 1;
53850 +#endif
53851 +
53852 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53853 +#ifndef CONFIG_GRKERNSEC_SYSCTL
53854 + grsec_lock = 1;
53855 +#endif
53856 +
53857 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53858 + grsec_enable_audit_textrel = 1;
53859 +#endif
53860 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53861 + grsec_enable_log_rwxmaps = 1;
53862 +#endif
53863 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53864 + grsec_enable_group = 1;
53865 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53866 +#endif
53867 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53868 + grsec_enable_chdir = 1;
53869 +#endif
53870 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53871 + grsec_enable_harden_ptrace = 1;
53872 +#endif
53873 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53874 + grsec_enable_mount = 1;
53875 +#endif
53876 +#ifdef CONFIG_GRKERNSEC_LINK
53877 + grsec_enable_link = 1;
53878 +#endif
53879 +#ifdef CONFIG_GRKERNSEC_BRUTE
53880 + grsec_enable_brute = 1;
53881 +#endif
53882 +#ifdef CONFIG_GRKERNSEC_DMESG
53883 + grsec_enable_dmesg = 1;
53884 +#endif
53885 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53886 + grsec_enable_blackhole = 1;
53887 + grsec_lastack_retries = 4;
53888 +#endif
53889 +#ifdef CONFIG_GRKERNSEC_FIFO
53890 + grsec_enable_fifo = 1;
53891 +#endif
53892 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53893 + grsec_enable_execlog = 1;
53894 +#endif
53895 +#ifdef CONFIG_GRKERNSEC_SIGNAL
53896 + grsec_enable_signal = 1;
53897 +#endif
53898 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53899 + grsec_enable_forkfail = 1;
53900 +#endif
53901 +#ifdef CONFIG_GRKERNSEC_TIME
53902 + grsec_enable_time = 1;
53903 +#endif
53904 +#ifdef CONFIG_GRKERNSEC_RESLOG
53905 + grsec_resource_logging = 1;
53906 +#endif
53907 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53908 + grsec_enable_chroot_findtask = 1;
53909 +#endif
53910 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53911 + grsec_enable_chroot_unix = 1;
53912 +#endif
53913 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53914 + grsec_enable_chroot_mount = 1;
53915 +#endif
53916 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53917 + grsec_enable_chroot_fchdir = 1;
53918 +#endif
53919 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53920 + grsec_enable_chroot_shmat = 1;
53921 +#endif
53922 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53923 + grsec_enable_audit_ptrace = 1;
53924 +#endif
53925 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53926 + grsec_enable_chroot_double = 1;
53927 +#endif
53928 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53929 + grsec_enable_chroot_pivot = 1;
53930 +#endif
53931 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53932 + grsec_enable_chroot_chdir = 1;
53933 +#endif
53934 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53935 + grsec_enable_chroot_chmod = 1;
53936 +#endif
53937 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53938 + grsec_enable_chroot_mknod = 1;
53939 +#endif
53940 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53941 + grsec_enable_chroot_nice = 1;
53942 +#endif
53943 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53944 + grsec_enable_chroot_execlog = 1;
53945 +#endif
53946 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53947 + grsec_enable_chroot_caps = 1;
53948 +#endif
53949 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53950 + grsec_enable_chroot_sysctl = 1;
53951 +#endif
53952 +#ifdef CONFIG_GRKERNSEC_TPE
53953 + grsec_enable_tpe = 1;
53954 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53955 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53956 + grsec_enable_tpe_all = 1;
53957 +#endif
53958 +#endif
53959 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53960 + grsec_enable_socket_all = 1;
53961 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53962 +#endif
53963 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53964 + grsec_enable_socket_client = 1;
53965 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53966 +#endif
53967 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53968 + grsec_enable_socket_server = 1;
53969 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53970 +#endif
53971 +#endif
53972 +
53973 + return;
53974 +}
53975 diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53976 --- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53977 +++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53978 @@ -0,0 +1,43 @@
53979 +#include <linux/kernel.h>
53980 +#include <linux/sched.h>
53981 +#include <linux/fs.h>
53982 +#include <linux/file.h>
53983 +#include <linux/grinternal.h>
53984 +
53985 +int
53986 +gr_handle_follow_link(const struct inode *parent,
53987 + const struct inode *inode,
53988 + const struct dentry *dentry, const struct vfsmount *mnt)
53989 +{
53990 +#ifdef CONFIG_GRKERNSEC_LINK
53991 + const struct cred *cred = current_cred();
53992 +
53993 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53994 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53995 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53996 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53997 + return -EACCES;
53998 + }
53999 +#endif
54000 + return 0;
54001 +}
54002 +
54003 +int
54004 +gr_handle_hardlink(const struct dentry *dentry,
54005 + const struct vfsmount *mnt,
54006 + struct inode *inode, const int mode, const char *to)
54007 +{
54008 +#ifdef CONFIG_GRKERNSEC_LINK
54009 + const struct cred *cred = current_cred();
54010 +
54011 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54012 + (!S_ISREG(mode) || (mode & S_ISUID) ||
54013 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54014 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54015 + !capable(CAP_FOWNER) && cred->uid) {
54016 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54017 + return -EPERM;
54018 + }
54019 +#endif
54020 + return 0;
54021 +}
54022 diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
54023 --- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54024 +++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
54025 @@ -0,0 +1,310 @@
54026 +#include <linux/kernel.h>
54027 +#include <linux/sched.h>
54028 +#include <linux/file.h>
54029 +#include <linux/tty.h>
54030 +#include <linux/fs.h>
54031 +#include <linux/grinternal.h>
54032 +
54033 +#ifdef CONFIG_TREE_PREEMPT_RCU
54034 +#define DISABLE_PREEMPT() preempt_disable()
54035 +#define ENABLE_PREEMPT() preempt_enable()
54036 +#else
54037 +#define DISABLE_PREEMPT()
54038 +#define ENABLE_PREEMPT()
54039 +#endif
54040 +
54041 +#define BEGIN_LOCKS(x) \
54042 + DISABLE_PREEMPT(); \
54043 + rcu_read_lock(); \
54044 + read_lock(&tasklist_lock); \
54045 + read_lock(&grsec_exec_file_lock); \
54046 + if (x != GR_DO_AUDIT) \
54047 + spin_lock(&grsec_alert_lock); \
54048 + else \
54049 + spin_lock(&grsec_audit_lock)
54050 +
54051 +#define END_LOCKS(x) \
54052 + if (x != GR_DO_AUDIT) \
54053 + spin_unlock(&grsec_alert_lock); \
54054 + else \
54055 + spin_unlock(&grsec_audit_lock); \
54056 + read_unlock(&grsec_exec_file_lock); \
54057 + read_unlock(&tasklist_lock); \
54058 + rcu_read_unlock(); \
54059 + ENABLE_PREEMPT(); \
54060 + if (x == GR_DONT_AUDIT) \
54061 + gr_handle_alertkill(current)
54062 +
54063 +enum {
54064 + FLOODING,
54065 + NO_FLOODING
54066 +};
54067 +
54068 +extern char *gr_alert_log_fmt;
54069 +extern char *gr_audit_log_fmt;
54070 +extern char *gr_alert_log_buf;
54071 +extern char *gr_audit_log_buf;
54072 +
54073 +static int gr_log_start(int audit)
54074 +{
54075 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54076 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54077 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54078 +
54079 + if (audit == GR_DO_AUDIT)
54080 + goto set_fmt;
54081 +
54082 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54083 + grsec_alert_wtime = jiffies;
54084 + grsec_alert_fyet = 0;
54085 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54086 + grsec_alert_fyet++;
54087 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54088 + grsec_alert_wtime = jiffies;
54089 + grsec_alert_fyet++;
54090 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54091 + return FLOODING;
54092 + } else return FLOODING;
54093 +
54094 +set_fmt:
54095 + memset(buf, 0, PAGE_SIZE);
54096 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
54097 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54098 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54099 + } else if (current->signal->curr_ip) {
54100 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54101 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54102 + } else if (gr_acl_is_enabled()) {
54103 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54104 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54105 + } else {
54106 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
54107 + strcpy(buf, fmt);
54108 + }
54109 +
54110 + return NO_FLOODING;
54111 +}
54112 +
54113 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54114 + __attribute__ ((format (printf, 2, 0)));
54115 +
54116 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54117 +{
54118 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54119 + unsigned int len = strlen(buf);
54120 +
54121 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54122 +
54123 + return;
54124 +}
54125 +
54126 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54127 + __attribute__ ((format (printf, 2, 3)));
54128 +
54129 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54130 +{
54131 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54132 + unsigned int len = strlen(buf);
54133 + va_list ap;
54134 +
54135 + va_start(ap, msg);
54136 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54137 + va_end(ap);
54138 +
54139 + return;
54140 +}
54141 +
54142 +static void gr_log_end(int audit)
54143 +{
54144 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54145 + unsigned int len = strlen(buf);
54146 +
54147 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54148 + printk("%s\n", buf);
54149 +
54150 + return;
54151 +}
54152 +
54153 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54154 +{
54155 + int logtype;
54156 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54157 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54158 + void *voidptr = NULL;
54159 + int num1 = 0, num2 = 0;
54160 + unsigned long ulong1 = 0, ulong2 = 0;
54161 + struct dentry *dentry = NULL;
54162 + struct vfsmount *mnt = NULL;
54163 + struct file *file = NULL;
54164 + struct task_struct *task = NULL;
54165 + const struct cred *cred, *pcred;
54166 + va_list ap;
54167 +
54168 + BEGIN_LOCKS(audit);
54169 + logtype = gr_log_start(audit);
54170 + if (logtype == FLOODING) {
54171 + END_LOCKS(audit);
54172 + return;
54173 + }
54174 + va_start(ap, argtypes);
54175 + switch (argtypes) {
54176 + case GR_TTYSNIFF:
54177 + task = va_arg(ap, struct task_struct *);
54178 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54179 + break;
54180 + case GR_SYSCTL_HIDDEN:
54181 + str1 = va_arg(ap, char *);
54182 + gr_log_middle_varargs(audit, msg, result, str1);
54183 + break;
54184 + case GR_RBAC:
54185 + dentry = va_arg(ap, struct dentry *);
54186 + mnt = va_arg(ap, struct vfsmount *);
54187 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54188 + break;
54189 + case GR_RBAC_STR:
54190 + dentry = va_arg(ap, struct dentry *);
54191 + mnt = va_arg(ap, struct vfsmount *);
54192 + str1 = va_arg(ap, char *);
54193 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54194 + break;
54195 + case GR_STR_RBAC:
54196 + str1 = va_arg(ap, char *);
54197 + dentry = va_arg(ap, struct dentry *);
54198 + mnt = va_arg(ap, struct vfsmount *);
54199 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54200 + break;
54201 + case GR_RBAC_MODE2:
54202 + dentry = va_arg(ap, struct dentry *);
54203 + mnt = va_arg(ap, struct vfsmount *);
54204 + str1 = va_arg(ap, char *);
54205 + str2 = va_arg(ap, char *);
54206 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54207 + break;
54208 + case GR_RBAC_MODE3:
54209 + dentry = va_arg(ap, struct dentry *);
54210 + mnt = va_arg(ap, struct vfsmount *);
54211 + str1 = va_arg(ap, char *);
54212 + str2 = va_arg(ap, char *);
54213 + str3 = va_arg(ap, char *);
54214 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54215 + break;
54216 + case GR_FILENAME:
54217 + dentry = va_arg(ap, struct dentry *);
54218 + mnt = va_arg(ap, struct vfsmount *);
54219 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54220 + break;
54221 + case GR_STR_FILENAME:
54222 + str1 = va_arg(ap, char *);
54223 + dentry = va_arg(ap, struct dentry *);
54224 + mnt = va_arg(ap, struct vfsmount *);
54225 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54226 + break;
54227 + case GR_FILENAME_STR:
54228 + dentry = va_arg(ap, struct dentry *);
54229 + mnt = va_arg(ap, struct vfsmount *);
54230 + str1 = va_arg(ap, char *);
54231 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54232 + break;
54233 + case GR_FILENAME_TWO_INT:
54234 + dentry = va_arg(ap, struct dentry *);
54235 + mnt = va_arg(ap, struct vfsmount *);
54236 + num1 = va_arg(ap, int);
54237 + num2 = va_arg(ap, int);
54238 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54239 + break;
54240 + case GR_FILENAME_TWO_INT_STR:
54241 + dentry = va_arg(ap, struct dentry *);
54242 + mnt = va_arg(ap, struct vfsmount *);
54243 + num1 = va_arg(ap, int);
54244 + num2 = va_arg(ap, int);
54245 + str1 = va_arg(ap, char *);
54246 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54247 + break;
54248 + case GR_TEXTREL:
54249 + file = va_arg(ap, struct file *);
54250 + ulong1 = va_arg(ap, unsigned long);
54251 + ulong2 = va_arg(ap, unsigned long);
54252 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54253 + break;
54254 + case GR_PTRACE:
54255 + task = va_arg(ap, struct task_struct *);
54256 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54257 + break;
54258 + case GR_RESOURCE:
54259 + task = va_arg(ap, struct task_struct *);
54260 + cred = __task_cred(task);
54261 + pcred = __task_cred(task->real_parent);
54262 + ulong1 = va_arg(ap, unsigned long);
54263 + str1 = va_arg(ap, char *);
54264 + ulong2 = va_arg(ap, unsigned long);
54265 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54266 + break;
54267 + case GR_CAP:
54268 + task = va_arg(ap, struct task_struct *);
54269 + cred = __task_cred(task);
54270 + pcred = __task_cred(task->real_parent);
54271 + str1 = va_arg(ap, char *);
54272 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54273 + break;
54274 + case GR_SIG:
54275 + str1 = va_arg(ap, char *);
54276 + voidptr = va_arg(ap, void *);
54277 + gr_log_middle_varargs(audit, msg, str1, voidptr);
54278 + break;
54279 + case GR_SIG2:
54280 + task = va_arg(ap, struct task_struct *);
54281 + cred = __task_cred(task);
54282 + pcred = __task_cred(task->real_parent);
54283 + num1 = va_arg(ap, int);
54284 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54285 + break;
54286 + case GR_CRASH1:
54287 + task = va_arg(ap, struct task_struct *);
54288 + cred = __task_cred(task);
54289 + pcred = __task_cred(task->real_parent);
54290 + ulong1 = va_arg(ap, unsigned long);
54291 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54292 + break;
54293 + case GR_CRASH2:
54294 + task = va_arg(ap, struct task_struct *);
54295 + cred = __task_cred(task);
54296 + pcred = __task_cred(task->real_parent);
54297 + ulong1 = va_arg(ap, unsigned long);
54298 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54299 + break;
54300 + case GR_RWXMAP:
54301 + file = va_arg(ap, struct file *);
54302 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54303 + break;
54304 + case GR_PSACCT:
54305 + {
54306 + unsigned int wday, cday;
54307 + __u8 whr, chr;
54308 + __u8 wmin, cmin;
54309 + __u8 wsec, csec;
54310 + char cur_tty[64] = { 0 };
54311 + char parent_tty[64] = { 0 };
54312 +
54313 + task = va_arg(ap, struct task_struct *);
54314 + wday = va_arg(ap, unsigned int);
54315 + cday = va_arg(ap, unsigned int);
54316 + whr = va_arg(ap, int);
54317 + chr = va_arg(ap, int);
54318 + wmin = va_arg(ap, int);
54319 + cmin = va_arg(ap, int);
54320 + wsec = va_arg(ap, int);
54321 + csec = va_arg(ap, int);
54322 + ulong1 = va_arg(ap, unsigned long);
54323 + cred = __task_cred(task);
54324 + pcred = __task_cred(task->real_parent);
54325 +
54326 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54327 + }
54328 + break;
54329 + default:
54330 + gr_log_middle(audit, msg, ap);
54331 + }
54332 + va_end(ap);
54333 + gr_log_end(audit);
54334 + END_LOCKS(audit);
54335 +}
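A minimal standalone sketch of the alert flood gate implemented by gr_log_start() above: at most CONFIG_GRKERNSEC_FLOODBURST alerts pass per CONFIG_GRKERNSEC_FLOODTIME-second window, a single "logging disabled" notice is printed, and everything else is dropped until the window rolls over. Wall-clock seconds stand in for jiffies and the two FLOOD* values below are example numbers, not the Kconfig defaults; this is an illustration, not code from the patch.

/* Userspace model of the flood gate in gr_log_start() above. */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window (example value) */
#define FLOODBURST 6    /* alerts allowed per window (example value) */

static time_t alert_wtime;      /* start of the current window */
static unsigned alert_fyet;     /* alerts emitted in the current window */

/* Returns 1 if the alert may be logged, 0 if it is suppressed. */
static int alert_allowed(void)
{
    time_t now = time(NULL);

    if (!alert_wtime || now - alert_wtime > FLOODTIME) {
        /* window expired: start a fresh one */
        alert_wtime = now;
        alert_fyet = 0;
    } else if (alert_fyet < FLOODBURST) {
        alert_fyet++;
    } else if (alert_fyet == FLOODBURST) {
        /* announce the suppression once, then stay silent */
        alert_wtime = now;
        alert_fyet++;
        printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
        return 0;
    } else {
        return 0;
    }
    return 1;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        printf("alert %d: %s\n", i, alert_allowed() ? "logged" : "suppressed");
    return 0;
}

Run back to back, this prints a burst of "logged" lines followed by "suppressed", matching the branch structure of the code above.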
54336 diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54337 --- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54338 +++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54339 @@ -0,0 +1,33 @@
54340 +#include <linux/kernel.h>
54341 +#include <linux/sched.h>
54342 +#include <linux/mm.h>
54343 +#include <linux/mman.h>
54344 +#include <linux/grinternal.h>
54345 +
54346 +void
54347 +gr_handle_ioperm(void)
54348 +{
54349 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54350 + return;
54351 +}
54352 +
54353 +void
54354 +gr_handle_iopl(void)
54355 +{
54356 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54357 + return;
54358 +}
54359 +
54360 +void
54361 +gr_handle_mem_readwrite(u64 from, u64 to)
54362 +{
54363 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54364 + return;
54365 +}
54366 +
54367 +void
54368 +gr_handle_vm86(void)
54369 +{
54370 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54371 + return;
54372 +}
54373 diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54374 --- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54375 +++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54376 @@ -0,0 +1,62 @@
54377 +#include <linux/kernel.h>
54378 +#include <linux/sched.h>
54379 +#include <linux/mount.h>
54380 +#include <linux/grsecurity.h>
54381 +#include <linux/grinternal.h>
54382 +
54383 +void
54384 +gr_log_remount(const char *devname, const int retval)
54385 +{
54386 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54387 + if (grsec_enable_mount && (retval >= 0))
54388 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54389 +#endif
54390 + return;
54391 +}
54392 +
54393 +void
54394 +gr_log_unmount(const char *devname, const int retval)
54395 +{
54396 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54397 + if (grsec_enable_mount && (retval >= 0))
54398 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54399 +#endif
54400 + return;
54401 +}
54402 +
54403 +void
54404 +gr_log_mount(const char *from, const char *to, const int retval)
54405 +{
54406 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54407 + if (grsec_enable_mount && (retval >= 0))
54408 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54409 +#endif
54410 + return;
54411 +}
54412 +
54413 +int
54414 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54415 +{
54416 +#ifdef CONFIG_GRKERNSEC_ROFS
54417 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54418 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54419 + return -EPERM;
54420 + } else
54421 + return 0;
54422 +#endif
54423 + return 0;
54424 +}
54425 +
54426 +int
54427 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54428 +{
54429 +#ifdef CONFIG_GRKERNSEC_ROFS
54430 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54431 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54432 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54433 + return -EPERM;
54434 + } else
54435 + return 0;
54436 +#endif
54437 + return 0;
54438 +}
54439 diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54440 --- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54441 +++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54442 @@ -0,0 +1,36 @@
54443 +#include <linux/kernel.h>
54444 +#include <linux/sched.h>
54445 +#include <linux/mm.h>
54446 +#include <linux/file.h>
54447 +#include <linux/grinternal.h>
54448 +#include <linux/grsecurity.h>
54449 +
54450 +void
54451 +gr_log_textrel(struct vm_area_struct * vma)
54452 +{
54453 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54454 + if (grsec_enable_audit_textrel)
54455 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54456 +#endif
54457 + return;
54458 +}
54459 +
54460 +void
54461 +gr_log_rwxmmap(struct file *file)
54462 +{
54463 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54464 + if (grsec_enable_log_rwxmaps)
54465 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54466 +#endif
54467 + return;
54468 +}
54469 +
54470 +void
54471 +gr_log_rwxmprotect(struct file *file)
54472 +{
54473 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54474 + if (grsec_enable_log_rwxmaps)
54475 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54476 +#endif
54477 + return;
54478 +}
54479 diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54480 --- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54481 +++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54482 @@ -0,0 +1,14 @@
54483 +#include <linux/kernel.h>
54484 +#include <linux/sched.h>
54485 +#include <linux/grinternal.h>
54486 +#include <linux/grsecurity.h>
54487 +
54488 +void
54489 +gr_audit_ptrace(struct task_struct *task)
54490 +{
54491 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54492 + if (grsec_enable_audit_ptrace)
54493 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54494 +#endif
54495 + return;
54496 +}
54497 diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54498 --- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54499 +++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54500 @@ -0,0 +1,205 @@
54501 +#include <linux/kernel.h>
54502 +#include <linux/sched.h>
54503 +#include <linux/delay.h>
54504 +#include <linux/grsecurity.h>
54505 +#include <linux/grinternal.h>
54506 +#include <linux/hardirq.h>
54507 +
54508 +char *signames[] = {
54509 + [SIGSEGV] = "Segmentation fault",
54510 + [SIGILL] = "Illegal instruction",
54511 + [SIGABRT] = "Abort",
54512 + [SIGBUS] = "Invalid alignment/Bus error"
54513 +};
54514 +
54515 +void
54516 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54517 +{
54518 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54519 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54520 + (sig == SIGABRT) || (sig == SIGBUS))) {
54521 + if (t->pid == current->pid) {
54522 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54523 + } else {
54524 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54525 + }
54526 + }
54527 +#endif
54528 + return;
54529 +}
54530 +
54531 +int
54532 +gr_handle_signal(const struct task_struct *p, const int sig)
54533 +{
54534 +#ifdef CONFIG_GRKERNSEC
54535 + if (current->pid > 1 && gr_check_protected_task(p)) {
54536 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54537 + return -EPERM;
54538 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54539 + return -EPERM;
54540 + }
54541 +#endif
54542 + return 0;
54543 +}
54544 +
54545 +#ifdef CONFIG_GRKERNSEC
54546 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54547 +
54548 +int gr_fake_force_sig(int sig, struct task_struct *t)
54549 +{
54550 + unsigned long int flags;
54551 + int ret, blocked, ignored;
54552 + struct k_sigaction *action;
54553 +
54554 + spin_lock_irqsave(&t->sighand->siglock, flags);
54555 + action = &t->sighand->action[sig-1];
54556 + ignored = action->sa.sa_handler == SIG_IGN;
54557 + blocked = sigismember(&t->blocked, sig);
54558 + if (blocked || ignored) {
54559 + action->sa.sa_handler = SIG_DFL;
54560 + if (blocked) {
54561 + sigdelset(&t->blocked, sig);
54562 + recalc_sigpending_and_wake(t);
54563 + }
54564 + }
54565 + if (action->sa.sa_handler == SIG_DFL)
54566 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54567 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54568 +
54569 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54570 +
54571 + return ret;
54572 +}
54573 +#endif
54574 +
54575 +#ifdef CONFIG_GRKERNSEC_BRUTE
54576 +#define GR_USER_BAN_TIME (15 * 60)
54577 +
54578 +static int __get_dumpable(unsigned long mm_flags)
54579 +{
54580 + int ret;
54581 +
54582 + ret = mm_flags & MMF_DUMPABLE_MASK;
54583 + return (ret >= 2) ? 2 : ret;
54584 +}
54585 +#endif
54586 +
54587 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54588 +{
54589 +#ifdef CONFIG_GRKERNSEC_BRUTE
54590 + uid_t uid = 0;
54591 +
54592 + if (!grsec_enable_brute)
54593 + return;
54594 +
54595 + rcu_read_lock();
54596 + read_lock(&tasklist_lock);
54597 + read_lock(&grsec_exec_file_lock);
54598 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54599 + p->real_parent->brute = 1;
54600 + else {
54601 + const struct cred *cred = __task_cred(p), *cred2;
54602 + struct task_struct *tsk, *tsk2;
54603 +
54604 + if (!__get_dumpable(mm_flags) && cred->uid) {
54605 + struct user_struct *user;
54606 +
54607 + uid = cred->uid;
54608 +
54609 + /* this reference is put (dropped) at exec time, once the ban has expired */
54610 + user = find_user(uid);
54611 + if (user == NULL)
54612 + goto unlock;
54613 + user->banned = 1;
54614 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54615 + if (user->ban_expires == ~0UL)
54616 + user->ban_expires--;
54617 +
54618 + do_each_thread(tsk2, tsk) {
54619 + cred2 = __task_cred(tsk);
54620 + if (tsk != p && cred2->uid == uid)
54621 + gr_fake_force_sig(SIGKILL, tsk);
54622 + } while_each_thread(tsk2, tsk);
54623 + }
54624 + }
54625 +unlock:
54626 + read_unlock(&grsec_exec_file_lock);
54627 + read_unlock(&tasklist_lock);
54628 + rcu_read_unlock();
54629 +
54630 + if (uid)
54631 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54632 +#endif
54633 + return;
54634 +}
54635 +
54636 +void gr_handle_brute_check(void)
54637 +{
54638 +#ifdef CONFIG_GRKERNSEC_BRUTE
54639 + if (current->brute)
54640 + msleep(30 * 1000);
54641 +#endif
54642 + return;
54643 +}
54644 +
54645 +void gr_handle_kernel_exploit(void)
54646 +{
54647 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54648 + const struct cred *cred;
54649 + struct task_struct *tsk, *tsk2;
54650 + struct user_struct *user;
54651 + uid_t uid;
54652 +
54653 + if (in_irq() || in_serving_softirq() || in_nmi())
54654 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54655 +
54656 + uid = current_uid();
54657 +
54658 + if (uid == 0)
54659 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
54660 + else {
54661 + /* kill all the processes of this user, hold a reference
54662 + to their creds struct, and prevent them from creating
54663 + another process until system reset
54664 + */
54665 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54666 + /* we intentionally leak this ref */
54667 + user = get_uid(current->cred->user);
54668 + if (user) {
54669 + user->banned = 1;
54670 + user->ban_expires = ~0UL;
54671 + }
54672 +
54673 + read_lock(&tasklist_lock);
54674 + do_each_thread(tsk2, tsk) {
54675 + cred = __task_cred(tsk);
54676 + if (cred->uid == uid)
54677 + gr_fake_force_sig(SIGKILL, tsk);
54678 + } while_each_thread(tsk2, tsk);
54679 + read_unlock(&tasklist_lock);
54680 + }
54681 +#endif
54682 +}
54683 +
54684 +int __gr_process_user_ban(struct user_struct *user)
54685 +{
54686 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54687 + if (unlikely(user->banned)) {
54688 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54689 + user->banned = 0;
54690 + user->ban_expires = 0;
54691 + free_uid(user);
54692 + } else
54693 + return -EPERM;
54694 + }
54695 +#endif
54696 + return 0;
54697 +}
54698 +
54699 +int gr_process_user_ban(void)
54700 +{
54701 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54702 + return __gr_process_user_ban(current->cred->user);
54703 +#endif
54704 + return 0;
54705 +}
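A userspace sketch of the ban bookkeeping that gr_handle_brute_attach() and __gr_process_user_ban() above implement: a ban stores an absolute expiry time (with ~0UL reserved for "until reboot") and is cleared lazily the next time the check runs after the expiry has passed. time() stands in for get_seconds(), the user_struct refcounting and locking are omitted, and fake_user is a made-up stand-in type; illustration only, not code from the patch.

#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60)      /* mirrors GR_USER_BAN_TIME: 15 minutes */

struct fake_user {
    int banned;
    unsigned long ban_expires;  /* 0 = none, ~0UL = permanent */
};

static void ban_user(struct fake_user *u, int permanent)
{
    u->banned = 1;
    if (permanent) {
        u->ban_expires = ~0UL;                        /* banned until reboot */
    } else {
        u->ban_expires = (unsigned long)time(NULL) + BAN_TIME;
        if (u->ban_expires == ~0UL)                   /* keep timed bans distinguishable */
            u->ban_expires--;
    }
}

/* Mirrors __gr_process_user_ban(): 0 = allowed, -1 = still banned. */
static int process_user_ban(struct fake_user *u)
{
    if (u->banned) {
        if (u->ban_expires != ~0UL &&
            (unsigned long)time(NULL) >= u->ban_expires) {
            u->banned = 0;
            u->ban_expires = 0;   /* free_uid() would drop the leaked ref here */
        } else {
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct fake_user u = { 0, 0 };

    ban_user(&u, 0);
    printf("right after ban: %s\n", process_user_ban(&u) ? "denied" : "allowed");
    u.ban_expires = (unsigned long)time(NULL);        /* pretend the ban just expired */
    printf("after expiry:    %s\n", process_user_ban(&u) ? "denied" : "allowed");
    return 0;
}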
54706 diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54707 --- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54708 +++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54709 @@ -0,0 +1,275 @@
54710 +#include <linux/kernel.h>
54711 +#include <linux/module.h>
54712 +#include <linux/sched.h>
54713 +#include <linux/file.h>
54714 +#include <linux/net.h>
54715 +#include <linux/in.h>
54716 +#include <linux/ip.h>
54717 +#include <net/sock.h>
54718 +#include <net/inet_sock.h>
54719 +#include <linux/grsecurity.h>
54720 +#include <linux/grinternal.h>
54721 +#include <linux/gracl.h>
54722 +
54723 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54724 +EXPORT_SYMBOL(gr_cap_rtnetlink);
54725 +
54726 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54727 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54728 +
54729 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
54730 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
54731 +
54732 +#ifdef CONFIG_UNIX_MODULE
54733 +EXPORT_SYMBOL(gr_acl_handle_unix);
54734 +EXPORT_SYMBOL(gr_acl_handle_mknod);
54735 +EXPORT_SYMBOL(gr_handle_chroot_unix);
54736 +EXPORT_SYMBOL(gr_handle_create);
54737 +#endif
54738 +
54739 +#ifdef CONFIG_GRKERNSEC
54740 +#define gr_conn_table_size 32749
54741 +struct conn_table_entry {
54742 + struct conn_table_entry *next;
54743 + struct signal_struct *sig;
54744 +};
54745 +
54746 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54747 +DEFINE_SPINLOCK(gr_conn_table_lock);
54748 +
54749 +extern const char * gr_socktype_to_name(unsigned char type);
54750 +extern const char * gr_proto_to_name(unsigned char proto);
54751 +extern const char * gr_sockfamily_to_name(unsigned char family);
54752 +
54753 +static __inline__ int
54754 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54755 +{
54756 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54757 +}
54758 +
54759 +static __inline__ int
54760 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54761 + __u16 sport, __u16 dport)
54762 +{
54763 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54764 + sig->gr_sport == sport && sig->gr_dport == dport))
54765 + return 1;
54766 + else
54767 + return 0;
54768 +}
54769 +
54770 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54771 +{
54772 + struct conn_table_entry **match;
54773 + unsigned int index;
54774 +
54775 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54776 + sig->gr_sport, sig->gr_dport,
54777 + gr_conn_table_size);
54778 +
54779 + newent->sig = sig;
54780 +
54781 + match = &gr_conn_table[index];
54782 + newent->next = *match;
54783 + *match = newent;
54784 +
54785 + return;
54786 +}
54787 +
54788 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54789 +{
54790 + struct conn_table_entry *match, *last = NULL;
54791 + unsigned int index;
54792 +
54793 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54794 + sig->gr_sport, sig->gr_dport,
54795 + gr_conn_table_size);
54796 +
54797 + match = gr_conn_table[index];
54798 + while (match && !conn_match(match->sig,
54799 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54800 + sig->gr_dport)) {
54801 + last = match;
54802 + match = match->next;
54803 + }
54804 +
54805 + if (match) {
54806 + if (last)
54807 + last->next = match->next;
54808 + else
54809 + gr_conn_table[index] = NULL;
54810 + kfree(match);
54811 + }
54812 +
54813 + return;
54814 +}
54815 +
54816 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54817 + __u16 sport, __u16 dport)
54818 +{
54819 + struct conn_table_entry *match;
54820 + unsigned int index;
54821 +
54822 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54823 +
54824 + match = gr_conn_table[index];
54825 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54826 + match = match->next;
54827 +
54828 + if (match)
54829 + return match->sig;
54830 + else
54831 + return NULL;
54832 +}
54833 +
54834 +#endif
54835 +
54836 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54837 +{
54838 +#ifdef CONFIG_GRKERNSEC
54839 + struct signal_struct *sig = task->signal;
54840 + struct conn_table_entry *newent;
54841 +
54842 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54843 + if (newent == NULL)
54844 + return;
54845 + /* no bh lock needed since we are called with bh disabled */
54846 + spin_lock(&gr_conn_table_lock);
54847 + gr_del_task_from_ip_table_nolock(sig);
54848 + sig->gr_saddr = inet->rcv_saddr;
54849 + sig->gr_daddr = inet->daddr;
54850 + sig->gr_sport = inet->sport;
54851 + sig->gr_dport = inet->dport;
54852 + gr_add_to_task_ip_table_nolock(sig, newent);
54853 + spin_unlock(&gr_conn_table_lock);
54854 +#endif
54855 + return;
54856 +}
54857 +
54858 +void gr_del_task_from_ip_table(struct task_struct *task)
54859 +{
54860 +#ifdef CONFIG_GRKERNSEC
54861 + spin_lock_bh(&gr_conn_table_lock);
54862 + gr_del_task_from_ip_table_nolock(task->signal);
54863 + spin_unlock_bh(&gr_conn_table_lock);
54864 +#endif
54865 + return;
54866 +}
54867 +
54868 +void
54869 +gr_attach_curr_ip(const struct sock *sk)
54870 +{
54871 +#ifdef CONFIG_GRKERNSEC
54872 + struct signal_struct *p, *set;
54873 + const struct inet_sock *inet = inet_sk(sk);
54874 +
54875 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54876 + return;
54877 +
54878 + set = current->signal;
54879 +
54880 + spin_lock_bh(&gr_conn_table_lock);
54881 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54882 + inet->dport, inet->sport);
54883 + if (unlikely(p != NULL)) {
54884 + set->curr_ip = p->curr_ip;
54885 + set->used_accept = 1;
54886 + gr_del_task_from_ip_table_nolock(p);
54887 + spin_unlock_bh(&gr_conn_table_lock);
54888 + return;
54889 + }
54890 + spin_unlock_bh(&gr_conn_table_lock);
54891 +
54892 + set->curr_ip = inet->daddr;
54893 + set->used_accept = 1;
54894 +#endif
54895 + return;
54896 +}
54897 +
54898 +int
54899 +gr_handle_sock_all(const int family, const int type, const int protocol)
54900 +{
54901 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54902 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54903 + (family != AF_UNIX)) {
54904 + if (family == AF_INET)
54905 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54906 + else
54907 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54908 + return -EACCES;
54909 + }
54910 +#endif
54911 + return 0;
54912 +}
54913 +
54914 +int
54915 +gr_handle_sock_server(const struct sockaddr *sck)
54916 +{
54917 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54918 + if (grsec_enable_socket_server &&
54919 + in_group_p(grsec_socket_server_gid) &&
54920 + sck && (sck->sa_family != AF_UNIX) &&
54921 + (sck->sa_family != AF_LOCAL)) {
54922 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54923 + return -EACCES;
54924 + }
54925 +#endif
54926 + return 0;
54927 +}
54928 +
54929 +int
54930 +gr_handle_sock_server_other(const struct sock *sck)
54931 +{
54932 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54933 + if (grsec_enable_socket_server &&
54934 + in_group_p(grsec_socket_server_gid) &&
54935 + sck && (sck->sk_family != AF_UNIX) &&
54936 + (sck->sk_family != AF_LOCAL)) {
54937 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54938 + return -EACCES;
54939 + }
54940 +#endif
54941 + return 0;
54942 +}
54943 +
54944 +int
54945 +gr_handle_sock_client(const struct sockaddr *sck)
54946 +{
54947 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54948 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54949 + sck && (sck->sa_family != AF_UNIX) &&
54950 + (sck->sa_family != AF_LOCAL)) {
54951 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54952 + return -EACCES;
54953 + }
54954 +#endif
54955 + return 0;
54956 +}
54957 +
54958 +kernel_cap_t
54959 +gr_cap_rtnetlink(struct sock *sock)
54960 +{
54961 +#ifdef CONFIG_GRKERNSEC
54962 + if (!gr_acl_is_enabled())
54963 + return current_cap();
54964 + else if (sock->sk_protocol == NETLINK_ISCSI &&
54965 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54966 + gr_is_capable(CAP_SYS_ADMIN))
54967 + return current_cap();
54968 + else if (sock->sk_protocol == NETLINK_AUDIT &&
54969 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54970 + gr_is_capable(CAP_AUDIT_WRITE) &&
54971 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54972 + gr_is_capable(CAP_AUDIT_CONTROL))
54973 + return current_cap();
54974 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54975 + ((sock->sk_protocol == NETLINK_ROUTE) ?
54976 + gr_is_capable_nolog(CAP_NET_ADMIN) :
54977 + gr_is_capable(CAP_NET_ADMIN)))
54978 + return current_cap();
54979 + else
54980 + return __cap_empty_set;
54981 +#else
54982 + return current_cap();
54983 +#endif
54984 +}
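A standalone sketch of the per-connection lookup table kept by grsec_sock.c above: a fixed-size array of singly linked chains, indexed by the same simple additive hash of (saddr, daddr, sport, dport) as gr_conn_table. malloc/free replace kmalloc/kfree, the spinlock is dropped, and unlinking (gr_del_task_from_ip_table_nolock() above) is left out; illustration only, not patch code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749        /* same prime as gr_conn_table_size */

struct entry {
    struct entry *next;
    uint32_t saddr, daddr;
    uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned conn_hash(uint32_t saddr, uint32_t daddr,
                          uint16_t sport, uint16_t dport)
{
    return (daddr + saddr + ((uint32_t)sport << 8) +
            ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(uint32_t saddr, uint32_t daddr,
                     uint16_t sport, uint16_t dport)
{
    struct entry *e = malloc(sizeof(*e));
    unsigned idx = conn_hash(saddr, daddr, sport, dport);

    if (!e)
        return;
    e->saddr = saddr; e->daddr = daddr; e->sport = sport; e->dport = dport;
    e->next = table[idx];       /* push onto the head of the chain */
    table[idx] = e;
}

static struct entry *conn_lookup(uint32_t saddr, uint32_t daddr,
                                 uint16_t sport, uint16_t dport)
{
    struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

    while (e && !(e->saddr == saddr && e->daddr == daddr &&
                  e->sport == sport && e->dport == dport))
        e = e->next;
    return e;
}

int main(void)
{
    conn_add(0x0a000001, 0x0a000002, 12345, 80);
    printf("lookup hit:  %s\n",
           conn_lookup(0x0a000001, 0x0a000002, 12345, 80) ? "yes" : "no");
    printf("lookup miss: %s\n",
           conn_lookup(0x0a000001, 0x0a000002, 12345, 443) ? "yes" : "no");
    return 0;
}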
54985 diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54986 --- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54987 +++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54988 @@ -0,0 +1,479 @@
54989 +#include <linux/kernel.h>
54990 +#include <linux/sched.h>
54991 +#include <linux/sysctl.h>
54992 +#include <linux/grsecurity.h>
54993 +#include <linux/grinternal.h>
54994 +
54995 +int
54996 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54997 +{
54998 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54999 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55000 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55001 + return -EACCES;
55002 + }
55003 +#endif
55004 + return 0;
55005 +}
55006 +
55007 +#ifdef CONFIG_GRKERNSEC_ROFS
55008 +static int __maybe_unused one = 1;
55009 +#endif
55010 +
55011 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55012 +ctl_table grsecurity_table[] = {
55013 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55014 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55015 +#ifdef CONFIG_GRKERNSEC_IO
55016 + {
55017 + .ctl_name = CTL_UNNUMBERED,
55018 + .procname = "disable_priv_io",
55019 + .data = &grsec_disable_privio,
55020 + .maxlen = sizeof(int),
55021 + .mode = 0600,
55022 + .proc_handler = &proc_dointvec,
55023 + },
55024 +#endif
55025 +#endif
55026 +#ifdef CONFIG_GRKERNSEC_LINK
55027 + {
55028 + .ctl_name = CTL_UNNUMBERED,
55029 + .procname = "linking_restrictions",
55030 + .data = &grsec_enable_link,
55031 + .maxlen = sizeof(int),
55032 + .mode = 0600,
55033 + .proc_handler = &proc_dointvec,
55034 + },
55035 +#endif
55036 +#ifdef CONFIG_GRKERNSEC_BRUTE
55037 + {
55038 + .ctl_name = CTL_UNNUMBERED,
55039 + .procname = "deter_bruteforce",
55040 + .data = &grsec_enable_brute,
55041 + .maxlen = sizeof(int),
55042 + .mode = 0600,
55043 + .proc_handler = &proc_dointvec,
55044 + },
55045 +#endif
55046 +#ifdef CONFIG_GRKERNSEC_FIFO
55047 + {
55048 + .ctl_name = CTL_UNNUMBERED,
55049 + .procname = "fifo_restrictions",
55050 + .data = &grsec_enable_fifo,
55051 + .maxlen = sizeof(int),
55052 + .mode = 0600,
55053 + .proc_handler = &proc_dointvec,
55054 + },
55055 +#endif
55056 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55057 + {
55058 + .ctl_name = CTL_UNNUMBERED,
55059 + .procname = "ip_blackhole",
55060 + .data = &grsec_enable_blackhole,
55061 + .maxlen = sizeof(int),
55062 + .mode = 0600,
55063 + .proc_handler = &proc_dointvec,
55064 + },
55065 + {
55066 + .ctl_name = CTL_UNNUMBERED,
55067 + .procname = "lastack_retries",
55068 + .data = &grsec_lastack_retries,
55069 + .maxlen = sizeof(int),
55070 + .mode = 0600,
55071 + .proc_handler = &proc_dointvec,
55072 + },
55073 +#endif
55074 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55075 + {
55076 + .ctl_name = CTL_UNNUMBERED,
55077 + .procname = "exec_logging",
55078 + .data = &grsec_enable_execlog,
55079 + .maxlen = sizeof(int),
55080 + .mode = 0600,
55081 + .proc_handler = &proc_dointvec,
55082 + },
55083 +#endif
55084 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55085 + {
55086 + .ctl_name = CTL_UNNUMBERED,
55087 + .procname = "rwxmap_logging",
55088 + .data = &grsec_enable_log_rwxmaps,
55089 + .maxlen = sizeof(int),
55090 + .mode = 0600,
55091 + .proc_handler = &proc_dointvec,
55092 + },
55093 +#endif
55094 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55095 + {
55096 + .ctl_name = CTL_UNNUMBERED,
55097 + .procname = "signal_logging",
55098 + .data = &grsec_enable_signal,
55099 + .maxlen = sizeof(int),
55100 + .mode = 0600,
55101 + .proc_handler = &proc_dointvec,
55102 + },
55103 +#endif
55104 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55105 + {
55106 + .ctl_name = CTL_UNNUMBERED,
55107 + .procname = "forkfail_logging",
55108 + .data = &grsec_enable_forkfail,
55109 + .maxlen = sizeof(int),
55110 + .mode = 0600,
55111 + .proc_handler = &proc_dointvec,
55112 + },
55113 +#endif
55114 +#ifdef CONFIG_GRKERNSEC_TIME
55115 + {
55116 + .ctl_name = CTL_UNNUMBERED,
55117 + .procname = "timechange_logging",
55118 + .data = &grsec_enable_time,
55119 + .maxlen = sizeof(int),
55120 + .mode = 0600,
55121 + .proc_handler = &proc_dointvec,
55122 + },
55123 +#endif
55124 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55125 + {
55126 + .ctl_name = CTL_UNNUMBERED,
55127 + .procname = "chroot_deny_shmat",
55128 + .data = &grsec_enable_chroot_shmat,
55129 + .maxlen = sizeof(int),
55130 + .mode = 0600,
55131 + .proc_handler = &proc_dointvec,
55132 + },
55133 +#endif
55134 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55135 + {
55136 + .ctl_name = CTL_UNNUMBERED,
55137 + .procname = "chroot_deny_unix",
55138 + .data = &grsec_enable_chroot_unix,
55139 + .maxlen = sizeof(int),
55140 + .mode = 0600,
55141 + .proc_handler = &proc_dointvec,
55142 + },
55143 +#endif
55144 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55145 + {
55146 + .ctl_name = CTL_UNNUMBERED,
55147 + .procname = "chroot_deny_mount",
55148 + .data = &grsec_enable_chroot_mount,
55149 + .maxlen = sizeof(int),
55150 + .mode = 0600,
55151 + .proc_handler = &proc_dointvec,
55152 + },
55153 +#endif
55154 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55155 + {
55156 + .ctl_name = CTL_UNNUMBERED,
55157 + .procname = "chroot_deny_fchdir",
55158 + .data = &grsec_enable_chroot_fchdir,
55159 + .maxlen = sizeof(int),
55160 + .mode = 0600,
55161 + .proc_handler = &proc_dointvec,
55162 + },
55163 +#endif
55164 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55165 + {
55166 + .ctl_name = CTL_UNNUMBERED,
55167 + .procname = "chroot_deny_chroot",
55168 + .data = &grsec_enable_chroot_double,
55169 + .maxlen = sizeof(int),
55170 + .mode = 0600,
55171 + .proc_handler = &proc_dointvec,
55172 + },
55173 +#endif
55174 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55175 + {
55176 + .ctl_name = CTL_UNNUMBERED,
55177 + .procname = "chroot_deny_pivot",
55178 + .data = &grsec_enable_chroot_pivot,
55179 + .maxlen = sizeof(int),
55180 + .mode = 0600,
55181 + .proc_handler = &proc_dointvec,
55182 + },
55183 +#endif
55184 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55185 + {
55186 + .ctl_name = CTL_UNNUMBERED,
55187 + .procname = "chroot_enforce_chdir",
55188 + .data = &grsec_enable_chroot_chdir,
55189 + .maxlen = sizeof(int),
55190 + .mode = 0600,
55191 + .proc_handler = &proc_dointvec,
55192 + },
55193 +#endif
55194 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55195 + {
55196 + .ctl_name = CTL_UNNUMBERED,
55197 + .procname = "chroot_deny_chmod",
55198 + .data = &grsec_enable_chroot_chmod,
55199 + .maxlen = sizeof(int),
55200 + .mode = 0600,
55201 + .proc_handler = &proc_dointvec,
55202 + },
55203 +#endif
55204 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55205 + {
55206 + .ctl_name = CTL_UNNUMBERED,
55207 + .procname = "chroot_deny_mknod",
55208 + .data = &grsec_enable_chroot_mknod,
55209 + .maxlen = sizeof(int),
55210 + .mode = 0600,
55211 + .proc_handler = &proc_dointvec,
55212 + },
55213 +#endif
55214 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55215 + {
55216 + .ctl_name = CTL_UNNUMBERED,
55217 + .procname = "chroot_restrict_nice",
55218 + .data = &grsec_enable_chroot_nice,
55219 + .maxlen = sizeof(int),
55220 + .mode = 0600,
55221 + .proc_handler = &proc_dointvec,
55222 + },
55223 +#endif
55224 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55225 + {
55226 + .ctl_name = CTL_UNNUMBERED,
55227 + .procname = "chroot_execlog",
55228 + .data = &grsec_enable_chroot_execlog,
55229 + .maxlen = sizeof(int),
55230 + .mode = 0600,
55231 + .proc_handler = &proc_dointvec,
55232 + },
55233 +#endif
55234 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55235 + {
55236 + .ctl_name = CTL_UNNUMBERED,
55237 + .procname = "chroot_caps",
55238 + .data = &grsec_enable_chroot_caps,
55239 + .maxlen = sizeof(int),
55240 + .mode = 0600,
55241 + .proc_handler = &proc_dointvec,
55242 + },
55243 +#endif
55244 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55245 + {
55246 + .ctl_name = CTL_UNNUMBERED,
55247 + .procname = "chroot_deny_sysctl",
55248 + .data = &grsec_enable_chroot_sysctl,
55249 + .maxlen = sizeof(int),
55250 + .mode = 0600,
55251 + .proc_handler = &proc_dointvec,
55252 + },
55253 +#endif
55254 +#ifdef CONFIG_GRKERNSEC_TPE
55255 + {
55256 + .ctl_name = CTL_UNNUMBERED,
55257 + .procname = "tpe",
55258 + .data = &grsec_enable_tpe,
55259 + .maxlen = sizeof(int),
55260 + .mode = 0600,
55261 + .proc_handler = &proc_dointvec,
55262 + },
55263 + {
55264 + .ctl_name = CTL_UNNUMBERED,
55265 + .procname = "tpe_gid",
55266 + .data = &grsec_tpe_gid,
55267 + .maxlen = sizeof(int),
55268 + .mode = 0600,
55269 + .proc_handler = &proc_dointvec,
55270 + },
55271 +#endif
55272 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55273 + {
55274 + .ctl_name = CTL_UNNUMBERED,
55275 + .procname = "tpe_invert",
55276 + .data = &grsec_enable_tpe_invert,
55277 + .maxlen = sizeof(int),
55278 + .mode = 0600,
55279 + .proc_handler = &proc_dointvec,
55280 + },
55281 +#endif
55282 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55283 + {
55284 + .ctl_name = CTL_UNNUMBERED,
55285 + .procname = "tpe_restrict_all",
55286 + .data = &grsec_enable_tpe_all,
55287 + .maxlen = sizeof(int),
55288 + .mode = 0600,
55289 + .proc_handler = &proc_dointvec,
55290 + },
55291 +#endif
55292 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55293 + {
55294 + .ctl_name = CTL_UNNUMBERED,
55295 + .procname = "socket_all",
55296 + .data = &grsec_enable_socket_all,
55297 + .maxlen = sizeof(int),
55298 + .mode = 0600,
55299 + .proc_handler = &proc_dointvec,
55300 + },
55301 + {
55302 + .ctl_name = CTL_UNNUMBERED,
55303 + .procname = "socket_all_gid",
55304 + .data = &grsec_socket_all_gid,
55305 + .maxlen = sizeof(int),
55306 + .mode = 0600,
55307 + .proc_handler = &proc_dointvec,
55308 + },
55309 +#endif
55310 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55311 + {
55312 + .ctl_name = CTL_UNNUMBERED,
55313 + .procname = "socket_client",
55314 + .data = &grsec_enable_socket_client,
55315 + .maxlen = sizeof(int),
55316 + .mode = 0600,
55317 + .proc_handler = &proc_dointvec,
55318 + },
55319 + {
55320 + .ctl_name = CTL_UNNUMBERED,
55321 + .procname = "socket_client_gid",
55322 + .data = &grsec_socket_client_gid,
55323 + .maxlen = sizeof(int),
55324 + .mode = 0600,
55325 + .proc_handler = &proc_dointvec,
55326 + },
55327 +#endif
55328 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55329 + {
55330 + .ctl_name = CTL_UNNUMBERED,
55331 + .procname = "socket_server",
55332 + .data = &grsec_enable_socket_server,
55333 + .maxlen = sizeof(int),
55334 + .mode = 0600,
55335 + .proc_handler = &proc_dointvec,
55336 + },
55337 + {
55338 + .ctl_name = CTL_UNNUMBERED,
55339 + .procname = "socket_server_gid",
55340 + .data = &grsec_socket_server_gid,
55341 + .maxlen = sizeof(int),
55342 + .mode = 0600,
55343 + .proc_handler = &proc_dointvec,
55344 + },
55345 +#endif
55346 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55347 + {
55348 + .ctl_name = CTL_UNNUMBERED,
55349 + .procname = "audit_group",
55350 + .data = &grsec_enable_group,
55351 + .maxlen = sizeof(int),
55352 + .mode = 0600,
55353 + .proc_handler = &proc_dointvec,
55354 + },
55355 + {
55356 + .ctl_name = CTL_UNNUMBERED,
55357 + .procname = "audit_gid",
55358 + .data = &grsec_audit_gid,
55359 + .maxlen = sizeof(int),
55360 + .mode = 0600,
55361 + .proc_handler = &proc_dointvec,
55362 + },
55363 +#endif
55364 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55365 + {
55366 + .ctl_name = CTL_UNNUMBERED,
55367 + .procname = "audit_chdir",
55368 + .data = &grsec_enable_chdir,
55369 + .maxlen = sizeof(int),
55370 + .mode = 0600,
55371 + .proc_handler = &proc_dointvec,
55372 + },
55373 +#endif
55374 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55375 + {
55376 + .ctl_name = CTL_UNNUMBERED,
55377 + .procname = "audit_mount",
55378 + .data = &grsec_enable_mount,
55379 + .maxlen = sizeof(int),
55380 + .mode = 0600,
55381 + .proc_handler = &proc_dointvec,
55382 + },
55383 +#endif
55384 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55385 + {
55386 + .ctl_name = CTL_UNNUMBERED,
55387 + .procname = "audit_textrel",
55388 + .data = &grsec_enable_audit_textrel,
55389 + .maxlen = sizeof(int),
55390 + .mode = 0600,
55391 + .proc_handler = &proc_dointvec,
55392 + },
55393 +#endif
55394 +#ifdef CONFIG_GRKERNSEC_DMESG
55395 + {
55396 + .ctl_name = CTL_UNNUMBERED,
55397 + .procname = "dmesg",
55398 + .data = &grsec_enable_dmesg,
55399 + .maxlen = sizeof(int),
55400 + .mode = 0600,
55401 + .proc_handler = &proc_dointvec,
55402 + },
55403 +#endif
55404 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55405 + {
55406 + .ctl_name = CTL_UNNUMBERED,
55407 + .procname = "chroot_findtask",
55408 + .data = &grsec_enable_chroot_findtask,
55409 + .maxlen = sizeof(int),
55410 + .mode = 0600,
55411 + .proc_handler = &proc_dointvec,
55412 + },
55413 +#endif
55414 +#ifdef CONFIG_GRKERNSEC_RESLOG
55415 + {
55416 + .ctl_name = CTL_UNNUMBERED,
55417 + .procname = "resource_logging",
55418 + .data = &grsec_resource_logging,
55419 + .maxlen = sizeof(int),
55420 + .mode = 0600,
55421 + .proc_handler = &proc_dointvec,
55422 + },
55423 +#endif
55424 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55425 + {
55426 + .ctl_name = CTL_UNNUMBERED,
55427 + .procname = "audit_ptrace",
55428 + .data = &grsec_enable_audit_ptrace,
55429 + .maxlen = sizeof(int),
55430 + .mode = 0600,
55431 + .proc_handler = &proc_dointvec,
55432 + },
55433 +#endif
55434 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55435 + {
55436 + .ctl_name = CTL_UNNUMBERED,
55437 + .procname = "harden_ptrace",
55438 + .data = &grsec_enable_harden_ptrace,
55439 + .maxlen = sizeof(int),
55440 + .mode = 0600,
55441 + .proc_handler = &proc_dointvec,
55442 + },
55443 +#endif
55444 + {
55445 + .ctl_name = CTL_UNNUMBERED,
55446 + .procname = "grsec_lock",
55447 + .data = &grsec_lock,
55448 + .maxlen = sizeof(int),
55449 + .mode = 0600,
55450 + .proc_handler = &proc_dointvec,
55451 + },
55452 +#endif
55453 +#ifdef CONFIG_GRKERNSEC_ROFS
55454 + {
55455 + .ctl_name = CTL_UNNUMBERED,
55456 + .procname = "romount_protect",
55457 + .data = &grsec_enable_rofs,
55458 + .maxlen = sizeof(int),
55459 + .mode = 0600,
55460 + .proc_handler = &proc_dointvec_minmax,
55461 + .extra1 = &one,
55462 + .extra2 = &one,
55463 + },
55464 +#endif
55465 + { .ctl_name = 0 }
55466 +};
55467 +#endif
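Each entry in grsecurity_table[] above becomes a writable integer knob once the table is registered (the registration itself lives in another hunk of this patch, not shown here), and gr_handle_sysctl_mod() above turns the whole set read-only as soon as grsec_lock is written. The sketch below flips one such knob from userspace; the /proc/sys/kernel/grsecurity/ path is an assumption about where the table ends up, and the program needs root on a grsecurity kernel. Illustration only, not patch code.

#include <stdio.h>

static int write_sysctl(const char *path, const char *value)
{
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return -1;
    }
    fputs(value, f);
    return fclose(f);   /* 0 on success */
}

int main(void)
{
    /* hypothetical path; fails once grsec_lock has been set */
    return write_sysctl("/proc/sys/kernel/grsecurity/audit_mount", "1") ? 1 : 0;
}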
55468 diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55469 --- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55470 +++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55471 @@ -0,0 +1,16 @@
55472 +#include <linux/kernel.h>
55473 +#include <linux/sched.h>
55474 +#include <linux/grinternal.h>
55475 +#include <linux/module.h>
55476 +
55477 +void
55478 +gr_log_timechange(void)
55479 +{
55480 +#ifdef CONFIG_GRKERNSEC_TIME
55481 + if (grsec_enable_time)
55482 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55483 +#endif
55484 + return;
55485 +}
55486 +
55487 +EXPORT_SYMBOL(gr_log_timechange);
55488 diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55489 --- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55490 +++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55491 @@ -0,0 +1,39 @@
55492 +#include <linux/kernel.h>
55493 +#include <linux/sched.h>
55494 +#include <linux/file.h>
55495 +#include <linux/fs.h>
55496 +#include <linux/grinternal.h>
55497 +
55498 +extern int gr_acl_tpe_check(void);
55499 +
55500 +int
55501 +gr_tpe_allow(const struct file *file)
55502 +{
55503 +#ifdef CONFIG_GRKERNSEC
55504 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55505 + const struct cred *cred = current_cred();
55506 +
55507 + if (cred->uid && ((grsec_enable_tpe &&
55508 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55509 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55510 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55511 +#else
55512 + in_group_p(grsec_tpe_gid)
55513 +#endif
55514 + ) || gr_acl_tpe_check()) &&
55515 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55516 + (inode->i_mode & S_IWOTH))))) {
55517 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55518 + return 0;
55519 + }
55520 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55521 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55522 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55523 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55524 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55525 + return 0;
55526 + }
55527 +#endif
55528 +#endif
55529 + return 1;
55530 +}
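The trusted-path-execution test in gr_tpe_allow() above boils down to a pure predicate on the caller and on the directory holding the binary. The sketch below states that predicate in isolation: dir_uid and dir_mode describe the parent directory (d_parent in the code above), while the RBAC hook and the TPE_INVERT/TPE_ALL variants are collapsed into a single in_tpe_group flag. It is an illustration of the decision, not code from the patch.

#include <stdio.h>
#include <sys/stat.h>

/* Returns 1 if execution is allowed, 0 if TPE would deny it. */
static int tpe_allow(unsigned uid, int in_tpe_group,
                     unsigned dir_uid, mode_t dir_mode)
{
    /* root is never restricted */
    if (uid == 0)
        return 1;

    /* untrusted user + directory not root-owned, or group/world-writable */
    if (in_tpe_group &&
        (dir_uid != 0 || (dir_mode & (S_IWGRP | S_IWOTH))))
        return 0;

    return 1;
}

int main(void)
{
    printf("/usr/bin (root, 0755):  %d\n", tpe_allow(1000, 1, 0, 0755));
    printf("/tmp (root, 1777):      %d\n", tpe_allow(1000, 1, 0, 01777));
    printf("~/bin (user, 0755):     %d\n", tpe_allow(1000, 1, 1000, 0755));
    printf("not in TPE group:       %d\n", tpe_allow(1000, 0, 1000, 0755));
    return 0;
}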
55531 diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55532 --- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55533 +++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55534 @@ -0,0 +1,61 @@
55535 +#include <linux/err.h>
55536 +#include <linux/kernel.h>
55537 +#include <linux/sched.h>
55538 +#include <linux/mm.h>
55539 +#include <linux/scatterlist.h>
55540 +#include <linux/crypto.h>
55541 +#include <linux/gracl.h>
55542 +
55543 +
55544 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55545 +#error "crypto and sha256 must be built into the kernel"
55546 +#endif
55547 +
55548 +int
55549 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55550 +{
55551 + char *p;
55552 + struct crypto_hash *tfm;
55553 + struct hash_desc desc;
55554 + struct scatterlist sg;
55555 + unsigned char temp_sum[GR_SHA_LEN];
55556 + volatile int retval = 0;
55557 + volatile int dummy = 0;
55558 + unsigned int i;
55559 +
55560 + sg_init_table(&sg, 1);
55561 +
55562 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55563 + if (IS_ERR(tfm)) {
55564 + /* should never happen, since sha256 should be built in */
55565 + return 1;
55566 + }
55567 +
55568 + desc.tfm = tfm;
55569 + desc.flags = 0;
55570 +
55571 + crypto_hash_init(&desc);
55572 +
55573 + p = salt;
55574 + sg_set_buf(&sg, p, GR_SALT_LEN);
55575 + crypto_hash_update(&desc, &sg, sg.length);
55576 +
55577 + p = entry->pw;
55578 + sg_set_buf(&sg, p, strlen(p));
55579 +
55580 + crypto_hash_update(&desc, &sg, sg.length);
55581 +
55582 + crypto_hash_final(&desc, temp_sum);
55583 +
55584 + memset(entry->pw, 0, GR_PW_LEN);
55585 +
55586 + for (i = 0; i < GR_SHA_LEN; i++)
55587 + if (sum[i] != temp_sum[i])
55588 + retval = 1;
55589 + else
55590 + dummy = 1; // waste a cycle so the compare takes the same time on match and mismatch
55591 +
55592 + crypto_free_hash(tfm);
55593 +
55594 + return retval;
55595 +}
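chkpw() above deliberately walks all GR_SHA_LEN bytes instead of bailing out at the first mismatch, so the comparison time does not leak how much of the stored hash matched. The usual way to express that idea is an XOR-accumulating compare, sketched below with 32 standing in for GR_SHA_LEN (SHA-256); illustration only, not code from the patch.

#include <stdio.h>
#include <string.h>

#define SUM_LEN 32

/* Returns 0 if equal, nonzero otherwise, in time independent of the data. */
static int consttime_memcmp(const unsigned char *a, const unsigned char *b,
                            size_t len)
{
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= a[i] ^ b[i];    /* never break out early */
    return diff;
}

int main(void)
{
    unsigned char stored[SUM_LEN], computed[SUM_LEN];

    memset(stored, 0xab, sizeof(stored));
    memcpy(computed, stored, sizeof(computed));
    printf("equal:    %d\n", consttime_memcmp(stored, computed, SUM_LEN));
    computed[SUM_LEN - 1] ^= 1;
    printf("mismatch: %d\n", consttime_memcmp(stored, computed, SUM_LEN) != 0);
    return 0;
}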
55596 diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55597 --- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55598 +++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55599 @@ -0,0 +1,1037 @@
55600 +#
55601 +# grsecurity configuration
55602 +#
55603 +
55604 +menu "Grsecurity"
55605 +
55606 +config GRKERNSEC
55607 + bool "Grsecurity"
55608 + select CRYPTO
55609 + select CRYPTO_SHA256
55610 + help
55611 + If you say Y here, you will be able to configure many features
55612 + that will enhance the security of your system. It is highly
55613 + recommended that you say Y here and read through the help
55614 + for each option so that you fully understand the features and
55615 + can evaluate their usefulness for your machine.
55616 +
55617 +choice
55618 + prompt "Security Level"
55619 + depends on GRKERNSEC
55620 + default GRKERNSEC_CUSTOM
55621 +
55622 +config GRKERNSEC_LOW
55623 + bool "Low"
55624 + select GRKERNSEC_LINK
55625 + select GRKERNSEC_FIFO
55626 + select GRKERNSEC_RANDNET
55627 + select GRKERNSEC_DMESG
55628 + select GRKERNSEC_CHROOT
55629 + select GRKERNSEC_CHROOT_CHDIR
55630 +
55631 + help
55632 + If you choose this option, several of the grsecurity options will
55633 + be enabled that will give you greater protection against a number
55634 + of attacks, while assuring that none of your software will have any
55635 + conflicts with the additional security measures. If you run a lot
55636 + of unusual software, or you are having problems with the higher
55637 + security levels, you should say Y here. With this option, the
55638 + following features are enabled:
55639 +
55640 + - Linking restrictions
55641 + - FIFO restrictions
55642 + - Restricted dmesg
55643 + - Enforced chdir("/") on chroot
55644 + - Runtime module disabling
55645 +
55646 +config GRKERNSEC_MEDIUM
55647 + bool "Medium"
55648 + select PAX
55649 + select PAX_EI_PAX
55650 + select PAX_PT_PAX_FLAGS
55651 + select PAX_HAVE_ACL_FLAGS
55652 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55653 + select GRKERNSEC_CHROOT
55654 + select GRKERNSEC_CHROOT_SYSCTL
55655 + select GRKERNSEC_LINK
55656 + select GRKERNSEC_FIFO
55657 + select GRKERNSEC_DMESG
55658 + select GRKERNSEC_RANDNET
55659 + select GRKERNSEC_FORKFAIL
55660 + select GRKERNSEC_TIME
55661 + select GRKERNSEC_SIGNAL
55662 + select GRKERNSEC_CHROOT
55663 + select GRKERNSEC_CHROOT_UNIX
55664 + select GRKERNSEC_CHROOT_MOUNT
55665 + select GRKERNSEC_CHROOT_PIVOT
55666 + select GRKERNSEC_CHROOT_DOUBLE
55667 + select GRKERNSEC_CHROOT_CHDIR
55668 + select GRKERNSEC_CHROOT_MKNOD
55669 + select GRKERNSEC_PROC
55670 + select GRKERNSEC_PROC_USERGROUP
55671 + select PAX_RANDUSTACK
55672 + select PAX_ASLR
55673 + select PAX_RANDMMAP
55674 + select PAX_REFCOUNT if (X86 || SPARC64)
55675 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55676 +
55677 + help
55678 + If you say Y here, several features in addition to those included
55679 + in the low additional security level will be enabled. These
55680 + features provide even more security to your system, though in rare
55681 + cases they may be incompatible with very old or poorly written
55682 + software. If you enable this option, make sure that your auth
55683 + service (identd) is running as gid 1001. With this option,
55684 + the following features (in addition to those provided in the
55685 + low additional security level) will be enabled:
55686 +
55687 + - Failed fork logging
55688 + - Time change logging
55689 + - Signal logging
55690 + - Deny mounts in chroot
55691 + - Deny double chrooting
55692 + - Deny sysctl writes in chroot
55693 + - Deny mknod in chroot
55694 + - Deny access to abstract AF_UNIX sockets out of chroot
55695 + - Deny pivot_root in chroot
55696 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55697 + - /proc restrictions with special GID set to 10 (usually wheel)
55698 + - Address Space Layout Randomization (ASLR)
55699 + - Prevent exploitation of most refcount overflows
55700 + - Bounds checking of copying between the kernel and userland
55701 +
55702 +config GRKERNSEC_HIGH
55703 + bool "High"
55704 + select GRKERNSEC_LINK
55705 + select GRKERNSEC_FIFO
55706 + select GRKERNSEC_DMESG
55707 + select GRKERNSEC_FORKFAIL
55708 + select GRKERNSEC_TIME
55709 + select GRKERNSEC_SIGNAL
55710 + select GRKERNSEC_CHROOT
55711 + select GRKERNSEC_CHROOT_SHMAT
55712 + select GRKERNSEC_CHROOT_UNIX
55713 + select GRKERNSEC_CHROOT_MOUNT
55714 + select GRKERNSEC_CHROOT_FCHDIR
55715 + select GRKERNSEC_CHROOT_PIVOT
55716 + select GRKERNSEC_CHROOT_DOUBLE
55717 + select GRKERNSEC_CHROOT_CHDIR
55718 + select GRKERNSEC_CHROOT_MKNOD
55719 + select GRKERNSEC_CHROOT_CAPS
55720 + select GRKERNSEC_CHROOT_SYSCTL
55721 + select GRKERNSEC_CHROOT_FINDTASK
55722 + select GRKERNSEC_SYSFS_RESTRICT
55723 + select GRKERNSEC_PROC
55724 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55725 + select GRKERNSEC_HIDESYM
55726 + select GRKERNSEC_BRUTE
55727 + select GRKERNSEC_PROC_USERGROUP
55728 + select GRKERNSEC_KMEM
55729 + select GRKERNSEC_RESLOG
55730 + select GRKERNSEC_RANDNET
55731 + select GRKERNSEC_PROC_ADD
55732 + select GRKERNSEC_CHROOT_CHMOD
55733 + select GRKERNSEC_CHROOT_NICE
55734 + select GRKERNSEC_AUDIT_MOUNT
55735 + select GRKERNSEC_MODHARDEN if (MODULES)
55736 + select GRKERNSEC_HARDEN_PTRACE
55737 + select GRKERNSEC_VM86 if (X86_32)
55738 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55739 + select PAX
55740 + select PAX_RANDUSTACK
55741 + select PAX_ASLR
55742 + select PAX_RANDMMAP
55743 + select PAX_NOEXEC
55744 + select PAX_MPROTECT
55745 + select PAX_EI_PAX
55746 + select PAX_PT_PAX_FLAGS
55747 + select PAX_HAVE_ACL_FLAGS
55748 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55749 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55750 + select PAX_RANDKSTACK if (X86_TSC && X86)
55751 + select PAX_SEGMEXEC if (X86_32)
55752 + select PAX_PAGEEXEC
55753 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55754 + select PAX_EMUTRAMP if (PARISC)
55755 + select PAX_EMUSIGRT if (PARISC)
55756 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55757 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55758 + select PAX_REFCOUNT if (X86 || SPARC64)
55759 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55760 + help
55761 + If you say Y here, many of the features of grsecurity will be
55762 + enabled, which will protect you against many kinds of attacks
55763 + against your system. The heightened security comes at a cost
55764 + of an increased chance of incompatibilities with rare software
55765 + on your machine. Since this security level enables PaX, you should
55766 + view <http://pax.grsecurity.net> and read about the PaX
55767 + project. While you are there, download chpax and run it on
55768 + binaries that cause problems with PaX. Also remember that
55769 + since the /proc restrictions are enabled, you must run your
55770 + identd as gid 1001. This security level enables the following
55771 + features in addition to those listed in the low and medium
55772 + security levels:
55773 +
55774 + - Additional /proc restrictions
55775 + - Chmod restrictions in chroot
55776 + - No signals, ptrace, or viewing of processes outside of chroot
55777 + - Capability restrictions in chroot
55778 + - Deny fchdir out of chroot
55779 + - Priority restrictions in chroot
55780 + - Segmentation-based implementation of PaX
55781 + - Mprotect restrictions
55782 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55783 + - Kernel stack randomization
55784 + - Mount/unmount/remount logging
55785 + - Kernel symbol hiding
55786 + - Prevention of memory exhaustion-based exploits
55787 + - Hardening of module auto-loading
55788 + - Ptrace restrictions
55789 + - Restricted vm86 mode
55790 + - Restricted sysfs/debugfs
55791 + - Active kernel exploit response
55792 +
55793 +config GRKERNSEC_CUSTOM
55794 + bool "Custom"
55795 + help
55796 + If you say Y here, you will be able to configure every grsecurity
55797 + option, which allows you to enable many more features that aren't
55798 + covered in the basic security levels. These additional features
55799 + include TPE, socket restrictions, and the sysctl system for
55800 + grsecurity. It is advised that you read through the help for
55801 + each option to determine its usefulness in your situation.
55802 +
55803 +endchoice
55804 +
55805 +menu "Address Space Protection"
55806 +depends on GRKERNSEC
55807 +
55808 +config GRKERNSEC_KMEM
55809 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55810 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55811 + help
55812 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55813 + be written to via mmap or otherwise to modify the running kernel.
55814 + /dev/port will also not be allowed to be opened. If you have module
55815 + support disabled, enabling this will close up four ways that are
55816 + currently used to insert malicious code into the running kernel.
55817 + Even with all these features enabled, we still highly recommend that
55818 + you use the RBAC system, as it is still possible for an attacker to
55819 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55820 + If you are not using XFree86, you may be able to stop this additional
55821 + case by enabling the 'Disable privileged I/O' option. Though nothing
55822 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55823 + but only to video memory, which is the only writing we allow in this
55824 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
55825 + will not be allowed to be mprotect()ed with PROT_WRITE later.
55826 + It is highly recommended that you say Y here if you meet all the
55827 + conditions above.
55828 +
55829 +config GRKERNSEC_VM86
55830 + bool "Restrict VM86 mode"
55831 + depends on X86_32
55832 +
55833 + help
55834 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55835 + make use of a special execution mode on 32bit x86 processors called
55836 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55837 + video cards and will still work with this option enabled. The purpose
55838 + of the option is to prevent exploitation of emulation errors in
55839 + virtualization of vm86 mode like the one discovered in VMware in 2009.
55840 + Nearly all users should be able to enable this option.
55841 +
55842 +config GRKERNSEC_IO
55843 + bool "Disable privileged I/O"
55844 + depends on X86
55845 + select RTC_CLASS
55846 + select RTC_INTF_DEV
55847 + select RTC_DRV_CMOS
55848 +
55849 + help
55850 + If you say Y here, all ioperm and iopl calls will return an error.
55851 + Ioperm and iopl can be used to modify the running kernel.
55852 + Unfortunately, some programs need this access to operate properly,
55853 + the most notable of which are XFree86 and hwclock. The hwclock case can
55854 + be remedied by having RTC support in the kernel, so real-time
55855 + clock support is enabled if this option is enabled, to ensure
55856 + that hwclock operates correctly. XFree86 still will not
55857 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55858 + IF YOU USE XFree86. If you use XFree86 and you still want to
55859 + protect your kernel against modification, use the RBAC system.
55860 +
55861 +config GRKERNSEC_PROC_MEMMAP
55862 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55863 + default y if (PAX_NOEXEC || PAX_ASLR)
55864 + depends on PAX_NOEXEC || PAX_ASLR
55865 + help
55866 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55867 + give no information about the addresses of a task's mappings if
55868 + PaX features that rely on random addresses are enabled on the task.
55869 + If you use PaX it is greatly recommended that you say Y here as it
55870 + closes up a hole that makes the full ASLR useless for suid
55871 + binaries.
55872 +
55873 +config GRKERNSEC_BRUTE
55874 + bool "Deter exploit bruteforcing"
55875 + help
55876 + If you say Y here, attempts to bruteforce exploits against forking
55877 + daemons such as apache or sshd, as well as against suid/sgid binaries
55878 + will be deterred. When a child of a forking daemon is killed by PaX
55879 + or crashes due to an illegal instruction or other suspicious signal,
55880 + the parent process will be delayed 30 seconds upon every subsequent
55881 + fork until the administrator is able to assess the situation and
55882 + restart the daemon.
55883 + In the suid/sgid case, the attempt is logged, the user has all their
55884 + processes terminated, and they are prevented from executing any further
55885 + processes for 15 minutes.
55886 + It is recommended that you also enable signal logging in the auditing
55887 + section so that logs are generated when a process triggers a suspicious
55888 + signal.
55889 + If the sysctl option is enabled, a sysctl option with name
55890 + "deter_bruteforce" is created.
55891 +
55892 +config GRKERNSEC_MODHARDEN
55893 + bool "Harden module auto-loading"
55894 + depends on MODULES
55895 + help
55896 + If you say Y here, module auto-loading in response to use of some
55897 + feature implemented by an unloaded module will be restricted to
55898 + root users. Enabling this option helps defend against attacks
55899 + by unprivileged users who abuse the auto-loading behavior to
55900 + cause a vulnerable module to load that is then exploited.
55901 +
55902 + If this option prevents a legitimate use of auto-loading for a
55903 + non-root user, the administrator can execute modprobe manually
55904 + with the exact name of the module mentioned in the alert log.
55905 + Alternatively, the administrator can add the module to the list
55906 + of modules loaded at boot by modifying init scripts.
55907 +
55908 + Modification of init scripts will most likely be needed on
55909 + Ubuntu servers with encrypted home directory support enabled,
55910 + as the first non-root user logging in will cause the ecb(aes),
55911 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55912 +
55913 +config GRKERNSEC_HIDESYM
55914 + bool "Hide kernel symbols"
55915 + help
55916 + If you say Y here, getting information on loaded modules, and
55917 + displaying all kernel symbols through a syscall will be restricted
55918 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55919 + /proc/kallsyms will be restricted to the root user. The RBAC
55920 + system can hide that entry even from root.
55921 +
55922 + This option also prevents leaking of kernel addresses through
55923 + several /proc entries.
55924 +
55925 + Note that this option is only effective provided the following
55926 + conditions are met:
55927 + 1) The kernel using grsecurity is not precompiled by some distribution
55928 + 2) You have also enabled GRKERNSEC_DMESG
55929 + 3) You are using the RBAC system and hiding other files such as your
55930 + kernel image and System.map. Alternatively, enabling this option
55931 + causes the permissions on /boot, /lib/modules, and the kernel
55932 + source directory to change at compile time to prevent
55933 + reading by non-root users.
55934 + If the above conditions are met, this option will aid in providing a
55935 + useful protection against local kernel exploitation of overflows
55936 + and arbitrary read/write vulnerabilities.
55937 +
55938 +config GRKERNSEC_KERN_LOCKOUT
55939 + bool "Active kernel exploit response"
55940 + depends on X86 || ARM || PPC || SPARC
55941 + help
55942 + If you say Y here, when a PaX alert is triggered due to suspicious
55943 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55944 + or an OOPs occurs due to bad memory accesses, instead of just
55945 + terminating the offending process (and potentially allowing
55946 + a subsequent exploit from the same user), we will take one of two
55947 + actions:
55948 + If the user was root, we will panic the system
55949 + If the user was non-root, we will log the attempt, terminate
55950 + all processes owned by the user, then prevent them from creating
55951 + any new processes until the system is restarted
55952 + This deters repeated kernel exploitation/bruteforcing attempts
55953 + and is useful for later forensics.
55954 +
55955 +endmenu
55956 +menu "Role Based Access Control Options"
55957 +depends on GRKERNSEC
55958 +
55959 +config GRKERNSEC_RBAC_DEBUG
55960 + bool
55961 +
55962 +config GRKERNSEC_NO_RBAC
55963 + bool "Disable RBAC system"
55964 + help
55965 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55966 + preventing the RBAC system from being enabled. You should only say Y
55967 + here if you have no intention of using the RBAC system, so as to prevent
55968 + an attacker with root access from misusing the RBAC system to hide files
55969 + and processes when loadable module support and /dev/[k]mem have been
55970 + locked down.
55971 +
55972 +config GRKERNSEC_ACL_HIDEKERN
55973 + bool "Hide kernel processes"
55974 + help
55975 + If you say Y here, all kernel threads will be hidden from all
55976 + processes but those whose subject has the "view hidden processes"
55977 + flag.
55978 +
55979 +config GRKERNSEC_ACL_MAXTRIES
55980 + int "Maximum tries before password lockout"
55981 + default 3
55982 + help
55983 + This option enforces the maximum number of times a user can attempt
55984 + to authorize themselves with the grsecurity RBAC system before being
55985 + denied the ability to attempt authorization again for a specified time.
55986 + The lower the number, the harder it will be to brute-force a password.
55987 +
55988 +config GRKERNSEC_ACL_TIMEOUT
55989 + int "Time to wait after max password tries, in seconds"
55990 + default 30
55991 + help
55992 + This option specifies the time the user must wait after attempting to
55993 + authorize to the RBAC system with the maximum number of invalid
55994 + passwords. The higher the number, the harder it will be to brute-force
55995 + a password.
55996 +
55997 +endmenu
55998 +menu "Filesystem Protections"
55999 +depends on GRKERNSEC
56000 +
56001 +config GRKERNSEC_PROC
56002 + bool "Proc restrictions"
56003 + help
56004 + If you say Y here, the permissions of the /proc filesystem
56005 + will be altered to enhance system security and privacy. You MUST
56006 + choose either a user only restriction or a user and group restriction.
56007 + Depending upon the option you choose, you can either restrict users to
56008 + see only the processes they themselves run, or choose a group whose
56009 + members can view all processes and files normally restricted to root.
56010 + NOTE: If you're running identd as
56011 + a non-root user, you will have to run it as the group you specify here.
56012 +
56013 +config GRKERNSEC_PROC_USER
56014 + bool "Restrict /proc to user only"
56015 + depends on GRKERNSEC_PROC
56016 + help
56017 + If you say Y here, non-root users will only be able to view their own
56018 + processes, and will be restricted from viewing network-related information
56019 + and kernel symbol and module information.
56020 +
56021 +config GRKERNSEC_PROC_USERGROUP
56022 + bool "Allow special group"
56023 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56024 + help
56025 + If you say Y here, you will be able to select a group that will be
56026 + able to view all processes and network-related information. If you've
56027 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56028 + remain hidden. This option is useful if you want to run identd as
56029 + a non-root user.
56030 +
56031 +config GRKERNSEC_PROC_GID
56032 + int "GID for special group"
56033 + depends on GRKERNSEC_PROC_USERGROUP
56034 + default 1001
56035 +
56036 +config GRKERNSEC_PROC_ADD
56037 + bool "Additional restrictions"
56038 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56039 + help
56040 + If you say Y here, additional restrictions will be placed on
56041 + /proc that keep normal users from viewing device information and
56042 + slabinfo information that could be useful for exploits.
56043 +
56044 +config GRKERNSEC_LINK
56045 + bool "Linking restrictions"
56046 + help
56047 + If you say Y here, /tmp race exploits will be prevented, since users
56048 + will no longer be able to follow symlinks owned by other users in
56049 + world-writable +t directories (e.g. /tmp), unless the owner of the
56050 + symlink is the owner of the directory. Users will also not be
56051 + able to hardlink to files they do not own. If the sysctl option is
56052 + enabled, a sysctl option with name "linking_restrictions" is created.
56053 +
56054 +config GRKERNSEC_FIFO
56055 + bool "FIFO restrictions"
56056 + help
56057 + If you say Y here, users will not be able to write to FIFOs they don't
56058 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56059 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
56060 + option is enabled, a sysctl option with name "fifo_restrictions" is
56061 + created.
56062 +
56063 +config GRKERNSEC_SYSFS_RESTRICT
56064 + bool "Sysfs/debugfs restriction"
56065 + depends on SYSFS
56066 + help
56067 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56068 + any filesystem normally mounted under it (e.g. debugfs) will only
56069 + be accessible by root. These filesystems generally provide access
56070 + to hardware and debug information that isn't appropriate for unprivileged
56071 + users of the system. Sysfs and debugfs have also become a large source
56072 + of new vulnerabilities, ranging from infoleaks to local compromise.
56073 + There has been very little oversight with an eye toward security involved
56074 + in adding new exporters of information to these filesystems, so their
56075 + use is discouraged.
56076 + This option is equivalent to a chmod 0700 of the mount paths.
56077 +
56078 +config GRKERNSEC_ROFS
56079 + bool "Runtime read-only mount protection"
56080 + help
56081 + If you say Y here, a sysctl option with name "romount_protect" will
56082 + be created. By setting this option to 1 at runtime, filesystems
56083 + will be protected in the following ways:
56084 + * No new writable mounts will be allowed
56085 + * Existing read-only mounts won't be able to be remounted read/write
56086 + * Write operations will be denied on all block devices
56087 + This option acts independently of grsec_lock: once it is set to 1,
56088 + it cannot be turned off. Therefore, please be mindful of the resulting
56089 + behavior if this option is enabled in an init script on a read-only
56090 + filesystem. This feature is mainly intended for secure embedded systems.
56091 +
56092 +config GRKERNSEC_CHROOT
56093 + bool "Chroot jail restrictions"
56094 + help
56095 + If you say Y here, you will be able to choose several options that will
56096 + make breaking out of a chrooted jail much more difficult. If you
56097 + encounter no software incompatibilities with the following options, it
56098 + is recommended that you enable each one.
56099 +
56100 +config GRKERNSEC_CHROOT_MOUNT
56101 + bool "Deny mounts"
56102 + depends on GRKERNSEC_CHROOT
56103 + help
56104 + If you say Y here, processes inside a chroot will not be able to
56105 + mount or remount filesystems. If the sysctl option is enabled, a
56106 + sysctl option with name "chroot_deny_mount" is created.
56107 +
56108 +config GRKERNSEC_CHROOT_DOUBLE
56109 + bool "Deny double-chroots"
56110 + depends on GRKERNSEC_CHROOT
56111 + help
56112 + If you say Y here, processes inside a chroot will not be able to chroot
56113 + again outside the chroot. This is a widely used method of breaking
56114 + out of a chroot jail and should not be allowed. If the sysctl
56115 + option is enabled, a sysctl option with name
56116 + "chroot_deny_chroot" is created.
56117 +
56118 +config GRKERNSEC_CHROOT_PIVOT
56119 + bool "Deny pivot_root in chroot"
56120 + depends on GRKERNSEC_CHROOT
56121 + help
56122 + If you say Y here, processes inside a chroot will not be able to use
56123 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56124 + works similarly to chroot in that it changes the root filesystem. This
56125 + function could be misused in a chrooted process to attempt to break out
56126 + of the chroot, and therefore should not be allowed. If the sysctl
56127 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56128 + created.
56129 +
56130 +config GRKERNSEC_CHROOT_CHDIR
56131 + bool "Enforce chdir(\"/\") on all chroots"
56132 + depends on GRKERNSEC_CHROOT
56133 + help
56134 + If you say Y here, the current working directory of all newly-chrooted
56135 + applications will be set to the root directory of the chroot.
56136 + The man page on chroot(2) states:
56137 + Note that this call does not change the current working
56138 + directory, so that `.' can be outside the tree rooted at
56139 + `/'. In particular, the super-user can escape from a
56140 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56141 +
56142 + It is recommended that you say Y here, since it's not known to break
56143 + any software. If the sysctl option is enabled, a sysctl option with
56144 + name "chroot_enforce_chdir" is created.
56145 +
56146 +config GRKERNSEC_CHROOT_CHMOD
56147 + bool "Deny (f)chmod +s"
56148 + depends on GRKERNSEC_CHROOT
56149 + help
56150 + If you say Y here, processes inside a chroot will not be able to chmod
56151 + or fchmod files to make them have suid or sgid bits. This protects
56152 + against another published method of breaking a chroot. If the sysctl
56153 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56154 + created.
56155 +
56156 +config GRKERNSEC_CHROOT_FCHDIR
56157 + bool "Deny fchdir out of chroot"
56158 + depends on GRKERNSEC_CHROOT
56159 + help
56160 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56161 + to a file descriptor of the chrooting process that points to a directory
56162 + outside the filesystem will be stopped. If the sysctl option
56163 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56164 +
56165 +config GRKERNSEC_CHROOT_MKNOD
56166 + bool "Deny mknod"
56167 + depends on GRKERNSEC_CHROOT
56168 + help
56169 + If you say Y here, processes inside a chroot will not be allowed to
56170 + mknod. The problem with using mknod inside a chroot is that it
56171 + would allow an attacker to create a device entry that is the same
56172 + as one on the physical root of your system, which could be anything
56173 + from the console device to a device for your hard drive (which
56174 + they could then use to wipe the drive or steal data). It is recommended
56175 + that you say Y here, unless you run into software incompatibilities.
56176 + If the sysctl option is enabled, a sysctl option with name
56177 + "chroot_deny_mknod" is created.
56178 +
56179 +config GRKERNSEC_CHROOT_SHMAT
56180 + bool "Deny shmat() out of chroot"
56181 + depends on GRKERNSEC_CHROOT
56182 + help
56183 + If you say Y here, processes inside a chroot will not be able to attach
56184 + to shared memory segments that were created outside of the chroot jail.
56185 + It is recommended that you say Y here. If the sysctl option is enabled,
56186 + a sysctl option with name "chroot_deny_shmat" is created.
56187 +
56188 +config GRKERNSEC_CHROOT_UNIX
56189 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56190 + depends on GRKERNSEC_CHROOT
56191 + help
56192 + If you say Y here, processes inside a chroot will not be able to
56193 + connect to abstract (meaning not belonging to a filesystem) Unix
56194 + domain sockets that were bound outside of a chroot. It is recommended
56195 + that you say Y here. If the sysctl option is enabled, a sysctl option
56196 + with name "chroot_deny_unix" is created.
56197 +
56198 +config GRKERNSEC_CHROOT_FINDTASK
56199 + bool "Protect outside processes"
56200 + depends on GRKERNSEC_CHROOT
56201 + help
56202 + If you say Y here, processes inside a chroot will not be able to
56203 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56204 + getsid, or view any process outside of the chroot. If the sysctl
56205 + option is enabled, a sysctl option with name "chroot_findtask" is
56206 + created.
56207 +
56208 +config GRKERNSEC_CHROOT_NICE
56209 + bool "Restrict priority changes"
56210 + depends on GRKERNSEC_CHROOT
56211 + help
56212 + If you say Y here, processes inside a chroot will not be able to raise
56213 + the priority of processes in the chroot, or alter the priority of
56214 + processes outside the chroot. This provides more security than simply
56215 + removing CAP_SYS_NICE from the process' capability set. If the
56216 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56217 + is created.
56218 +
56219 +config GRKERNSEC_CHROOT_SYSCTL
56220 + bool "Deny sysctl writes"
56221 + depends on GRKERNSEC_CHROOT
56222 + help
56223 + If you say Y here, an attacker in a chroot will not be able to
56224 + write to sysctl entries, either by sysctl(2) or through a /proc
56225 + interface. It is strongly recommended that you say Y here. If the
56226 + sysctl option is enabled, a sysctl option with name
56227 + "chroot_deny_sysctl" is created.
56228 +
56229 +config GRKERNSEC_CHROOT_CAPS
56230 + bool "Capability restrictions"
56231 + depends on GRKERNSEC_CHROOT
56232 + help
56233 + If you say Y here, the capabilities on all root processes within a
56234 + chroot jail will be lowered to stop module insertion, raw i/o,
56235 + system and net admin tasks, rebooting the system, modifying immutable
56236 + files, modifying IPC owned by another, and changing the system time.
56237 + This is left an option because it can break some apps. Disable this
56238 + if your chrooted apps are having problems performing those kinds of
56239 + tasks. If the sysctl option is enabled, a sysctl option with
56240 + name "chroot_caps" is created.
56241 +
56242 +endmenu
56243 +menu "Kernel Auditing"
56244 +depends on GRKERNSEC
56245 +
56246 +config GRKERNSEC_AUDIT_GROUP
56247 + bool "Single group for auditing"
56248 + help
56249 + If you say Y here, the exec, chdir, and (un)mount logging features
56250 + will only operate on a group you specify. This option is recommended
56251 + if you only want to watch certain users instead of having a large
56252 + amount of logs from the entire system. If the sysctl option is enabled,
56253 + a sysctl option with name "audit_group" is created.
56254 +
56255 +config GRKERNSEC_AUDIT_GID
56256 + int "GID for auditing"
56257 + depends on GRKERNSEC_AUDIT_GROUP
56258 + default 1007
56259 +
56260 +config GRKERNSEC_EXECLOG
56261 + bool "Exec logging"
56262 + help
56263 + If you say Y here, all execve() calls will be logged (since the
56264 + other exec*() calls are frontends to execve(), all execution
56265 + will be logged). Useful for shell-servers that like to keep track
56266 + of their users. If the sysctl option is enabled, a sysctl option with
56267 + name "exec_logging" is created.
56268 + WARNING: This option when enabled will produce a LOT of logs, especially
56269 + on an active system.
56270 +
56271 +config GRKERNSEC_RESLOG
56272 + bool "Resource logging"
56273 + help
56274 + If you say Y here, all attempts to overstep resource limits will
56275 + be logged with the resource name, the requested size, and the current
56276 + limit. It is highly recommended that you say Y here. If the sysctl
56277 + option is enabled, a sysctl option with name "resource_logging" is
56278 + created. If the RBAC system is enabled, the sysctl value is ignored.
56279 +
56280 +config GRKERNSEC_CHROOT_EXECLOG
56281 + bool "Log execs within chroot"
56282 + help
56283 + If you say Y here, all executions inside a chroot jail will be logged
56284 + to syslog. This can cause a large amount of logs if certain
56285 + applications (e.g. djb's daemontools) are installed on the system, and
56286 + is therefore left as an option. If the sysctl option is enabled, a
56287 + sysctl option with name "chroot_execlog" is created.
56288 +
56289 +config GRKERNSEC_AUDIT_PTRACE
56290 + bool "Ptrace logging"
56291 + help
56292 + If you say Y here, all attempts to attach to a process via ptrace
56293 + will be logged. If the sysctl option is enabled, a sysctl option
56294 + with name "audit_ptrace" is created.
56295 +
56296 +config GRKERNSEC_AUDIT_CHDIR
56297 + bool "Chdir logging"
56298 + help
56299 + If you say Y here, all chdir() calls will be logged. If the sysctl
56300 + option is enabled, a sysctl option with name "audit_chdir" is created.
56301 +
56302 +config GRKERNSEC_AUDIT_MOUNT
56303 + bool "(Un)Mount logging"
56304 + help
56305 + If you say Y here, all mounts and unmounts will be logged. If the
56306 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56307 + created.
56308 +
56309 +config GRKERNSEC_SIGNAL
56310 + bool "Signal logging"
56311 + help
56312 + If you say Y here, certain important signals will be logged, such as
56313 + SIGSEGV, which will as a result inform you when an error in a program
56314 + occurred, which in some cases could mean a possible exploit attempt.
56315 + If the sysctl option is enabled, a sysctl option with name
56316 + "signal_logging" is created.
56317 +
56318 +config GRKERNSEC_FORKFAIL
56319 + bool "Fork failure logging"
56320 + help
56321 + If you say Y here, all failed fork() attempts will be logged.
56322 + This could suggest a fork bomb, or someone attempting to overstep
56323 + their process limit. If the sysctl option is enabled, a sysctl option
56324 + with name "forkfail_logging" is created.
56325 +
56326 +config GRKERNSEC_TIME
56327 + bool "Time change logging"
56328 + help
56329 + If you say Y here, any changes of the system clock will be logged.
56330 + If the sysctl option is enabled, a sysctl option with name
56331 + "timechange_logging" is created.
56332 +
56333 +config GRKERNSEC_PROC_IPADDR
56334 + bool "/proc/<pid>/ipaddr support"
56335 + help
56336 + If you say Y here, a new entry will be added to each /proc/<pid>
56337 + directory that contains the IP address of the person using the task.
56338 + The IP is carried across local TCP and AF_UNIX stream sockets.
56339 + This information can be useful for IDS/IPSes to perform remote response
56340 + to a local attack. The entry is readable by only the owner of the
56341 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56342 + the RBAC system), and thus does not create privacy concerns.
56343 +
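As a rough illustration of how a monitoring or IDS script might consume this entry, the sketch below (a hypothetical userspace helper, not part of the patch, assuming a kernel built with this option) simply prints whatever address the kernel recorded for a given PID:

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            char path[64];
            char addr[64];
            FILE *f;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                    return 1;
            }
            /* The ipaddr entry exists only on kernels built with this
               option, and is readable only by the owner of the process
               (or root, as described in the help text above). */
            snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(addr, sizeof(addr), f))
                    printf("pid %s: %s", argv[1], addr);
            fclose(f);
            return 0;
    }
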
56344 +config GRKERNSEC_RWXMAP_LOG
56345 + bool 'Denied RWX mmap/mprotect logging'
56346 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56347 + help
56348 + If you say Y here, calls to mmap() and mprotect() with explicit
56349 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56350 + denied by the PAX_MPROTECT feature. If the sysctl option is
56351 + enabled, a sysctl option with name "rwxmap_logging" is created.
56352 +
56353 +config GRKERNSEC_AUDIT_TEXTREL
56354 + bool 'ELF text relocations logging (READ HELP)'
56355 + depends on PAX_MPROTECT
56356 + help
56357 + If you say Y here, text relocations will be logged with the filename
56358 + of the offending library or binary. The purpose of the feature is
56359 + to help Linux distribution developers get rid of libraries and
56360 + binaries that need text relocations which hinder the future progress
56361 + of PaX. Only Linux distribution developers should say Y here, and
56362 + never on a production machine, as this option creates an information
56363 + leak that could aid an attacker in defeating the randomization of
56364 + a single memory region. If the sysctl option is enabled, a sysctl
56365 + option with name "audit_textrel" is created.
56366 +
56367 +endmenu
56368 +
56369 +menu "Executable Protections"
56370 +depends on GRKERNSEC
56371 +
56372 +config GRKERNSEC_DMESG
56373 + bool "Dmesg(8) restriction"
56374 + help
56375 + If you say Y here, non-root users will not be able to use dmesg(8)
56376 + to view up to the last 4kb of messages in the kernel's log buffer.
56377 + The kernel's log buffer often contains kernel addresses and other
56378 + identifying information useful to an attacker in fingerprinting a
56379 + system for a targeted exploit.
56380 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56381 + created.
56382 +
56383 +config GRKERNSEC_HARDEN_PTRACE
56384 + bool "Deter ptrace-based process snooping"
56385 + help
56386 + If you say Y here, TTY sniffers and other malicious monitoring
56387 + programs implemented through ptrace will be defeated. If you
56388 + have been using the RBAC system, this option has already been
56389 + enabled for several years for all users, with the ability to make
56390 + fine-grained exceptions.
56391 +
56392 + This option only affects the ability of non-root users to ptrace
56393 + processes that are not a descendant of the ptracing process.
56394 + This means that strace ./binary and gdb ./binary will still work,
56395 + but attaching to arbitrary processes will not. If the sysctl
56396 + option is enabled, a sysctl option with name "harden_ptrace" is
56397 + created.
56398 +
56399 +config GRKERNSEC_TPE
56400 + bool "Trusted Path Execution (TPE)"
56401 + help
56402 + If you say Y here, you will be able to choose a gid to add to the
56403 + supplementary groups of users you want to mark as "untrusted."
56404 + These users will not be able to execute any files that are not in
56405 + root-owned directories writable only by root. If the sysctl option
56406 + is enabled, a sysctl option with name "tpe" is created.
56407 +
56408 +config GRKERNSEC_TPE_ALL
56409 + bool "Partially restrict all non-root users"
56410 + depends on GRKERNSEC_TPE
56411 + help
56412 + If you say Y here, all non-root users will be covered under
56413 + a weaker TPE restriction. This is separate from, and in addition to,
56414 + the main TPE options that you have selected elsewhere. Thus, if a
56415 + "trusted" GID is chosen, this restriction applies to even that GID.
56416 + Under this restriction, all non-root users will only be allowed to
56417 + execute files in directories they own that are not group or
56418 + world-writable, or in directories owned by root and writable only by
56419 + root. If the sysctl option is enabled, a sysctl option with name
56420 + "tpe_restrict_all" is created.
56421 +
56422 +config GRKERNSEC_TPE_INVERT
56423 + bool "Invert GID option"
56424 + depends on GRKERNSEC_TPE
56425 + help
56426 + If you say Y here, the group you specify in the TPE configuration will
56427 + decide what group TPE restrictions will be *disabled* for. This
56428 + option is useful if you want TPE restrictions to be applied to most
56429 + users on the system. If the sysctl option is enabled, a sysctl option
56430 + with name "tpe_invert" is created. Unlike other sysctl options, this
56431 + entry will default to on for backward-compatibility.
56432 +
56433 +config GRKERNSEC_TPE_GID
56434 + int "GID for untrusted users"
56435 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56436 + default 1005
56437 + help
56438 + Setting this GID determines what group TPE restrictions will be
56439 + *enabled* for. If the sysctl option is enabled, a sysctl option
56440 + with name "tpe_gid" is created.
56441 +
56442 +config GRKERNSEC_TPE_GID
56443 + int "GID for trusted users"
56444 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56445 + default 1005
56446 + help
56447 + Setting this GID determines what group TPE restrictions will be
56448 + *disabled* for. If the sysctl option is enabled, a sysctl option
56449 + with name "tpe_gid" is created.
56450 +
56451 +endmenu
56452 +menu "Network Protections"
56453 +depends on GRKERNSEC
56454 +
56455 +config GRKERNSEC_RANDNET
56456 + bool "Larger entropy pools"
56457 + help
56458 + If you say Y here, the entropy pools used for many features of Linux
56459 + and grsecurity will be doubled in size. Since several grsecurity
56460 + features use additional randomness, it is recommended that you say Y
56461 + here. Saying Y here has a similar effect as modifying
56462 + /proc/sys/kernel/random/poolsize.
56463 +
56464 +config GRKERNSEC_BLACKHOLE
56465 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56466 + depends on NET
56467 + help
56468 + If you say Y here, neither TCP resets nor ICMP
56469 + destination-unreachable packets will be sent in response to packets
56470 + sent to ports for which no associated listening process exists.
56471 + This feature supports both IPv4 and IPv6 and exempts the
56472 + loopback interface from blackholing. Enabling this feature
56473 + makes a host more resilient to DoS attacks and reduces network
56474 + visibility against scanners.
56475 +
56476 + The blackhole feature as-implemented is equivalent to the FreeBSD
56477 + blackhole feature, as it prevents RST responses to all packets, not
56478 + just SYNs. Under most application behavior this causes no
56479 + problems, but applications (like haproxy) may not close certain
56480 + connections in a way that cleanly terminates them on the remote
56481 + end, leaving the remote host in LAST_ACK state. Because of this
56482 + side-effect and to prevent intentional LAST_ACK DoSes, this
56483 + feature also adds automatic mitigation against such attacks.
56484 + The mitigation drastically reduces the amount of time a socket
56485 + can spend in LAST_ACK state. If you're using haproxy and not
56486 + all servers it connects to have this option enabled, consider
56487 + disabling this feature on the haproxy host.
56488 +
56489 + If the sysctl option is enabled, two sysctl options with names
56490 + "ip_blackhole" and "lastack_retries" will be created.
56491 + While "ip_blackhole" takes the standard zero/non-zero on/off
56492 + toggle, "lastack_retries" uses the same kinds of values as
56493 + "tcp_retries1" and "tcp_retries2". The default value of 4
56494 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56495 + state.
56496 +
56497 +config GRKERNSEC_SOCKET
56498 + bool "Socket restrictions"
56499 + depends on NET
56500 + help
56501 + If you say Y here, you will be able to choose from several options.
56502 + If you assign a GID on your system and add it to the supplementary
56503 + groups of users you want to restrict socket access to, this patch
56504 + will perform up to three things, based on the option(s) you choose.
56505 +
56506 +config GRKERNSEC_SOCKET_ALL
56507 + bool "Deny any sockets to group"
56508 + depends on GRKERNSEC_SOCKET
56509 + help
56510 + If you say Y here, you will be able to choose a GID whose users will
56511 + be unable to connect to other hosts from your machine or run server
56512 + applications from your machine. If the sysctl option is enabled, a
56513 + sysctl option with name "socket_all" is created.
56514 +
56515 +config GRKERNSEC_SOCKET_ALL_GID
56516 + int "GID to deny all sockets for"
56517 + depends on GRKERNSEC_SOCKET_ALL
56518 + default 1004
56519 + help
56520 + Here you can choose the GID to disable socket access for. Remember to
56521 + add the users you want socket access disabled for to the GID
56522 + specified here. If the sysctl option is enabled, a sysctl option
56523 + with name "socket_all_gid" is created.
56524 +
56525 +config GRKERNSEC_SOCKET_CLIENT
56526 + bool "Deny client sockets to group"
56527 + depends on GRKERNSEC_SOCKET
56528 + help
56529 + If you say Y here, you will be able to choose a GID whose users will
56530 + be unable to connect to other hosts from your machine, but will be
56531 + able to run servers. If this option is enabled, all users in the group
56532 + you specify will have to use passive mode when initiating ftp transfers
56533 + from the shell on your machine. If the sysctl option is enabled, a
56534 + sysctl option with name "socket_client" is created.
56535 +
56536 +config GRKERNSEC_SOCKET_CLIENT_GID
56537 + int "GID to deny client sockets for"
56538 + depends on GRKERNSEC_SOCKET_CLIENT
56539 + default 1003
56540 + help
56541 + Here you can choose the GID to disable client socket access for.
56542 + Remember to add the users you want client socket access disabled for to
56543 + the GID specified here. If the sysctl option is enabled, a sysctl
56544 + option with name "socket_client_gid" is created.
56545 +
56546 +config GRKERNSEC_SOCKET_SERVER
56547 + bool "Deny server sockets to group"
56548 + depends on GRKERNSEC_SOCKET
56549 + help
56550 + If you say Y here, you will be able to choose a GID whose users will
56551 + be unable to run server applications from your machine. If the sysctl
56552 + option is enabled, a sysctl option with name "socket_server" is created.
56553 +
56554 +config GRKERNSEC_SOCKET_SERVER_GID
56555 + int "GID to deny server sockets for"
56556 + depends on GRKERNSEC_SOCKET_SERVER
56557 + default 1002
56558 + help
56559 + Here you can choose the GID to disable server socket access for.
56560 + Remember to add the users you want server socket access disabled for to
56561 + the GID specified here. If the sysctl option is enabled, a sysctl
56562 + option with name "socket_server_gid" is created.
56563 +
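For users placed in one of the GIDs above, the restriction shows up as ordinary socket-call failures in applications. The short sketch below (a hypothetical test program, not part of the patch) just attempts to create a TCP socket so a restricted account can observe the effect; the precise error code returned is not asserted here.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            /* Run as a user in the GID configured above; with
               GRKERNSEC_SOCKET_ALL the socket() call itself is
               expected to fail for that user. */
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            printf("socket() succeeded (fd %d); this user is not restricted\n", fd);
            close(fd);
            return 0;
    }
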
56564 +endmenu
56565 +menu "Sysctl support"
56566 +depends on GRKERNSEC && SYSCTL
56567 +
56568 +config GRKERNSEC_SYSCTL
56569 + bool "Sysctl support"
56570 + help
56571 + If you say Y here, you will be able to change the options that
56572 + grsecurity runs with at bootup, without having to recompile your
56573 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56574 + to enable (1) or disable (0) various features. All the sysctl entries
56575 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56576 + All features enabled in the kernel configuration are disabled at boot
56577 + if you do not say Y to the "Turn on features by default" option.
56578 + All options should be set at startup, and the grsec_lock entry should
56579 + be set to a non-zero value after all the options are set.
56580 + *THIS IS EXTREMELY IMPORTANT*
56581 +
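As a minimal sketch of the sequence described above (set the desired entries at startup, then lock them), a small userspace helper could write to entries named in the help texts and finish with grsec_lock. The two feature entries used here are only examples and exist solely when the corresponding options are compiled in; this is not part of the patch itself.

    #include <stdio.h>

    /* Write a single value to one grsecurity sysctl entry. */
    static int grsec_set(const char *name, const char *value)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/proc/sys/kernel/grsecurity/%s", name);
            f = fopen(path, "w");
            if (!f) {
                    perror(path);
                    return -1;
            }
            fputs(value, f);
            if (fclose(f) != 0) {
                    perror(path);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            /* Example entries from the help texts above. */
            grsec_set("linking_restrictions", "1");
            grsec_set("fifo_restrictions", "1");
            /* grsec_lock must be written last: once it is non-zero,
               the other grsecurity entries can no longer be changed. */
            return grsec_set("grsec_lock", "1") ? 1 : 0;
    }

Running such a helper as root early in the boot scripts mirrors the recommendation above that grsec_lock be set to a non-zero value only after all other options have been configured.
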
56582 +config GRKERNSEC_SYSCTL_DISTRO
56583 + bool "Extra sysctl support for distro makers (READ HELP)"
56584 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56585 + help
56586 + If you say Y here, additional sysctl options will be created
56587 + for features that affect processes running as root. Therefore,
56588 + it is critical when using this option that the grsec_lock entry be
56589 + enabled after boot. Only distros with prebuilt kernel packages
56590 + with this option enabled that can ensure grsec_lock is enabled
56591 + after boot should use this option.
56592 + *Failure to set grsec_lock after boot makes all grsec features
56593 + this option covers useless*
56594 +
56595 + Currently this option creates the following sysctl entries:
56596 + "Disable Privileged I/O": "disable_priv_io"
56597 +
56598 +config GRKERNSEC_SYSCTL_ON
56599 + bool "Turn on features by default"
56600 + depends on GRKERNSEC_SYSCTL
56601 + help
56602 + If you say Y here, instead of having all features enabled in the
56603 + kernel configuration disabled at boot time, the features will be
56604 + enabled at boot time. It is recommended you say Y here unless
56605 + there is some reason you would want all sysctl-tunable features to
56606 + be disabled by default. As mentioned elsewhere, it is important
56607 + to enable the grsec_lock entry once you have finished modifying
56608 + the sysctl entries.
56609 +
56610 +endmenu
56611 +menu "Logging Options"
56612 +depends on GRKERNSEC
56613 +
56614 +config GRKERNSEC_FLOODTIME
56615 + int "Seconds in between log messages (minimum)"
56616 + default 10
56617 + help
56618 + This option allows you to enforce the minimum number of seconds between
56619 + grsecurity log messages. The default should be suitable for most
56620 + people, however, if you choose to change it, choose a value small enough
56621 + to allow informative logs to be produced, but large enough to
56622 + prevent flooding.
56623 +
56624 +config GRKERNSEC_FLOODBURST
56625 + int "Number of messages in a burst (maximum)"
56626 + default 4
56627 + help
56628 + This option allows you to choose the maximum number of messages allowed
56629 + within the flood time interval you chose in a separate option. The
56630 + default should be suitable for most people, however if you find that
56631 + many of your logs are being interpreted as flooding, you may want to
56632 + raise this value.
56633 +
56634 +endmenu
56635 +
56636 +endmenu
56637 diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56638 --- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56639 +++ linux-2.6.32.45/grsecurity/Makefile 2011-08-17 19:02:41.000000000 -0400
56640 @@ -0,0 +1,33 @@
56641 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56642 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56643 +# into an RBAC system
56644 +#
56645 +# All code in this directory and various hooks inserted throughout the kernel
56646 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56647 +# under the GPL v2 or higher
56648 +
56649 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56650 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56651 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56652 +
56653 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56654 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56655 + gracl_learn.o grsec_log.o
56656 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56657 +
56658 +ifdef CONFIG_NET
56659 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o grsec_sock.o
56660 +endif
56661 +
56662 +ifndef CONFIG_GRKERNSEC
56663 +obj-y += grsec_disabled.o
56664 +endif
56665 +
56666 +ifdef CONFIG_GRKERNSEC_HIDESYM
56667 +extra-y := grsec_hidesym.o
56668 +$(obj)/grsec_hidesym.o:
56669 + @-chmod -f 500 /boot
56670 + @-chmod -f 500 /lib/modules
56671 + @-chmod -f 700 .
56672 + @echo ' grsec: protected kernel image paths'
56673 +endif
56674 diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56675 --- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56676 +++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56677 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56678 acpi_op_bind bind;
56679 acpi_op_unbind unbind;
56680 acpi_op_notify notify;
56681 -};
56682 +} __no_const;
56683
56684 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56685
56686 diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56687 --- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56688 +++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56689 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56690 Dock Station
56691 -------------------------------------------------------------------------- */
56692 struct acpi_dock_ops {
56693 - acpi_notify_handler handler;
56694 - acpi_notify_handler uevent;
56695 + const acpi_notify_handler handler;
56696 + const acpi_notify_handler uevent;
56697 };
56698
56699 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56700 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56701 extern int register_dock_notifier(struct notifier_block *nb);
56702 extern void unregister_dock_notifier(struct notifier_block *nb);
56703 extern int register_hotplug_dock_device(acpi_handle handle,
56704 - struct acpi_dock_ops *ops,
56705 + const struct acpi_dock_ops *ops,
56706 void *context);
56707 extern void unregister_hotplug_dock_device(acpi_handle handle);
56708 #else
56709 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56710 {
56711 }
56712 static inline int register_hotplug_dock_device(acpi_handle handle,
56713 - struct acpi_dock_ops *ops,
56714 + const struct acpi_dock_ops *ops,
56715 void *context)
56716 {
56717 return -ENODEV;
56718 diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56719 --- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56720 +++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56721 @@ -22,6 +22,12 @@
56722
56723 typedef atomic64_t atomic_long_t;
56724
56725 +#ifdef CONFIG_PAX_REFCOUNT
56726 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56727 +#else
56728 +typedef atomic64_t atomic_long_unchecked_t;
56729 +#endif
56730 +
56731 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56732
56733 static inline long atomic_long_read(atomic_long_t *l)
56734 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56735 return (long)atomic64_read(v);
56736 }
56737
56738 +#ifdef CONFIG_PAX_REFCOUNT
56739 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56740 +{
56741 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56742 +
56743 + return (long)atomic64_read_unchecked(v);
56744 +}
56745 +#endif
56746 +
56747 static inline void atomic_long_set(atomic_long_t *l, long i)
56748 {
56749 atomic64_t *v = (atomic64_t *)l;
56750 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56751 atomic64_set(v, i);
56752 }
56753
56754 +#ifdef CONFIG_PAX_REFCOUNT
56755 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56756 +{
56757 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56758 +
56759 + atomic64_set_unchecked(v, i);
56760 +}
56761 +#endif
56762 +
56763 static inline void atomic_long_inc(atomic_long_t *l)
56764 {
56765 atomic64_t *v = (atomic64_t *)l;
56766 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56767 atomic64_inc(v);
56768 }
56769
56770 +#ifdef CONFIG_PAX_REFCOUNT
56771 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56772 +{
56773 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56774 +
56775 + atomic64_inc_unchecked(v);
56776 +}
56777 +#endif
56778 +
56779 static inline void atomic_long_dec(atomic_long_t *l)
56780 {
56781 atomic64_t *v = (atomic64_t *)l;
56782 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56783 atomic64_dec(v);
56784 }
56785
56786 +#ifdef CONFIG_PAX_REFCOUNT
56787 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56788 +{
56789 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56790 +
56791 + atomic64_dec_unchecked(v);
56792 +}
56793 +#endif
56794 +
56795 static inline void atomic_long_add(long i, atomic_long_t *l)
56796 {
56797 atomic64_t *v = (atomic64_t *)l;
56798 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56799 atomic64_add(i, v);
56800 }
56801
56802 +#ifdef CONFIG_PAX_REFCOUNT
56803 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56804 +{
56805 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56806 +
56807 + atomic64_add_unchecked(i, v);
56808 +}
56809 +#endif
56810 +
56811 static inline void atomic_long_sub(long i, atomic_long_t *l)
56812 {
56813 atomic64_t *v = (atomic64_t *)l;
56814 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56815 return (long)atomic64_inc_return(v);
56816 }
56817
56818 +#ifdef CONFIG_PAX_REFCOUNT
56819 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56820 +{
56821 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56822 +
56823 + return (long)atomic64_inc_return_unchecked(v);
56824 +}
56825 +#endif
56826 +
56827 static inline long atomic_long_dec_return(atomic_long_t *l)
56828 {
56829 atomic64_t *v = (atomic64_t *)l;
56830 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56831
56832 typedef atomic_t atomic_long_t;
56833
56834 +#ifdef CONFIG_PAX_REFCOUNT
56835 +typedef atomic_unchecked_t atomic_long_unchecked_t;
56836 +#else
56837 +typedef atomic_t atomic_long_unchecked_t;
56838 +#endif
56839 +
56840 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56841 static inline long atomic_long_read(atomic_long_t *l)
56842 {
56843 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56844 return (long)atomic_read(v);
56845 }
56846
56847 +#ifdef CONFIG_PAX_REFCOUNT
56848 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56849 +{
56850 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56851 +
56852 + return (long)atomic_read_unchecked(v);
56853 +}
56854 +#endif
56855 +
56856 static inline void atomic_long_set(atomic_long_t *l, long i)
56857 {
56858 atomic_t *v = (atomic_t *)l;
56859 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56860 atomic_set(v, i);
56861 }
56862
56863 +#ifdef CONFIG_PAX_REFCOUNT
56864 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56865 +{
56866 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56867 +
56868 + atomic_set_unchecked(v, i);
56869 +}
56870 +#endif
56871 +
56872 static inline void atomic_long_inc(atomic_long_t *l)
56873 {
56874 atomic_t *v = (atomic_t *)l;
56875 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56876 atomic_inc(v);
56877 }
56878
56879 +#ifdef CONFIG_PAX_REFCOUNT
56880 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56881 +{
56882 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56883 +
56884 + atomic_inc_unchecked(v);
56885 +}
56886 +#endif
56887 +
56888 static inline void atomic_long_dec(atomic_long_t *l)
56889 {
56890 atomic_t *v = (atomic_t *)l;
56891 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56892 atomic_dec(v);
56893 }
56894
56895 +#ifdef CONFIG_PAX_REFCOUNT
56896 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56897 +{
56898 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56899 +
56900 + atomic_dec_unchecked(v);
56901 +}
56902 +#endif
56903 +
56904 static inline void atomic_long_add(long i, atomic_long_t *l)
56905 {
56906 atomic_t *v = (atomic_t *)l;
56907 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56908 atomic_add(i, v);
56909 }
56910
56911 +#ifdef CONFIG_PAX_REFCOUNT
56912 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56913 +{
56914 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56915 +
56916 + atomic_add_unchecked(i, v);
56917 +}
56918 +#endif
56919 +
56920 static inline void atomic_long_sub(long i, atomic_long_t *l)
56921 {
56922 atomic_t *v = (atomic_t *)l;
56923 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56924 return (long)atomic_inc_return(v);
56925 }
56926
56927 +#ifdef CONFIG_PAX_REFCOUNT
56928 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56929 +{
56930 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56931 +
56932 + return (long)atomic_inc_return_unchecked(v);
56933 +}
56934 +#endif
56935 +
56936 static inline long atomic_long_dec_return(atomic_long_t *l)
56937 {
56938 atomic_t *v = (atomic_t *)l;
56939 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56940
56941 #endif /* BITS_PER_LONG == 64 */
56942
56943 +#ifdef CONFIG_PAX_REFCOUNT
56944 +static inline void pax_refcount_needs_these_functions(void)
56945 +{
56946 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
56947 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56948 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56949 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56950 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56951 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56952 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56953 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56954 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56955 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56956 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56957 +
56958 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56959 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56960 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56961 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56962 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56963 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56964 +}
56965 +#else
56966 +#define atomic_read_unchecked(v) atomic_read(v)
56967 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56968 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56969 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56970 +#define atomic_inc_unchecked(v) atomic_inc(v)
56971 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56972 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56973 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56974 +#define atomic_dec_unchecked(v) atomic_dec(v)
56975 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56976 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56977 +
56978 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
56979 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56980 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56981 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56982 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56983 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56984 +#endif
56985 +
56986 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
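To make the purpose of the *_unchecked variants above concrete, here is a small kernel-style sketch (the struct and function names are hypothetical, not taken from the patch): a reference count keeps the regular atomic_t type so it benefits from PAX_REFCOUNT's overflow checking, while a statistics counter that is allowed to wrap uses the unchecked type and wrappers, which the fallback #defines above turn back into the plain operations on kernels without PAX_REFCOUNT.

    #include <asm/atomic.h>  /* 2.6.32 location of the atomic types;
                                pulls in asm-generic/atomic-long.h */

    /* Hypothetical object illustrating when each type applies. */
    struct example_stats {
            atomic_t                refs;       /* overflow-checked under PAX_REFCOUNT */
            atomic_long_unchecked_t rx_packets; /* free-running counter, exempt */
    };

    static inline void example_get(struct example_stats *s)
    {
            atomic_inc(&s->refs);
    }

    static inline void example_account_rx(struct example_stats *s)
    {
            atomic_long_inc_unchecked(&s->rx_packets);
    }
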
56987 diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56988 --- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56989 +++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56990 @@ -6,7 +6,7 @@
56991 * cache lines need to provide their own cache.h.
56992 */
56993
56994 -#define L1_CACHE_SHIFT 5
56995 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56996 +#define L1_CACHE_SHIFT 5UL
56997 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56998
56999 #endif /* __ASM_GENERIC_CACHE_H */
57000 diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
57001 --- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
57002 +++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
57003 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
57004 enum dma_data_direction dir,
57005 struct dma_attrs *attrs)
57006 {
57007 - struct dma_map_ops *ops = get_dma_ops(dev);
57008 + const struct dma_map_ops *ops = get_dma_ops(dev);
57009 dma_addr_t addr;
57010
57011 kmemcheck_mark_initialized(ptr, size);
57012 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
57013 enum dma_data_direction dir,
57014 struct dma_attrs *attrs)
57015 {
57016 - struct dma_map_ops *ops = get_dma_ops(dev);
57017 + const struct dma_map_ops *ops = get_dma_ops(dev);
57018
57019 BUG_ON(!valid_dma_direction(dir));
57020 if (ops->unmap_page)
57021 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
57022 int nents, enum dma_data_direction dir,
57023 struct dma_attrs *attrs)
57024 {
57025 - struct dma_map_ops *ops = get_dma_ops(dev);
57026 + const struct dma_map_ops *ops = get_dma_ops(dev);
57027 int i, ents;
57028 struct scatterlist *s;
57029
57030 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
57031 int nents, enum dma_data_direction dir,
57032 struct dma_attrs *attrs)
57033 {
57034 - struct dma_map_ops *ops = get_dma_ops(dev);
57035 + const struct dma_map_ops *ops = get_dma_ops(dev);
57036
57037 BUG_ON(!valid_dma_direction(dir));
57038 debug_dma_unmap_sg(dev, sg, nents, dir);
57039 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
57040 size_t offset, size_t size,
57041 enum dma_data_direction dir)
57042 {
57043 - struct dma_map_ops *ops = get_dma_ops(dev);
57044 + const struct dma_map_ops *ops = get_dma_ops(dev);
57045 dma_addr_t addr;
57046
57047 kmemcheck_mark_initialized(page_address(page) + offset, size);
57048 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
57049 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
57050 size_t size, enum dma_data_direction dir)
57051 {
57052 - struct dma_map_ops *ops = get_dma_ops(dev);
57053 + const struct dma_map_ops *ops = get_dma_ops(dev);
57054
57055 BUG_ON(!valid_dma_direction(dir));
57056 if (ops->unmap_page)
57057 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
57058 size_t size,
57059 enum dma_data_direction dir)
57060 {
57061 - struct dma_map_ops *ops = get_dma_ops(dev);
57062 + const struct dma_map_ops *ops = get_dma_ops(dev);
57063
57064 BUG_ON(!valid_dma_direction(dir));
57065 if (ops->sync_single_for_cpu)
57066 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57067 dma_addr_t addr, size_t size,
57068 enum dma_data_direction dir)
57069 {
57070 - struct dma_map_ops *ops = get_dma_ops(dev);
57071 + const struct dma_map_ops *ops = get_dma_ops(dev);
57072
57073 BUG_ON(!valid_dma_direction(dir));
57074 if (ops->sync_single_for_device)
57075 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57076 size_t size,
57077 enum dma_data_direction dir)
57078 {
57079 - struct dma_map_ops *ops = get_dma_ops(dev);
57080 + const struct dma_map_ops *ops = get_dma_ops(dev);
57081
57082 BUG_ON(!valid_dma_direction(dir));
57083 if (ops->sync_single_range_for_cpu) {
57084 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57085 size_t size,
57086 enum dma_data_direction dir)
57087 {
57088 - struct dma_map_ops *ops = get_dma_ops(dev);
57089 + const struct dma_map_ops *ops = get_dma_ops(dev);
57090
57091 BUG_ON(!valid_dma_direction(dir));
57092 if (ops->sync_single_range_for_device) {
57093 @@ -155,7 +155,7 @@ static inline void
57094 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57095 int nelems, enum dma_data_direction dir)
57096 {
57097 - struct dma_map_ops *ops = get_dma_ops(dev);
57098 + const struct dma_map_ops *ops = get_dma_ops(dev);
57099
57100 BUG_ON(!valid_dma_direction(dir));
57101 if (ops->sync_sg_for_cpu)
57102 @@ -167,7 +167,7 @@ static inline void
57103 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57104 int nelems, enum dma_data_direction dir)
57105 {
57106 - struct dma_map_ops *ops = get_dma_ops(dev);
57107 + const struct dma_map_ops *ops = get_dma_ops(dev);
57108
57109 BUG_ON(!valid_dma_direction(dir));
57110 if (ops->sync_sg_for_device)
57111 diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
57112 --- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57113 +++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57114 @@ -6,7 +6,7 @@
57115 #include <asm/errno.h>
57116
57117 static inline int
57118 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57119 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57120 {
57121 int op = (encoded_op >> 28) & 7;
57122 int cmp = (encoded_op >> 24) & 15;
57123 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57124 }
57125
57126 static inline int
57127 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57128 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57129 {
57130 return -ENOSYS;
57131 }
57132 diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
57133 --- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57134 +++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57135 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57136 typedef signed long s64;
57137 typedef unsigned long u64;
57138
57139 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57140 +
57141 #define S8_C(x) x
57142 #define U8_C(x) x ## U
57143 #define S16_C(x) x
57144 diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
57145 --- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57146 +++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57147 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57148 typedef signed long long s64;
57149 typedef unsigned long long u64;
57150
57151 +typedef unsigned long long intoverflow_t;
57152 +
57153 #define S8_C(x) x
57154 #define U8_C(x) x ## U
57155 #define S16_C(x) x
57156 diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
57157 --- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57158 +++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57159 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57160 KMAP_D(16) KM_IRQ_PTE,
57161 KMAP_D(17) KM_NMI,
57162 KMAP_D(18) KM_NMI_PTE,
57163 -KMAP_D(19) KM_TYPE_NR
57164 +KMAP_D(19) KM_CLEARPAGE,
57165 +KMAP_D(20) KM_TYPE_NR
57166 };
57167
57168 #undef KMAP_D
57169 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
57170 --- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57171 +++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57172 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57173 unsigned long size);
57174 #endif
57175
57176 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57177 +static inline unsigned long pax_open_kernel(void) { return 0; }
57178 +#endif
57179 +
57180 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57181 +static inline unsigned long pax_close_kernel(void) { return 0; }
57182 +#endif
57183 +
57184 #endif /* !__ASSEMBLY__ */
57185
57186 #endif /* _ASM_GENERIC_PGTABLE_H */
57187 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
57188 --- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57189 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57190 @@ -1,14 +1,19 @@
57191 #ifndef _PGTABLE_NOPMD_H
57192 #define _PGTABLE_NOPMD_H
57193
57194 -#ifndef __ASSEMBLY__
57195 -
57196 #include <asm-generic/pgtable-nopud.h>
57197
57198 -struct mm_struct;
57199 -
57200 #define __PAGETABLE_PMD_FOLDED
57201
57202 +#define PMD_SHIFT PUD_SHIFT
57203 +#define PTRS_PER_PMD 1
57204 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57205 +#define PMD_MASK (~(PMD_SIZE-1))
57206 +
57207 +#ifndef __ASSEMBLY__
57208 +
57209 +struct mm_struct;
57210 +
57211 /*
57212 * Having the pmd type consist of a pud gets the size right, and allows
57213 * us to conceptually access the pud entry that this pmd is folded into
57214 @@ -16,11 +21,6 @@ struct mm_struct;
57215 */
57216 typedef struct { pud_t pud; } pmd_t;
57217
57218 -#define PMD_SHIFT PUD_SHIFT
57219 -#define PTRS_PER_PMD 1
57220 -#define PMD_SIZE (1UL << PMD_SHIFT)
57221 -#define PMD_MASK (~(PMD_SIZE-1))
57222 -
57223 /*
57224 * The "pud_xxx()" functions here are trivial for a folded two-level
57225 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57226 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
57227 --- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57228 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57229 @@ -1,10 +1,15 @@
57230 #ifndef _PGTABLE_NOPUD_H
57231 #define _PGTABLE_NOPUD_H
57232
57233 -#ifndef __ASSEMBLY__
57234 -
57235 #define __PAGETABLE_PUD_FOLDED
57236
57237 +#define PUD_SHIFT PGDIR_SHIFT
57238 +#define PTRS_PER_PUD 1
57239 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57240 +#define PUD_MASK (~(PUD_SIZE-1))
57241 +
57242 +#ifndef __ASSEMBLY__
57243 +
57244 /*
57245 * Having the pud type consist of a pgd gets the size right, and allows
57246 * us to conceptually access the pgd entry that this pud is folded into
57247 @@ -12,11 +17,6 @@
57248 */
57249 typedef struct { pgd_t pgd; } pud_t;
57250
57251 -#define PUD_SHIFT PGDIR_SHIFT
57252 -#define PTRS_PER_PUD 1
57253 -#define PUD_SIZE (1UL << PUD_SHIFT)
57254 -#define PUD_MASK (~(PUD_SIZE-1))
57255 -
57256 /*
57257 * The "pgd_xxx()" functions here are trivial for a folded two-level
57258 * setup: the pud is never bad, and a pud always exists (as it's folded
57259 diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
57260 --- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57261 +++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57262 @@ -199,6 +199,7 @@
57263 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57264 VMLINUX_SYMBOL(__start_rodata) = .; \
57265 *(.rodata) *(.rodata.*) \
57266 + *(.data.read_only) \
57267 *(__vermagic) /* Kernel version magic */ \
57268 *(__markers_strings) /* Markers: strings */ \
57269 *(__tracepoints_strings)/* Tracepoints: strings */ \
57270 @@ -656,22 +657,24 @@
57271 * section in the linker script will go there too. @phdr should have
57272 * a leading colon.
57273 *
57274 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57275 + * Note that this macro defines per_cpu_load as an absolute symbol.
57276 * If there is no need to put the percpu section at a predetermined
57277 * address, use PERCPU().
57278 */
57279 #define PERCPU_VADDR(vaddr, phdr) \
57280 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57281 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57282 + per_cpu_load = .; \
57283 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57284 - LOAD_OFFSET) { \
57285 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57286 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57287 *(.data.percpu.first) \
57288 - *(.data.percpu.page_aligned) \
57289 *(.data.percpu) \
57290 + . = ALIGN(PAGE_SIZE); \
57291 + *(.data.percpu.page_aligned) \
57292 *(.data.percpu.shared_aligned) \
57293 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57294 } phdr \
57295 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57296 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57297
57298 /**
57299 * PERCPU - define output section for percpu area, simple version
57300 diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57301 --- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57302 +++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57303 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57304
57305 /* reload the current crtc LUT */
57306 void (*load_lut)(struct drm_crtc *crtc);
57307 -};
57308 +} __no_const;
57309
57310 struct drm_encoder_helper_funcs {
57311 void (*dpms)(struct drm_encoder *encoder, int mode);
57312 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57313 struct drm_connector *connector);
57314 /* disable encoder when not in use - more explicit than dpms off */
57315 void (*disable)(struct drm_encoder *encoder);
57316 -};
57317 +} __no_const;
57318
57319 struct drm_connector_helper_funcs {
57320 int (*get_modes)(struct drm_connector *connector);
57321 diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57322 --- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57323 +++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57324 @@ -71,6 +71,7 @@
57325 #include <linux/workqueue.h>
57326 #include <linux/poll.h>
57327 #include <asm/pgalloc.h>
57328 +#include <asm/local.h>
57329 #include "drm.h"
57330
57331 #include <linux/idr.h>
57332 @@ -814,7 +815,7 @@ struct drm_driver {
57333 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57334
57335 /* Driver private ops for this object */
57336 - struct vm_operations_struct *gem_vm_ops;
57337 + const struct vm_operations_struct *gem_vm_ops;
57338
57339 int major;
57340 int minor;
57341 @@ -917,7 +918,7 @@ struct drm_device {
57342
57343 /** \name Usage Counters */
57344 /*@{ */
57345 - int open_count; /**< Outstanding files open */
57346 + local_t open_count; /**< Outstanding files open */
57347 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57348 atomic_t vma_count; /**< Outstanding vma areas open */
57349 int buf_use; /**< Buffers in use -- cannot alloc */
57350 @@ -928,7 +929,7 @@ struct drm_device {
57351 /*@{ */
57352 unsigned long counters;
57353 enum drm_stat_type types[15];
57354 - atomic_t counts[15];
57355 + atomic_unchecked_t counts[15];
57356 /*@} */
57357
57358 struct list_head filelist;
57359 @@ -1016,7 +1017,7 @@ struct drm_device {
57360 struct pci_controller *hose;
57361 #endif
57362 struct drm_sg_mem *sg; /**< Scatter gather memory */
57363 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
57364 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
57365 void *dev_private; /**< device private data */
57366 void *mm_private;
57367 struct address_space *dev_mapping;
57368 @@ -1042,11 +1043,11 @@ struct drm_device {
57369 spinlock_t object_name_lock;
57370 struct idr object_name_idr;
57371 atomic_t object_count;
57372 - atomic_t object_memory;
57373 + atomic_unchecked_t object_memory;
57374 atomic_t pin_count;
57375 - atomic_t pin_memory;
57376 + atomic_unchecked_t pin_memory;
57377 atomic_t gtt_count;
57378 - atomic_t gtt_memory;
57379 + atomic_unchecked_t gtt_memory;
57380 uint32_t gtt_total;
57381 uint32_t invalidate_domains; /* domains pending invalidation */
57382 uint32_t flush_domains; /* domains pending flush */
57383 diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57384 --- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57385 +++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57386 @@ -47,7 +47,7 @@
57387
57388 struct ttm_mem_shrink {
57389 int (*do_shrink) (struct ttm_mem_shrink *);
57390 -};
57391 +} __no_const;
57392
57393 /**
57394 * struct ttm_mem_global - Global memory accounting structure.
57395 diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57396 --- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57397 +++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57398 @@ -39,6 +39,14 @@ enum machine_type {
57399 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57400 };
57401
57402 +/* Constants for the N_FLAGS field */
57403 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57404 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57405 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57406 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57407 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57408 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57409 +
57410 #if !defined (N_MAGIC)
57411 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57412 #endif
57413 diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57414 --- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57415 +++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57416 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57417 #endif
57418
57419 struct k_atm_aal_stats {
57420 -#define __HANDLE_ITEM(i) atomic_t i
57421 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57422 __AAL_STAT_ITEMS
57423 #undef __HANDLE_ITEM
57424 };
57425 diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57426 --- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57427 +++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57428 @@ -36,18 +36,18 @@ struct backlight_device;
57429 struct fb_info;
57430
57431 struct backlight_ops {
57432 - unsigned int options;
57433 + const unsigned int options;
57434
57435 #define BL_CORE_SUSPENDRESUME (1 << 0)
57436
57437 /* Notify the backlight driver some property has changed */
57438 - int (*update_status)(struct backlight_device *);
57439 + int (* const update_status)(struct backlight_device *);
57440 /* Return the current backlight brightness (accounting for power,
57441 fb_blank etc.) */
57442 - int (*get_brightness)(struct backlight_device *);
57443 + int (* const get_brightness)(struct backlight_device *);
57444 /* Check if given framebuffer device is the one bound to this backlight;
57445 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57446 - int (*check_fb)(struct fb_info *);
57447 + int (* const check_fb)(struct fb_info *);
57448 };
57449
57450 /* This structure defines all the properties of a backlight */
57451 @@ -86,7 +86,7 @@ struct backlight_device {
57452 registered this device has been unloaded, and if class_get_devdata()
57453 points to something in the body of that driver, it is also invalid. */
57454 struct mutex ops_lock;
57455 - struct backlight_ops *ops;
57456 + const struct backlight_ops *ops;
57457
57458 /* The framebuffer notifier block */
57459 struct notifier_block fb_notif;
57460 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
57461 }
57462
57463 extern struct backlight_device *backlight_device_register(const char *name,
57464 - struct device *dev, void *devdata, struct backlight_ops *ops);
57465 + struct device *dev, void *devdata, const struct backlight_ops *ops);
57466 extern void backlight_device_unregister(struct backlight_device *bd);
57467 extern void backlight_force_update(struct backlight_device *bd,
57468 enum backlight_update_reason reason);
57469 diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57470 --- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57471 +++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57472 @@ -83,6 +83,7 @@ struct linux_binfmt {
57473 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57474 int (*load_shlib)(struct file *);
57475 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57476 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57477 unsigned long min_coredump; /* minimal dump size */
57478 int hasvdso;
57479 };
57480 diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57481 --- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57482 +++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57483 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57484 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57485
57486 struct block_device_operations {
57487 - int (*open) (struct block_device *, fmode_t);
57488 - int (*release) (struct gendisk *, fmode_t);
57489 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57490 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57491 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57492 - int (*direct_access) (struct block_device *, sector_t,
57493 + int (* const open) (struct block_device *, fmode_t);
57494 + int (* const release) (struct gendisk *, fmode_t);
57495 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57496 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57497 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57498 + int (* const direct_access) (struct block_device *, sector_t,
57499 void **, unsigned long *);
57500 - int (*media_changed) (struct gendisk *);
57501 - unsigned long long (*set_capacity) (struct gendisk *,
57502 + int (* const media_changed) (struct gendisk *);
57503 + unsigned long long (* const set_capacity) (struct gendisk *,
57504 unsigned long long);
57505 - int (*revalidate_disk) (struct gendisk *);
57506 - int (*getgeo)(struct block_device *, struct hd_geometry *);
57507 - struct module *owner;
57508 + int (* const revalidate_disk) (struct gendisk *);
57509 + int (* const getgeo)(struct block_device *, struct hd_geometry *);
57510 + struct module * const owner;
57511 };
57512
57513 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57514 diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57515 --- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57516 +++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57517 @@ -160,7 +160,7 @@ struct blk_trace {
57518 struct dentry *dir;
57519 struct dentry *dropped_file;
57520 struct dentry *msg_file;
57521 - atomic_t dropped;
57522 + atomic_unchecked_t dropped;
57523 };
57524
57525 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57526 diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57527 --- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57528 +++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57529 @@ -42,51 +42,51 @@
57530
57531 static inline __le64 __cpu_to_le64p(const __u64 *p)
57532 {
57533 - return (__force __le64)*p;
57534 + return (__force const __le64)*p;
57535 }
57536 static inline __u64 __le64_to_cpup(const __le64 *p)
57537 {
57538 - return (__force __u64)*p;
57539 + return (__force const __u64)*p;
57540 }
57541 static inline __le32 __cpu_to_le32p(const __u32 *p)
57542 {
57543 - return (__force __le32)*p;
57544 + return (__force const __le32)*p;
57545 }
57546 static inline __u32 __le32_to_cpup(const __le32 *p)
57547 {
57548 - return (__force __u32)*p;
57549 + return (__force const __u32)*p;
57550 }
57551 static inline __le16 __cpu_to_le16p(const __u16 *p)
57552 {
57553 - return (__force __le16)*p;
57554 + return (__force const __le16)*p;
57555 }
57556 static inline __u16 __le16_to_cpup(const __le16 *p)
57557 {
57558 - return (__force __u16)*p;
57559 + return (__force const __u16)*p;
57560 }
57561 static inline __be64 __cpu_to_be64p(const __u64 *p)
57562 {
57563 - return (__force __be64)__swab64p(p);
57564 + return (__force const __be64)__swab64p(p);
57565 }
57566 static inline __u64 __be64_to_cpup(const __be64 *p)
57567 {
57568 - return __swab64p((__u64 *)p);
57569 + return __swab64p((const __u64 *)p);
57570 }
57571 static inline __be32 __cpu_to_be32p(const __u32 *p)
57572 {
57573 - return (__force __be32)__swab32p(p);
57574 + return (__force const __be32)__swab32p(p);
57575 }
57576 static inline __u32 __be32_to_cpup(const __be32 *p)
57577 {
57578 - return __swab32p((__u32 *)p);
57579 + return __swab32p((const __u32 *)p);
57580 }
57581 static inline __be16 __cpu_to_be16p(const __u16 *p)
57582 {
57583 - return (__force __be16)__swab16p(p);
57584 + return (__force const __be16)__swab16p(p);
57585 }
57586 static inline __u16 __be16_to_cpup(const __be16 *p)
57587 {
57588 - return __swab16p((__u16 *)p);
57589 + return __swab16p((const __u16 *)p);
57590 }
57591 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57592 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57593 diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57594 --- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57595 +++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57596 @@ -16,6 +16,10 @@
57597 #define __read_mostly
57598 #endif
57599
57600 +#ifndef __read_only
57601 +#define __read_only __read_mostly
57602 +#endif
57603 +
57604 #ifndef ____cacheline_aligned
57605 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57606 #endif
57607 diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57608 --- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57609 +++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57610 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57611 (security_real_capable_noaudit((t), (cap)) == 0)
57612
57613 extern int capable(int cap);
57614 +int capable_nolog(int cap);
57615
57616 /* audit system wants to get cap info from files as well */
57617 struct dentry;
57618 diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57619 --- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57620 +++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57621 @@ -36,4 +36,13 @@
57622 the kernel context */
57623 #define __cold __attribute__((__cold__))
57624
57625 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57626 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57627 +#define __bos0(ptr) __bos((ptr), 0)
57628 +#define __bos1(ptr) __bos((ptr), 1)
57629 +
57630 +#if __GNUC_MINOR__ >= 5
57631 +#define __no_const __attribute__((no_const))
57632 +#endif
57633 +
57634 #endif
57635 diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57636 --- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57637 +++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57638 @@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57639 # define __attribute_const__ /* unimplemented */
57640 #endif
57641
57642 +#ifndef __no_const
57643 +# define __no_const
57644 +#endif
57645 +
57646 /*
57647 * Tell gcc if a function is cold. The compiler will assume any path
57648 * directly leading to the call is unlikely.
57649 @@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57650 #define __cold
57651 #endif
57652
57653 +#ifndef __alloc_size
57654 +#define __alloc_size(...)
57655 +#endif
57656 +
57657 +#ifndef __bos
57658 +#define __bos(ptr, arg)
57659 +#endif
57660 +
57661 +#ifndef __bos0
57662 +#define __bos0(ptr)
57663 +#endif
57664 +
57665 +#ifndef __bos1
57666 +#define __bos1(ptr)
57667 +#endif
57668 +
57669 /* Simple shorthand for a section definition */
57670 #ifndef __section
57671 # define __section(S) __attribute__ ((__section__(#S)))
57672 @@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57673 * use is to mediate communication between process-level code and irq/NMI
57674 * handlers, all running on the same CPU.
57675 */
57676 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57677 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57678 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57679
57680 #endif /* __LINUX_COMPILER_H */
57681 diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57682 --- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57683 +++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57684 @@ -394,7 +394,7 @@ struct cipher_tfm {
57685 const u8 *key, unsigned int keylen);
57686 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57687 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57688 -};
57689 +} __no_const;
57690
57691 struct hash_tfm {
57692 int (*init)(struct hash_desc *desc);
57693 @@ -415,13 +415,13 @@ struct compress_tfm {
57694 int (*cot_decompress)(struct crypto_tfm *tfm,
57695 const u8 *src, unsigned int slen,
57696 u8 *dst, unsigned int *dlen);
57697 -};
57698 +} __no_const;
57699
57700 struct rng_tfm {
57701 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57702 unsigned int dlen);
57703 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57704 -};
57705 +} __no_const;
57706
57707 #define crt_ablkcipher crt_u.ablkcipher
57708 #define crt_aead crt_u.aead
57709 diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57710 --- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57711 +++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57712 @@ -119,6 +119,8 @@ struct dentry {
57713 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57714 };
57715
57716 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57717 +
57718 /*
57719 * dentry->d_lock spinlock nesting subclasses:
57720 *
57721 diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57722 --- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57723 +++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57724 @@ -78,7 +78,7 @@ static void free(void *where)
57725 * warnings when not needed (indeed large_malloc / large_free are not
57726 * needed by inflate */
57727
57728 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57729 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57730 #define free(a) kfree(a)
57731
57732 #define large_malloc(a) vmalloc(a)
57733 diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57734 --- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57735 +++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57736 @@ -16,50 +16,50 @@ enum dma_data_direction {
57737 };
57738
57739 struct dma_map_ops {
57740 - void* (*alloc_coherent)(struct device *dev, size_t size,
57741 + void* (* const alloc_coherent)(struct device *dev, size_t size,
57742 dma_addr_t *dma_handle, gfp_t gfp);
57743 - void (*free_coherent)(struct device *dev, size_t size,
57744 + void (* const free_coherent)(struct device *dev, size_t size,
57745 void *vaddr, dma_addr_t dma_handle);
57746 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
57747 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57748 unsigned long offset, size_t size,
57749 enum dma_data_direction dir,
57750 struct dma_attrs *attrs);
57751 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57752 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57753 size_t size, enum dma_data_direction dir,
57754 struct dma_attrs *attrs);
57755 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
57756 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57757 int nents, enum dma_data_direction dir,
57758 struct dma_attrs *attrs);
57759 - void (*unmap_sg)(struct device *dev,
57760 + void (* const unmap_sg)(struct device *dev,
57761 struct scatterlist *sg, int nents,
57762 enum dma_data_direction dir,
57763 struct dma_attrs *attrs);
57764 - void (*sync_single_for_cpu)(struct device *dev,
57765 + void (* const sync_single_for_cpu)(struct device *dev,
57766 dma_addr_t dma_handle, size_t size,
57767 enum dma_data_direction dir);
57768 - void (*sync_single_for_device)(struct device *dev,
57769 + void (* const sync_single_for_device)(struct device *dev,
57770 dma_addr_t dma_handle, size_t size,
57771 enum dma_data_direction dir);
57772 - void (*sync_single_range_for_cpu)(struct device *dev,
57773 + void (* const sync_single_range_for_cpu)(struct device *dev,
57774 dma_addr_t dma_handle,
57775 unsigned long offset,
57776 size_t size,
57777 enum dma_data_direction dir);
57778 - void (*sync_single_range_for_device)(struct device *dev,
57779 + void (* const sync_single_range_for_device)(struct device *dev,
57780 dma_addr_t dma_handle,
57781 unsigned long offset,
57782 size_t size,
57783 enum dma_data_direction dir);
57784 - void (*sync_sg_for_cpu)(struct device *dev,
57785 + void (* const sync_sg_for_cpu)(struct device *dev,
57786 struct scatterlist *sg, int nents,
57787 enum dma_data_direction dir);
57788 - void (*sync_sg_for_device)(struct device *dev,
57789 + void (* const sync_sg_for_device)(struct device *dev,
57790 struct scatterlist *sg, int nents,
57791 enum dma_data_direction dir);
57792 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57793 - int (*dma_supported)(struct device *dev, u64 mask);
57794 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57795 + int (* const dma_supported)(struct device *dev, u64 mask);
57796 int (*set_dma_mask)(struct device *dev, u64 mask);
57797 - int is_phys;
57798 + const int is_phys;
57799 };
57800
57801 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57802 diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57803 --- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57804 +++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57805 @@ -380,7 +380,7 @@ struct dst_node
57806 struct thread_pool *pool;
57807
57808 /* Transaction IDs live here */
57809 - atomic_long_t gen;
57810 + atomic_long_unchecked_t gen;
57811
57812 /*
57813 * How frequently and how many times transaction
57814 diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57815 --- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57816 +++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57817 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57818 #define PT_GNU_EH_FRAME 0x6474e550
57819
57820 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57821 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57822 +
57823 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57824 +
57825 +/* Constants for the e_flags field */
57826 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57827 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57828 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57829 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57830 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57831 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57832
57833 /* These constants define the different elf file types */
57834 #define ET_NONE 0
57835 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57836 #define DT_DEBUG 21
57837 #define DT_TEXTREL 22
57838 #define DT_JMPREL 23
57839 +#define DT_FLAGS 30
57840 + #define DF_TEXTREL 0x00000004
57841 #define DT_ENCODING 32
57842 #define OLD_DT_LOOS 0x60000000
57843 #define DT_LOOS 0x6000000d
57844 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57845 #define PF_W 0x2
57846 #define PF_X 0x1
57847
57848 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57849 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57850 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57851 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57852 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57853 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57854 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57855 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57856 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57857 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57858 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57859 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57860 +
57861 typedef struct elf32_phdr{
57862 Elf32_Word p_type;
57863 Elf32_Off p_offset;
57864 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57865 #define EI_OSABI 7
57866 #define EI_PAD 8
57867
57868 +#define EI_PAX 14
57869 +
57870 #define ELFMAG0 0x7f /* EI_MAG */
57871 #define ELFMAG1 'E'
57872 #define ELFMAG2 'L'
57873 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57874 #define elf_phdr elf32_phdr
57875 #define elf_note elf32_note
57876 #define elf_addr_t Elf32_Off
57877 +#define elf_dyn Elf32_Dyn
57878
57879 #else
57880
57881 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57882 #define elf_phdr elf64_phdr
57883 #define elf_note elf64_note
57884 #define elf_addr_t Elf64_Off
57885 +#define elf_dyn Elf64_Dyn
57886
57887 #endif
57888
57889 diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57890 --- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57891 +++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57892 @@ -116,7 +116,7 @@ struct fscache_operation {
57893 #endif
57894 };
57895
57896 -extern atomic_t fscache_op_debug_id;
57897 +extern atomic_unchecked_t fscache_op_debug_id;
57898 extern const struct slow_work_ops fscache_op_slow_work_ops;
57899
57900 extern void fscache_enqueue_operation(struct fscache_operation *);
57901 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57902 fscache_operation_release_t release)
57903 {
57904 atomic_set(&op->usage, 1);
57905 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57906 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57907 op->release = release;
57908 INIT_LIST_HEAD(&op->pend_link);
57909 fscache_set_op_state(op, "Init");
57910 diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57911 --- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57912 +++ linux-2.6.32.45/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
57913 @@ -90,6 +90,11 @@ struct inodes_stat_t {
57914 /* Expect random access pattern */
57915 #define FMODE_RANDOM ((__force fmode_t)4096)
57916
57917 +/* Hack for grsec so as not to require read permission simply to execute
57918 + * a binary
57919 + */
57920 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57921 +
57922 /*
57923 * The below are the various read and write types that we support. Some of
57924 * them include behavioral modifiers that send information down to the
57925 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57926 unsigned long, unsigned long);
57927
57928 struct address_space_operations {
57929 - int (*writepage)(struct page *page, struct writeback_control *wbc);
57930 - int (*readpage)(struct file *, struct page *);
57931 - void (*sync_page)(struct page *);
57932 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
57933 + int (* const readpage)(struct file *, struct page *);
57934 + void (* const sync_page)(struct page *);
57935
57936 /* Write back some dirty pages from this mapping. */
57937 - int (*writepages)(struct address_space *, struct writeback_control *);
57938 + int (* const writepages)(struct address_space *, struct writeback_control *);
57939
57940 /* Set a page dirty. Return true if this dirtied it */
57941 - int (*set_page_dirty)(struct page *page);
57942 + int (* const set_page_dirty)(struct page *page);
57943
57944 - int (*readpages)(struct file *filp, struct address_space *mapping,
57945 + int (* const readpages)(struct file *filp, struct address_space *mapping,
57946 struct list_head *pages, unsigned nr_pages);
57947
57948 - int (*write_begin)(struct file *, struct address_space *mapping,
57949 + int (* const write_begin)(struct file *, struct address_space *mapping,
57950 loff_t pos, unsigned len, unsigned flags,
57951 struct page **pagep, void **fsdata);
57952 - int (*write_end)(struct file *, struct address_space *mapping,
57953 + int (* const write_end)(struct file *, struct address_space *mapping,
57954 loff_t pos, unsigned len, unsigned copied,
57955 struct page *page, void *fsdata);
57956
57957 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57958 - sector_t (*bmap)(struct address_space *, sector_t);
57959 - void (*invalidatepage) (struct page *, unsigned long);
57960 - int (*releasepage) (struct page *, gfp_t);
57961 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57962 + sector_t (* const bmap)(struct address_space *, sector_t);
57963 + void (* const invalidatepage) (struct page *, unsigned long);
57964 + int (* const releasepage) (struct page *, gfp_t);
57965 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57966 loff_t offset, unsigned long nr_segs);
57967 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57968 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57969 void **, unsigned long *);
57970 /* migrate the contents of a page to the specified target */
57971 - int (*migratepage) (struct address_space *,
57972 + int (* const migratepage) (struct address_space *,
57973 struct page *, struct page *);
57974 - int (*launder_page) (struct page *);
57975 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57976 + int (* const launder_page) (struct page *);
57977 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57978 unsigned long);
57979 - int (*error_remove_page)(struct address_space *, struct page *);
57980 + int (* const error_remove_page)(struct address_space *, struct page *);
57981 };
57982
57983 /*
57984 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57985 typedef struct files_struct *fl_owner_t;
57986
57987 struct file_lock_operations {
57988 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57989 - void (*fl_release_private)(struct file_lock *);
57990 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57991 + void (* const fl_release_private)(struct file_lock *);
57992 };
57993
57994 struct lock_manager_operations {
57995 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57996 - void (*fl_notify)(struct file_lock *); /* unblock callback */
57997 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57998 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57999 - void (*fl_release_private)(struct file_lock *);
58000 - void (*fl_break)(struct file_lock *);
58001 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
58002 - int (*fl_change)(struct file_lock **, int);
58003 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
58004 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
58005 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
58006 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58007 + void (* const fl_release_private)(struct file_lock *);
58008 + void (* const fl_break)(struct file_lock *);
58009 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
58010 + int (* const fl_change)(struct file_lock **, int);
58011 };
58012
58013 struct lock_manager {
58014 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
58015 unsigned int fi_flags; /* Flags as passed from user */
58016 unsigned int fi_extents_mapped; /* Number of mapped extents */
58017 unsigned int fi_extents_max; /* Size of fiemap_extent array */
58018 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
58019 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
58020 * array */
58021 };
58022 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
58023 @@ -1486,7 +1491,7 @@ struct block_device_operations;
58024 * can be called without the big kernel lock held in all filesystems.
58025 */
58026 struct file_operations {
58027 - struct module *owner;
58028 + struct module * const owner;
58029 loff_t (*llseek) (struct file *, loff_t, int);
58030 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58031 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
58032 @@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
58033 unsigned long, loff_t *);
58034
58035 struct super_operations {
58036 - struct inode *(*alloc_inode)(struct super_block *sb);
58037 - void (*destroy_inode)(struct inode *);
58038 + struct inode *(* const alloc_inode)(struct super_block *sb);
58039 + void (* const destroy_inode)(struct inode *);
58040
58041 - void (*dirty_inode) (struct inode *);
58042 - int (*write_inode) (struct inode *, int);
58043 - void (*drop_inode) (struct inode *);
58044 - void (*delete_inode) (struct inode *);
58045 - void (*put_super) (struct super_block *);
58046 - void (*write_super) (struct super_block *);
58047 - int (*sync_fs)(struct super_block *sb, int wait);
58048 - int (*freeze_fs) (struct super_block *);
58049 - int (*unfreeze_fs) (struct super_block *);
58050 - int (*statfs) (struct dentry *, struct kstatfs *);
58051 - int (*remount_fs) (struct super_block *, int *, char *);
58052 - void (*clear_inode) (struct inode *);
58053 - void (*umount_begin) (struct super_block *);
58054 + void (* const dirty_inode) (struct inode *);
58055 + int (* const write_inode) (struct inode *, int);
58056 + void (* const drop_inode) (struct inode *);
58057 + void (* const delete_inode) (struct inode *);
58058 + void (* const put_super) (struct super_block *);
58059 + void (* const write_super) (struct super_block *);
58060 + int (* const sync_fs)(struct super_block *sb, int wait);
58061 + int (* const freeze_fs) (struct super_block *);
58062 + int (* const unfreeze_fs) (struct super_block *);
58063 + int (* const statfs) (struct dentry *, struct kstatfs *);
58064 + int (* const remount_fs) (struct super_block *, int *, char *);
58065 + void (* const clear_inode) (struct inode *);
58066 + void (* const umount_begin) (struct super_block *);
58067
58068 - int (*show_options)(struct seq_file *, struct vfsmount *);
58069 - int (*show_stats)(struct seq_file *, struct vfsmount *);
58070 + int (* const show_options)(struct seq_file *, struct vfsmount *);
58071 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
58072 #ifdef CONFIG_QUOTA
58073 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58074 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58075 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58076 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58077 #endif
58078 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58079 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58080 };
58081
58082 /*
58083 diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
58084 --- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58085 +++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58086 @@ -4,7 +4,7 @@
58087 #include <linux/path.h>
58088
58089 struct fs_struct {
58090 - int users;
58091 + atomic_t users;
58092 rwlock_t lock;
58093 int umask;
58094 int in_exec;
58095 diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
58096 --- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58097 +++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58098 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58099 int filter_type);
58100 extern int trace_define_common_fields(struct ftrace_event_call *call);
58101
58102 -#define is_signed_type(type) (((type)(-1)) < 0)
58103 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58104
58105 int trace_set_clr_event(const char *system, const char *event, int set);
58106
58107 diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
58108 --- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58109 +++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58110 @@ -161,7 +161,7 @@ struct gendisk {
58111
58112 struct timer_rand_state *random;
58113
58114 - atomic_t sync_io; /* RAID */
58115 + atomic_unchecked_t sync_io; /* RAID */
58116 struct work_struct async_notify;
58117 #ifdef CONFIG_BLK_DEV_INTEGRITY
58118 struct blk_integrity *integrity;
58119 diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
58120 --- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58121 +++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58122 @@ -0,0 +1,317 @@
58123 +#ifndef GR_ACL_H
58124 +#define GR_ACL_H
58125 +
58126 +#include <linux/grdefs.h>
58127 +#include <linux/resource.h>
58128 +#include <linux/capability.h>
58129 +#include <linux/dcache.h>
58130 +#include <asm/resource.h>
58131 +
58132 +/* Major status information */
58133 +
58134 +#define GR_VERSION "grsecurity 2.2.2"
58135 +#define GRSECURITY_VERSION 0x2202
58136 +
58137 +enum {
58138 + GR_SHUTDOWN = 0,
58139 + GR_ENABLE = 1,
58140 + GR_SPROLE = 2,
58141 + GR_RELOAD = 3,
58142 + GR_SEGVMOD = 4,
58143 + GR_STATUS = 5,
58144 + GR_UNSPROLE = 6,
58145 + GR_PASSSET = 7,
58146 + GR_SPROLEPAM = 8,
58147 +};
58148 +
58149 +/* Password setup definitions
58150 + * kernel/grhash.c */
58151 +enum {
58152 + GR_PW_LEN = 128,
58153 + GR_SALT_LEN = 16,
58154 + GR_SHA_LEN = 32,
58155 +};
58156 +
58157 +enum {
58158 + GR_SPROLE_LEN = 64,
58159 +};
58160 +
58161 +enum {
58162 + GR_NO_GLOB = 0,
58163 + GR_REG_GLOB,
58164 + GR_CREATE_GLOB
58165 +};
58166 +
58167 +#define GR_NLIMITS 32
58168 +
58169 +/* Begin Data Structures */
58170 +
58171 +struct sprole_pw {
58172 + unsigned char *rolename;
58173 + unsigned char salt[GR_SALT_LEN];
58174 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58175 +};
58176 +
58177 +struct name_entry {
58178 + __u32 key;
58179 + ino_t inode;
58180 + dev_t device;
58181 + char *name;
58182 + __u16 len;
58183 + __u8 deleted;
58184 + struct name_entry *prev;
58185 + struct name_entry *next;
58186 +};
58187 +
58188 +struct inodev_entry {
58189 + struct name_entry *nentry;
58190 + struct inodev_entry *prev;
58191 + struct inodev_entry *next;
58192 +};
58193 +
58194 +struct acl_role_db {
58195 + struct acl_role_label **r_hash;
58196 + __u32 r_size;
58197 +};
58198 +
58199 +struct inodev_db {
58200 + struct inodev_entry **i_hash;
58201 + __u32 i_size;
58202 +};
58203 +
58204 +struct name_db {
58205 + struct name_entry **n_hash;
58206 + __u32 n_size;
58207 +};
58208 +
58209 +struct crash_uid {
58210 + uid_t uid;
58211 + unsigned long expires;
58212 +};
58213 +
58214 +struct gr_hash_struct {
58215 + void **table;
58216 + void **nametable;
58217 + void *first;
58218 + __u32 table_size;
58219 + __u32 used_size;
58220 + int type;
58221 +};
58222 +
58223 +/* Userspace Grsecurity ACL data structures */
58224 +
58225 +struct acl_subject_label {
58226 + char *filename;
58227 + ino_t inode;
58228 + dev_t device;
58229 + __u32 mode;
58230 + kernel_cap_t cap_mask;
58231 + kernel_cap_t cap_lower;
58232 + kernel_cap_t cap_invert_audit;
58233 +
58234 + struct rlimit res[GR_NLIMITS];
58235 + __u32 resmask;
58236 +
58237 + __u8 user_trans_type;
58238 + __u8 group_trans_type;
58239 + uid_t *user_transitions;
58240 + gid_t *group_transitions;
58241 + __u16 user_trans_num;
58242 + __u16 group_trans_num;
58243 +
58244 + __u32 sock_families[2];
58245 + __u32 ip_proto[8];
58246 + __u32 ip_type;
58247 + struct acl_ip_label **ips;
58248 + __u32 ip_num;
58249 + __u32 inaddr_any_override;
58250 +
58251 + __u32 crashes;
58252 + unsigned long expires;
58253 +
58254 + struct acl_subject_label *parent_subject;
58255 + struct gr_hash_struct *hash;
58256 + struct acl_subject_label *prev;
58257 + struct acl_subject_label *next;
58258 +
58259 + struct acl_object_label **obj_hash;
58260 + __u32 obj_hash_size;
58261 + __u16 pax_flags;
58262 +};
58263 +
58264 +struct role_allowed_ip {
58265 + __u32 addr;
58266 + __u32 netmask;
58267 +
58268 + struct role_allowed_ip *prev;
58269 + struct role_allowed_ip *next;
58270 +};
58271 +
58272 +struct role_transition {
58273 + char *rolename;
58274 +
58275 + struct role_transition *prev;
58276 + struct role_transition *next;
58277 +};
58278 +
58279 +struct acl_role_label {
58280 + char *rolename;
58281 + uid_t uidgid;
58282 + __u16 roletype;
58283 +
58284 + __u16 auth_attempts;
58285 + unsigned long expires;
58286 +
58287 + struct acl_subject_label *root_label;
58288 + struct gr_hash_struct *hash;
58289 +
58290 + struct acl_role_label *prev;
58291 + struct acl_role_label *next;
58292 +
58293 + struct role_transition *transitions;
58294 + struct role_allowed_ip *allowed_ips;
58295 + uid_t *domain_children;
58296 + __u16 domain_child_num;
58297 +
58298 + struct acl_subject_label **subj_hash;
58299 + __u32 subj_hash_size;
58300 +};
58301 +
58302 +struct user_acl_role_db {
58303 + struct acl_role_label **r_table;
58304 + __u32 num_pointers; /* Number of allocations to track */
58305 + __u32 num_roles; /* Number of roles */
58306 + __u32 num_domain_children; /* Number of domain children */
58307 + __u32 num_subjects; /* Number of subjects */
58308 + __u32 num_objects; /* Number of objects */
58309 +};
58310 +
58311 +struct acl_object_label {
58312 + char *filename;
58313 + ino_t inode;
58314 + dev_t device;
58315 + __u32 mode;
58316 +
58317 + struct acl_subject_label *nested;
58318 + struct acl_object_label *globbed;
58319 +
58320 + /* next two structures not used */
58321 +
58322 + struct acl_object_label *prev;
58323 + struct acl_object_label *next;
58324 +};
58325 +
58326 +struct acl_ip_label {
58327 + char *iface;
58328 + __u32 addr;
58329 + __u32 netmask;
58330 + __u16 low, high;
58331 + __u8 mode;
58332 + __u32 type;
58333 + __u32 proto[8];
58334 +
58335 + /* next two structures not used */
58336 +
58337 + struct acl_ip_label *prev;
58338 + struct acl_ip_label *next;
58339 +};
58340 +
58341 +struct gr_arg {
58342 + struct user_acl_role_db role_db;
58343 + unsigned char pw[GR_PW_LEN];
58344 + unsigned char salt[GR_SALT_LEN];
58345 + unsigned char sum[GR_SHA_LEN];
58346 + unsigned char sp_role[GR_SPROLE_LEN];
58347 + struct sprole_pw *sprole_pws;
58348 + dev_t segv_device;
58349 + ino_t segv_inode;
58350 + uid_t segv_uid;
58351 + __u16 num_sprole_pws;
58352 + __u16 mode;
58353 +};
58354 +
58355 +struct gr_arg_wrapper {
58356 + struct gr_arg *arg;
58357 + __u32 version;
58358 + __u32 size;
58359 +};
58360 +
58361 +struct subject_map {
58362 + struct acl_subject_label *user;
58363 + struct acl_subject_label *kernel;
58364 + struct subject_map *prev;
58365 + struct subject_map *next;
58366 +};
58367 +
58368 +struct acl_subj_map_db {
58369 + struct subject_map **s_hash;
58370 + __u32 s_size;
58371 +};
58372 +
58373 +/* End Data Structures Section */
58374 +
58375 +/* Hash functions generated by empirical testing by Brad Spengler.
58376 + Makes good use of the low bits of the inode. Generally 0-1 loop
58377 + iterations for a successful match, 0-3 for an unsuccessful match.
58378 + Shift/add algorithm with modulus of table size and an XOR. */
58379 +
58380 +static __inline__ unsigned int
58381 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58382 +{
58383 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58384 +}
58385 +
58386 +static __inline__ unsigned int
58387 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58388 +{
58389 + return ((const unsigned long)userp % sz);
58390 +}
58391 +
58392 +static __inline__ unsigned int
58393 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58394 +{
58395 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58396 +}
58397 +
58398 +static __inline__ unsigned int
58399 +nhash(const char *name, const __u16 len, const unsigned int sz)
58400 +{
58401 + return full_name_hash((const unsigned char *)name, len) % sz;
58402 +}
58403 +
58404 +#define FOR_EACH_ROLE_START(role) \
58405 + role = role_list; \
58406 + while (role) {
58407 +
58408 +#define FOR_EACH_ROLE_END(role) \
58409 + role = role->prev; \
58410 + }
58411 +
58412 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58413 + subj = NULL; \
58414 + iter = 0; \
58415 + while (iter < role->subj_hash_size) { \
58416 + if (subj == NULL) \
58417 + subj = role->subj_hash[iter]; \
58418 + if (subj == NULL) { \
58419 + iter++; \
58420 + continue; \
58421 + }
58422 +
58423 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58424 + subj = subj->next; \
58425 + if (subj == NULL) \
58426 + iter++; \
58427 + }
58428 +
58429 +
58430 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58431 + subj = role->hash->first; \
58432 + while (subj != NULL) {
58433 +
58434 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58435 + subj = subj->next; \
58436 + }
58437 +
58438 +#endif
58439 +
58440 diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58441 --- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58442 +++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58443 @@ -0,0 +1,9 @@
58444 +#ifndef __GRALLOC_H
58445 +#define __GRALLOC_H
58446 +
58447 +void acl_free_all(void);
58448 +int acl_alloc_stack_init(unsigned long size);
58449 +void *acl_alloc(unsigned long len);
58450 +void *acl_alloc_num(unsigned long num, unsigned long len);
58451 +
58452 +#endif
58453 diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58454 --- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58455 +++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58456 @@ -0,0 +1,140 @@
58457 +#ifndef GRDEFS_H
58458 +#define GRDEFS_H
58459 +
58460 +/* Begin grsecurity status declarations */
58461 +
58462 +enum {
58463 + GR_READY = 0x01,
58464 + GR_STATUS_INIT = 0x00 // disabled state
58465 +};
58466 +
58467 +/* Begin ACL declarations */
58468 +
58469 +/* Role flags */
58470 +
58471 +enum {
58472 + GR_ROLE_USER = 0x0001,
58473 + GR_ROLE_GROUP = 0x0002,
58474 + GR_ROLE_DEFAULT = 0x0004,
58475 + GR_ROLE_SPECIAL = 0x0008,
58476 + GR_ROLE_AUTH = 0x0010,
58477 + GR_ROLE_NOPW = 0x0020,
58478 + GR_ROLE_GOD = 0x0040,
58479 + GR_ROLE_LEARN = 0x0080,
58480 + GR_ROLE_TPE = 0x0100,
58481 + GR_ROLE_DOMAIN = 0x0200,
58482 + GR_ROLE_PAM = 0x0400,
58483 + GR_ROLE_PERSIST = 0x0800
58484 +};
58485 +
58486 +/* ACL Subject and Object mode flags */
58487 +enum {
58488 + GR_DELETED = 0x80000000
58489 +};
58490 +
58491 +/* ACL Object-only mode flags */
58492 +enum {
58493 + GR_READ = 0x00000001,
58494 + GR_APPEND = 0x00000002,
58495 + GR_WRITE = 0x00000004,
58496 + GR_EXEC = 0x00000008,
58497 + GR_FIND = 0x00000010,
58498 + GR_INHERIT = 0x00000020,
58499 + GR_SETID = 0x00000040,
58500 + GR_CREATE = 0x00000080,
58501 + GR_DELETE = 0x00000100,
58502 + GR_LINK = 0x00000200,
58503 + GR_AUDIT_READ = 0x00000400,
58504 + GR_AUDIT_APPEND = 0x00000800,
58505 + GR_AUDIT_WRITE = 0x00001000,
58506 + GR_AUDIT_EXEC = 0x00002000,
58507 + GR_AUDIT_FIND = 0x00004000,
58508 + GR_AUDIT_INHERIT= 0x00008000,
58509 + GR_AUDIT_SETID = 0x00010000,
58510 + GR_AUDIT_CREATE = 0x00020000,
58511 + GR_AUDIT_DELETE = 0x00040000,
58512 + GR_AUDIT_LINK = 0x00080000,
58513 + GR_PTRACERD = 0x00100000,
58514 + GR_NOPTRACE = 0x00200000,
58515 + GR_SUPPRESS = 0x00400000,
58516 + GR_NOLEARN = 0x00800000,
58517 + GR_INIT_TRANSFER= 0x01000000
58518 +};
58519 +
58520 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58521 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58522 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58523 +
58524 +/* ACL subject-only mode flags */
58525 +enum {
58526 + GR_KILL = 0x00000001,
58527 + GR_VIEW = 0x00000002,
58528 + GR_PROTECTED = 0x00000004,
58529 + GR_LEARN = 0x00000008,
58530 + GR_OVERRIDE = 0x00000010,
58531 + /* just a placeholder, this mode is only used in userspace */
58532 + GR_DUMMY = 0x00000020,
58533 + GR_PROTSHM = 0x00000040,
58534 + GR_KILLPROC = 0x00000080,
58535 + GR_KILLIPPROC = 0x00000100,
58536 + /* just a placeholder, this mode is only used in userspace */
58537 + GR_NOTROJAN = 0x00000200,
58538 + GR_PROTPROCFD = 0x00000400,
58539 + GR_PROCACCT = 0x00000800,
58540 + GR_RELAXPTRACE = 0x00001000,
58541 + GR_NESTED = 0x00002000,
58542 + GR_INHERITLEARN = 0x00004000,
58543 + GR_PROCFIND = 0x00008000,
58544 + GR_POVERRIDE = 0x00010000,
58545 + GR_KERNELAUTH = 0x00020000,
58546 + GR_ATSECURE = 0x00040000,
58547 + GR_SHMEXEC = 0x00080000
58548 +};
58549 +
58550 +enum {
58551 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58552 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58553 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58554 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58555 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58556 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58557 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58558 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58559 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58560 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58561 +};
58562 +
58563 +enum {
58564 + GR_ID_USER = 0x01,
58565 + GR_ID_GROUP = 0x02,
58566 +};
58567 +
58568 +enum {
58569 + GR_ID_ALLOW = 0x01,
58570 + GR_ID_DENY = 0x02,
58571 +};
58572 +
58573 +#define GR_CRASH_RES 31
58574 +#define GR_UIDTABLE_MAX 500
58575 +
58576 +/* begin resource learning section */
58577 +enum {
58578 + GR_RLIM_CPU_BUMP = 60,
58579 + GR_RLIM_FSIZE_BUMP = 50000,
58580 + GR_RLIM_DATA_BUMP = 10000,
58581 + GR_RLIM_STACK_BUMP = 1000,
58582 + GR_RLIM_CORE_BUMP = 10000,
58583 + GR_RLIM_RSS_BUMP = 500000,
58584 + GR_RLIM_NPROC_BUMP = 1,
58585 + GR_RLIM_NOFILE_BUMP = 5,
58586 + GR_RLIM_MEMLOCK_BUMP = 50000,
58587 + GR_RLIM_AS_BUMP = 500000,
58588 + GR_RLIM_LOCKS_BUMP = 2,
58589 + GR_RLIM_SIGPENDING_BUMP = 5,
58590 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58591 + GR_RLIM_NICE_BUMP = 1,
58592 + GR_RLIM_RTPRIO_BUMP = 1,
58593 + GR_RLIM_RTTIME_BUMP = 1000000
58594 +};
58595 +
58596 +#endif
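
A small standalone check (not from the patch; to_gr_audit() itself is only declared in grinternal.h below) of how the object-mode and audit-mode constants above line up: each audit flag is the corresponding base flag shifted left by 10.

#include <assert.h>

enum {                                   /* subset of the values defined above */
        GR_READ_        = 0x00000001,
        GR_LINK_        = 0x00000200,
        GR_AUDIT_READ_  = 0x00000400,
        GR_AUDIT_LINK_  = 0x00080000,
};

int main(void)
{
        assert((GR_READ_ << 10) == GR_AUDIT_READ_);
        assert((GR_LINK_ << 10) == GR_AUDIT_LINK_);
        return 0;
}
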
58597 diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58598 --- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58599 +++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58600 @@ -0,0 +1,217 @@
58601 +#ifndef __GRINTERNAL_H
58602 +#define __GRINTERNAL_H
58603 +
58604 +#ifdef CONFIG_GRKERNSEC
58605 +
58606 +#include <linux/fs.h>
58607 +#include <linux/mnt_namespace.h>
58608 +#include <linux/nsproxy.h>
58609 +#include <linux/gracl.h>
58610 +#include <linux/grdefs.h>
58611 +#include <linux/grmsg.h>
58612 +
58613 +void gr_add_learn_entry(const char *fmt, ...)
58614 + __attribute__ ((format (printf, 1, 2)));
58615 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58616 + const struct vfsmount *mnt);
58617 +__u32 gr_check_create(const struct dentry *new_dentry,
58618 + const struct dentry *parent,
58619 + const struct vfsmount *mnt, const __u32 mode);
58620 +int gr_check_protected_task(const struct task_struct *task);
58621 +__u32 to_gr_audit(const __u32 reqmode);
58622 +int gr_set_acls(const int type);
58623 +int gr_apply_subject_to_task(struct task_struct *task);
58624 +int gr_acl_is_enabled(void);
58625 +char gr_roletype_to_char(void);
58626 +
58627 +void gr_handle_alertkill(struct task_struct *task);
58628 +char *gr_to_filename(const struct dentry *dentry,
58629 + const struct vfsmount *mnt);
58630 +char *gr_to_filename1(const struct dentry *dentry,
58631 + const struct vfsmount *mnt);
58632 +char *gr_to_filename2(const struct dentry *dentry,
58633 + const struct vfsmount *mnt);
58634 +char *gr_to_filename3(const struct dentry *dentry,
58635 + const struct vfsmount *mnt);
58636 +
58637 +extern int grsec_enable_harden_ptrace;
58638 +extern int grsec_enable_link;
58639 +extern int grsec_enable_fifo;
58640 +extern int grsec_enable_shm;
58641 +extern int grsec_enable_execlog;
58642 +extern int grsec_enable_signal;
58643 +extern int grsec_enable_audit_ptrace;
58644 +extern int grsec_enable_forkfail;
58645 +extern int grsec_enable_time;
58646 +extern int grsec_enable_rofs;
58647 +extern int grsec_enable_chroot_shmat;
58648 +extern int grsec_enable_chroot_mount;
58649 +extern int grsec_enable_chroot_double;
58650 +extern int grsec_enable_chroot_pivot;
58651 +extern int grsec_enable_chroot_chdir;
58652 +extern int grsec_enable_chroot_chmod;
58653 +extern int grsec_enable_chroot_mknod;
58654 +extern int grsec_enable_chroot_fchdir;
58655 +extern int grsec_enable_chroot_nice;
58656 +extern int grsec_enable_chroot_execlog;
58657 +extern int grsec_enable_chroot_caps;
58658 +extern int grsec_enable_chroot_sysctl;
58659 +extern int grsec_enable_chroot_unix;
58660 +extern int grsec_enable_tpe;
58661 +extern int grsec_tpe_gid;
58662 +extern int grsec_enable_tpe_all;
58663 +extern int grsec_enable_tpe_invert;
58664 +extern int grsec_enable_socket_all;
58665 +extern int grsec_socket_all_gid;
58666 +extern int grsec_enable_socket_client;
58667 +extern int grsec_socket_client_gid;
58668 +extern int grsec_enable_socket_server;
58669 +extern int grsec_socket_server_gid;
58670 +extern int grsec_audit_gid;
58671 +extern int grsec_enable_group;
58672 +extern int grsec_enable_audit_textrel;
58673 +extern int grsec_enable_log_rwxmaps;
58674 +extern int grsec_enable_mount;
58675 +extern int grsec_enable_chdir;
58676 +extern int grsec_resource_logging;
58677 +extern int grsec_enable_blackhole;
58678 +extern int grsec_lastack_retries;
58679 +extern int grsec_enable_brute;
58680 +extern int grsec_lock;
58681 +
58682 +extern spinlock_t grsec_alert_lock;
58683 +extern unsigned long grsec_alert_wtime;
58684 +extern unsigned long grsec_alert_fyet;
58685 +
58686 +extern spinlock_t grsec_audit_lock;
58687 +
58688 +extern rwlock_t grsec_exec_file_lock;
58689 +
58690 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58691 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58692 + (tsk)->exec_file->f_vfsmnt) : "/")
58693 +
58694 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58695 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58696 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58697 +
58698 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58699 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58700 + (tsk)->exec_file->f_vfsmnt) : "/")
58701 +
58702 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58703 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58704 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58705 +
58706 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58707 +
58708 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58709 +
58710 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58711 + (task)->pid, (cred)->uid, \
58712 + (cred)->euid, (cred)->gid, (cred)->egid, \
58713 + gr_parent_task_fullpath(task), \
58714 + (task)->real_parent->comm, (task)->real_parent->pid, \
58715 + (pcred)->uid, (pcred)->euid, \
58716 + (pcred)->gid, (pcred)->egid
58717 +
58718 +#define GR_CHROOT_CAPS {{ \
58719 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58720 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58721 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58722 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58723 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58724 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58725 +
58726 +#define security_learn(normal_msg,args...) \
58727 +({ \
58728 + read_lock(&grsec_exec_file_lock); \
58729 + gr_add_learn_entry(normal_msg "\n", ## args); \
58730 + read_unlock(&grsec_exec_file_lock); \
58731 +})
58732 +
58733 +enum {
58734 + GR_DO_AUDIT,
58735 + GR_DONT_AUDIT,
58736 + GR_DONT_AUDIT_GOOD
58737 +};
58738 +
58739 +enum {
58740 + GR_TTYSNIFF,
58741 + GR_RBAC,
58742 + GR_RBAC_STR,
58743 + GR_STR_RBAC,
58744 + GR_RBAC_MODE2,
58745 + GR_RBAC_MODE3,
58746 + GR_FILENAME,
58747 + GR_SYSCTL_HIDDEN,
58748 + GR_NOARGS,
58749 + GR_ONE_INT,
58750 + GR_ONE_INT_TWO_STR,
58751 + GR_ONE_STR,
58752 + GR_STR_INT,
58753 + GR_TWO_STR_INT,
58754 + GR_TWO_INT,
58755 + GR_TWO_U64,
58756 + GR_THREE_INT,
58757 + GR_FIVE_INT_TWO_STR,
58758 + GR_TWO_STR,
58759 + GR_THREE_STR,
58760 + GR_FOUR_STR,
58761 + GR_STR_FILENAME,
58762 + GR_FILENAME_STR,
58763 + GR_FILENAME_TWO_INT,
58764 + GR_FILENAME_TWO_INT_STR,
58765 + GR_TEXTREL,
58766 + GR_PTRACE,
58767 + GR_RESOURCE,
58768 + GR_CAP,
58769 + GR_SIG,
58770 + GR_SIG2,
58771 + GR_CRASH1,
58772 + GR_CRASH2,
58773 + GR_PSACCT,
58774 + GR_RWXMAP
58775 +};
58776 +
58777 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58778 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58779 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58780 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58781 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58782 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58783 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58784 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58785 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58786 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58787 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58788 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58789 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58790 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58791 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58792 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58793 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58794 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58795 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58796 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58797 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58798 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58799 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58800 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58801 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58802 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58803 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58804 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58805 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58806 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58807 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58808 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58809 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58810 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58811 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58812 +
58813 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58814 +
58815 +#endif
58816 +
58817 +#endif
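
The gr_log_* wrappers above are thin macros that prepend an argument-type tag and forward everything to the single variadic gr_log_varargs(). A userspace model of that dispatch (hypothetical trailing-underscore names, only two tags shown, demo format strings are made up):

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_STR_ = 1, GR_STR_INT_ = 2 };   /* stand-ins for two of the tags above */

static void gr_log_varargs_(int audit, const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        printf("audit=%d tag=%d: ", audit, argtypes);
        if (argtypes == GR_ONE_STR_) {
                printf(msg, va_arg(ap, const char *));
        } else if (argtypes == GR_STR_INT_) {
                const char *s = va_arg(ap, const char *);
                printf(msg, s, va_arg(ap, int));
        }
        printf("\n");
        va_end(ap);
}

#define gr_log_str_(audit, msg, str)          gr_log_varargs_(audit, msg, GR_ONE_STR_, str)
#define gr_log_str_int_(audit, msg, str, num) gr_log_varargs_(audit, msg, GR_STR_INT_, str, num)

int main(void)
{
        gr_log_str_(0, "mount of %.256s by ", "/dev/sda1");
        gr_log_str_int_(0, "chdir to %.980s (pid %d) by ", "/tmp", 4321);
        return 0;
}
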
58818 diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58819 --- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58820 +++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58821 @@ -0,0 +1,108 @@
58822 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58823 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58824 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58825 +#define GR_STOPMOD_MSG "denied modification of module state by "
58826 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58827 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58828 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58829 +#define GR_IOPL_MSG "denied use of iopl() by "
58830 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58831 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58832 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58833 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58834 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58835 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58836 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58837 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58838 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58839 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58840 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58841 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58842 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58843 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58844 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58845 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58846 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58847 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58848 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58849 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58850 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58851 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58852 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58853 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58854 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58855 +#define GR_NPROC_MSG "denied overstep of process limit by "
58856 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58857 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58858 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58859 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58860 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58861 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58862 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58863 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58864 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58865 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58866 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58867 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58868 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58869 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58870 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58871 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58872 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58873 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58874 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58875 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
58876 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58877 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58878 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58879 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58880 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58881 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58882 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58883 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58884 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58885 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58886 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58887 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58888 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58889 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58890 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58891 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58892 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58893 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58894 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58895 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58896 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58897 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58898 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58899 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58900 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58901 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58902 +#define GR_TIME_MSG "time set by "
58903 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58904 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58905 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58906 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58907 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58908 +#define GR_BIND_MSG "denied bind() by "
58909 +#define GR_CONNECT_MSG "denied connect() by "
58910 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58911 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58912 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58913 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58914 +#define GR_CAP_ACL_MSG "use of %s denied for "
58915 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58916 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58917 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58918 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58919 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58920 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58921 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58922 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58923 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58924 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58925 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58926 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58927 +#define GR_VM86_MSG "denied use of vm86 by "
58928 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58929 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
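
DEFAULTSECMSG above consumes exactly the fourteen values that the DEFAULTSECARGS macro in grinternal.h expands to. A userspace check with dummy values (the paths, names and IDs are made up):

#include <stdio.h>

#define DEFAULTSECMSG_ "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"

int main(void)
{
        printf(DEFAULTSECMSG_ "\n",
               "/usr/bin/foo", "foo", 1234, 1000u, 1000u, 1000u, 1000u,
               "/bin/bash", "bash", 1000, 1000u, 1000u, 1000u, 1000u);
        return 0;
}
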
58930 diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58931 --- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58932 +++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58933 @@ -0,0 +1,217 @@
58934 +#ifndef GR_SECURITY_H
58935 +#define GR_SECURITY_H
58936 +#include <linux/fs.h>
58937 +#include <linux/fs_struct.h>
58938 +#include <linux/binfmts.h>
58939 +#include <linux/gracl.h>
58940 +#include <linux/compat.h>
58941 +
58942 +/* notify of brain-dead configs */
58943 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58944 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58945 +#endif
58946 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58947 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58948 +#endif
58949 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58950 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58951 +#endif
58952 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58953 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58954 +#endif
58955 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58956 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58957 +#endif
58958 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58959 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
58960 +#endif
58961 +
58962 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58963 +void gr_handle_brute_check(void);
58964 +void gr_handle_kernel_exploit(void);
58965 +int gr_process_user_ban(void);
58966 +
58967 +char gr_roletype_to_char(void);
58968 +
58969 +int gr_acl_enable_at_secure(void);
58970 +
58971 +int gr_check_user_change(int real, int effective, int fs);
58972 +int gr_check_group_change(int real, int effective, int fs);
58973 +
58974 +void gr_del_task_from_ip_table(struct task_struct *p);
58975 +
58976 +int gr_pid_is_chrooted(struct task_struct *p);
58977 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58978 +int gr_handle_chroot_nice(void);
58979 +int gr_handle_chroot_sysctl(const int op);
58980 +int gr_handle_chroot_setpriority(struct task_struct *p,
58981 + const int niceval);
58982 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58983 +int gr_handle_chroot_chroot(const struct dentry *dentry,
58984 + const struct vfsmount *mnt);
58985 +int gr_handle_chroot_caps(struct path *path);
58986 +void gr_handle_chroot_chdir(struct path *path);
58987 +int gr_handle_chroot_chmod(const struct dentry *dentry,
58988 + const struct vfsmount *mnt, const int mode);
58989 +int gr_handle_chroot_mknod(const struct dentry *dentry,
58990 + const struct vfsmount *mnt, const int mode);
58991 +int gr_handle_chroot_mount(const struct dentry *dentry,
58992 + const struct vfsmount *mnt,
58993 + const char *dev_name);
58994 +int gr_handle_chroot_pivot(void);
58995 +int gr_handle_chroot_unix(const pid_t pid);
58996 +
58997 +int gr_handle_rawio(const struct inode *inode);
58998 +
58999 +void gr_handle_ioperm(void);
59000 +void gr_handle_iopl(void);
59001 +
59002 +int gr_tpe_allow(const struct file *file);
59003 +
59004 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59005 +void gr_clear_chroot_entries(struct task_struct *task);
59006 +
59007 +void gr_log_forkfail(const int retval);
59008 +void gr_log_timechange(void);
59009 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59010 +void gr_log_chdir(const struct dentry *dentry,
59011 + const struct vfsmount *mnt);
59012 +void gr_log_chroot_exec(const struct dentry *dentry,
59013 + const struct vfsmount *mnt);
59014 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
59015 +#ifdef CONFIG_COMPAT
59016 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
59017 +#endif
59018 +void gr_log_remount(const char *devname, const int retval);
59019 +void gr_log_unmount(const char *devname, const int retval);
59020 +void gr_log_mount(const char *from, const char *to, const int retval);
59021 +void gr_log_textrel(struct vm_area_struct *vma);
59022 +void gr_log_rwxmmap(struct file *file);
59023 +void gr_log_rwxmprotect(struct file *file);
59024 +
59025 +int gr_handle_follow_link(const struct inode *parent,
59026 + const struct inode *inode,
59027 + const struct dentry *dentry,
59028 + const struct vfsmount *mnt);
59029 +int gr_handle_fifo(const struct dentry *dentry,
59030 + const struct vfsmount *mnt,
59031 + const struct dentry *dir, const int flag,
59032 + const int acc_mode);
59033 +int gr_handle_hardlink(const struct dentry *dentry,
59034 + const struct vfsmount *mnt,
59035 + struct inode *inode,
59036 + const int mode, const char *to);
59037 +
59038 +int gr_is_capable(const int cap);
59039 +int gr_is_capable_nolog(const int cap);
59040 +void gr_learn_resource(const struct task_struct *task, const int limit,
59041 + const unsigned long wanted, const int gt);
59042 +void gr_copy_label(struct task_struct *tsk);
59043 +void gr_handle_crash(struct task_struct *task, const int sig);
59044 +int gr_handle_signal(const struct task_struct *p, const int sig);
59045 +int gr_check_crash_uid(const uid_t uid);
59046 +int gr_check_protected_task(const struct task_struct *task);
59047 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59048 +int gr_acl_handle_mmap(const struct file *file,
59049 + const unsigned long prot);
59050 +int gr_acl_handle_mprotect(const struct file *file,
59051 + const unsigned long prot);
59052 +int gr_check_hidden_task(const struct task_struct *tsk);
59053 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59054 + const struct vfsmount *mnt);
59055 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59056 + const struct vfsmount *mnt);
59057 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59058 + const struct vfsmount *mnt, const int fmode);
59059 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59060 + const struct vfsmount *mnt, mode_t mode);
59061 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59062 + const struct vfsmount *mnt, mode_t mode);
59063 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59064 + const struct vfsmount *mnt);
59065 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59066 + const struct vfsmount *mnt);
59067 +int gr_handle_ptrace(struct task_struct *task, const long request);
59068 +int gr_handle_proc_ptrace(struct task_struct *task);
59069 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59070 + const struct vfsmount *mnt);
59071 +int gr_check_crash_exec(const struct file *filp);
59072 +int gr_acl_is_enabled(void);
59073 +void gr_set_kernel_label(struct task_struct *task);
59074 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59075 + const gid_t gid);
59076 +int gr_set_proc_label(const struct dentry *dentry,
59077 + const struct vfsmount *mnt,
59078 + const int unsafe_share);
59079 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59080 + const struct vfsmount *mnt);
59081 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59082 + const struct vfsmount *mnt, const int fmode);
59083 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59084 + const struct dentry *p_dentry,
59085 + const struct vfsmount *p_mnt, const int fmode,
59086 + const int imode);
59087 +void gr_handle_create(const struct dentry *dentry,
59088 + const struct vfsmount *mnt);
59089 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59090 + const struct dentry *parent_dentry,
59091 + const struct vfsmount *parent_mnt,
59092 + const int mode);
59093 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59094 + const struct dentry *parent_dentry,
59095 + const struct vfsmount *parent_mnt);
59096 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59097 + const struct vfsmount *mnt);
59098 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59099 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59100 + const struct vfsmount *mnt);
59101 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59102 + const struct dentry *parent_dentry,
59103 + const struct vfsmount *parent_mnt,
59104 + const char *from);
59105 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59106 + const struct dentry *parent_dentry,
59107 + const struct vfsmount *parent_mnt,
59108 + const struct dentry *old_dentry,
59109 + const struct vfsmount *old_mnt, const char *to);
59110 +int gr_acl_handle_rename(struct dentry *new_dentry,
59111 + struct dentry *parent_dentry,
59112 + const struct vfsmount *parent_mnt,
59113 + struct dentry *old_dentry,
59114 + struct inode *old_parent_inode,
59115 + struct vfsmount *old_mnt, const char *newname);
59116 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59117 + struct dentry *old_dentry,
59118 + struct dentry *new_dentry,
59119 + struct vfsmount *mnt, const __u8 replace);
59120 +__u32 gr_check_link(const struct dentry *new_dentry,
59121 + const struct dentry *parent_dentry,
59122 + const struct vfsmount *parent_mnt,
59123 + const struct dentry *old_dentry,
59124 + const struct vfsmount *old_mnt);
59125 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59126 + const unsigned int namelen, const ino_t ino);
59127 +
59128 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59129 + const struct vfsmount *mnt);
59130 +void gr_acl_handle_exit(void);
59131 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59132 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59133 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59134 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59135 +void gr_audit_ptrace(struct task_struct *task);
59136 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59137 +
59138 +#ifdef CONFIG_GRKERNSEC
59139 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59140 +void gr_handle_vm86(void);
59141 +void gr_handle_mem_readwrite(u64 from, u64 to);
59142 +
59143 +extern int grsec_enable_dmesg;
59144 +extern int grsec_disable_privio;
59145 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59146 +extern int grsec_enable_chroot_findtask;
59147 +#endif
59148 +#endif
59149 +
59150 +#endif
59151 diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
59152 --- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59153 +++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59154 @@ -3,7 +3,7 @@
59155 struct cpustate_t {
59156 spinlock_t lock;
59157 int excl;
59158 - int open_count;
59159 + atomic_t open_count;
59160 unsigned char cached_val;
59161 int inited;
59162 unsigned long *set_addr;
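
The hunk above turns the plain int open_count into an atomic_t, presumably so the open/release paths can update the counter with atomic operations instead of bare increments. A userspace stand-in for the pattern using C11 atomics (the kernel side would use atomic_inc()/atomic_dec()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;            /* was: int open_count; */

static void cpustate_open_(void)  { atomic_fetch_add(&open_count, 1); }   /* was: open_count++ */
static void cpustate_close_(void) { atomic_fetch_sub(&open_count, 1); }   /* was: open_count-- */

int main(void)
{
        cpustate_open_();
        cpustate_close_();
        printf("open_count=%d\n", atomic_load(&open_count));
        return 0;
}
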
59163 diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
59164 --- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59165 +++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59166 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59167 kunmap_atomic(kaddr, KM_USER0);
59168 }
59169
59170 +static inline void sanitize_highpage(struct page *page)
59171 +{
59172 + void *kaddr;
59173 + unsigned long flags;
59174 +
59175 + local_irq_save(flags);
59176 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59177 + clear_page(kaddr);
59178 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59179 + local_irq_restore(flags);
59180 +}
59181 +
59182 static inline void zero_user_segments(struct page *page,
59183 unsigned start1, unsigned end1,
59184 unsigned start2, unsigned end2)
59185 diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
59186 --- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59187 +++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59188 @@ -564,7 +564,7 @@ struct i2o_controller {
59189 struct i2o_device *exec; /* Executive */
59190 #if BITS_PER_LONG == 64
59191 spinlock_t context_list_lock; /* lock for context_list */
59192 - atomic_t context_list_counter; /* needed for unique contexts */
59193 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59194 struct list_head context_list; /* list of context id's
59195 and pointers */
59196 #endif
59197 diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
59198 --- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59199 +++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59200 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
59201 #define INIT_IDS
59202 #endif
59203
59204 +#ifdef CONFIG_X86
59205 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59206 +#else
59207 +#define INIT_TASK_THREAD_INFO
59208 +#endif
59209 +
59210 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59211 /*
59212 * Because of the reduced scope of CAP_SETPCAP when filesystem
59213 @@ -156,6 +162,7 @@ extern struct cred init_cred;
59214 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59215 .comm = "swapper", \
59216 .thread = INIT_THREAD, \
59217 + INIT_TASK_THREAD_INFO \
59218 .fs = &init_fs, \
59219 .files = &init_files, \
59220 .signal = &init_signals, \
59221 diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
59222 --- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59223 +++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59224 @@ -296,7 +296,7 @@ struct iommu_flush {
59225 u8 fm, u64 type);
59226 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59227 unsigned int size_order, u64 type);
59228 -};
59229 +} __no_const;
59230
59231 enum {
59232 SR_DMAR_FECTL_REG,
59233 diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
59234 --- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59235 +++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59236 @@ -363,7 +363,7 @@ enum
59237 /* map softirq index to softirq name. update 'softirq_to_name' in
59238 * kernel/softirq.c when adding a new softirq.
59239 */
59240 -extern char *softirq_to_name[NR_SOFTIRQS];
59241 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59242
59243 /* softirq mask and active fields moved to irq_cpustat_t in
59244 * asm/hardirq.h to get better cache usage. KAO
59245 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59246
59247 struct softirq_action
59248 {
59249 - void (*action)(struct softirq_action *);
59250 + void (*action)(void);
59251 };
59252
59253 asmlinkage void do_softirq(void);
59254 asmlinkage void __do_softirq(void);
59255 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59256 +extern void open_softirq(int nr, void (*action)(void));
59257 extern void softirq_init(void);
59258 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59259 extern void raise_softirq_irqoff(unsigned int nr);
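
The interrupt.h hunk above drops the struct softirq_action argument from softirq handlers (and from open_softirq()). A standalone sketch of what that signature change looks like for a handler, modeled with stand-in structs rather than the real kernel types:

struct softirq_action_old { void (*action)(struct softirq_action_old *); };
struct softirq_action_new { void (*action)(void); };

static void old_handler(struct softirq_action_old *a)
{
        (void)a;                         /* handlers typically ignore this parameter */
}

static void new_handler(void)
{
        /* same body, no unused parameter to pass around */
}

int main(void)
{
        struct softirq_action_old o = { old_handler };
        struct softirq_action_new n = { new_handler };

        o.action(&o);
        n.action();
        return 0;
}
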
59260 diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
59261 --- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59262 +++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59263 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59264 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59265 bool boot)
59266 {
59267 +#ifdef CONFIG_CPUMASK_OFFSTACK
59268 gfp_t gfp = GFP_ATOMIC;
59269
59270 if (boot)
59271 gfp = GFP_NOWAIT;
59272
59273 -#ifdef CONFIG_CPUMASK_OFFSTACK
59274 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59275 return false;
59276
59277 diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
59278 --- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59279 +++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59280 @@ -15,7 +15,8 @@
59281
59282 struct module;
59283
59284 -#ifdef CONFIG_KALLSYMS
59285 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59286 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59287 /* Lookup the address for a symbol. Returns 0 if not found. */
59288 unsigned long kallsyms_lookup_name(const char *name);
59289
59290 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59291 /* Stupid that this does nothing, but I didn't create this mess. */
59292 #define __print_symbol(fmt, addr)
59293 #endif /*CONFIG_KALLSYMS*/
59294 +#else /* when included by kallsyms.c, vsnprintf.c, or
59295 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59296 +extern void __print_symbol(const char *fmt, unsigned long address);
59297 +extern int sprint_symbol(char *buffer, unsigned long address);
59298 +const char *kallsyms_lookup(unsigned long addr,
59299 + unsigned long *symbolsize,
59300 + unsigned long *offset,
59301 + char **modname, char *namebuf);
59302 +#endif
59303
59304 /* This macro allows us to keep printk typechecking */
59305 static void __check_printsym_format(const char *fmt, ...)
59306 diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59307 --- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59308 +++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59309 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59310
59311 extern int kgdb_connected;
59312
59313 -extern atomic_t kgdb_setting_breakpoint;
59314 -extern atomic_t kgdb_cpu_doing_single_step;
59315 +extern atomic_unchecked_t kgdb_setting_breakpoint;
59316 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59317
59318 extern struct task_struct *kgdb_usethread;
59319 extern struct task_struct *kgdb_contthread;
59320 @@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59321 * hardware debug registers.
59322 */
59323 struct kgdb_arch {
59324 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59325 - unsigned long flags;
59326 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59327 + const unsigned long flags;
59328
59329 int (*set_breakpoint)(unsigned long, char *);
59330 int (*remove_breakpoint)(unsigned long, char *);
59331 @@ -251,20 +251,20 @@ struct kgdb_arch {
59332 */
59333 struct kgdb_io {
59334 const char *name;
59335 - int (*read_char) (void);
59336 - void (*write_char) (u8);
59337 - void (*flush) (void);
59338 - int (*init) (void);
59339 - void (*pre_exception) (void);
59340 - void (*post_exception) (void);
59341 + int (* const read_char) (void);
59342 + void (* const write_char) (u8);
59343 + void (* const flush) (void);
59344 + int (* const init) (void);
59345 + void (* const pre_exception) (void);
59346 + void (* const post_exception) (void);
59347 };
59348
59349 -extern struct kgdb_arch arch_kgdb_ops;
59350 +extern const struct kgdb_arch arch_kgdb_ops;
59351
59352 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59353
59354 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59355 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59356 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59357 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59358
59359 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59360 extern int kgdb_mem2hex(char *mem, char *buf, int count);
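
With the kgdb_io/kgdb_arch members constified above, an I/O driver supplies a fully initialized, read-only ops instance instead of patching fields at run time. A stand-in sketch (the struct and names are hypothetical, not the real kgdb API):

#include <stdio.h>

struct kgdb_io_ {                        /* stand-in, not the real struct kgdb_io */
        const char *name;
        int  (* const read_char)(void);
        void (* const write_char)(unsigned char);
};

static int  dummy_read(void)             { return -1; }
static void dummy_write(unsigned char c) { (void)c; }

static const struct kgdb_io_ dummy_io = {   /* const: can live in .rodata, cannot be re-pointed */
        .name       = "dummy",
        .read_char  = dummy_read,
        .write_char = dummy_write,
};

int main(void)
{
        printf("%s: %d\n", dummy_io.name, dummy_io.read_char());
        return 0;
}
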
59361 diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59362 --- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59363 +++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59364 @@ -31,6 +31,8 @@
59365 * usually useless though. */
59366 extern int __request_module(bool wait, const char *name, ...) \
59367 __attribute__((format(printf, 2, 3)));
59368 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59369 + __attribute__((format(printf, 3, 4)));
59370 #define request_module(mod...) __request_module(true, mod)
59371 #define request_module_nowait(mod...) __request_module(false, mod)
59372 #define try_then_request_module(x, mod...) \
59373 diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59374 --- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59375 +++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59376 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59377
59378 struct kobj_type {
59379 void (*release)(struct kobject *kobj);
59380 - struct sysfs_ops *sysfs_ops;
59381 + const struct sysfs_ops *sysfs_ops;
59382 struct attribute **default_attrs;
59383 };
59384
59385 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
59386 };
59387
59388 struct kset_uevent_ops {
59389 - int (*filter)(struct kset *kset, struct kobject *kobj);
59390 - const char *(*name)(struct kset *kset, struct kobject *kobj);
59391 - int (*uevent)(struct kset *kset, struct kobject *kobj,
59392 + int (* const filter)(struct kset *kset, struct kobject *kobj);
59393 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
59394 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
59395 struct kobj_uevent_env *env);
59396 };
59397
59398 @@ -132,7 +132,7 @@ struct kobj_attribute {
59399 const char *buf, size_t count);
59400 };
59401
59402 -extern struct sysfs_ops kobj_sysfs_ops;
59403 +extern const struct sysfs_ops kobj_sysfs_ops;
59404
59405 /**
59406 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59407 @@ -155,14 +155,14 @@ struct kset {
59408 struct list_head list;
59409 spinlock_t list_lock;
59410 struct kobject kobj;
59411 - struct kset_uevent_ops *uevent_ops;
59412 + const struct kset_uevent_ops *uevent_ops;
59413 };
59414
59415 extern void kset_init(struct kset *kset);
59416 extern int __must_check kset_register(struct kset *kset);
59417 extern void kset_unregister(struct kset *kset);
59418 extern struct kset * __must_check kset_create_and_add(const char *name,
59419 - struct kset_uevent_ops *u,
59420 + const struct kset_uevent_ops *u,
59421 struct kobject *parent_kobj);
59422
59423 static inline struct kset *to_kset(struct kobject *kobj)
59424 diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59425 --- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59426 +++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59427 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59428 void vcpu_load(struct kvm_vcpu *vcpu);
59429 void vcpu_put(struct kvm_vcpu *vcpu);
59430
59431 -int kvm_init(void *opaque, unsigned int vcpu_size,
59432 +int kvm_init(const void *opaque, unsigned int vcpu_size,
59433 struct module *module);
59434 void kvm_exit(void);
59435
59436 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59437 struct kvm_guest_debug *dbg);
59438 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59439
59440 -int kvm_arch_init(void *opaque);
59441 +int kvm_arch_init(const void *opaque);
59442 void kvm_arch_exit(void);
59443
59444 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59445 diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59446 --- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59447 +++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59448 @@ -525,11 +525,11 @@ struct ata_ioports {
59449
59450 struct ata_host {
59451 spinlock_t lock;
59452 - struct device *dev;
59453 + struct device *dev;
59454 void __iomem * const *iomap;
59455 unsigned int n_ports;
59456 void *private_data;
59457 - struct ata_port_operations *ops;
59458 + const struct ata_port_operations *ops;
59459 unsigned long flags;
59460 #ifdef CONFIG_ATA_ACPI
59461 acpi_handle acpi_handle;
59462 @@ -710,7 +710,7 @@ struct ata_link {
59463
59464 struct ata_port {
59465 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59466 - struct ata_port_operations *ops;
59467 + const struct ata_port_operations *ops;
59468 spinlock_t *lock;
59469 /* Flags owned by the EH context. Only EH should touch these once the
59470 port is active */
59471 @@ -883,7 +883,7 @@ struct ata_port_operations {
59472 * ->inherits must be the last field and all the preceding
59473 * fields must be pointers.
59474 */
59475 - const struct ata_port_operations *inherits;
59476 + const struct ata_port_operations * const inherits;
59477 };
59478
59479 struct ata_port_info {
59480 @@ -892,7 +892,7 @@ struct ata_port_info {
59481 unsigned long pio_mask;
59482 unsigned long mwdma_mask;
59483 unsigned long udma_mask;
59484 - struct ata_port_operations *port_ops;
59485 + const struct ata_port_operations *port_ops;
59486 void *private_data;
59487 };
59488
59489 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59490 extern const unsigned long sata_deb_timing_hotplug[];
59491 extern const unsigned long sata_deb_timing_long[];
59492
59493 -extern struct ata_port_operations ata_dummy_port_ops;
59494 +extern const struct ata_port_operations ata_dummy_port_ops;
59495 extern const struct ata_port_info ata_dummy_port_info;
59496
59497 static inline const unsigned long *
59498 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59499 struct scsi_host_template *sht);
59500 extern void ata_host_detach(struct ata_host *host);
59501 extern void ata_host_init(struct ata_host *, struct device *,
59502 - unsigned long, struct ata_port_operations *);
59503 + unsigned long, const struct ata_port_operations *);
59504 extern int ata_scsi_detect(struct scsi_host_template *sht);
59505 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59506 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59507 diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59508 --- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59509 +++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59510 @@ -23,13 +23,13 @@ struct svc_rqst;
59511 * This is the set of functions for lockd->nfsd communication
59512 */
59513 struct nlmsvc_binding {
59514 - __be32 (*fopen)(struct svc_rqst *,
59515 + __be32 (* const fopen)(struct svc_rqst *,
59516 struct nfs_fh *,
59517 struct file **);
59518 - void (*fclose)(struct file *);
59519 + void (* const fclose)(struct file *);
59520 };
59521
59522 -extern struct nlmsvc_binding * nlmsvc_ops;
59523 +extern const struct nlmsvc_binding * nlmsvc_ops;
59524
59525 /*
59526 * Similar to nfs_client_initdata, but without the NFS-specific
59527 diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59528 --- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59529 +++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59530 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59531 int region);
59532 void * (*mca_transform_memory)(struct mca_device *,
59533 void *memory);
59534 -};
59535 +} __no_const;
59536
59537 struct mca_bus {
59538 u64 default_dma_mask;
59539 diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59540 --- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59541 +++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59542 @@ -108,7 +108,7 @@ struct memory_accessor {
59543 size_t count);
59544 ssize_t (*write)(struct memory_accessor *, const char *buf,
59545 off_t offset, size_t count);
59546 -};
59547 +} __no_const;
59548
59549 /*
59550 * Kernel text modification mutex, used for code patching. Users of this lock
59551 diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59552 --- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59553 +++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59554 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59555
59556 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59557 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59558 +
59559 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59560 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59561 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59562 +#else
59563 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59564 +#endif
59565 +
59566 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59567 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59568
59569 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59570 int set_page_dirty_lock(struct page *page);
59571 int clear_page_dirty_for_io(struct page *page);
59572
59573 -/* Is the vma a continuation of the stack vma above it? */
59574 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59575 -{
59576 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59577 -}
59578 -
59579 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59580 unsigned long old_addr, struct vm_area_struct *new_vma,
59581 unsigned long new_addr, unsigned long len);
59582 @@ -890,6 +891,8 @@ struct shrinker {
59583 extern void register_shrinker(struct shrinker *);
59584 extern void unregister_shrinker(struct shrinker *);
59585
59586 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
59587 +
59588 int vma_wants_writenotify(struct vm_area_struct *vma);
59589
59590 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59591 @@ -1162,6 +1165,7 @@ out:
59592 }
59593
59594 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59595 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59596
59597 extern unsigned long do_brk(unsigned long, unsigned long);
59598
59599 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59600 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59601 struct vm_area_struct **pprev);
59602
59603 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59604 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59605 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59606 +
59607 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59608 NULL if none. Assume start_addr < end_addr. */
59609 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59610 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59611 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59612 }
59613
59614 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59615 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59616 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59617 unsigned long pfn, unsigned long size, pgprot_t);
59618 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59619 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59620 extern int sysctl_memory_failure_early_kill;
59621 extern int sysctl_memory_failure_recovery;
59622 -extern atomic_long_t mce_bad_pages;
59623 +extern atomic_long_unchecked_t mce_bad_pages;
59624 +
59625 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59626 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59627 +#else
59628 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59629 +#endif
59630
59631 #endif /* __KERNEL__ */
59632 #endif /* _LINUX_MM_H */
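
track_exec_limit() above follows the usual kernel idiom of a real prototype when CONFIG_ARCH_TRACK_EXEC_LIMIT is set and an empty static inline otherwise, so callers need no #ifdefs of their own. A toy model of the idiom (FEATURE_ON_ is a stand-in for the config symbol, and the argument list is simplified):

#include <stdio.h>

#define FEATURE_ON_ 0                    /* stand-in for CONFIG_ARCH_TRACK_EXEC_LIMIT */

#if FEATURE_ON_
void track_exec_limit_(unsigned long start, unsigned long end);   /* real version elsewhere */
#else
static inline void track_exec_limit_(unsigned long start, unsigned long end)
{
        (void)start;
        (void)end;                       /* compiles away entirely */
}
#endif

int main(void)
{
        track_exec_limit_(0x1000, 0x2000);   /* no-op in this configuration */
        puts("caller stays #ifdef-free");
        return 0;
}
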
59633 diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59634 --- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59635 +++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59636 @@ -186,6 +186,8 @@ struct vm_area_struct {
59637 #ifdef CONFIG_NUMA
59638 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59639 #endif
59640 +
59641 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59642 };
59643
59644 struct core_thread {
59645 @@ -287,6 +289,24 @@ struct mm_struct {
59646 #ifdef CONFIG_MMU_NOTIFIER
59647 struct mmu_notifier_mm *mmu_notifier_mm;
59648 #endif
59649 +
59650 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59651 + unsigned long pax_flags;
59652 +#endif
59653 +
59654 +#ifdef CONFIG_PAX_DLRESOLVE
59655 + unsigned long call_dl_resolve;
59656 +#endif
59657 +
59658 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59659 + unsigned long call_syscall;
59660 +#endif
59661 +
59662 +#ifdef CONFIG_PAX_ASLR
59663 + unsigned long delta_mmap; /* randomized offset */
59664 + unsigned long delta_stack; /* randomized offset */
59665 +#endif
59666 +
59667 };
59668
59669 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59670 diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59671 --- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59672 +++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59673 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59674 */
59675 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59676 ({ \
59677 - pte_t __pte; \
59678 + pte_t ___pte; \
59679 struct vm_area_struct *___vma = __vma; \
59680 unsigned long ___address = __address; \
59681 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59682 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59683 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59684 - __pte; \
59685 + ___pte; \
59686 })
59687
59688 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59689 diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59690 --- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59691 +++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59692 @@ -350,7 +350,7 @@ struct zone {
59693 unsigned long flags; /* zone flags, see below */
59694
59695 /* Zone statistics */
59696 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59697 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59698
59699 /*
59700 * prev_priority holds the scanning priority for this zone. It is
59701 diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59702 --- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59703 +++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59704 @@ -12,7 +12,7 @@
59705 typedef unsigned long kernel_ulong_t;
59706 #endif
59707
59708 -#define PCI_ANY_ID (~0)
59709 +#define PCI_ANY_ID ((__u16)~0)
59710
59711 struct pci_device_id {
59712 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59713 @@ -131,7 +131,7 @@ struct usb_device_id {
59714 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59715 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59716
59717 -#define HID_ANY_ID (~0)
59718 +#define HID_ANY_ID (~0U)
59719
59720 struct hid_device_id {
59721 __u16 bus;
59722 diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59723 --- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59724 +++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59725 @@ -16,6 +16,7 @@
59726 #include <linux/kobject.h>
59727 #include <linux/moduleparam.h>
59728 #include <linux/tracepoint.h>
59729 +#include <linux/fs.h>
59730
59731 #include <asm/local.h>
59732 #include <asm/module.h>
59733 @@ -287,16 +288,16 @@ struct module
59734 int (*init)(void);
59735
59736 /* If this is non-NULL, vfree after init() returns */
59737 - void *module_init;
59738 + void *module_init_rx, *module_init_rw;
59739
59740 /* Here is the actual code + data, vfree'd on unload. */
59741 - void *module_core;
59742 + void *module_core_rx, *module_core_rw;
59743
59744 /* Here are the sizes of the init and core sections */
59745 - unsigned int init_size, core_size;
59746 + unsigned int init_size_rw, core_size_rw;
59747
59748 /* The size of the executable code in each section. */
59749 - unsigned int init_text_size, core_text_size;
59750 + unsigned int init_size_rx, core_size_rx;
59751
59752 /* Arch-specific module values */
59753 struct mod_arch_specific arch;
59754 @@ -345,6 +346,10 @@ struct module
59755 #ifdef CONFIG_EVENT_TRACING
59756 struct ftrace_event_call *trace_events;
59757 unsigned int num_trace_events;
59758 + struct file_operations trace_id;
59759 + struct file_operations trace_enable;
59760 + struct file_operations trace_format;
59761 + struct file_operations trace_filter;
59762 #endif
59763 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59764 unsigned long *ftrace_callsites;
59765 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59766 bool is_module_address(unsigned long addr);
59767 bool is_module_text_address(unsigned long addr);
59768
59769 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59770 +{
59771 +
59772 +#ifdef CONFIG_PAX_KERNEXEC
59773 + if (ktla_ktva(addr) >= (unsigned long)start &&
59774 + ktla_ktva(addr) < (unsigned long)start + size)
59775 + return 1;
59776 +#endif
59777 +
59778 + return ((void *)addr >= start && (void *)addr < start + size);
59779 +}
59780 +
59781 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59782 +{
59783 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59784 +}
59785 +
59786 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59787 +{
59788 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59789 +}
59790 +
59791 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59792 +{
59793 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59794 +}
59795 +
59796 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59797 +{
59798 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59799 +}
59800 +
59801 static inline int within_module_core(unsigned long addr, struct module *mod)
59802 {
59803 - return (unsigned long)mod->module_core <= addr &&
59804 - addr < (unsigned long)mod->module_core + mod->core_size;
59805 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59806 }
59807
59808 static inline int within_module_init(unsigned long addr, struct module *mod)
59809 {
59810 - return (unsigned long)mod->module_init <= addr &&
59811 - addr < (unsigned long)mod->module_init + mod->init_size;
59812 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59813 }
59814
59815 /* Search for module by name: must hold module_mutex. */
59816 diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59817 --- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59818 +++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59819 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59820 sections. Returns NULL on failure. */
59821 void *module_alloc(unsigned long size);
59822
59823 +#ifdef CONFIG_PAX_KERNEXEC
59824 +void *module_alloc_exec(unsigned long size);
59825 +#else
59826 +#define module_alloc_exec(x) module_alloc(x)
59827 +#endif
59828 +
59829 /* Free memory returned from module_alloc. */
59830 void module_free(struct module *mod, void *module_region);
59831
59832 +#ifdef CONFIG_PAX_KERNEXEC
59833 +void module_free_exec(struct module *mod, void *module_region);
59834 +#else
59835 +#define module_free_exec(x, y) module_free((x), (y))
59836 +#endif
59837 +
59838 /* Apply the given relocation to the (simplified) ELF. Return -error
59839 or 0. */
59840 int apply_relocate(Elf_Shdr *sechdrs,
59841 diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59842 --- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59843 +++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59844 @@ -132,7 +132,7 @@ struct kparam_array
59845
59846 /* Actually copy string: maxlen param is usually sizeof(string). */
59847 #define module_param_string(name, string, len, perm) \
59848 - static const struct kparam_string __param_string_##name \
59849 + static const struct kparam_string __param_string_##name __used \
59850 = { len, string }; \
59851 __module_param_call(MODULE_PARAM_PREFIX, name, \
59852 param_set_copystring, param_get_string, \
59853 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59854
59855 /* Comma-separated array: *nump is set to number they actually specified. */
59856 #define module_param_array_named(name, array, type, nump, perm) \
59857 - static const struct kparam_array __param_arr_##name \
59858 + static const struct kparam_array __param_arr_##name __used \
59859 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59860 sizeof(array[0]), array }; \
59861 __module_param_call(MODULE_PARAM_PREFIX, name, \
59862 diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59863 --- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59864 +++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59865 @@ -51,7 +51,7 @@ struct mutex {
59866 spinlock_t wait_lock;
59867 struct list_head wait_list;
59868 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59869 - struct thread_info *owner;
59870 + struct task_struct *owner;
59871 #endif
59872 #ifdef CONFIG_DEBUG_MUTEXES
59873 const char *name;
59874 diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59875 --- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59876 +++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59877 @@ -22,7 +22,7 @@ struct nameidata {
59878 unsigned int flags;
59879 int last_type;
59880 unsigned depth;
59881 - char *saved_names[MAX_NESTED_LINKS + 1];
59882 + const char *saved_names[MAX_NESTED_LINKS + 1];
59883
59884 /* Intent data */
59885 union {
59886 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59887 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59888 extern void unlock_rename(struct dentry *, struct dentry *);
59889
59890 -static inline void nd_set_link(struct nameidata *nd, char *path)
59891 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59892 {
59893 nd->saved_names[nd->depth] = path;
59894 }
59895
59896 -static inline char *nd_get_link(struct nameidata *nd)
59897 +static inline const char *nd_get_link(const struct nameidata *nd)
59898 {
59899 return nd->saved_names[nd->depth];
59900 }
59901 diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59902 --- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59903 +++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59904 @@ -0,0 +1,9 @@
59905 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59906 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59907 +
59908 +struct xt_gradm_mtinfo {
59909 + __u16 flags;
59910 + __u16 invflags;
59911 +};
59912 +
59913 +#endif
59914 diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59915 --- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59916 +++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59917 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59918
59919 #define any_online_node(mask) \
59920 ({ \
59921 - int node; \
59922 - for_each_node_mask(node, (mask)) \
59923 - if (node_online(node)) \
59924 + int __node; \
59925 + for_each_node_mask(__node, (mask)) \
59926 + if (node_online(__node)) \
59927 break; \
59928 - node; \
59929 + __node; \
59930 })
59931
59932 #define num_online_nodes() num_node_state(N_ONLINE)
59933 diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59934 --- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59935 +++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59936 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59937 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59938 char const * name, ulong * val);
59939
59940 -/** Create a file for read-only access to an atomic_t. */
59941 +/** Create a file for read-only access to an atomic_unchecked_t. */
59942 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59943 - char const * name, atomic_t * val);
59944 + char const * name, atomic_unchecked_t * val);
59945
59946 /** create a directory */
59947 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59948 diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59949 --- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59950 +++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59951 @@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59952 if (((unsigned long)uaddr & PAGE_MASK) !=
59953 ((unsigned long)end & PAGE_MASK))
59954 ret = __get_user(c, end);
59955 + (void)c;
59956 }
59957 return ret;
59958 }
59959 diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59960 --- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59961 +++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59962 @@ -476,7 +476,7 @@ struct hw_perf_event {
59963 struct hrtimer hrtimer;
59964 };
59965 };
59966 - atomic64_t prev_count;
59967 + atomic64_unchecked_t prev_count;
59968 u64 sample_period;
59969 u64 last_period;
59970 atomic64_t period_left;
59971 @@ -557,7 +557,7 @@ struct perf_event {
59972 const struct pmu *pmu;
59973
59974 enum perf_event_active_state state;
59975 - atomic64_t count;
59976 + atomic64_unchecked_t count;
59977
59978 /*
59979 * These are the total time in nanoseconds that the event
59980 @@ -595,8 +595,8 @@ struct perf_event {
59981 * These accumulate total time (in nanoseconds) that children
59982 * events have been enabled and running, respectively.
59983 */
59984 - atomic64_t child_total_time_enabled;
59985 - atomic64_t child_total_time_running;
59986 + atomic64_unchecked_t child_total_time_enabled;
59987 + atomic64_unchecked_t child_total_time_running;
59988
59989 /*
59990 * Protect attach/detach and child_list:
59991 diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59992 --- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59993 +++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59994 @@ -46,9 +46,9 @@ struct pipe_inode_info {
59995 wait_queue_head_t wait;
59996 unsigned int nrbufs, curbuf;
59997 struct page *tmp_page;
59998 - unsigned int readers;
59999 - unsigned int writers;
60000 - unsigned int waiting_writers;
60001 + atomic_t readers;
60002 + atomic_t writers;
60003 + atomic_t waiting_writers;
60004 unsigned int r_counter;
60005 unsigned int w_counter;
60006 struct fasync_struct *fasync_readers;
60007 diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
60008 --- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
60009 +++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
60010 @@ -19,8 +19,8 @@
60011 * under normal circumstances, used to verify that nobody uses
60012 * non-initialized list entries.
60013 */
60014 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60015 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60016 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60017 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60018
60019 /********** include/linux/timer.h **********/
60020 /*
60021 diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
60022 --- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
60023 +++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
60024 @@ -67,7 +67,7 @@ struct k_itimer {
60025 };
60026
60027 struct k_clock {
60028 - int res; /* in nanoseconds */
60029 + const int res; /* in nanoseconds */
60030 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
60031 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
60032 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
60033 diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
60034 --- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
60035 +++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
60036 @@ -110,7 +110,7 @@ struct preempt_ops {
60037 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60038 void (*sched_out)(struct preempt_notifier *notifier,
60039 struct task_struct *next);
60040 -};
60041 +} __no_const;
60042
60043 /**
60044 * preempt_notifier - key for installing preemption notifiers
60045 diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
60046 --- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
60047 +++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
60048 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60049 return proc_create_data(name, mode, parent, proc_fops, NULL);
60050 }
60051
60052 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60053 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60054 +{
60055 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60056 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60057 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60058 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60059 +#else
60060 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60061 +#endif
60062 +}
60063 +
60064 +
60065 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60066 mode_t mode, struct proc_dir_entry *base,
60067 read_proc_t *read_proc, void * data)
60068 @@ -256,7 +269,7 @@ union proc_op {
60069 int (*proc_show)(struct seq_file *m,
60070 struct pid_namespace *ns, struct pid *pid,
60071 struct task_struct *task);
60072 -};
60073 +} __no_const;
60074
60075 struct ctl_table_header;
60076 struct ctl_table;
60077 diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
60078 --- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60079 +++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60080 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60081 extern void exit_ptrace(struct task_struct *tracer);
60082 #define PTRACE_MODE_READ 1
60083 #define PTRACE_MODE_ATTACH 2
60084 -/* Returns 0 on success, -errno on denial. */
60085 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60086 /* Returns true on success, false on denial. */
60087 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60088 +/* Returns true on success, false on denial. */
60089 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60090
60091 static inline int ptrace_reparented(struct task_struct *child)
60092 {
60093 diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
60094 --- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
60095 +++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60096 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
60097 u32 random32(void);
60098 void srandom32(u32 seed);
60099
60100 +static inline unsigned long pax_get_random_long(void)
60101 +{
60102 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60103 +}
60104 +
60105 #endif /* __KERNEL___ */
60106
60107 #endif /* _LINUX_RANDOM_H */
60108 diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
60109 --- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60110 +++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60111 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60112 * Architecture-specific implementations of sys_reboot commands.
60113 */
60114
60115 -extern void machine_restart(char *cmd);
60116 -extern void machine_halt(void);
60117 -extern void machine_power_off(void);
60118 +extern void machine_restart(char *cmd) __noreturn;
60119 +extern void machine_halt(void) __noreturn;
60120 +extern void machine_power_off(void) __noreturn;
60121
60122 extern void machine_shutdown(void);
60123 struct pt_regs;
60124 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60125 */
60126
60127 extern void kernel_restart_prepare(char *cmd);
60128 -extern void kernel_restart(char *cmd);
60129 -extern void kernel_halt(void);
60130 -extern void kernel_power_off(void);
60131 +extern void kernel_restart(char *cmd) __noreturn;
60132 +extern void kernel_halt(void) __noreturn;
60133 +extern void kernel_power_off(void) __noreturn;
60134
60135 void ctrl_alt_del(void);
60136
60137 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60138 * Emergency restart, callable from an interrupt handler.
60139 */
60140
60141 -extern void emergency_restart(void);
60142 +extern void emergency_restart(void) __noreturn;
60143 #include <asm/emergency-restart.h>
60144
60145 #endif
60146 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
60147 --- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60148 +++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60149 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60150 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60151
60152 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60153 -#define get_generation(s) atomic_read (&fs_generation(s))
60154 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60155 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60156 #define __fs_changed(gen,s) (gen != get_generation (s))
60157 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60158 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60159 */
60160
60161 struct item_operations {
60162 - int (*bytes_number) (struct item_head * ih, int block_size);
60163 - void (*decrement_key) (struct cpu_key *);
60164 - int (*is_left_mergeable) (struct reiserfs_key * ih,
60165 + int (* const bytes_number) (struct item_head * ih, int block_size);
60166 + void (* const decrement_key) (struct cpu_key *);
60167 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
60168 unsigned long bsize);
60169 - void (*print_item) (struct item_head *, char *item);
60170 - void (*check_item) (struct item_head *, char *item);
60171 + void (* const print_item) (struct item_head *, char *item);
60172 + void (* const check_item) (struct item_head *, char *item);
60173
60174 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60175 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60176 int is_affected, int insert_size);
60177 - int (*check_left) (struct virtual_item * vi, int free,
60178 + int (* const check_left) (struct virtual_item * vi, int free,
60179 int start_skip, int end_skip);
60180 - int (*check_right) (struct virtual_item * vi, int free);
60181 - int (*part_size) (struct virtual_item * vi, int from, int to);
60182 - int (*unit_num) (struct virtual_item * vi);
60183 - void (*print_vi) (struct virtual_item * vi);
60184 + int (* const check_right) (struct virtual_item * vi, int free);
60185 + int (* const part_size) (struct virtual_item * vi, int from, int to);
60186 + int (* const unit_num) (struct virtual_item * vi);
60187 + void (* const print_vi) (struct virtual_item * vi);
60188 };
60189
60190 -extern struct item_operations *item_ops[TYPE_ANY + 1];
60191 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60192
60193 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60194 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60195 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
60196 --- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60197 +++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60198 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60199 /* Comment? -Hans */
60200 wait_queue_head_t s_wait;
60201 /* To be obsoleted soon by per buffer seals.. -Hans */
60202 - atomic_t s_generation_counter; // increased by one every time the
60203 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60204 // tree gets re-balanced
60205 unsigned long s_properties; /* File system properties. Currently holds
60206 on-disk FS format */
60207 diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
60208 --- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60209 +++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60210 @@ -159,7 +159,7 @@ struct rchan_callbacks
60211 * The callback should return 0 if successful, negative if not.
60212 */
60213 int (*remove_buf_file)(struct dentry *dentry);
60214 -};
60215 +} __no_const;
60216
60217 /*
60218 * CONFIG_RELAY kernel API, kernel/relay.c
60219 diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
60220 --- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60221 +++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
60222 @@ -101,6 +101,7 @@ struct bio;
60223 struct fs_struct;
60224 struct bts_context;
60225 struct perf_event_context;
60226 +struct linux_binprm;
60227
60228 /*
60229 * List of flags we want to share for kernel threads,
60230 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60231 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60232 asmlinkage void __schedule(void);
60233 asmlinkage void schedule(void);
60234 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60235 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60236
60237 struct nsproxy;
60238 struct user_namespace;
60239 @@ -371,9 +372,12 @@ struct user_namespace;
60240 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60241
60242 extern int sysctl_max_map_count;
60243 +extern unsigned long sysctl_heap_stack_gap;
60244
60245 #include <linux/aio.h>
60246
60247 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60248 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60249 extern unsigned long
60250 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60251 unsigned long, unsigned long);
60252 @@ -666,6 +670,16 @@ struct signal_struct {
60253 struct tty_audit_buf *tty_audit_buf;
60254 #endif
60255
60256 +#ifdef CONFIG_GRKERNSEC
60257 + u32 curr_ip;
60258 + u32 saved_ip;
60259 + u32 gr_saddr;
60260 + u32 gr_daddr;
60261 + u16 gr_sport;
60262 + u16 gr_dport;
60263 + u8 used_accept:1;
60264 +#endif
60265 +
60266 int oom_adj; /* OOM kill score adjustment (bit shift) */
60267 };
60268
60269 @@ -723,6 +737,11 @@ struct user_struct {
60270 struct key *session_keyring; /* UID's default session keyring */
60271 #endif
60272
60273 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60274 + unsigned int banned;
60275 + unsigned long ban_expires;
60276 +#endif
60277 +
60278 /* Hash table maintenance information */
60279 struct hlist_node uidhash_node;
60280 uid_t uid;
60281 @@ -1328,8 +1347,8 @@ struct task_struct {
60282 struct list_head thread_group;
60283
60284 struct completion *vfork_done; /* for vfork() */
60285 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60286 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60287 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60288 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60289
60290 cputime_t utime, stime, utimescaled, stimescaled;
60291 cputime_t gtime;
60292 @@ -1343,16 +1362,6 @@ struct task_struct {
60293 struct task_cputime cputime_expires;
60294 struct list_head cpu_timers[3];
60295
60296 -/* process credentials */
60297 - const struct cred *real_cred; /* objective and real subjective task
60298 - * credentials (COW) */
60299 - const struct cred *cred; /* effective (overridable) subjective task
60300 - * credentials (COW) */
60301 - struct mutex cred_guard_mutex; /* guard against foreign influences on
60302 - * credential calculations
60303 - * (notably. ptrace) */
60304 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60305 -
60306 char comm[TASK_COMM_LEN]; /* executable name excluding path
60307 - access with [gs]et_task_comm (which lock
60308 it with task_lock())
60309 @@ -1369,6 +1378,10 @@ struct task_struct {
60310 #endif
60311 /* CPU-specific state of this task */
60312 struct thread_struct thread;
60313 +/* thread_info moved to task_struct */
60314 +#ifdef CONFIG_X86
60315 + struct thread_info tinfo;
60316 +#endif
60317 /* filesystem information */
60318 struct fs_struct *fs;
60319 /* open file information */
60320 @@ -1436,6 +1449,15 @@ struct task_struct {
60321 int hardirq_context;
60322 int softirq_context;
60323 #endif
60324 +
60325 +/* process credentials */
60326 + const struct cred *real_cred; /* objective and real subjective task
60327 + * credentials (COW) */
60328 + struct mutex cred_guard_mutex; /* guard against foreign influences on
60329 + * credential calculations
60330 + * (notably. ptrace) */
60331 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60332 +
60333 #ifdef CONFIG_LOCKDEP
60334 # define MAX_LOCK_DEPTH 48UL
60335 u64 curr_chain_key;
60336 @@ -1456,6 +1478,9 @@ struct task_struct {
60337
60338 struct backing_dev_info *backing_dev_info;
60339
60340 + const struct cred *cred; /* effective (overridable) subjective task
60341 + * credentials (COW) */
60342 +
60343 struct io_context *io_context;
60344
60345 unsigned long ptrace_message;
60346 @@ -1519,6 +1544,21 @@ struct task_struct {
60347 unsigned long default_timer_slack_ns;
60348
60349 struct list_head *scm_work_list;
60350 +
60351 +#ifdef CONFIG_GRKERNSEC
60352 + /* grsecurity */
60353 + struct dentry *gr_chroot_dentry;
60354 + struct acl_subject_label *acl;
60355 + struct acl_role_label *role;
60356 + struct file *exec_file;
60357 + u16 acl_role_id;
60358 + /* is this the task that authenticated to the special role */
60359 + u8 acl_sp_role;
60360 + u8 is_writable;
60361 + u8 brute;
60362 + u8 gr_is_chrooted;
60363 +#endif
60364 +
60365 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60366 /* Index of current stored adress in ret_stack */
60367 int curr_ret_stack;
60368 @@ -1542,6 +1582,57 @@ struct task_struct {
60369 #endif /* CONFIG_TRACING */
60370 };
60371
60372 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60373 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60374 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60375 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60376 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60377 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60378 +
60379 +#ifdef CONFIG_PAX_SOFTMODE
60380 +extern int pax_softmode;
60381 +#endif
60382 +
60383 +extern int pax_check_flags(unsigned long *);
60384 +
60385 +/* if tsk != current then task_lock must be held on it */
60386 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60387 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60388 +{
60389 + if (likely(tsk->mm))
60390 + return tsk->mm->pax_flags;
60391 + else
60392 + return 0UL;
60393 +}
60394 +
60395 +/* if tsk != current then task_lock must be held on it */
60396 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60397 +{
60398 + if (likely(tsk->mm)) {
60399 + tsk->mm->pax_flags = flags;
60400 + return 0;
60401 + }
60402 + return -EINVAL;
60403 +}
60404 +#endif
60405 +
60406 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60407 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60408 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60409 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60410 +#endif
60411 +
60412 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60413 +extern void pax_report_insns(void *pc, void *sp);
60414 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60415 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60416 +
60417 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60418 +extern void pax_track_stack(void);
60419 +#else
60420 +static inline void pax_track_stack(void) {}
60421 +#endif
60422 +
60423 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60424 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60425
60426 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60427 #define PF_DUMPCORE 0x00000200 /* dumped core */
60428 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60429 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60430 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60431 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60432 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60433 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60434 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60435 @@ -1978,7 +2069,9 @@ void yield(void);
60436 extern struct exec_domain default_exec_domain;
60437
60438 union thread_union {
60439 +#ifndef CONFIG_X86
60440 struct thread_info thread_info;
60441 +#endif
60442 unsigned long stack[THREAD_SIZE/sizeof(long)];
60443 };
60444
60445 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60446 */
60447
60448 extern struct task_struct *find_task_by_vpid(pid_t nr);
60449 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60450 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60451 struct pid_namespace *ns);
60452
60453 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60454 extern void exit_itimers(struct signal_struct *);
60455 extern void flush_itimer_signals(void);
60456
60457 -extern NORET_TYPE void do_group_exit(int);
60458 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60459
60460 extern void daemonize(const char *, ...);
60461 extern int allow_signal(int);
60462 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60463
60464 #endif
60465
60466 -static inline int object_is_on_stack(void *obj)
60467 +static inline int object_starts_on_stack(void *obj)
60468 {
60469 - void *stack = task_stack_page(current);
60470 + const void *stack = task_stack_page(current);
60471
60472 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60473 }
60474
60475 +#ifdef CONFIG_PAX_USERCOPY
60476 +extern int object_is_on_stack(const void *obj, unsigned long len);
60477 +#endif
60478 +
60479 extern void thread_info_cache_init(void);
60480
60481 #ifdef CONFIG_DEBUG_STACK_USAGE
60482 diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60483 --- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60484 +++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60485 @@ -42,7 +42,8 @@ struct screen_info {
60486 __u16 pages; /* 0x32 */
60487 __u16 vesa_attributes; /* 0x34 */
60488 __u32 capabilities; /* 0x36 */
60489 - __u8 _reserved[6]; /* 0x3a */
60490 + __u16 vesapm_size; /* 0x3a */
60491 + __u8 _reserved[4]; /* 0x3c */
60492 } __attribute__((packed));
60493
60494 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60495 diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60496 --- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60497 +++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60498 @@ -34,6 +34,7 @@
60499 #include <linux/key.h>
60500 #include <linux/xfrm.h>
60501 #include <linux/gfp.h>
60502 +#include <linux/grsecurity.h>
60503 #include <net/flow.h>
60504
60505 /* Maximum number of letters for an LSM name string */
60506 diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60507 --- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60508 +++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60509 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60510 pid_t shm_cprid;
60511 pid_t shm_lprid;
60512 struct user_struct *mlock_user;
60513 +#ifdef CONFIG_GRKERNSEC
60514 + time_t shm_createtime;
60515 + pid_t shm_lapid;
60516 +#endif
60517 };
60518
60519 /* shm_mode upper byte flags */
60520 diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60521 --- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60522 +++ linux-2.6.32.45/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
60523 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
60524 */
60525 static inline int skb_queue_empty(const struct sk_buff_head *list)
60526 {
60527 - return list->next == (struct sk_buff *)list;
60528 + return list->next == (const struct sk_buff *)list;
60529 }
60530
60531 /**
60532 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
60533 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60534 const struct sk_buff *skb)
60535 {
60536 - return (skb->next == (struct sk_buff *) list);
60537 + return (skb->next == (const struct sk_buff *) list);
60538 }
60539
60540 /**
60541 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
60542 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60543 const struct sk_buff *skb)
60544 {
60545 - return (skb->prev == (struct sk_buff *) list);
60546 + return (skb->prev == (const struct sk_buff *) list);
60547 }
60548
60549 /**
60550 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
60551 * headroom, you should not reduce this.
60552 */
60553 #ifndef NET_SKB_PAD
60554 -#define NET_SKB_PAD 32
60555 +#define NET_SKB_PAD (_AC(32,UL))
60556 #endif
60557
60558 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60559 diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60560 --- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60561 +++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60562 @@ -69,10 +69,10 @@ struct kmem_cache {
60563 unsigned long node_allocs;
60564 unsigned long node_frees;
60565 unsigned long node_overflow;
60566 - atomic_t allochit;
60567 - atomic_t allocmiss;
60568 - atomic_t freehit;
60569 - atomic_t freemiss;
60570 + atomic_unchecked_t allochit;
60571 + atomic_unchecked_t allocmiss;
60572 + atomic_unchecked_t freehit;
60573 + atomic_unchecked_t freemiss;
60574
60575 /*
60576 * If debugging is enabled, then the allocator can add additional
60577 diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60578 --- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60579 +++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60580 @@ -11,12 +11,20 @@
60581
60582 #include <linux/gfp.h>
60583 #include <linux/types.h>
60584 +#include <linux/err.h>
60585
60586 /*
60587 * Flags to pass to kmem_cache_create().
60588 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60589 */
60590 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60591 +
60592 +#ifdef CONFIG_PAX_USERCOPY
60593 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60594 +#else
60595 +#define SLAB_USERCOPY 0x00000000UL
60596 +#endif
60597 +
60598 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60599 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60600 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60601 @@ -82,10 +90,13 @@
60602 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60603 * Both make kfree a no-op.
60604 */
60605 -#define ZERO_SIZE_PTR ((void *)16)
60606 +#define ZERO_SIZE_PTR \
60607 +({ \
60608 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60609 + (void *)(-MAX_ERRNO-1L); \
60610 +})
60611
60612 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60613 - (unsigned long)ZERO_SIZE_PTR)
60614 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60615
60616 /*
60617 * struct kmem_cache related prototypes
60618 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60619 void kfree(const void *);
60620 void kzfree(const void *);
60621 size_t ksize(const void *);
60622 +void check_object_size(const void *ptr, unsigned long n, bool to);
60623
60624 /*
60625 * Allocator specific definitions. These are mainly used to establish optimized
60626 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60627
60628 void __init kmem_cache_init_late(void);
60629
60630 +#define kmalloc(x, y) \
60631 +({ \
60632 + void *___retval; \
60633 + intoverflow_t ___x = (intoverflow_t)x; \
60634 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60635 + ___retval = NULL; \
60636 + else \
60637 + ___retval = kmalloc((size_t)___x, (y)); \
60638 + ___retval; \
60639 +})
60640 +
60641 +#define kmalloc_node(x, y, z) \
60642 +({ \
60643 + void *___retval; \
60644 + intoverflow_t ___x = (intoverflow_t)x; \
60645 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60646 + ___retval = NULL; \
60647 + else \
60648 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60649 + ___retval; \
60650 +})
60651 +
60652 +#define kzalloc(x, y) \
60653 +({ \
60654 + void *___retval; \
60655 + intoverflow_t ___x = (intoverflow_t)x; \
60656 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60657 + ___retval = NULL; \
60658 + else \
60659 + ___retval = kzalloc((size_t)___x, (y)); \
60660 + ___retval; \
60661 +})
60662 +
60663 #endif /* _LINUX_SLAB_H */
60664 diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60665 --- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60666 +++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60667 @@ -86,7 +86,7 @@ struct kmem_cache {
60668 struct kmem_cache_order_objects max;
60669 struct kmem_cache_order_objects min;
60670 gfp_t allocflags; /* gfp flags to use on each alloc */
60671 - int refcount; /* Refcount for slab cache destroy */
60672 + atomic_t refcount; /* Refcount for slab cache destroy */
60673 void (*ctor)(void *);
60674 int inuse; /* Offset to metadata */
60675 int align; /* Alignment */
60676 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60677 #endif
60678
60679 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60680 -void *__kmalloc(size_t size, gfp_t flags);
60681 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60682
60683 #ifdef CONFIG_KMEMTRACE
60684 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60685 diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60686 --- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60687 +++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60688 @@ -61,7 +61,7 @@ struct sonet_stats {
60689 #include <asm/atomic.h>
60690
60691 struct k_sonet_stats {
60692 -#define __HANDLE_ITEM(i) atomic_t i
60693 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60694 __SONET_ITEMS
60695 #undef __HANDLE_ITEM
60696 };
60697 diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60698 --- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60699 +++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60700 @@ -125,7 +125,7 @@ struct cache_detail {
60701 */
60702 struct cache_req {
60703 struct cache_deferred_req *(*defer)(struct cache_req *req);
60704 -};
60705 +} __no_const;
60706 /* this must be embedded in a deferred_request that is being
60707 * delayed awaiting cache-fill
60708 */
60709 diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60710 --- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60711 +++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60712 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60713 {
60714 switch (sap->sa_family) {
60715 case AF_INET:
60716 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60717 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60718 case AF_INET6:
60719 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60720 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60721 }
60722 return 0;
60723 }
60724 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60725 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60726 const struct sockaddr *src)
60727 {
60728 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60729 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60730 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60731
60732 dsin->sin_family = ssin->sin_family;
60733 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60734 if (sa->sa_family != AF_INET6)
60735 return 0;
60736
60737 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60738 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60739 }
60740
60741 #endif /* __KERNEL__ */
60742 diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60743 --- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60744 +++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60745 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60746 extern unsigned int svcrdma_max_requests;
60747 extern unsigned int svcrdma_max_req_size;
60748
60749 -extern atomic_t rdma_stat_recv;
60750 -extern atomic_t rdma_stat_read;
60751 -extern atomic_t rdma_stat_write;
60752 -extern atomic_t rdma_stat_sq_starve;
60753 -extern atomic_t rdma_stat_rq_starve;
60754 -extern atomic_t rdma_stat_rq_poll;
60755 -extern atomic_t rdma_stat_rq_prod;
60756 -extern atomic_t rdma_stat_sq_poll;
60757 -extern atomic_t rdma_stat_sq_prod;
60758 +extern atomic_unchecked_t rdma_stat_recv;
60759 +extern atomic_unchecked_t rdma_stat_read;
60760 +extern atomic_unchecked_t rdma_stat_write;
60761 +extern atomic_unchecked_t rdma_stat_sq_starve;
60762 +extern atomic_unchecked_t rdma_stat_rq_starve;
60763 +extern atomic_unchecked_t rdma_stat_rq_poll;
60764 +extern atomic_unchecked_t rdma_stat_rq_prod;
60765 +extern atomic_unchecked_t rdma_stat_sq_poll;
60766 +extern atomic_unchecked_t rdma_stat_sq_prod;
60767
60768 #define RPCRDMA_VERSION 1
60769
60770 diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60771 --- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60772 +++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60773 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60774 * which require special recovery actions in that situation.
60775 */
60776 struct platform_suspend_ops {
60777 - int (*valid)(suspend_state_t state);
60778 - int (*begin)(suspend_state_t state);
60779 - int (*prepare)(void);
60780 - int (*prepare_late)(void);
60781 - int (*enter)(suspend_state_t state);
60782 - void (*wake)(void);
60783 - void (*finish)(void);
60784 - void (*end)(void);
60785 - void (*recover)(void);
60786 + int (* const valid)(suspend_state_t state);
60787 + int (* const begin)(suspend_state_t state);
60788 + int (* const prepare)(void);
60789 + int (* const prepare_late)(void);
60790 + int (* const enter)(suspend_state_t state);
60791 + void (* const wake)(void);
60792 + void (* const finish)(void);
60793 + void (* const end)(void);
60794 + void (* const recover)(void);
60795 };
60796
60797 #ifdef CONFIG_SUSPEND
60798 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
60799 * suspend_set_ops - set platform dependent suspend operations
60800 * @ops: The new suspend operations to set.
60801 */
60802 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
60803 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60804 extern int suspend_valid_only_mem(suspend_state_t state);
60805
60806 /**
60807 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60808 #else /* !CONFIG_SUSPEND */
60809 #define suspend_valid_only_mem NULL
60810
60811 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60812 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60813 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60814 #endif /* !CONFIG_SUSPEND */
60815
60816 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60817 * platforms which require special recovery actions in that situation.
60818 */
60819 struct platform_hibernation_ops {
60820 - int (*begin)(void);
60821 - void (*end)(void);
60822 - int (*pre_snapshot)(void);
60823 - void (*finish)(void);
60824 - int (*prepare)(void);
60825 - int (*enter)(void);
60826 - void (*leave)(void);
60827 - int (*pre_restore)(void);
60828 - void (*restore_cleanup)(void);
60829 - void (*recover)(void);
60830 + int (* const begin)(void);
60831 + void (* const end)(void);
60832 + int (* const pre_snapshot)(void);
60833 + void (* const finish)(void);
60834 + int (* const prepare)(void);
60835 + int (* const enter)(void);
60836 + void (* const leave)(void);
60837 + int (* const pre_restore)(void);
60838 + void (* const restore_cleanup)(void);
60839 + void (* const recover)(void);
60840 };
60841
60842 #ifdef CONFIG_HIBERNATION
60843 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60844 extern void swsusp_unset_page_free(struct page *);
60845 extern unsigned long get_safe_page(gfp_t gfp_mask);
60846
60847 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60848 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60849 extern int hibernate(void);
60850 extern bool system_entering_hibernation(void);
60851 #else /* CONFIG_HIBERNATION */
60852 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60853 static inline void swsusp_set_page_free(struct page *p) {}
60854 static inline void swsusp_unset_page_free(struct page *p) {}
60855
60856 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60857 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60858 static inline int hibernate(void) { return -ENOSYS; }
60859 static inline bool system_entering_hibernation(void) { return false; }
60860 #endif /* CONFIG_HIBERNATION */
60861 diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60862 --- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60863 +++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60864 @@ -164,7 +164,11 @@ enum
60865 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60866 };
60867
60868 -
60869 +#ifdef CONFIG_PAX_SOFTMODE
60870 +enum {
60871 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60872 +};
60873 +#endif
60874
60875 /* CTL_VM names: */
60876 enum
60877 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60878
60879 extern int proc_dostring(struct ctl_table *, int,
60880 void __user *, size_t *, loff_t *);
60881 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60882 + void __user *, size_t *, loff_t *);
60883 extern int proc_dointvec(struct ctl_table *, int,
60884 void __user *, size_t *, loff_t *);
60885 extern int proc_dointvec_minmax(struct ctl_table *, int,
60886 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60887
60888 extern ctl_handler sysctl_data;
60889 extern ctl_handler sysctl_string;
60890 +extern ctl_handler sysctl_string_modpriv;
60891 extern ctl_handler sysctl_intvec;
60892 extern ctl_handler sysctl_jiffies;
60893 extern ctl_handler sysctl_ms_jiffies;
60894 diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60895 --- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60896 +++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60897 @@ -75,8 +75,8 @@ struct bin_attribute {
60898 };
60899
60900 struct sysfs_ops {
60901 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
60902 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60903 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60904 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60905 };
60906
60907 struct sysfs_dirent;
60908 diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60909 --- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60910 +++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60911 @@ -23,7 +23,7 @@ struct restart_block {
60912 };
60913 /* For futex_wait and futex_wait_requeue_pi */
60914 struct {
60915 - u32 *uaddr;
60916 + u32 __user *uaddr;
60917 u32 val;
60918 u32 flags;
60919 u32 bitset;
60920 diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60921 --- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60922 +++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60923 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60924 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60925 extern void tty_ldisc_enable(struct tty_struct *tty);
60926
60927 -
60928 /* n_tty.c */
60929 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60930
60931 diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60932 --- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60933 +++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60934 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60935
60936 struct module *owner;
60937
60938 - int refcount;
60939 + atomic_t refcount;
60940 };
60941
60942 struct tty_ldisc {
60943 diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60944 --- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60945 +++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60946 @@ -191,10 +191,26 @@ typedef struct {
60947 volatile int counter;
60948 } atomic_t;
60949
60950 +#ifdef CONFIG_PAX_REFCOUNT
60951 +typedef struct {
60952 + volatile int counter;
60953 +} atomic_unchecked_t;
60954 +#else
60955 +typedef atomic_t atomic_unchecked_t;
60956 +#endif
60957 +
60958 #ifdef CONFIG_64BIT
60959 typedef struct {
60960 volatile long counter;
60961 } atomic64_t;
60962 +
60963 +#ifdef CONFIG_PAX_REFCOUNT
60964 +typedef struct {
60965 + volatile long counter;
60966 +} atomic64_unchecked_t;
60967 +#else
60968 +typedef atomic64_t atomic64_unchecked_t;
60969 +#endif
60970 #endif
60971
60972 struct ustat {
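
The atomic_unchecked_t typedefs above exist because, with CONFIG_PAX_REFCOUNT enabled, the ordinary atomic_t operations detect and stop overflows to catch reference-count overflow bugs; counters that are allowed to wrap, such as statistics, are moved to the *_unchecked variants, which keep plain wrapping behaviour. A hedged userspace sketch of that distinction, using the GCC/Clang __builtin_add_overflow helper rather than anything from the patch:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustration only (not the kernel code): the "checked" increment refuses
     * to wrap, which is the PAX_REFCOUNT idea for reference counts, while the
     * "unchecked" one wraps like an ordinary statistics counter. */
    static void inc_checked(int *v)
    {
            if (__builtin_add_overflow(*v, 1, v)) {
                    fprintf(stderr, "refcount overflow detected\n");
                    abort();
            }
    }

    static void inc_unchecked(unsigned int *v)
    {
            *v += 1;        /* wrapping is acceptable here */
    }

    int main(void)
    {
            int ref = INT_MAX - 1;
            unsigned int stat = UINT_MAX;

            inc_checked(&ref);      /* fine: reaches INT_MAX */
            inc_unchecked(&stat);   /* wraps to 0, by design */
            printf("ref=%d stat=%u\n", ref, stat);
            /* a second inc_checked(&ref) would trip the overflow check */
            return 0;
    }
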
60973 diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60974 --- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60975 +++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60976 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60977 long ret; \
60978 mm_segment_t old_fs = get_fs(); \
60979 \
60980 - set_fs(KERNEL_DS); \
60981 pagefault_disable(); \
60982 + set_fs(KERNEL_DS); \
60983 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60984 - pagefault_enable(); \
60985 set_fs(old_fs); \
60986 + pagefault_enable(); \
60987 ret; \
60988 })
60989
60990 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60991 * Safely read from address @src to the buffer at @dst. If a kernel fault
60992 * happens, handle that and return -EFAULT.
60993 */
60994 -extern long probe_kernel_read(void *dst, void *src, size_t size);
60995 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
60996
60997 /*
60998 * probe_kernel_write(): safely attempt to write to a location
60999 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
61000 * Safely write to address @dst from the buffer at @src. If a kernel fault
61001 * happens, handle that and return -EFAULT.
61002 */
61003 -extern long probe_kernel_write(void *dst, void *src, size_t size);
61004 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
61005
61006 #endif /* __LINUX_UACCESS_H__ */
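
The probe_kernel_read/probe_kernel_write hunk only tightens the prototypes: the source parameter becomes const void *, documenting that the helper never writes through it and letting callers pass const data without a cast. A trivial sketch of the same const-correctness rule (copy_buf is an invented name):

    #include <stdio.h>
    #include <string.h>

    /* const-correct prototype: the source buffer is only read, so callers can
     * pass pointers to const data without casting the qualifier away. */
    static long copy_buf(void *dst, const void *src, size_t size)
    {
            memcpy(dst, src, size);
            return 0;
    }

    int main(void)
    {
            static const char banner[] = "read-only source";
            char out[sizeof(banner)];

            copy_buf(out, banner, sizeof(banner));  /* no cast needed */
            puts(out);
            return 0;
    }
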
61007 diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
61008 --- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
61009 +++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
61010 @@ -6,32 +6,32 @@
61011
61012 static inline u16 get_unaligned_le16(const void *p)
61013 {
61014 - return le16_to_cpup((__le16 *)p);
61015 + return le16_to_cpup((const __le16 *)p);
61016 }
61017
61018 static inline u32 get_unaligned_le32(const void *p)
61019 {
61020 - return le32_to_cpup((__le32 *)p);
61021 + return le32_to_cpup((const __le32 *)p);
61022 }
61023
61024 static inline u64 get_unaligned_le64(const void *p)
61025 {
61026 - return le64_to_cpup((__le64 *)p);
61027 + return le64_to_cpup((const __le64 *)p);
61028 }
61029
61030 static inline u16 get_unaligned_be16(const void *p)
61031 {
61032 - return be16_to_cpup((__be16 *)p);
61033 + return be16_to_cpup((const __be16 *)p);
61034 }
61035
61036 static inline u32 get_unaligned_be32(const void *p)
61037 {
61038 - return be32_to_cpup((__be32 *)p);
61039 + return be32_to_cpup((const __be32 *)p);
61040 }
61041
61042 static inline u64 get_unaligned_be64(const void *p)
61043 {
61044 - return be64_to_cpup((__be64 *)p);
61045 + return be64_to_cpup((const __be64 *)p);
61046 }
61047
61048 static inline void put_unaligned_le16(u16 val, void *p)
61049 diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
61050 --- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
61051 +++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
61052 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61053 #define VM_MAP 0x00000004 /* vmap()ed pages */
61054 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61055 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61056 +
61057 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61058 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61059 +#endif
61060 +
61061 /* bits [20..32] reserved for arch specific ioremap internals */
61062
61063 /*
61064 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
61065
61066 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
61067
61068 +#define vmalloc(x) \
61069 +({ \
61070 + void *___retval; \
61071 + intoverflow_t ___x = (intoverflow_t)x; \
61072 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61073 + ___retval = NULL; \
61074 + else \
61075 + ___retval = vmalloc((unsigned long)___x); \
61076 + ___retval; \
61077 +})
61078 +
61079 +#define __vmalloc(x, y, z) \
61080 +({ \
61081 + void *___retval; \
61082 + intoverflow_t ___x = (intoverflow_t)x; \
61083 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61084 + ___retval = NULL; \
61085 + else \
61086 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61087 + ___retval; \
61088 +})
61089 +
61090 +#define vmalloc_user(x) \
61091 +({ \
61092 + void *___retval; \
61093 + intoverflow_t ___x = (intoverflow_t)x; \
61094 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61095 + ___retval = NULL; \
61096 + else \
61097 + ___retval = vmalloc_user((unsigned long)___x); \
61098 + ___retval; \
61099 +})
61100 +
61101 +#define vmalloc_exec(x) \
61102 +({ \
61103 + void *___retval; \
61104 + intoverflow_t ___x = (intoverflow_t)x; \
61105 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61106 + ___retval = NULL; \
61107 + else \
61108 + ___retval = vmalloc_exec((unsigned long)___x); \
61109 + ___retval; \
61110 +})
61111 +
61112 +#define vmalloc_node(x, y) \
61113 +({ \
61114 + void *___retval; \
61115 + intoverflow_t ___x = (intoverflow_t)x; \
61116 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61117 + ___retval = NULL; \
61118 + else \
61119 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61120 + ___retval; \
61121 +})
61122 +
61123 +#define vmalloc_32(x) \
61124 +({ \
61125 + void *___retval; \
61126 + intoverflow_t ___x = (intoverflow_t)x; \
61127 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61128 + ___retval = NULL; \
61129 + else \
61130 + ___retval = vmalloc_32((unsigned long)___x); \
61131 + ___retval; \
61132 +})
61133 +
61134 +#define vmalloc_32_user(x) \
61135 +({ \
61136 + void *___retval; \
61137 + intoverflow_t ___x = (intoverflow_t)x; \
61138 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61139 + ___retval = NULL; \
61140 + else \
61141 + ___retval = vmalloc_32_user((unsigned long)___x);\
61142 + ___retval; \
61143 +})
61144 +
61145 #endif /* _LINUX_VMALLOC_H */
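
The wrappers added above all follow one pattern: evaluate the requested size in a wider type (intoverflow_t), and if it would not fit in the unsigned long the real allocator takes, warn and return NULL instead of silently truncating. A minimal userspace sketch of the same shape around malloc(), using the GCC/Clang statement-expression extension; checked_malloc is an invented name:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Same shape as the vmalloc() wrappers above: compute the size in a wider
     * type and refuse requests that the allocator's size_t parameter would
     * truncate (the interesting case is a 64-bit length on a 32-bit build). */
    #define checked_malloc(x)                                            \
    ({                                                                   \
            void *___retval;                                             \
            uintmax_t ___x = (uintmax_t)(x);                             \
            if (___x > SIZE_MAX) {                                       \
                    fprintf(stderr, "checked_malloc: size overflow\n");  \
                    ___retval = NULL;                                    \
            } else {                                                     \
                    ___retval = malloc((size_t)___x);                    \
            }                                                            \
            ___retval;                                                   \
    })

    int main(void)
    {
            char *p = checked_malloc(64);

            if (p)
                    puts("64-byte allocation succeeded");
            free(p);
            return 0;
    }
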
61146 diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
61147 --- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61148 +++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61149 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61150 /*
61151 * Zone based page accounting with per cpu differentials.
61152 */
61153 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61154 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61155
61156 static inline void zone_page_state_add(long x, struct zone *zone,
61157 enum zone_stat_item item)
61158 {
61159 - atomic_long_add(x, &zone->vm_stat[item]);
61160 - atomic_long_add(x, &vm_stat[item]);
61161 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61162 + atomic_long_add_unchecked(x, &vm_stat[item]);
61163 }
61164
61165 static inline unsigned long global_page_state(enum zone_stat_item item)
61166 {
61167 - long x = atomic_long_read(&vm_stat[item]);
61168 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61169 #ifdef CONFIG_SMP
61170 if (x < 0)
61171 x = 0;
61172 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
61173 static inline unsigned long zone_page_state(struct zone *zone,
61174 enum zone_stat_item item)
61175 {
61176 - long x = atomic_long_read(&zone->vm_stat[item]);
61177 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61178 #ifdef CONFIG_SMP
61179 if (x < 0)
61180 x = 0;
61181 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61182 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61183 enum zone_stat_item item)
61184 {
61185 - long x = atomic_long_read(&zone->vm_stat[item]);
61186 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61187
61188 #ifdef CONFIG_SMP
61189 int cpu;
61190 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61191
61192 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61193 {
61194 - atomic_long_inc(&zone->vm_stat[item]);
61195 - atomic_long_inc(&vm_stat[item]);
61196 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61197 + atomic_long_inc_unchecked(&vm_stat[item]);
61198 }
61199
61200 static inline void __inc_zone_page_state(struct page *page,
61201 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61202
61203 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61204 {
61205 - atomic_long_dec(&zone->vm_stat[item]);
61206 - atomic_long_dec(&vm_stat[item]);
61207 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61208 + atomic_long_dec_unchecked(&vm_stat[item]);
61209 }
61210
61211 static inline void __dec_zone_page_state(struct page *page,
61212 diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
61213 --- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61214 +++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61215 @@ -34,7 +34,7 @@ struct v4l2_device;
61216 #define V4L2_FL_UNREGISTERED (0)
61217
61218 struct v4l2_file_operations {
61219 - struct module *owner;
61220 + struct module * const owner;
61221 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61222 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61223 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61224 diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
61225 --- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61226 +++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61227 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61228 this function returns 0. If the name ends with a digit (e.g. cx18),
61229 then the name will be set to cx18-0 since cx180 looks really odd. */
61230 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61231 - atomic_t *instance);
61232 + atomic_unchecked_t *instance);
61233
61234 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61235 Since the parent disappears this ensures that v4l2_dev doesn't have an
61236 diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
61237 --- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61238 +++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61239 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61240 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61241 u8 dir, flow_resolve_t resolver);
61242 extern void flow_cache_flush(void);
61243 -extern atomic_t flow_cache_genid;
61244 +extern atomic_unchecked_t flow_cache_genid;
61245
61246 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61247 {
61248 diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61249 --- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61250 +++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61251 @@ -24,7 +24,7 @@ struct inet_peer
61252 __u32 dtime; /* the time of last use of not
61253 * referenced entries */
61254 atomic_t refcnt;
61255 - atomic_t rid; /* Frag reception counter */
61256 + atomic_unchecked_t rid; /* Frag reception counter */
61257 __u32 tcp_ts;
61258 unsigned long tcp_ts_stamp;
61259 };
61260 diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61261 --- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61262 +++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61263 @@ -365,7 +365,7 @@ struct ip_vs_conn {
61264 struct ip_vs_conn *control; /* Master control connection */
61265 atomic_t n_control; /* Number of controlled ones */
61266 struct ip_vs_dest *dest; /* real server */
61267 - atomic_t in_pkts; /* incoming packet counter */
61268 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61269
61270 /* packet transmitter for different forwarding methods. If it
61271 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61272 @@ -466,7 +466,7 @@ struct ip_vs_dest {
61273 union nf_inet_addr addr; /* IP address of the server */
61274 __be16 port; /* port number of the server */
61275 volatile unsigned flags; /* dest status flags */
61276 - atomic_t conn_flags; /* flags to copy to conn */
61277 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61278 atomic_t weight; /* server weight */
61279
61280 atomic_t refcnt; /* reference counter */
61281 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61282 --- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61283 +++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61284 @@ -51,7 +51,7 @@ typedef struct {
61285 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61286 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61287 struct ircomm_info *);
61288 -} call_t;
61289 +} __no_const call_t;
61290
61291 struct ircomm_cb {
61292 irda_queue_t queue;
61293 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61294 --- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61295 +++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61296 @@ -35,6 +35,7 @@
61297 #include <linux/termios.h>
61298 #include <linux/timer.h>
61299 #include <linux/tty.h> /* struct tty_struct */
61300 +#include <asm/local.h>
61301
61302 #include <net/irda/irias_object.h>
61303 #include <net/irda/ircomm_core.h>
61304 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61305 unsigned short close_delay;
61306 unsigned short closing_wait; /* time to wait before closing */
61307
61308 - int open_count;
61309 - int blocked_open; /* # of blocked opens */
61310 + local_t open_count;
61311 + local_t blocked_open; /* # of blocked opens */
61312
61313 /* Protect concurent access to :
61314 * o self->open_count
61315 diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61316 --- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61317 +++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61318 @@ -87,7 +87,7 @@ struct iucv_sock {
61319 struct iucv_sock_list {
61320 struct hlist_head head;
61321 rwlock_t lock;
61322 - atomic_t autobind_name;
61323 + atomic_unchecked_t autobind_name;
61324 };
61325
61326 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61327 diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61328 --- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61329 +++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61330 @@ -95,7 +95,7 @@ struct lapb_cb {
61331 struct sk_buff_head write_queue;
61332 struct sk_buff_head ack_queue;
61333 unsigned char window;
61334 - struct lapb_register_struct callbacks;
61335 + struct lapb_register_struct *callbacks;
61336
61337 /* FRMR control information */
61338 struct lapb_frame frmr_data;
61339 diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61340 --- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61341 +++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61342 @@ -125,12 +125,12 @@ struct neighbour
61343 struct neigh_ops
61344 {
61345 int family;
61346 - void (*solicit)(struct neighbour *, struct sk_buff*);
61347 - void (*error_report)(struct neighbour *, struct sk_buff*);
61348 - int (*output)(struct sk_buff*);
61349 - int (*connected_output)(struct sk_buff*);
61350 - int (*hh_output)(struct sk_buff*);
61351 - int (*queue_xmit)(struct sk_buff*);
61352 + void (* const solicit)(struct neighbour *, struct sk_buff*);
61353 + void (* const error_report)(struct neighbour *, struct sk_buff*);
61354 + int (* const output)(struct sk_buff*);
61355 + int (* const connected_output)(struct sk_buff*);
61356 + int (* const hh_output)(struct sk_buff*);
61357 + int (* const queue_xmit)(struct sk_buff*);
61358 };
61359
61360 struct pneigh_entry
61361 diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61362 --- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61363 +++ linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
61364 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61365 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61366 {
61367 if (mark)
61368 - skb_trim(skb, (unsigned char *) mark - skb->data);
61369 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61370 }
61371
61372 /**
61373 diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61374 --- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61375 +++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61376 @@ -54,7 +54,7 @@ struct netns_ipv4 {
61377 int current_rt_cache_rebuild_count;
61378
61379 struct timer_list rt_secret_timer;
61380 - atomic_t rt_genid;
61381 + atomic_unchecked_t rt_genid;
61382
61383 #ifdef CONFIG_IP_MROUTE
61384 struct sock *mroute_sk;
61385 diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61386 --- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61387 +++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61388 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61389
61390 #else /* SCTP_DEBUG */
61391
61392 -#define SCTP_DEBUG_PRINTK(whatever...)
61393 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61394 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61395 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61396 #define SCTP_ENABLE_DEBUG
61397 #define SCTP_DISABLE_DEBUG
61398 #define SCTP_ASSERT(expr, str, func)
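
Redefining the disabled SCTP_DEBUG_PRINTK macros as do {} while (0) keeps them real statements: an empty expansion leaves a bare `;` as the body of an `if` (which -Wempty-body style warnings flag) and silently accepts a forgotten trailing semicolon, while the do/while form behaves the same as the enabled version. A small sketch with an invented DEBUG_PRINTK name:

    #include <stdio.h>

    /* An empty expansion would leave a bare `;` as the body of the `if` below
     * and let a missing trailing semicolon slip through in plain statement
     * context; `do { } while (0)` stays one statement that still requires
     * its semicolon. */
    #define DEBUG_PRINTK(fmt, ...) do { } while (0)

    int main(void)
    {
            int verbose = 0;

            if (verbose)
                    DEBUG_PRINTK("verbose: %d\n", verbose);
            else
                    puts("quiet mode");
            return 0;
    }
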
61399 diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61400 --- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61401 +++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61402 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61403 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61404 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61405 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61406 - __be16 dport);
61407 + __be16 dport);
61408 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61409 __be16 sport, __be16 dport);
61410 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61411 - __be16 sport, __be16 dport);
61412 + __be16 sport, __be16 dport);
61413 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61414 - __be16 sport, __be16 dport);
61415 + __be16 sport, __be16 dport);
61416 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61417 - __be16 sport, __be16 dport);
61418 + __be16 sport, __be16 dport);
61419
61420 #endif /* _NET_SECURE_SEQ */
61421 diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61422 --- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61423 +++ linux-2.6.32.45/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
61424 @@ -272,7 +272,7 @@ struct sock {
61425 rwlock_t sk_callback_lock;
61426 int sk_err,
61427 sk_err_soft;
61428 - atomic_t sk_drops;
61429 + atomic_unchecked_t sk_drops;
61430 unsigned short sk_ack_backlog;
61431 unsigned short sk_max_ack_backlog;
61432 __u32 sk_priority;
61433 diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61434 --- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61435 +++ linux-2.6.32.45/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61436 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61437 struct tcp_seq_afinfo {
61438 char *name;
61439 sa_family_t family;
61440 + /* cannot be const */
61441 struct file_operations seq_fops;
61442 struct seq_operations seq_ops;
61443 };
61444 diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61445 --- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61446 +++ linux-2.6.32.45/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61447 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61448 char *name;
61449 sa_family_t family;
61450 struct udp_table *udp_table;
61451 + /* cannot be const */
61452 struct file_operations seq_fops;
61453 struct seq_operations seq_ops;
61454 };
61455 diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61456 --- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61457 +++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61458 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
61459 int backlog);
61460
61461 int (*destroy_listen)(struct iw_cm_id *cm_id);
61462 -};
61463 +} __no_const;
61464
61465 /**
61466 * iw_create_cm_id - Create an IW CM identifier.
61467 diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61468 --- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61469 +++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61470 @@ -156,9 +156,9 @@ struct scsi_device {
61471 unsigned int max_device_blocked; /* what device_blocked counts down from */
61472 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61473
61474 - atomic_t iorequest_cnt;
61475 - atomic_t iodone_cnt;
61476 - atomic_t ioerr_cnt;
61477 + atomic_unchecked_t iorequest_cnt;
61478 + atomic_unchecked_t iodone_cnt;
61479 + atomic_unchecked_t ioerr_cnt;
61480
61481 struct device sdev_gendev,
61482 sdev_dev;
61483 diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61484 --- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61485 +++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61486 @@ -663,9 +663,9 @@ struct fc_function_template {
61487 int (*bsg_timeout)(struct fc_bsg_job *);
61488
61489 /* allocation lengths for host-specific data */
61490 - u32 dd_fcrport_size;
61491 - u32 dd_fcvport_size;
61492 - u32 dd_bsg_size;
61493 + const u32 dd_fcrport_size;
61494 + const u32 dd_fcvport_size;
61495 + const u32 dd_bsg_size;
61496
61497 /*
61498 * The driver sets these to tell the transport class it
61499 @@ -675,39 +675,39 @@ struct fc_function_template {
61500 */
61501
61502 /* remote port fixed attributes */
61503 - unsigned long show_rport_maxframe_size:1;
61504 - unsigned long show_rport_supported_classes:1;
61505 - unsigned long show_rport_dev_loss_tmo:1;
61506 + const unsigned long show_rport_maxframe_size:1;
61507 + const unsigned long show_rport_supported_classes:1;
61508 + const unsigned long show_rport_dev_loss_tmo:1;
61509
61510 /*
61511 * target dynamic attributes
61512 * These should all be "1" if the driver uses the remote port
61513 * add/delete functions (so attributes reflect rport values).
61514 */
61515 - unsigned long show_starget_node_name:1;
61516 - unsigned long show_starget_port_name:1;
61517 - unsigned long show_starget_port_id:1;
61518 + const unsigned long show_starget_node_name:1;
61519 + const unsigned long show_starget_port_name:1;
61520 + const unsigned long show_starget_port_id:1;
61521
61522 /* host fixed attributes */
61523 - unsigned long show_host_node_name:1;
61524 - unsigned long show_host_port_name:1;
61525 - unsigned long show_host_permanent_port_name:1;
61526 - unsigned long show_host_supported_classes:1;
61527 - unsigned long show_host_supported_fc4s:1;
61528 - unsigned long show_host_supported_speeds:1;
61529 - unsigned long show_host_maxframe_size:1;
61530 - unsigned long show_host_serial_number:1;
61531 + const unsigned long show_host_node_name:1;
61532 + const unsigned long show_host_port_name:1;
61533 + const unsigned long show_host_permanent_port_name:1;
61534 + const unsigned long show_host_supported_classes:1;
61535 + const unsigned long show_host_supported_fc4s:1;
61536 + const unsigned long show_host_supported_speeds:1;
61537 + const unsigned long show_host_maxframe_size:1;
61538 + const unsigned long show_host_serial_number:1;
61539 /* host dynamic attributes */
61540 - unsigned long show_host_port_id:1;
61541 - unsigned long show_host_port_type:1;
61542 - unsigned long show_host_port_state:1;
61543 - unsigned long show_host_active_fc4s:1;
61544 - unsigned long show_host_speed:1;
61545 - unsigned long show_host_fabric_name:1;
61546 - unsigned long show_host_symbolic_name:1;
61547 - unsigned long show_host_system_hostname:1;
61548 + const unsigned long show_host_port_id:1;
61549 + const unsigned long show_host_port_type:1;
61550 + const unsigned long show_host_port_state:1;
61551 + const unsigned long show_host_active_fc4s:1;
61552 + const unsigned long show_host_speed:1;
61553 + const unsigned long show_host_fabric_name:1;
61554 + const unsigned long show_host_symbolic_name:1;
61555 + const unsigned long show_host_system_hostname:1;
61556
61557 - unsigned long disable_target_scan:1;
61558 + const unsigned long disable_target_scan:1;
61559 };
61560
61561
61562 diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61563 --- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61564 +++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61565 @@ -419,15 +419,15 @@
61566 struct snd_ac97;
61567
61568 struct snd_ac97_build_ops {
61569 - int (*build_3d) (struct snd_ac97 *ac97);
61570 - int (*build_specific) (struct snd_ac97 *ac97);
61571 - int (*build_spdif) (struct snd_ac97 *ac97);
61572 - int (*build_post_spdif) (struct snd_ac97 *ac97);
61573 + int (* const build_3d) (struct snd_ac97 *ac97);
61574 + int (* const build_specific) (struct snd_ac97 *ac97);
61575 + int (* const build_spdif) (struct snd_ac97 *ac97);
61576 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
61577 #ifdef CONFIG_PM
61578 - void (*suspend) (struct snd_ac97 *ac97);
61579 - void (*resume) (struct snd_ac97 *ac97);
61580 + void (* const suspend) (struct snd_ac97 *ac97);
61581 + void (* const resume) (struct snd_ac97 *ac97);
61582 #endif
61583 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61584 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61585 };
61586
61587 struct snd_ac97_bus_ops {
61588 @@ -477,7 +477,7 @@ struct snd_ac97_template {
61589
61590 struct snd_ac97 {
61591 /* -- lowlevel (hardware) driver specific -- */
61592 - struct snd_ac97_build_ops * build_ops;
61593 + const struct snd_ac97_build_ops * build_ops;
61594 void *private_data;
61595 void (*private_free) (struct snd_ac97 *ac97);
61596 /* --- */
61597 diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61598 --- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61599 +++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61600 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61601 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61602 unsigned char val);
61603 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61604 -};
61605 +} __no_const;
61606
61607 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61608
61609 diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61610 --- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61611 +++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61612 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61613 struct snd_hwdep_dsp_status *status);
61614 int (*dsp_load)(struct snd_hwdep *hw,
61615 struct snd_hwdep_dsp_image *image);
61616 -};
61617 +} __no_const;
61618
61619 struct snd_hwdep {
61620 struct snd_card *card;
61621 diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61622 --- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61623 +++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61624 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61625 struct snd_info_buffer *buffer);
61626 void (*write)(struct snd_info_entry *entry,
61627 struct snd_info_buffer *buffer);
61628 -};
61629 +} __no_const;
61630
61631 struct snd_info_entry_ops {
61632 int (*open)(struct snd_info_entry *entry,
61633 diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61634 --- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61635 +++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61636 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61637 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61638 int (*csp_stop) (struct snd_sb_csp * p);
61639 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61640 -};
61641 +} __no_const;
61642
61643 /*
61644 * CSP private data
61645 diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61646 --- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61647 +++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61648 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61649 spinlock_t reg_lock;
61650 spinlock_t voice_lock;
61651 wait_queue_head_t interrupt_sleep;
61652 - atomic_t interrupt_sleep_count;
61653 + atomic_unchecked_t interrupt_sleep_count;
61654 struct snd_info_entry *proc_entry;
61655 const struct firmware *dsp_microcode;
61656 const struct firmware *controller_microcode;
61657 diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61658 --- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61659 +++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61660 @@ -34,7 +34,7 @@
61661 */
61662 TRACE_EVENT(irq_handler_entry,
61663
61664 - TP_PROTO(int irq, struct irqaction *action),
61665 + TP_PROTO(int irq, const struct irqaction *action),
61666
61667 TP_ARGS(irq, action),
61668
61669 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61670 */
61671 TRACE_EVENT(irq_handler_exit,
61672
61673 - TP_PROTO(int irq, struct irqaction *action, int ret),
61674 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61675
61676 TP_ARGS(irq, action, ret),
61677
61678 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61679 */
61680 TRACE_EVENT(softirq_entry,
61681
61682 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61683 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61684
61685 TP_ARGS(h, vec),
61686
61687 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61688 */
61689 TRACE_EVENT(softirq_exit,
61690
61691 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61692 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61693
61694 TP_ARGS(h, vec),
61695
61696 diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61697 --- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61698 +++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61699 @@ -177,6 +177,7 @@ struct uvesafb_par {
61700 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61701 u8 pmi_setpal; /* PMI for palette changes */
61702 u16 *pmi_base; /* protected mode interface location */
61703 + u8 *pmi_code; /* protected mode code location */
61704 void *pmi_start;
61705 void *pmi_pal;
61706 u8 *vbe_state_orig; /*
61707 diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61708 --- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61709 +++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61710 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61711
61712 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61713 {
61714 - int err = sys_mount(name, "/root", fs, flags, data);
61715 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61716 if (err)
61717 return err;
61718
61719 - sys_chdir("/root");
61720 + sys_chdir((__force const char __user *)"/root");
61721 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61722 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61723 current->fs->pwd.mnt->mnt_sb->s_type->name,
61724 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61725 va_start(args, fmt);
61726 vsprintf(buf, fmt, args);
61727 va_end(args);
61728 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61729 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61730 if (fd >= 0) {
61731 sys_ioctl(fd, FDEJECT, 0);
61732 sys_close(fd);
61733 }
61734 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61735 - fd = sys_open("/dev/console", O_RDWR, 0);
61736 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61737 if (fd >= 0) {
61738 sys_ioctl(fd, TCGETS, (long)&termios);
61739 termios.c_lflag &= ~ICANON;
61740 sys_ioctl(fd, TCSETSF, (long)&termios);
61741 - sys_read(fd, &c, 1);
61742 + sys_read(fd, (char __user *)&c, 1);
61743 termios.c_lflag |= ICANON;
61744 sys_ioctl(fd, TCSETSF, (long)&termios);
61745 sys_close(fd);
61746 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61747 mount_root();
61748 out:
61749 devtmpfs_mount("dev");
61750 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61751 - sys_chroot(".");
61752 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61753 + sys_chroot((__force char __user *)".");
61754 }
61755 diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61756 --- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61757 +++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61758 @@ -15,15 +15,15 @@ extern int root_mountflags;
61759
61760 static inline int create_dev(char *name, dev_t dev)
61761 {
61762 - sys_unlink(name);
61763 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61764 + sys_unlink((__force char __user *)name);
61765 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61766 }
61767
61768 #if BITS_PER_LONG == 32
61769 static inline u32 bstat(char *name)
61770 {
61771 struct stat64 stat;
61772 - if (sys_stat64(name, &stat) != 0)
61773 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61774 return 0;
61775 if (!S_ISBLK(stat.st_mode))
61776 return 0;
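
The (__force ... __user *) casts sprinkled through the early-init code above exist for sparse: the sys_* entry points are declared to take __user pointers, and these boot-time callers pass kernel strings, so the casts document the intent and silence the address-space checker. A self-contained sketch of roughly how the annotations work (mirroring include/linux/compiler.h; copy_in is an invented helper):

    #include <string.h>

    /* Under sparse the annotations become address-space attributes,
     * otherwise they vanish. */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* A helper that, like a syscall, is declared to take a user-space pointer. */
    static void copy_in(char __user *dst, const char *src, size_t n)
    {
            /* the real kernel would use copy_to_user(); memcpy keeps the
             * sketch self-contained */
            memcpy((__force char *)dst, src, n);
    }

    int main(void)
    {
            char buf[16];

            /* passing an ordinary pointer where a __user one is expected needs
             * the explicit __force opt-out, as in the hunks above */
            copy_in((__force char __user *)buf, "init", 5);
            return 0;
    }
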
61777 diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61778 --- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61779 +++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61780 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61781 sys_close(old_fd);sys_close(root_fd);
61782 sys_close(0);sys_close(1);sys_close(2);
61783 sys_setsid();
61784 - (void) sys_open("/dev/console",O_RDWR,0);
61785 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61786 (void) sys_dup(0);
61787 (void) sys_dup(0);
61788 return kernel_execve(shell, argv, envp_init);
61789 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61790 create_dev("/dev/root.old", Root_RAM0);
61791 /* mount initrd on rootfs' /root */
61792 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61793 - sys_mkdir("/old", 0700);
61794 - root_fd = sys_open("/", 0, 0);
61795 - old_fd = sys_open("/old", 0, 0);
61796 + sys_mkdir((__force const char __user *)"/old", 0700);
61797 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
61798 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61799 /* move initrd over / and chdir/chroot in initrd root */
61800 - sys_chdir("/root");
61801 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61802 - sys_chroot(".");
61803 + sys_chdir((__force const char __user *)"/root");
61804 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61805 + sys_chroot((__force const char __user *)".");
61806
61807 /*
61808 * In case that a resume from disk is carried out by linuxrc or one of
61809 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61810
61811 /* move initrd to rootfs' /old */
61812 sys_fchdir(old_fd);
61813 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61814 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61815 /* switch root and cwd back to / of rootfs */
61816 sys_fchdir(root_fd);
61817 - sys_chroot(".");
61818 + sys_chroot((__force const char __user *)".");
61819 sys_close(old_fd);
61820 sys_close(root_fd);
61821
61822 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61823 - sys_chdir("/old");
61824 + sys_chdir((__force const char __user *)"/old");
61825 return;
61826 }
61827
61828 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61829 mount_root();
61830
61831 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61832 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61833 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61834 if (!error)
61835 printk("okay\n");
61836 else {
61837 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61838 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61839 if (error == -ENOENT)
61840 printk("/initrd does not exist. Ignored.\n");
61841 else
61842 printk("failed\n");
61843 printk(KERN_NOTICE "Unmounting old root\n");
61844 - sys_umount("/old", MNT_DETACH);
61845 + sys_umount((__force char __user *)"/old", MNT_DETACH);
61846 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61847 if (fd < 0) {
61848 error = fd;
61849 @@ -119,11 +119,11 @@ int __init initrd_load(void)
61850 * mounted in the normal path.
61851 */
61852 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61853 - sys_unlink("/initrd.image");
61854 + sys_unlink((__force const char __user *)"/initrd.image");
61855 handle_initrd();
61856 return 1;
61857 }
61858 }
61859 - sys_unlink("/initrd.image");
61860 + sys_unlink((__force const char __user *)"/initrd.image");
61861 return 0;
61862 }
61863 diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61864 --- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61865 +++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61866 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61867 partitioned ? "_d" : "", minor,
61868 md_setup_args[ent].device_names);
61869
61870 - fd = sys_open(name, 0, 0);
61871 + fd = sys_open((__force char __user *)name, 0, 0);
61872 if (fd < 0) {
61873 printk(KERN_ERR "md: open failed - cannot start "
61874 "array %s\n", name);
61875 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61876 * array without it
61877 */
61878 sys_close(fd);
61879 - fd = sys_open(name, 0, 0);
61880 + fd = sys_open((__force char __user *)name, 0, 0);
61881 sys_ioctl(fd, BLKRRPART, 0);
61882 }
61883 sys_close(fd);
61884 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61885
61886 wait_for_device_probe();
61887
61888 - fd = sys_open("/dev/md0", 0, 0);
61889 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61890 if (fd >= 0) {
61891 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61892 sys_close(fd);
61893 diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61894 --- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61895 +++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61896 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61897 }
61898 }
61899
61900 -static long __init do_utime(char __user *filename, time_t mtime)
61901 +static long __init do_utime(__force char __user *filename, time_t mtime)
61902 {
61903 struct timespec t[2];
61904
61905 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61906 struct dir_entry *de, *tmp;
61907 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61908 list_del(&de->list);
61909 - do_utime(de->name, de->mtime);
61910 + do_utime((__force char __user *)de->name, de->mtime);
61911 kfree(de->name);
61912 kfree(de);
61913 }
61914 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61915 if (nlink >= 2) {
61916 char *old = find_link(major, minor, ino, mode, collected);
61917 if (old)
61918 - return (sys_link(old, collected) < 0) ? -1 : 1;
61919 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61920 }
61921 return 0;
61922 }
61923 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61924 {
61925 struct stat st;
61926
61927 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61928 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61929 if (S_ISDIR(st.st_mode))
61930 - sys_rmdir(path);
61931 + sys_rmdir((__force char __user *)path);
61932 else
61933 - sys_unlink(path);
61934 + sys_unlink((__force char __user *)path);
61935 }
61936 }
61937
61938 @@ -305,7 +305,7 @@ static int __init do_name(void)
61939 int openflags = O_WRONLY|O_CREAT;
61940 if (ml != 1)
61941 openflags |= O_TRUNC;
61942 - wfd = sys_open(collected, openflags, mode);
61943 + wfd = sys_open((__force char __user *)collected, openflags, mode);
61944
61945 if (wfd >= 0) {
61946 sys_fchown(wfd, uid, gid);
61947 @@ -317,17 +317,17 @@ static int __init do_name(void)
61948 }
61949 }
61950 } else if (S_ISDIR(mode)) {
61951 - sys_mkdir(collected, mode);
61952 - sys_chown(collected, uid, gid);
61953 - sys_chmod(collected, mode);
61954 + sys_mkdir((__force char __user *)collected, mode);
61955 + sys_chown((__force char __user *)collected, uid, gid);
61956 + sys_chmod((__force char __user *)collected, mode);
61957 dir_add(collected, mtime);
61958 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61959 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61960 if (maybe_link() == 0) {
61961 - sys_mknod(collected, mode, rdev);
61962 - sys_chown(collected, uid, gid);
61963 - sys_chmod(collected, mode);
61964 - do_utime(collected, mtime);
61965 + sys_mknod((__force char __user *)collected, mode, rdev);
61966 + sys_chown((__force char __user *)collected, uid, gid);
61967 + sys_chmod((__force char __user *)collected, mode);
61968 + do_utime((__force char __user *)collected, mtime);
61969 }
61970 }
61971 return 0;
61972 @@ -336,15 +336,15 @@ static int __init do_name(void)
61973 static int __init do_copy(void)
61974 {
61975 if (count >= body_len) {
61976 - sys_write(wfd, victim, body_len);
61977 + sys_write(wfd, (__force char __user *)victim, body_len);
61978 sys_close(wfd);
61979 - do_utime(vcollected, mtime);
61980 + do_utime((__force char __user *)vcollected, mtime);
61981 kfree(vcollected);
61982 eat(body_len);
61983 state = SkipIt;
61984 return 0;
61985 } else {
61986 - sys_write(wfd, victim, count);
61987 + sys_write(wfd, (__force char __user *)victim, count);
61988 body_len -= count;
61989 eat(count);
61990 return 1;
61991 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
61992 {
61993 collected[N_ALIGN(name_len) + body_len] = '\0';
61994 clean_path(collected, 0);
61995 - sys_symlink(collected + N_ALIGN(name_len), collected);
61996 - sys_lchown(collected, uid, gid);
61997 - do_utime(collected, mtime);
61998 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61999 + sys_lchown((__force char __user *)collected, uid, gid);
62000 + do_utime((__force char __user *)collected, mtime);
62001 state = SkipIt;
62002 next_state = Reset;
62003 return 0;
62004 diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
62005 --- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
62006 +++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
62007 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
62008
62009 config COMPAT_BRK
62010 bool "Disable heap randomization"
62011 - default y
62012 + default n
62013 help
62014 Randomizing heap placement makes heap exploits harder, but it
62015 also breaks ancient binaries (including anything libc5 based).
62016 diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
62017 --- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
62018 +++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
62019 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
62020 #ifdef CONFIG_TC
62021 extern void tc_init(void);
62022 #endif
62023 +extern void grsecurity_init(void);
62024
62025 enum system_states system_state __read_mostly;
62026 EXPORT_SYMBOL(system_state);
62027 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
62028
62029 __setup("reset_devices", set_reset_devices);
62030
62031 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62032 +extern char pax_enter_kernel_user[];
62033 +extern char pax_exit_kernel_user[];
62034 +extern pgdval_t clone_pgd_mask;
62035 +#endif
62036 +
62037 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62038 +static int __init setup_pax_nouderef(char *str)
62039 +{
62040 +#ifdef CONFIG_X86_32
62041 + unsigned int cpu;
62042 + struct desc_struct *gdt;
62043 +
62044 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62045 + gdt = get_cpu_gdt_table(cpu);
62046 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62047 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62048 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62049 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62050 + }
62051 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62052 +#else
62053 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62054 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62055 + clone_pgd_mask = ~(pgdval_t)0UL;
62056 +#endif
62057 +
62058 + return 0;
62059 +}
62060 +early_param("pax_nouderef", setup_pax_nouderef);
62061 +#endif
62062 +
62063 +#ifdef CONFIG_PAX_SOFTMODE
62064 +int pax_softmode;
62065 +
62066 +static int __init setup_pax_softmode(char *str)
62067 +{
62068 + get_option(&str, &pax_softmode);
62069 + return 1;
62070 +}
62071 +__setup("pax_softmode=", setup_pax_softmode);
62072 +#endif
62073 +
62074 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62075 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62076 static const char *panic_later, *panic_param;
62077 @@ -705,52 +749,53 @@ int initcall_debug;
62078 core_param(initcall_debug, initcall_debug, bool, 0644);
62079
62080 static char msgbuf[64];
62081 -static struct boot_trace_call call;
62082 -static struct boot_trace_ret ret;
62083 +static struct boot_trace_call trace_call;
62084 +static struct boot_trace_ret trace_ret;
62085
62086 int do_one_initcall(initcall_t fn)
62087 {
62088 int count = preempt_count();
62089 ktime_t calltime, delta, rettime;
62090 + const char *msg1 = "", *msg2 = "";
62091
62092 if (initcall_debug) {
62093 - call.caller = task_pid_nr(current);
62094 - printk("calling %pF @ %i\n", fn, call.caller);
62095 + trace_call.caller = task_pid_nr(current);
62096 + printk("calling %pF @ %i\n", fn, trace_call.caller);
62097 calltime = ktime_get();
62098 - trace_boot_call(&call, fn);
62099 + trace_boot_call(&trace_call, fn);
62100 enable_boot_trace();
62101 }
62102
62103 - ret.result = fn();
62104 + trace_ret.result = fn();
62105
62106 if (initcall_debug) {
62107 disable_boot_trace();
62108 rettime = ktime_get();
62109 delta = ktime_sub(rettime, calltime);
62110 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62111 - trace_boot_ret(&ret, fn);
62112 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62113 + trace_boot_ret(&trace_ret, fn);
62114 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62115 - ret.result, ret.duration);
62116 + trace_ret.result, trace_ret.duration);
62117 }
62118
62119 msgbuf[0] = 0;
62120
62121 - if (ret.result && ret.result != -ENODEV && initcall_debug)
62122 - sprintf(msgbuf, "error code %d ", ret.result);
62123 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62124 + sprintf(msgbuf, "error code %d ", trace_ret.result);
62125
62126 if (preempt_count() != count) {
62127 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62128 + msg1 = " preemption imbalance";
62129 preempt_count() = count;
62130 }
62131 if (irqs_disabled()) {
62132 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62133 + msg2 = " disabled interrupts";
62134 local_irq_enable();
62135 }
62136 - if (msgbuf[0]) {
62137 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62138 + if (msgbuf[0] || *msg1 || *msg2) {
62139 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62140 }
62141
62142 - return ret.result;
62143 + return trace_ret.result;
62144 }
62145
62146
62147 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62148 if (!ramdisk_execute_command)
62149 ramdisk_execute_command = "/init";
62150
62151 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62152 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62153 ramdisk_execute_command = NULL;
62154 prepare_namespace();
62155 }
62156
62157 + grsecurity_init();
62158 +
62159 /*
62160 * Ok, we have completed the initial bootup, and
62161 * we're essentially up and running. Get rid of the
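
The init/main.c hunk registers boot-command-line handlers: early_param()/__setup() arrange for the callback to run when the matching option appears on the kernel command line, and setup_pax_softmode() parses its argument with get_option(). A rough userspace analogue of that parsing step (illustrative only, not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    /* The kernel hands the text after "pax_softmode=" to the registered
     * callback and get_option() extracts an integer; returning 1 follows the
     * __setup() convention for a handled option. */
    static int pax_softmode;

    static int setup_softmode(const char *str)
    {
            pax_softmode = (int)strtol(str, NULL, 0);
            return 1;
    }

    int main(void)
    {
            setup_softmode("1");    /* as if booted with pax_softmode=1 */
            printf("pax_softmode=%d\n", pax_softmode);
            return 0;
    }
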
62162 diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
62163 --- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62164 +++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62165 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62166 {
62167 int err;
62168
62169 - err = sys_mkdir("/dev", 0755);
62170 + err = sys_mkdir((const char __user *)"/dev", 0755);
62171 if (err < 0)
62172 goto out;
62173
62174 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62175 if (err < 0)
62176 goto out;
62177
62178 - err = sys_mkdir("/root", 0700);
62179 + err = sys_mkdir((const char __user *)"/root", 0700);
62180 if (err < 0)
62181 goto out;
62182
62183 diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
62184 --- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62185 +++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62186 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62187 mq_bytes = (mq_msg_tblsz +
62188 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62189
62190 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62191 spin_lock(&mq_lock);
62192 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62193 u->mq_bytes + mq_bytes >
62194 diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
62195 --- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62196 +++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62197 @@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62198 return security_msg_queue_associate(msq, msgflg);
62199 }
62200
62201 +static struct ipc_ops msg_ops = {
62202 + .getnew = newque,
62203 + .associate = msg_security,
62204 + .more_checks = NULL
62205 +};
62206 +
62207 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62208 {
62209 struct ipc_namespace *ns;
62210 - struct ipc_ops msg_ops;
62211 struct ipc_params msg_params;
62212
62213 ns = current->nsproxy->ipc_ns;
62214
62215 - msg_ops.getnew = newque;
62216 - msg_ops.associate = msg_security;
62217 - msg_ops.more_checks = NULL;
62218 -
62219 msg_params.key = key;
62220 msg_params.flg = msgflg;
62221
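
The ipc/msg.c hunk (and the matching sem.c and shm.c hunks below) replaces an on-stack struct ipc_ops filled field-by-field on every syscall with one file-scope static initialized once with designated initializers. A short sketch of that shape with simplified stand-in types (only the names mirror the hunk):

    #include <stdio.h>

    struct ipc_params { int key; int flg; };

    struct ipc_ops {
            int (*getnew)(const struct ipc_params *params);
            int (*associate)(int flg);
            int (*more_checks)(const struct ipc_params *params);
    };

    static int newque(const struct ipc_params *params)
    {
            return params->key + 1;         /* pretend id allocation */
    }

    static int msg_security(int flg)
    {
            (void)flg;
            return 0;                       /* 0 = allowed */
    }

    /* One static, compile-time-initialized ops table instead of an on-stack
     * copy built on every call. */
    static struct ipc_ops msg_ops = {
            .getnew      = newque,
            .associate   = msg_security,
            .more_checks = NULL,
    };

    int main(void)
    {
            struct ipc_params params = { .key = 41, .flg = 0 };

            if (msg_ops.associate(params.flg) == 0)
                    printf("new id: %d\n", msg_ops.getnew(&params));
            return 0;
    }
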
62222 diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62223 --- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62224 +++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62225 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62226 return 0;
62227 }
62228
62229 +static struct ipc_ops sem_ops = {
62230 + .getnew = newary,
62231 + .associate = sem_security,
62232 + .more_checks = sem_more_checks
62233 +};
62234 +
62235 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62236 {
62237 struct ipc_namespace *ns;
62238 - struct ipc_ops sem_ops;
62239 struct ipc_params sem_params;
62240
62241 ns = current->nsproxy->ipc_ns;
62242 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62243 if (nsems < 0 || nsems > ns->sc_semmsl)
62244 return -EINVAL;
62245
62246 - sem_ops.getnew = newary;
62247 - sem_ops.associate = sem_security;
62248 - sem_ops.more_checks = sem_more_checks;
62249 -
62250 sem_params.key = key;
62251 sem_params.flg = semflg;
62252 sem_params.u.nsems = nsems;
62253 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62254 ushort* sem_io = fast_sem_io;
62255 int nsems;
62256
62257 + pax_track_stack();
62258 +
62259 sma = sem_lock_check(ns, semid);
62260 if (IS_ERR(sma))
62261 return PTR_ERR(sma);
62262 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62263 unsigned long jiffies_left = 0;
62264 struct ipc_namespace *ns;
62265
62266 + pax_track_stack();
62267 +
62268 ns = current->nsproxy->ipc_ns;
62269
62270 if (nsops < 1 || semid < 0)
62271 diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62272 --- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62273 +++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62274 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62275 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62276 #endif
62277
62278 +#ifdef CONFIG_GRKERNSEC
62279 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62280 + const time_t shm_createtime, const uid_t cuid,
62281 + const int shmid);
62282 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62283 + const time_t shm_createtime);
62284 +#endif
62285 +
62286 void shm_init_ns(struct ipc_namespace *ns)
62287 {
62288 ns->shm_ctlmax = SHMMAX;
62289 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62290 shp->shm_lprid = 0;
62291 shp->shm_atim = shp->shm_dtim = 0;
62292 shp->shm_ctim = get_seconds();
62293 +#ifdef CONFIG_GRKERNSEC
62294 + {
62295 + struct timespec timeval;
62296 + do_posix_clock_monotonic_gettime(&timeval);
62297 +
62298 + shp->shm_createtime = timeval.tv_sec;
62299 + }
62300 +#endif
62301 shp->shm_segsz = size;
62302 shp->shm_nattch = 0;
62303 shp->shm_file = file;
62304 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62305 return 0;
62306 }
62307
62308 +static struct ipc_ops shm_ops = {
62309 + .getnew = newseg,
62310 + .associate = shm_security,
62311 + .more_checks = shm_more_checks
62312 +};
62313 +
62314 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62315 {
62316 struct ipc_namespace *ns;
62317 - struct ipc_ops shm_ops;
62318 struct ipc_params shm_params;
62319
62320 ns = current->nsproxy->ipc_ns;
62321
62322 - shm_ops.getnew = newseg;
62323 - shm_ops.associate = shm_security;
62324 - shm_ops.more_checks = shm_more_checks;
62325 -
62326 shm_params.key = key;
62327 shm_params.flg = shmflg;
62328 shm_params.u.size = size;
62329 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62330 if (err)
62331 goto out_unlock;
62332
62333 +#ifdef CONFIG_GRKERNSEC
62334 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62335 + shp->shm_perm.cuid, shmid) ||
62336 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62337 + err = -EACCES;
62338 + goto out_unlock;
62339 + }
62340 +#endif
62341 +
62342 path.dentry = dget(shp->shm_file->f_path.dentry);
62343 path.mnt = shp->shm_file->f_path.mnt;
62344 shp->shm_nattch++;
62345 +#ifdef CONFIG_GRKERNSEC
62346 + shp->shm_lapid = current->pid;
62347 +#endif
62348 size = i_size_read(path.dentry->d_inode);
62349 shm_unlock(shp);
62350
62351 diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62352 --- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62353 +++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62354 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62355 */
62356 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62357 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62358 - file->f_op->write(file, (char *)&ac,
62359 + file->f_op->write(file, (__force char __user *)&ac,
62360 sizeof(acct_t), &file->f_pos);
62361 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62362 set_fs(fs);
62363 diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62364 --- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62365 +++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62366 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62367 3) suppressed due to audit_rate_limit
62368 4) suppressed due to audit_backlog_limit
62369 */
62370 -static atomic_t audit_lost = ATOMIC_INIT(0);
62371 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62372
62373 /* The netlink socket. */
62374 static struct sock *audit_sock;
62375 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62376 unsigned long now;
62377 int print;
62378
62379 - atomic_inc(&audit_lost);
62380 + atomic_inc_unchecked(&audit_lost);
62381
62382 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62383
62384 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62385 printk(KERN_WARNING
62386 "audit: audit_lost=%d audit_rate_limit=%d "
62387 "audit_backlog_limit=%d\n",
62388 - atomic_read(&audit_lost),
62389 + atomic_read_unchecked(&audit_lost),
62390 audit_rate_limit,
62391 audit_backlog_limit);
62392 audit_panic(message);
62393 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62394 status_set.pid = audit_pid;
62395 status_set.rate_limit = audit_rate_limit;
62396 status_set.backlog_limit = audit_backlog_limit;
62397 - status_set.lost = atomic_read(&audit_lost);
62398 + status_set.lost = atomic_read_unchecked(&audit_lost);
62399 status_set.backlog = skb_queue_len(&audit_skb_queue);
62400 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62401 &status_set, sizeof(status_set));
62402 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62403 spin_unlock_irq(&tsk->sighand->siglock);
62404 }
62405 read_unlock(&tasklist_lock);
62406 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62407 - &s, sizeof(s));
62408 +
62409 + if (!err)
62410 + audit_send_reply(NETLINK_CB(skb).pid, seq,
62411 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62412 break;
62413 }
62414 case AUDIT_TTY_SET: {
62415 diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62416 --- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62417 +++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62418 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62419 }
62420
62421 /* global counter which is incremented every time something logs in */
62422 -static atomic_t session_id = ATOMIC_INIT(0);
62423 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62424
62425 /**
62426 * audit_set_loginuid - set a task's audit_context loginuid
62427 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62428 */
62429 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62430 {
62431 - unsigned int sessionid = atomic_inc_return(&session_id);
62432 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62433 struct audit_context *context = task->audit_context;
62434
62435 if (context && context->in_syscall) {
62436 diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62437 --- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62438 +++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62439 @@ -305,10 +305,26 @@ int capable(int cap)
62440 BUG();
62441 }
62442
62443 - if (security_capable(cap) == 0) {
62444 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62445 current->flags |= PF_SUPERPRIV;
62446 return 1;
62447 }
62448 return 0;
62449 }
62450 +
62451 +int capable_nolog(int cap)
62452 +{
62453 + if (unlikely(!cap_valid(cap))) {
62454 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62455 + BUG();
62456 + }
62457 +
62458 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62459 + current->flags |= PF_SUPERPRIV;
62460 + return 1;
62461 + }
62462 + return 0;
62463 +}
62464 +
62465 EXPORT_SYMBOL(capable);
62466 +EXPORT_SYMBOL(capable_nolog);
62467 diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62468 --- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62469 +++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62470 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62471 struct hlist_head *hhead;
62472 struct cg_cgroup_link *link;
62473
62474 + pax_track_stack();
62475 +
62476 /* First see if we already have a cgroup group that matches
62477 * the desired set */
62478 read_lock(&css_set_lock);
62479 diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62480 --- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62481 +++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62482 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62483 struct proc_dir_entry *entry;
62484
62485 /* create the current config file */
62486 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62487 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62488 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62489 + &ikconfig_file_ops);
62490 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62491 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62492 + &ikconfig_file_ops);
62493 +#endif
62494 +#else
62495 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62496 &ikconfig_file_ops);
62497 +#endif
62498 +
62499 if (!entry)
62500 return -ENOMEM;
62501
62502 diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62503 --- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62504 +++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62505 @@ -19,7 +19,7 @@
62506 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62507 static DEFINE_MUTEX(cpu_add_remove_lock);
62508
62509 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62510 +static RAW_NOTIFIER_HEAD(cpu_chain);
62511
62512 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62513 * Should always be manipulated under cpu_add_remove_lock
62514 diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62515 --- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62516 +++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62517 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62518 */
62519 void __put_cred(struct cred *cred)
62520 {
62521 + pax_track_stack();
62522 +
62523 kdebug("__put_cred(%p{%d,%d})", cred,
62524 atomic_read(&cred->usage),
62525 read_cred_subscribers(cred));
62526 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62527 {
62528 struct cred *cred;
62529
62530 + pax_track_stack();
62531 +
62532 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62533 atomic_read(&tsk->cred->usage),
62534 read_cred_subscribers(tsk->cred));
62535 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62536 {
62537 const struct cred *cred;
62538
62539 + pax_track_stack();
62540 +
62541 rcu_read_lock();
62542
62543 do {
62544 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62545 {
62546 struct cred *new;
62547
62548 + pax_track_stack();
62549 +
62550 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62551 if (!new)
62552 return NULL;
62553 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62554 const struct cred *old;
62555 struct cred *new;
62556
62557 + pax_track_stack();
62558 +
62559 validate_process_creds();
62560
62561 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62562 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62563 struct thread_group_cred *tgcred = NULL;
62564 struct cred *new;
62565
62566 + pax_track_stack();
62567 +
62568 #ifdef CONFIG_KEYS
62569 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62570 if (!tgcred)
62571 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62572 struct cred *new;
62573 int ret;
62574
62575 + pax_track_stack();
62576 +
62577 mutex_init(&p->cred_guard_mutex);
62578
62579 if (
62580 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62581 struct task_struct *task = current;
62582 const struct cred *old = task->real_cred;
62583
62584 + pax_track_stack();
62585 +
62586 kdebug("commit_creds(%p{%d,%d})", new,
62587 atomic_read(&new->usage),
62588 read_cred_subscribers(new));
62589 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62590
62591 get_cred(new); /* we will require a ref for the subj creds too */
62592
62593 + gr_set_role_label(task, new->uid, new->gid);
62594 +
62595 /* dumpability changes */
62596 if (old->euid != new->euid ||
62597 old->egid != new->egid ||
62598 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62599 key_fsgid_changed(task);
62600
62601 /* do it
62602 - * - What if a process setreuid()'s and this brings the
62603 - * new uid over his NPROC rlimit? We can check this now
62604 - * cheaply with the new uid cache, so if it matters
62605 - * we should be checking for it. -DaveM
62606 + * RLIMIT_NPROC limits on user->processes have already been checked
62607 + * in set_user().
62608 */
62609 alter_cred_subscribers(new, 2);
62610 if (new->user != old->user)
62611 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62612 */
62613 void abort_creds(struct cred *new)
62614 {
62615 + pax_track_stack();
62616 +
62617 kdebug("abort_creds(%p{%d,%d})", new,
62618 atomic_read(&new->usage),
62619 read_cred_subscribers(new));
62620 @@ -629,6 +647,8 @@ const struct cred *override_creds(const
62621 {
62622 const struct cred *old = current->cred;
62623
62624 + pax_track_stack();
62625 +
62626 kdebug("override_creds(%p{%d,%d})", new,
62627 atomic_read(&new->usage),
62628 read_cred_subscribers(new));
62629 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62630 {
62631 const struct cred *override = current->cred;
62632
62633 + pax_track_stack();
62634 +
62635 kdebug("revert_creds(%p{%d,%d})", old,
62636 atomic_read(&old->usage),
62637 read_cred_subscribers(old));
62638 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62639 const struct cred *old;
62640 struct cred *new;
62641
62642 + pax_track_stack();
62643 +
62644 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62645 if (!new)
62646 return NULL;
62647 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62648 */
62649 int set_security_override(struct cred *new, u32 secid)
62650 {
62651 + pax_track_stack();
62652 +
62653 return security_kernel_act_as(new, secid);
62654 }
62655 EXPORT_SYMBOL(set_security_override);
62656 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62657 u32 secid;
62658 int ret;
62659
62660 + pax_track_stack();
62661 +
62662 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62663 if (ret < 0)
62664 return ret;
62665 diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62666 --- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62667 +++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62668 @@ -55,6 +55,10 @@
62669 #include <asm/pgtable.h>
62670 #include <asm/mmu_context.h>
62671
62672 +#ifdef CONFIG_GRKERNSEC
62673 +extern rwlock_t grsec_exec_file_lock;
62674 +#endif
62675 +
62676 static void exit_mm(struct task_struct * tsk);
62677
62678 static void __unhash_process(struct task_struct *p)
62679 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62680 struct task_struct *leader;
62681 int zap_leader;
62682 repeat:
62683 +#ifdef CONFIG_NET
62684 + gr_del_task_from_ip_table(p);
62685 +#endif
62686 +
62687 tracehook_prepare_release_task(p);
62688 /* don't need to get the RCU readlock here - the process is dead and
62689 * can't be modifying its own credentials */
62690 @@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62691 {
62692 write_lock_irq(&tasklist_lock);
62693
62694 +#ifdef CONFIG_GRKERNSEC
62695 + write_lock(&grsec_exec_file_lock);
62696 + if (current->exec_file) {
62697 + fput(current->exec_file);
62698 + current->exec_file = NULL;
62699 + }
62700 + write_unlock(&grsec_exec_file_lock);
62701 +#endif
62702 +
62703 ptrace_unlink(current);
62704 /* Reparent to init */
62705 current->real_parent = current->parent = kthreadd_task;
62706 list_move_tail(&current->sibling, &current->real_parent->children);
62707
62708 + gr_set_kernel_label(current);
62709 +
62710 /* Set the exit signal to SIGCHLD so we signal init on exit */
62711 current->exit_signal = SIGCHLD;
62712
62713 @@ -397,7 +416,7 @@ int allow_signal(int sig)
62714 * know it'll be handled, so that they don't get converted to
62715 * SIGKILL or just silently dropped.
62716 */
62717 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62718 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62719 recalc_sigpending();
62720 spin_unlock_irq(&current->sighand->siglock);
62721 return 0;
62722 @@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62723 vsnprintf(current->comm, sizeof(current->comm), name, args);
62724 va_end(args);
62725
62726 +#ifdef CONFIG_GRKERNSEC
62727 + write_lock(&grsec_exec_file_lock);
62728 + if (current->exec_file) {
62729 + fput(current->exec_file);
62730 + current->exec_file = NULL;
62731 + }
62732 + write_unlock(&grsec_exec_file_lock);
62733 +#endif
62734 +
62735 + gr_set_kernel_label(current);
62736 +
62737 /*
62738 * If we were started as result of loading a module, close all of the
62739 * user space pages. We don't need them, and if we didn't close them
62740 @@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62741 struct task_struct *tsk = current;
62742 int group_dead;
62743
62744 - profile_task_exit(tsk);
62745 -
62746 - WARN_ON(atomic_read(&tsk->fs_excl));
62747 -
62748 + /*
62749 + * Check this first since set_fs() below depends on
62750 + * current_thread_info(), which we better not access when we're in
62751 + * interrupt context. Other than that, we want to do the set_fs()
62752 + * as early as possible.
62753 + */
62754 if (unlikely(in_interrupt()))
62755 panic("Aiee, killing interrupt handler!");
62756 - if (unlikely(!tsk->pid))
62757 - panic("Attempted to kill the idle task!");
62758
62759 /*
62760 - * If do_exit is called because this processes oopsed, it's possible
62761 + * If do_exit is called because this processes Oops'ed, it's possible
62762 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62763 * continuing. Amongst other possible reasons, this is to prevent
62764 * mm_release()->clear_child_tid() from writing to a user-controlled
62765 @@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62766 */
62767 set_fs(USER_DS);
62768
62769 + profile_task_exit(tsk);
62770 +
62771 + WARN_ON(atomic_read(&tsk->fs_excl));
62772 +
62773 + if (unlikely(!tsk->pid))
62774 + panic("Attempted to kill the idle task!");
62775 +
62776 tracehook_report_exit(&code);
62777
62778 validate_creds_for_do_exit(tsk);
62779 @@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62780 tsk->exit_code = code;
62781 taskstats_exit(tsk, group_dead);
62782
62783 + gr_acl_handle_psacct(tsk, code);
62784 + gr_acl_handle_exit();
62785 +
62786 exit_mm(tsk);
62787
62788 if (group_dead)
62789 @@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62790
62791 if (unlikely(wo->wo_flags & WNOWAIT)) {
62792 int exit_code = p->exit_code;
62793 - int why, status;
62794 + int why;
62795
62796 get_task_struct(p);
62797 read_unlock(&tasklist_lock);
62798 diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62799 --- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62800 +++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62801 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62802 *stackend = STACK_END_MAGIC; /* for overflow detection */
62803
62804 #ifdef CONFIG_CC_STACKPROTECTOR
62805 - tsk->stack_canary = get_random_int();
62806 + tsk->stack_canary = pax_get_random_long();
62807 #endif
62808
62809 /* One for us, one for whoever does the "release_task()" (usually parent) */
62810 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62811 mm->locked_vm = 0;
62812 mm->mmap = NULL;
62813 mm->mmap_cache = NULL;
62814 - mm->free_area_cache = oldmm->mmap_base;
62815 - mm->cached_hole_size = ~0UL;
62816 + mm->free_area_cache = oldmm->free_area_cache;
62817 + mm->cached_hole_size = oldmm->cached_hole_size;
62818 mm->map_count = 0;
62819 cpumask_clear(mm_cpumask(mm));
62820 mm->mm_rb = RB_ROOT;
62821 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62822 tmp->vm_flags &= ~VM_LOCKED;
62823 tmp->vm_mm = mm;
62824 tmp->vm_next = tmp->vm_prev = NULL;
62825 + tmp->vm_mirror = NULL;
62826 anon_vma_link(tmp);
62827 file = tmp->vm_file;
62828 if (file) {
62829 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62830 if (retval)
62831 goto out;
62832 }
62833 +
62834 +#ifdef CONFIG_PAX_SEGMEXEC
62835 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62836 + struct vm_area_struct *mpnt_m;
62837 +
62838 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62839 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62840 +
62841 + if (!mpnt->vm_mirror)
62842 + continue;
62843 +
62844 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62845 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62846 + mpnt->vm_mirror = mpnt_m;
62847 + } else {
62848 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62849 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62850 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62851 + mpnt->vm_mirror->vm_mirror = mpnt;
62852 + }
62853 + }
62854 + BUG_ON(mpnt_m);
62855 + }
62856 +#endif
62857 +
62858 /* a new mm has just been created */
62859 arch_dup_mmap(oldmm, mm);
62860 retval = 0;
62861 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62862 write_unlock(&fs->lock);
62863 return -EAGAIN;
62864 }
62865 - fs->users++;
62866 + atomic_inc(&fs->users);
62867 write_unlock(&fs->lock);
62868 return 0;
62869 }
62870 tsk->fs = copy_fs_struct(fs);
62871 if (!tsk->fs)
62872 return -ENOMEM;
62873 + gr_set_chroot_entries(tsk, &tsk->fs->root);
62874 return 0;
62875 }
62876
62877 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62878 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62879 #endif
62880 retval = -EAGAIN;
62881 +
62882 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62883 +
62884 if (atomic_read(&p->real_cred->user->processes) >=
62885 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62886 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62887 - p->real_cred->user != INIT_USER)
62888 + if (p->real_cred->user != INIT_USER &&
62889 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62890 goto bad_fork_free;
62891 }
62892 + current->flags &= ~PF_NPROC_EXCEEDED;
62893
62894 retval = copy_creds(p, clone_flags);
62895 if (retval < 0)
62896 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62897 goto bad_fork_free_pid;
62898 }
62899
62900 + gr_copy_label(p);
62901 +
62902 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62903 /*
62904 * Clear TID on mm_release()?
62905 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62906 bad_fork_free:
62907 free_task(p);
62908 fork_out:
62909 + gr_log_forkfail(retval);
62910 +
62911 return ERR_PTR(retval);
62912 }
62913
62914 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62915 if (clone_flags & CLONE_PARENT_SETTID)
62916 put_user(nr, parent_tidptr);
62917
62918 + gr_handle_brute_check();
62919 +
62920 if (clone_flags & CLONE_VFORK) {
62921 p->vfork_done = &vfork;
62922 init_completion(&vfork);
62923 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62924 return 0;
62925
62926 /* don't need lock here; in the worst case we'll do useless copy */
62927 - if (fs->users == 1)
62928 + if (atomic_read(&fs->users) == 1)
62929 return 0;
62930
62931 *new_fsp = copy_fs_struct(fs);
62932 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62933 fs = current->fs;
62934 write_lock(&fs->lock);
62935 current->fs = new_fs;
62936 - if (--fs->users)
62937 + gr_set_chroot_entries(current, &current->fs->root);
62938 + if (atomic_dec_return(&fs->users))
62939 new_fs = NULL;
62940 else
62941 new_fs = fs;
62942 diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62943 --- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62944 +++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62945 @@ -54,6 +54,7 @@
62946 #include <linux/mount.h>
62947 #include <linux/pagemap.h>
62948 #include <linux/syscalls.h>
62949 +#include <linux/ptrace.h>
62950 #include <linux/signal.h>
62951 #include <linux/module.h>
62952 #include <linux/magic.h>
62953 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62954 struct page *page;
62955 int err;
62956
62957 +#ifdef CONFIG_PAX_SEGMEXEC
62958 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62959 + return -EFAULT;
62960 +#endif
62961 +
62962 /*
62963 * The futex address must be "naturally" aligned.
62964 */
62965 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62966 struct futex_q q;
62967 int ret;
62968
62969 + pax_track_stack();
62970 +
62971 if (!bitset)
62972 return -EINVAL;
62973
62974 @@ -1841,7 +1849,7 @@ retry:
62975
62976 restart = &current_thread_info()->restart_block;
62977 restart->fn = futex_wait_restart;
62978 - restart->futex.uaddr = (u32 *)uaddr;
62979 + restart->futex.uaddr = uaddr;
62980 restart->futex.val = val;
62981 restart->futex.time = abs_time->tv64;
62982 restart->futex.bitset = bitset;
62983 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62984 struct futex_q q;
62985 int res, ret;
62986
62987 + pax_track_stack();
62988 +
62989 if (!bitset)
62990 return -EINVAL;
62991
62992 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62993 {
62994 struct robust_list_head __user *head;
62995 unsigned long ret;
62996 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62997 const struct cred *cred = current_cred(), *pcred;
62998 +#endif
62999
63000 if (!futex_cmpxchg_enabled)
63001 return -ENOSYS;
63002 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63003 if (!p)
63004 goto err_unlock;
63005 ret = -EPERM;
63006 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63007 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63008 + goto err_unlock;
63009 +#else
63010 pcred = __task_cred(p);
63011 if (cred->euid != pcred->euid &&
63012 cred->euid != pcred->uid &&
63013 !capable(CAP_SYS_PTRACE))
63014 goto err_unlock;
63015 +#endif
63016 head = p->robust_list;
63017 rcu_read_unlock();
63018 }
63019 @@ -2459,7 +2476,7 @@ retry:
63020 */
63021 static inline int fetch_robust_entry(struct robust_list __user **entry,
63022 struct robust_list __user * __user *head,
63023 - int *pi)
63024 + unsigned int *pi)
63025 {
63026 unsigned long uentry;
63027
63028 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
63029 {
63030 u32 curval;
63031 int i;
63032 + mm_segment_t oldfs;
63033
63034 /*
63035 * This will fail and we want it. Some arch implementations do
63036 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
63037 * implementation, the non functional ones will return
63038 * -ENOSYS.
63039 */
63040 + oldfs = get_fs();
63041 + set_fs(USER_DS);
63042 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
63043 + set_fs(oldfs);
63044 if (curval == -EFAULT)
63045 futex_cmpxchg_enabled = 1;
63046
63047 diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
63048 --- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
63049 +++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
63050 @@ -10,6 +10,7 @@
63051 #include <linux/compat.h>
63052 #include <linux/nsproxy.h>
63053 #include <linux/futex.h>
63054 +#include <linux/ptrace.h>
63055
63056 #include <asm/uaccess.h>
63057
63058 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
63059 {
63060 struct compat_robust_list_head __user *head;
63061 unsigned long ret;
63062 - const struct cred *cred = current_cred(), *pcred;
63063 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63064 + const struct cred *cred = current_cred();
63065 + const struct cred *pcred;
63066 +#endif
63067
63068 if (!futex_cmpxchg_enabled)
63069 return -ENOSYS;
63070 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
63071 if (!p)
63072 goto err_unlock;
63073 ret = -EPERM;
63074 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63075 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63076 + goto err_unlock;
63077 +#else
63078 pcred = __task_cred(p);
63079 if (cred->euid != pcred->euid &&
63080 cred->euid != pcred->uid &&
63081 !capable(CAP_SYS_PTRACE))
63082 goto err_unlock;
63083 +#endif
63084 head = p->compat_robust_list;
63085 read_unlock(&tasklist_lock);
63086 }
63087 diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
63088 --- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63089 +++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63090 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63091 }
63092
63093 #ifdef CONFIG_MODULES
63094 -static inline int within(void *addr, void *start, unsigned long size)
63095 -{
63096 - return ((addr >= start) && (addr < start + size));
63097 -}
63098 -
63099 /* Update list and generate events when modules are unloaded. */
63100 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63101 void *data)
63102 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63103 prev = NULL;
63104 /* Remove entries located in module from linked list. */
63105 for (info = gcov_info_head; info; info = info->next) {
63106 - if (within(info, mod->module_core, mod->core_size)) {
63107 + if (within_module_core_rw((unsigned long)info, mod)) {
63108 if (prev)
63109 prev->next = info->next;
63110 else
63111 diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
63112 --- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63113 +++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63114 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63115 local_irq_restore(flags);
63116 }
63117
63118 -static void run_hrtimer_softirq(struct softirq_action *h)
63119 +static void run_hrtimer_softirq(void)
63120 {
63121 hrtimer_peek_ahead_timers();
63122 }
63123 diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
63124 --- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63125 +++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63126 @@ -11,6 +11,9 @@
63127 * Changed the compression method from stem compression to "table lookup"
63128 * compression (see scripts/kallsyms.c for a more complete description)
63129 */
63130 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63131 +#define __INCLUDED_BY_HIDESYM 1
63132 +#endif
63133 #include <linux/kallsyms.h>
63134 #include <linux/module.h>
63135 #include <linux/init.h>
63136 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63137
63138 static inline int is_kernel_inittext(unsigned long addr)
63139 {
63140 + if (system_state != SYSTEM_BOOTING)
63141 + return 0;
63142 +
63143 if (addr >= (unsigned long)_sinittext
63144 && addr <= (unsigned long)_einittext)
63145 return 1;
63146 return 0;
63147 }
63148
63149 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63150 +#ifdef CONFIG_MODULES
63151 +static inline int is_module_text(unsigned long addr)
63152 +{
63153 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63154 + return 1;
63155 +
63156 + addr = ktla_ktva(addr);
63157 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63158 +}
63159 +#else
63160 +static inline int is_module_text(unsigned long addr)
63161 +{
63162 + return 0;
63163 +}
63164 +#endif
63165 +#endif
63166 +
63167 static inline int is_kernel_text(unsigned long addr)
63168 {
63169 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63170 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63171
63172 static inline int is_kernel(unsigned long addr)
63173 {
63174 +
63175 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63176 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63177 + return 1;
63178 +
63179 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63180 +#else
63181 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63182 +#endif
63183 +
63184 return 1;
63185 return in_gate_area_no_task(addr);
63186 }
63187
63188 static int is_ksym_addr(unsigned long addr)
63189 {
63190 +
63191 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63192 + if (is_module_text(addr))
63193 + return 0;
63194 +#endif
63195 +
63196 if (all_var)
63197 return is_kernel(addr);
63198
63199 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63200
63201 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63202 {
63203 - iter->name[0] = '\0';
63204 iter->nameoff = get_symbol_offset(new_pos);
63205 iter->pos = new_pos;
63206 }
63207 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63208 {
63209 struct kallsym_iter *iter = m->private;
63210
63211 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63212 + if (current_uid())
63213 + return 0;
63214 +#endif
63215 +
63216 /* Some debugging symbols have no name. Ignore them. */
63217 if (!iter->name[0])
63218 return 0;
63219 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63220 struct kallsym_iter *iter;
63221 int ret;
63222
63223 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63224 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63225 if (!iter)
63226 return -ENOMEM;
63227 reset_iter(iter, 0);
63228 diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63229 --- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63230 +++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63231 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63232 /* Guard for recursive entry */
63233 static int exception_level;
63234
63235 -static struct kgdb_io *kgdb_io_ops;
63236 +static const struct kgdb_io *kgdb_io_ops;
63237 static DEFINE_SPINLOCK(kgdb_registration_lock);
63238
63239 /* kgdb console driver is loaded */
63240 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63241 */
63242 static atomic_t passive_cpu_wait[NR_CPUS];
63243 static atomic_t cpu_in_kgdb[NR_CPUS];
63244 -atomic_t kgdb_setting_breakpoint;
63245 +atomic_unchecked_t kgdb_setting_breakpoint;
63246
63247 struct task_struct *kgdb_usethread;
63248 struct task_struct *kgdb_contthread;
63249 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63250 sizeof(unsigned long)];
63251
63252 /* to keep track of the CPU which is doing the single stepping*/
63253 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63254 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63255
63256 /*
63257 * If you are debugging a problem where roundup (the collection of
63258 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63259 return 0;
63260 if (kgdb_connected)
63261 return 1;
63262 - if (atomic_read(&kgdb_setting_breakpoint))
63263 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63264 return 1;
63265 if (print_wait)
63266 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63267 @@ -1426,8 +1426,8 @@ acquirelock:
63268 * instance of the exception handler wanted to come into the
63269 * debugger on a different CPU via a single step
63270 */
63271 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63272 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63273 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63274 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63275
63276 atomic_set(&kgdb_active, -1);
63277 touch_softlockup_watchdog();
63278 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63279 *
63280 * Register it with the KGDB core.
63281 */
63282 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63283 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63284 {
63285 int err;
63286
63287 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63288 *
63289 * Unregister it with the KGDB core.
63290 */
63291 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63292 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63293 {
63294 BUG_ON(kgdb_connected);
63295
63296 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63297 */
63298 void kgdb_breakpoint(void)
63299 {
63300 - atomic_set(&kgdb_setting_breakpoint, 1);
63301 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63302 wmb(); /* Sync point before breakpoint */
63303 arch_kgdb_breakpoint();
63304 wmb(); /* Sync point after breakpoint */
63305 - atomic_set(&kgdb_setting_breakpoint, 0);
63306 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63307 }
63308 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63309
63310 diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63311 --- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63312 +++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63313 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63314 * If module auto-loading support is disabled then this function
63315 * becomes a no-operation.
63316 */
63317 -int __request_module(bool wait, const char *fmt, ...)
63318 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63319 {
63320 - va_list args;
63321 char module_name[MODULE_NAME_LEN];
63322 unsigned int max_modprobes;
63323 int ret;
63324 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63325 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63326 static char *envp[] = { "HOME=/",
63327 "TERM=linux",
63328 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63329 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63330 if (ret)
63331 return ret;
63332
63333 - va_start(args, fmt);
63334 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63335 - va_end(args);
63336 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63337 if (ret >= MODULE_NAME_LEN)
63338 return -ENAMETOOLONG;
63339
63340 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63341 + if (!current_uid()) {
63342 + /* hack to workaround consolekit/udisks stupidity */
63343 + read_lock(&tasklist_lock);
63344 + if (!strcmp(current->comm, "mount") &&
63345 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63346 + read_unlock(&tasklist_lock);
63347 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63348 + return -EPERM;
63349 + }
63350 + read_unlock(&tasklist_lock);
63351 + }
63352 +#endif
63353 +
63354 /* If modprobe needs a service that is in a module, we get a recursive
63355 * loop. Limit the number of running kmod threads to max_threads/2 or
63356 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63357 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63358 atomic_dec(&kmod_concurrent);
63359 return ret;
63360 }
63361 +
63362 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63363 +{
63364 + va_list args;
63365 + int ret;
63366 +
63367 + va_start(args, fmt);
63368 + ret = ____request_module(wait, module_param, fmt, args);
63369 + va_end(args);
63370 +
63371 + return ret;
63372 +}
63373 +
63374 +int __request_module(bool wait, const char *fmt, ...)
63375 +{
63376 + va_list args;
63377 + int ret;
63378 +
63379 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63380 + if (current_uid()) {
63381 + char module_param[MODULE_NAME_LEN];
63382 +
63383 + memset(module_param, 0, sizeof(module_param));
63384 +
63385 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63386 +
63387 + va_start(args, fmt);
63388 + ret = ____request_module(wait, module_param, fmt, args);
63389 + va_end(args);
63390 +
63391 + return ret;
63392 + }
63393 +#endif
63394 +
63395 + va_start(args, fmt);
63396 + ret = ____request_module(wait, NULL, fmt, args);
63397 + va_end(args);
63398 +
63399 + return ret;
63400 +}
63401 +
63402 +
63403 EXPORT_SYMBOL(__request_module);
63404 #endif /* CONFIG_MODULES */
63405
63406 diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63407 --- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63408 +++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63409 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63410 * kernel image and loaded module images reside. This is required
63411 * so x86_64 can correctly handle the %rip-relative fixups.
63412 */
63413 - kip->insns = module_alloc(PAGE_SIZE);
63414 + kip->insns = module_alloc_exec(PAGE_SIZE);
63415 if (!kip->insns) {
63416 kfree(kip);
63417 return NULL;
63418 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63419 */
63420 if (!list_is_singular(&kprobe_insn_pages)) {
63421 list_del(&kip->list);
63422 - module_free(NULL, kip->insns);
63423 + module_free_exec(NULL, kip->insns);
63424 kfree(kip);
63425 }
63426 return 1;
63427 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63428 {
63429 int i, err = 0;
63430 unsigned long offset = 0, size = 0;
63431 - char *modname, namebuf[128];
63432 + char *modname, namebuf[KSYM_NAME_LEN];
63433 const char *symbol_name;
63434 void *addr;
63435 struct kprobe_blackpoint *kb;
63436 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63437 const char *sym = NULL;
63438 unsigned int i = *(loff_t *) v;
63439 unsigned long offset = 0;
63440 - char *modname, namebuf[128];
63441 + char *modname, namebuf[KSYM_NAME_LEN];
63442
63443 head = &kprobe_table[i];
63444 preempt_disable();
63445 diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63446 --- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63447 +++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63448 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63449 /*
63450 * Various lockdep statistics:
63451 */
63452 -atomic_t chain_lookup_hits;
63453 -atomic_t chain_lookup_misses;
63454 -atomic_t hardirqs_on_events;
63455 -atomic_t hardirqs_off_events;
63456 -atomic_t redundant_hardirqs_on;
63457 -atomic_t redundant_hardirqs_off;
63458 -atomic_t softirqs_on_events;
63459 -atomic_t softirqs_off_events;
63460 -atomic_t redundant_softirqs_on;
63461 -atomic_t redundant_softirqs_off;
63462 -atomic_t nr_unused_locks;
63463 -atomic_t nr_cyclic_checks;
63464 -atomic_t nr_find_usage_forwards_checks;
63465 -atomic_t nr_find_usage_backwards_checks;
63466 +atomic_unchecked_t chain_lookup_hits;
63467 +atomic_unchecked_t chain_lookup_misses;
63468 +atomic_unchecked_t hardirqs_on_events;
63469 +atomic_unchecked_t hardirqs_off_events;
63470 +atomic_unchecked_t redundant_hardirqs_on;
63471 +atomic_unchecked_t redundant_hardirqs_off;
63472 +atomic_unchecked_t softirqs_on_events;
63473 +atomic_unchecked_t softirqs_off_events;
63474 +atomic_unchecked_t redundant_softirqs_on;
63475 +atomic_unchecked_t redundant_softirqs_off;
63476 +atomic_unchecked_t nr_unused_locks;
63477 +atomic_unchecked_t nr_cyclic_checks;
63478 +atomic_unchecked_t nr_find_usage_forwards_checks;
63479 +atomic_unchecked_t nr_find_usage_backwards_checks;
63480 #endif
63481
63482 /*
63483 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
63484 int i;
63485 #endif
63486
63487 +#ifdef CONFIG_PAX_KERNEXEC
63488 + start = ktla_ktva(start);
63489 +#endif
63490 +
63491 /*
63492 * static variable?
63493 */
63494 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
63495 */
63496 for_each_possible_cpu(i) {
63497 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63498 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63499 - + per_cpu_offset(i);
63500 + end = start + PERCPU_ENOUGH_ROOM;
63501
63502 if ((addr >= start) && (addr < end))
63503 return 1;
63504 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63505 if (!static_obj(lock->key)) {
63506 debug_locks_off();
63507 printk("INFO: trying to register non-static key.\n");
63508 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63509 printk("the code is fine but needs lockdep annotation.\n");
63510 printk("turning off the locking correctness validator.\n");
63511 dump_stack();
63512 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63513 if (!class)
63514 return 0;
63515 }
63516 - debug_atomic_inc((atomic_t *)&class->ops);
63517 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63518 if (very_verbose(class)) {
63519 printk("\nacquire class [%p] %s", class->key, class->name);
63520 if (class->name_version > 1)
63521 diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63522 --- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63523 +++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63524 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63525 /*
63526 * Various lockdep statistics:
63527 */
63528 -extern atomic_t chain_lookup_hits;
63529 -extern atomic_t chain_lookup_misses;
63530 -extern atomic_t hardirqs_on_events;
63531 -extern atomic_t hardirqs_off_events;
63532 -extern atomic_t redundant_hardirqs_on;
63533 -extern atomic_t redundant_hardirqs_off;
63534 -extern atomic_t softirqs_on_events;
63535 -extern atomic_t softirqs_off_events;
63536 -extern atomic_t redundant_softirqs_on;
63537 -extern atomic_t redundant_softirqs_off;
63538 -extern atomic_t nr_unused_locks;
63539 -extern atomic_t nr_cyclic_checks;
63540 -extern atomic_t nr_cyclic_check_recursions;
63541 -extern atomic_t nr_find_usage_forwards_checks;
63542 -extern atomic_t nr_find_usage_forwards_recursions;
63543 -extern atomic_t nr_find_usage_backwards_checks;
63544 -extern atomic_t nr_find_usage_backwards_recursions;
63545 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
63546 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
63547 -# define debug_atomic_read(ptr) atomic_read(ptr)
63548 +extern atomic_unchecked_t chain_lookup_hits;
63549 +extern atomic_unchecked_t chain_lookup_misses;
63550 +extern atomic_unchecked_t hardirqs_on_events;
63551 +extern atomic_unchecked_t hardirqs_off_events;
63552 +extern atomic_unchecked_t redundant_hardirqs_on;
63553 +extern atomic_unchecked_t redundant_hardirqs_off;
63554 +extern atomic_unchecked_t softirqs_on_events;
63555 +extern atomic_unchecked_t softirqs_off_events;
63556 +extern atomic_unchecked_t redundant_softirqs_on;
63557 +extern atomic_unchecked_t redundant_softirqs_off;
63558 +extern atomic_unchecked_t nr_unused_locks;
63559 +extern atomic_unchecked_t nr_cyclic_checks;
63560 +extern atomic_unchecked_t nr_cyclic_check_recursions;
63561 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
63562 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63563 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
63564 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63565 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63566 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63567 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63568 #else
63569 # define debug_atomic_inc(ptr) do { } while (0)
63570 # define debug_atomic_dec(ptr) do { } while (0)
63571 diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63572 --- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63573 +++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63574 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63575
63576 static void print_name(struct seq_file *m, struct lock_class *class)
63577 {
63578 - char str[128];
63579 + char str[KSYM_NAME_LEN];
63580 const char *name = class->name;
63581
63582 if (!name) {
63583 diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63584 --- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63585 +++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63586 @@ -55,6 +55,7 @@
63587 #include <linux/async.h>
63588 #include <linux/percpu.h>
63589 #include <linux/kmemleak.h>
63590 +#include <linux/grsecurity.h>
63591
63592 #define CREATE_TRACE_POINTS
63593 #include <trace/events/module.h>
63594 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63595 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63596
63597 /* Bounds of module allocation, for speeding __module_address */
63598 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63599 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63600 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63601
63602 int register_module_notifier(struct notifier_block * nb)
63603 {
63604 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63605 return true;
63606
63607 list_for_each_entry_rcu(mod, &modules, list) {
63608 - struct symsearch arr[] = {
63609 + struct symsearch modarr[] = {
63610 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63611 NOT_GPL_ONLY, false },
63612 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63613 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63614 #endif
63615 };
63616
63617 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63618 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63619 return true;
63620 }
63621 return false;
63622 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63623 void *ptr;
63624 int cpu;
63625
63626 - if (align > PAGE_SIZE) {
63627 + if (align-1 >= PAGE_SIZE) {
63628 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63629 name, align, PAGE_SIZE);
63630 align = PAGE_SIZE;
63631 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63632 * /sys/module/foo/sections stuff
63633 * J. Corbet <corbet@lwn.net>
63634 */
63635 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63636 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63637
63638 static inline bool sect_empty(const Elf_Shdr *sect)
63639 {
63640 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63641 destroy_params(mod->kp, mod->num_kp);
63642
63643 /* This may be NULL, but that's OK */
63644 - module_free(mod, mod->module_init);
63645 + module_free(mod, mod->module_init_rw);
63646 + module_free_exec(mod, mod->module_init_rx);
63647 kfree(mod->args);
63648 if (mod->percpu)
63649 percpu_modfree(mod->percpu);
63650 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63651 percpu_modfree(mod->refptr);
63652 #endif
63653 /* Free lock-classes: */
63654 - lockdep_free_key_range(mod->module_core, mod->core_size);
63655 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63656 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63657
63658 /* Finally, free the core (containing the module structure) */
63659 - module_free(mod, mod->module_core);
63660 + module_free_exec(mod, mod->module_core_rx);
63661 + module_free(mod, mod->module_core_rw);
63662
63663 #ifdef CONFIG_MPU
63664 update_protections(current->mm);
63665 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63666 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63667 int ret = 0;
63668 const struct kernel_symbol *ksym;
63669 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63670 + int is_fs_load = 0;
63671 + int register_filesystem_found = 0;
63672 + char *p;
63673 +
63674 + p = strstr(mod->args, "grsec_modharden_fs");
63675 +
63676 + if (p) {
63677 + char *endptr = p + strlen("grsec_modharden_fs");
63678 + /* copy \0 as well */
63679 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63680 + is_fs_load = 1;
63681 + }
63682 +#endif
63683 +
63684
63685 for (i = 1; i < n; i++) {
63686 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63687 + const char *name = strtab + sym[i].st_name;
63688 +
63689 + /* it's a real shame this will never get ripped and copied
63690 + upstream! ;(
63691 + */
63692 + if (is_fs_load && !strcmp(name, "register_filesystem"))
63693 + register_filesystem_found = 1;
63694 +#endif
63695 switch (sym[i].st_shndx) {
63696 case SHN_COMMON:
63697 /* We compiled with -fno-common. These are not
63698 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63699 strtab + sym[i].st_name, mod);
63700 /* Ok if resolved. */
63701 if (ksym) {
63702 + pax_open_kernel();
63703 sym[i].st_value = ksym->value;
63704 + pax_close_kernel();
63705 break;
63706 }
63707
63708 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63709 secbase = (unsigned long)mod->percpu;
63710 else
63711 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63712 + pax_open_kernel();
63713 sym[i].st_value += secbase;
63714 + pax_close_kernel();
63715 break;
63716 }
63717 }
63718
63719 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63720 + if (is_fs_load && !register_filesystem_found) {
63721 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63722 + ret = -EPERM;
63723 + }
63724 +#endif
63725 +
63726 return ret;
63727 }
63728
63729 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63730 || s->sh_entsize != ~0UL
63731 || strstarts(secstrings + s->sh_name, ".init"))
63732 continue;
63733 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63734 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63735 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63736 + else
63737 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63738 DEBUGP("\t%s\n", secstrings + s->sh_name);
63739 }
63740 - if (m == 0)
63741 - mod->core_text_size = mod->core_size;
63742 }
63743
63744 DEBUGP("Init section allocation order:\n");
63745 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63746 || s->sh_entsize != ~0UL
63747 || !strstarts(secstrings + s->sh_name, ".init"))
63748 continue;
63749 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63750 - | INIT_OFFSET_MASK);
63751 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63752 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63753 + else
63754 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63755 + s->sh_entsize |= INIT_OFFSET_MASK;
63756 DEBUGP("\t%s\n", secstrings + s->sh_name);
63757 }
63758 - if (m == 0)
63759 - mod->init_text_size = mod->init_size;
63760 }
63761 }
63762
63763 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63764
63765 /* As per nm */
63766 static char elf_type(const Elf_Sym *sym,
63767 - Elf_Shdr *sechdrs,
63768 - const char *secstrings,
63769 - struct module *mod)
63770 + const Elf_Shdr *sechdrs,
63771 + const char *secstrings)
63772 {
63773 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63774 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63775 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63776
63777 /* Put symbol section at end of init part of module. */
63778 symsect->sh_flags |= SHF_ALLOC;
63779 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63780 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63781 symindex) | INIT_OFFSET_MASK;
63782 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63783
63784 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63785 }
63786
63787 /* Append room for core symbols at end of core part. */
63788 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63789 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63790 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63791 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63792
63793 /* Put string table section at end of init part of module. */
63794 strsect->sh_flags |= SHF_ALLOC;
63795 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63796 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63797 strindex) | INIT_OFFSET_MASK;
63798 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63799
63800 /* Append room for core symbols' strings at end of core part. */
63801 - *pstroffs = mod->core_size;
63802 + *pstroffs = mod->core_size_rx;
63803 __set_bit(0, strmap);
63804 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63805 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63806
63807 return symoffs;
63808 }
63809 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63810 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63811 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63812
63813 + pax_open_kernel();
63814 +
63815 /* Set types up while we still have access to sections. */
63816 for (i = 0; i < mod->num_symtab; i++)
63817 mod->symtab[i].st_info
63818 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63819 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
63820
63821 - mod->core_symtab = dst = mod->module_core + symoffs;
63822 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
63823 src = mod->symtab;
63824 *dst = *src;
63825 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63826 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63827 }
63828 mod->core_num_syms = ndst;
63829
63830 - mod->core_strtab = s = mod->module_core + stroffs;
63831 + mod->core_strtab = s = mod->module_core_rx + stroffs;
63832 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63833 if (test_bit(i, strmap))
63834 *++s = mod->strtab[i];
63835 +
63836 + pax_close_kernel();
63837 }
63838 #else
63839 static inline unsigned long layout_symtab(struct module *mod,
63840 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63841 #endif
63842 }
63843
63844 -static void *module_alloc_update_bounds(unsigned long size)
63845 +static void *module_alloc_update_bounds_rw(unsigned long size)
63846 {
63847 void *ret = module_alloc(size);
63848
63849 if (ret) {
63850 /* Update module bounds. */
63851 - if ((unsigned long)ret < module_addr_min)
63852 - module_addr_min = (unsigned long)ret;
63853 - if ((unsigned long)ret + size > module_addr_max)
63854 - module_addr_max = (unsigned long)ret + size;
63855 + if ((unsigned long)ret < module_addr_min_rw)
63856 + module_addr_min_rw = (unsigned long)ret;
63857 + if ((unsigned long)ret + size > module_addr_max_rw)
63858 + module_addr_max_rw = (unsigned long)ret + size;
63859 + }
63860 + return ret;
63861 +}
63862 +
63863 +static void *module_alloc_update_bounds_rx(unsigned long size)
63864 +{
63865 + void *ret = module_alloc_exec(size);
63866 +
63867 + if (ret) {
63868 + /* Update module bounds. */
63869 + if ((unsigned long)ret < module_addr_min_rx)
63870 + module_addr_min_rx = (unsigned long)ret;
63871 + if ((unsigned long)ret + size > module_addr_max_rx)
63872 + module_addr_max_rx = (unsigned long)ret + size;
63873 }
63874 return ret;
63875 }
63876 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63877 unsigned int i;
63878
63879 /* only scan the sections containing data */
63880 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63881 - (unsigned long)mod->module_core,
63882 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63883 + (unsigned long)mod->module_core_rw,
63884 sizeof(struct module), GFP_KERNEL);
63885
63886 for (i = 1; i < hdr->e_shnum; i++) {
63887 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63888 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63889 continue;
63890
63891 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63892 - (unsigned long)mod->module_core,
63893 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63894 + (unsigned long)mod->module_core_rw,
63895 sechdrs[i].sh_size, GFP_KERNEL);
63896 }
63897 }
63898 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63899 secstrings, &stroffs, strmap);
63900
63901 /* Do the allocs. */
63902 - ptr = module_alloc_update_bounds(mod->core_size);
63903 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63904 /*
63905 * The pointer to this block is stored in the module structure
63906 * which is inside the block. Just mark it as not being a
63907 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63908 err = -ENOMEM;
63909 goto free_percpu;
63910 }
63911 - memset(ptr, 0, mod->core_size);
63912 - mod->module_core = ptr;
63913 + memset(ptr, 0, mod->core_size_rw);
63914 + mod->module_core_rw = ptr;
63915
63916 - ptr = module_alloc_update_bounds(mod->init_size);
63917 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63918 /*
63919 * The pointer to this block is stored in the module structure
63920 * which is inside the block. This block doesn't need to be
63921 * scanned as it contains data and code that will be freed
63922 * after the module is initialized.
63923 */
63924 - kmemleak_ignore(ptr);
63925 - if (!ptr && mod->init_size) {
63926 + kmemleak_not_leak(ptr);
63927 + if (!ptr && mod->init_size_rw) {
63928 + err = -ENOMEM;
63929 + goto free_core_rw;
63930 + }
63931 + memset(ptr, 0, mod->init_size_rw);
63932 + mod->module_init_rw = ptr;
63933 +
63934 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63935 + kmemleak_not_leak(ptr);
63936 + if (!ptr) {
63937 err = -ENOMEM;
63938 - goto free_core;
63939 + goto free_init_rw;
63940 }
63941 - memset(ptr, 0, mod->init_size);
63942 - mod->module_init = ptr;
63943 +
63944 + pax_open_kernel();
63945 + memset(ptr, 0, mod->core_size_rx);
63946 + pax_close_kernel();
63947 + mod->module_core_rx = ptr;
63948 +
63949 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63950 + kmemleak_not_leak(ptr);
63951 + if (!ptr && mod->init_size_rx) {
63952 + err = -ENOMEM;
63953 + goto free_core_rx;
63954 + }
63955 +
63956 + pax_open_kernel();
63957 + memset(ptr, 0, mod->init_size_rx);
63958 + pax_close_kernel();
63959 + mod->module_init_rx = ptr;
63960
63961 /* Transfer each section which specifies SHF_ALLOC */
63962 DEBUGP("final section addresses:\n");
63963 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63964 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63965 continue;
63966
63967 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63968 - dest = mod->module_init
63969 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63970 - else
63971 - dest = mod->module_core + sechdrs[i].sh_entsize;
63972 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63973 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63974 + dest = mod->module_init_rw
63975 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63976 + else
63977 + dest = mod->module_init_rx
63978 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63979 + } else {
63980 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63981 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63982 + else
63983 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63984 + }
63985 +
63986 + if (sechdrs[i].sh_type != SHT_NOBITS) {
63987
63988 - if (sechdrs[i].sh_type != SHT_NOBITS)
63989 - memcpy(dest, (void *)sechdrs[i].sh_addr,
63990 - sechdrs[i].sh_size);
63991 +#ifdef CONFIG_PAX_KERNEXEC
63992 +#ifdef CONFIG_X86_64
63993 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63994 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63995 +#endif
63996 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63997 + pax_open_kernel();
63998 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63999 + pax_close_kernel();
64000 + } else
64001 +#endif
64002 +
64003 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64004 + }
64005 /* Update sh_addr to point to copy in image. */
64006 - sechdrs[i].sh_addr = (unsigned long)dest;
64007 +
64008 +#ifdef CONFIG_PAX_KERNEXEC
64009 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
64010 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
64011 + else
64012 +#endif
64013 +
64014 + sechdrs[i].sh_addr = (unsigned long)dest;
64015 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
64016 }
64017 /* Module has been moved. */
64018 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
64019 mod->name);
64020 if (!mod->refptr) {
64021 err = -ENOMEM;
64022 - goto free_init;
64023 + goto free_init_rx;
64024 }
64025 #endif
64026 /* Now we've moved module, initialize linked lists, etc. */
64027 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
64028 /* Set up MODINFO_ATTR fields */
64029 setup_modinfo(mod, sechdrs, infoindex);
64030
64031 + mod->args = args;
64032 +
64033 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64034 + {
64035 + char *p, *p2;
64036 +
64037 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64038 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64039 + err = -EPERM;
64040 + goto cleanup;
64041 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64042 + p += strlen("grsec_modharden_normal");
64043 + p2 = strstr(p, "_");
64044 + if (p2) {
64045 + *p2 = '\0';
64046 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64047 + *p2 = '_';
64048 + }
64049 + err = -EPERM;
64050 + goto cleanup;
64051 + }
64052 + }
64053 +#endif
64054 +
64055 +
64056 /* Fix up syms, so that st_value is a pointer to location. */
64057 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
64058 mod);
64059 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
64060
64061 /* Now do relocations. */
64062 for (i = 1; i < hdr->e_shnum; i++) {
64063 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
64064 unsigned int info = sechdrs[i].sh_info;
64065 + strtab = (char *)sechdrs[strindex].sh_addr;
64066
64067 /* Not a valid relocation section? */
64068 if (info >= hdr->e_shnum)
64069 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
64070 * Do it before processing of module parameters, so the module
64071 * can provide parameter accessor functions of its own.
64072 */
64073 - if (mod->module_init)
64074 - flush_icache_range((unsigned long)mod->module_init,
64075 - (unsigned long)mod->module_init
64076 - + mod->init_size);
64077 - flush_icache_range((unsigned long)mod->module_core,
64078 - (unsigned long)mod->module_core + mod->core_size);
64079 + if (mod->module_init_rx)
64080 + flush_icache_range((unsigned long)mod->module_init_rx,
64081 + (unsigned long)mod->module_init_rx
64082 + + mod->init_size_rx);
64083 + flush_icache_range((unsigned long)mod->module_core_rx,
64084 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64085
64086 set_fs(old_fs);
64087
64088 - mod->args = args;
64089 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64090 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64091 mod->name);
64092 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64093 free_unload:
64094 module_unload_free(mod);
64095 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64096 + free_init_rx:
64097 percpu_modfree(mod->refptr);
64098 - free_init:
64099 #endif
64100 - module_free(mod, mod->module_init);
64101 - free_core:
64102 - module_free(mod, mod->module_core);
64103 + module_free_exec(mod, mod->module_init_rx);
64104 + free_core_rx:
64105 + module_free_exec(mod, mod->module_core_rx);
64106 + free_init_rw:
64107 + module_free(mod, mod->module_init_rw);
64108 + free_core_rw:
64109 + module_free(mod, mod->module_core_rw);
64110 /* mod will be freed with core. Don't access it beyond this line! */
64111 free_percpu:
64112 if (percpu)
64113 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64114 mod->symtab = mod->core_symtab;
64115 mod->strtab = mod->core_strtab;
64116 #endif
64117 - module_free(mod, mod->module_init);
64118 - mod->module_init = NULL;
64119 - mod->init_size = 0;
64120 - mod->init_text_size = 0;
64121 + module_free(mod, mod->module_init_rw);
64122 + module_free_exec(mod, mod->module_init_rx);
64123 + mod->module_init_rw = NULL;
64124 + mod->module_init_rx = NULL;
64125 + mod->init_size_rw = 0;
64126 + mod->init_size_rx = 0;
64127 mutex_unlock(&module_mutex);
64128
64129 return 0;
64130 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64131 unsigned long nextval;
64132
64133 /* At worse, next value is at end of module */
64134 - if (within_module_init(addr, mod))
64135 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64136 + if (within_module_init_rx(addr, mod))
64137 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64138 + else if (within_module_init_rw(addr, mod))
64139 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64140 + else if (within_module_core_rx(addr, mod))
64141 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64142 + else if (within_module_core_rw(addr, mod))
64143 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64144 else
64145 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64146 + return NULL;
64147
64148 /* Scan for closest preceeding symbol, and next symbol. (ELF
64149 starts real symbols at 1). */
64150 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64151 char buf[8];
64152
64153 seq_printf(m, "%s %u",
64154 - mod->name, mod->init_size + mod->core_size);
64155 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64156 print_unload_info(m, mod);
64157
64158 /* Informative for users. */
64159 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64160 mod->state == MODULE_STATE_COMING ? "Loading":
64161 "Live");
64162 /* Used by oprofile and other similar tools. */
64163 - seq_printf(m, " 0x%p", mod->module_core);
64164 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64165
64166 /* Taints info */
64167 if (mod->taints)
64168 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
64169
64170 static int __init proc_modules_init(void)
64171 {
64172 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64173 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64174 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64175 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64176 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64177 +#else
64178 proc_create("modules", 0, NULL, &proc_modules_operations);
64179 +#endif
64180 +#else
64181 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64182 +#endif
64183 return 0;
64184 }
64185 module_init(proc_modules_init);
64186 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64187 {
64188 struct module *mod;
64189
64190 - if (addr < module_addr_min || addr > module_addr_max)
64191 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64192 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64193 return NULL;
64194
64195 list_for_each_entry_rcu(mod, &modules, list)
64196 - if (within_module_core(addr, mod)
64197 - || within_module_init(addr, mod))
64198 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64199 return mod;
64200 return NULL;
64201 }
64202 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64203 */
64204 struct module *__module_text_address(unsigned long addr)
64205 {
64206 - struct module *mod = __module_address(addr);
64207 + struct module *mod;
64208 +
64209 +#ifdef CONFIG_X86_32
64210 + addr = ktla_ktva(addr);
64211 +#endif
64212 +
64213 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64214 + return NULL;
64215 +
64216 + mod = __module_address(addr);
64217 +
64218 if (mod) {
64219 /* Make sure it's within the text section. */
64220 - if (!within(addr, mod->module_init, mod->init_text_size)
64221 - && !within(addr, mod->module_core, mod->core_text_size))
64222 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64223 mod = NULL;
64224 }
64225 return mod;
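
The kernel/module.c hunks above split each module's core and init images into separate RW and RX allocations (module_alloc_update_bounds_rw/_rx), route every SHF_ALLOC section into one of the two based on its flags, and wrap writes into the RX image in pax_open_kernel()/pax_close_kernel(). Below is a simplified userspace sketch of just the routing decision; it uses plain mmap() and invented struct names, not the patch's kernel interfaces.

/*
 * Simplified userspace illustration of the RW/RX split introduced in
 * load_module(): writable sections land in one mapping, read-only and
 * executable sections in another.  Not kernel code -- mmap() stands in
 * for module_alloc()/module_alloc_exec().
 */
#include <elf.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

struct layout {
    void *core_rw;   /* data, bss     (PROT_READ|PROT_WRITE) */
    void *core_rx;   /* text, rodata  (PROT_READ|PROT_EXEC)  */
};

static void *alloc_region(size_t size, int prot)
{
    void *p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

/* Mirrors the section-routing test added to load_module(). */
static int section_is_rw(const Elf64_Shdr *shdr)
{
    return (shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC);
}

int main(void)
{
    struct layout mod = {
        .core_rw = alloc_region(4096, PROT_READ | PROT_WRITE),
        .core_rx = alloc_region(4096, PROT_READ | PROT_EXEC),
    };
    Elf64_Shdr text = { .sh_flags = SHF_ALLOC | SHF_EXECINSTR };
    Elf64_Shdr data = { .sh_flags = SHF_ALLOC | SHF_WRITE };

    printf(".text -> %s\n", section_is_rw(&text) ? "core_rw" : "core_rx");
    printf(".data -> %s\n", section_is_rw(&data) ? "core_rw" : "core_rx");
    return (mod.core_rw && mod.core_rx) ? 0 : 1;
}
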
64226 diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64227 --- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64228 +++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64229 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64230 */
64231
64232 for (;;) {
64233 - struct thread_info *owner;
64234 + struct task_struct *owner;
64235
64236 /*
64237 * If we own the BKL, then don't spin. The owner of
64238 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64239 spin_lock_mutex(&lock->wait_lock, flags);
64240
64241 debug_mutex_lock_common(lock, &waiter);
64242 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64243 + debug_mutex_add_waiter(lock, &waiter, task);
64244
64245 /* add waiting tasks to the end of the waitqueue (FIFO): */
64246 list_add_tail(&waiter.list, &lock->wait_list);
64247 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64248 * TASK_UNINTERRUPTIBLE case.)
64249 */
64250 if (unlikely(signal_pending_state(state, task))) {
64251 - mutex_remove_waiter(lock, &waiter,
64252 - task_thread_info(task));
64253 + mutex_remove_waiter(lock, &waiter, task);
64254 mutex_release(&lock->dep_map, 1, ip);
64255 spin_unlock_mutex(&lock->wait_lock, flags);
64256
64257 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64258 done:
64259 lock_acquired(&lock->dep_map, ip);
64260 /* got the lock - rejoice! */
64261 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64262 + mutex_remove_waiter(lock, &waiter, task);
64263 mutex_set_owner(lock);
64264
64265 /* set it to 0 if there are no waiters left: */
64266 diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64267 --- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64268 +++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64269 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64270 }
64271
64272 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64273 - struct thread_info *ti)
64274 + struct task_struct *task)
64275 {
64276 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64277
64278 /* Mark the current thread as blocked on the lock: */
64279 - ti->task->blocked_on = waiter;
64280 + task->blocked_on = waiter;
64281 }
64282
64283 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64284 - struct thread_info *ti)
64285 + struct task_struct *task)
64286 {
64287 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64288 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64289 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64290 - ti->task->blocked_on = NULL;
64291 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64292 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64293 + task->blocked_on = NULL;
64294
64295 list_del_init(&waiter->list);
64296 waiter->task = NULL;
64297 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64298 return;
64299
64300 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64301 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64302 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
64303 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64304 mutex_clear_owner(lock);
64305 }
64306 diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64307 --- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64308 +++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64309 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64310 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64311 extern void debug_mutex_add_waiter(struct mutex *lock,
64312 struct mutex_waiter *waiter,
64313 - struct thread_info *ti);
64314 + struct task_struct *task);
64315 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64316 - struct thread_info *ti);
64317 + struct task_struct *task);
64318 extern void debug_mutex_unlock(struct mutex *lock);
64319 extern void debug_mutex_init(struct mutex *lock, const char *name,
64320 struct lock_class_key *key);
64321
64322 static inline void mutex_set_owner(struct mutex *lock)
64323 {
64324 - lock->owner = current_thread_info();
64325 + lock->owner = current;
64326 }
64327
64328 static inline void mutex_clear_owner(struct mutex *lock)
64329 diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64330 --- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64331 +++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64332 @@ -19,7 +19,7 @@
64333 #ifdef CONFIG_SMP
64334 static inline void mutex_set_owner(struct mutex *lock)
64335 {
64336 - lock->owner = current_thread_info();
64337 + lock->owner = current;
64338 }
64339
64340 static inline void mutex_clear_owner(struct mutex *lock)
64341 diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64342 --- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64343 +++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64344 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64345 const char *board;
64346
64347 printk(KERN_WARNING "------------[ cut here ]------------\n");
64348 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64349 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64350 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64351 if (board)
64352 printk(KERN_WARNING "Hardware name: %s\n", board);
64353 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64354 */
64355 void __stack_chk_fail(void)
64356 {
64357 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64358 + dump_stack();
64359 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64360 __builtin_return_address(0));
64361 }
64362 EXPORT_SYMBOL(__stack_chk_fail);
64363 diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64364 --- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64365 +++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64366 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64367 return ret;
64368 }
64369
64370 -static struct sysfs_ops module_sysfs_ops = {
64371 +static const struct sysfs_ops module_sysfs_ops = {
64372 .show = module_attr_show,
64373 .store = module_attr_store,
64374 };
64375 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64376 return 0;
64377 }
64378
64379 -static struct kset_uevent_ops module_uevent_ops = {
64380 +static const struct kset_uevent_ops module_uevent_ops = {
64381 .filter = uevent_filter,
64382 };
64383
64384 diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64385 --- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64386 +++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64387 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64388 */
64389 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64390
64391 -static atomic64_t perf_event_id;
64392 +static atomic64_unchecked_t perf_event_id;
64393
64394 /*
64395 * Lock for (sysadmin-configurable) event reservations:
64396 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64397 * In order to keep per-task stats reliable we need to flip the event
64398 * values when we flip the contexts.
64399 */
64400 - value = atomic64_read(&next_event->count);
64401 - value = atomic64_xchg(&event->count, value);
64402 - atomic64_set(&next_event->count, value);
64403 + value = atomic64_read_unchecked(&next_event->count);
64404 + value = atomic64_xchg_unchecked(&event->count, value);
64405 + atomic64_set_unchecked(&next_event->count, value);
64406
64407 swap(event->total_time_enabled, next_event->total_time_enabled);
64408 swap(event->total_time_running, next_event->total_time_running);
64409 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64410 update_event_times(event);
64411 }
64412
64413 - return atomic64_read(&event->count);
64414 + return atomic64_read_unchecked(&event->count);
64415 }
64416
64417 /*
64418 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64419 values[n++] = 1 + leader->nr_siblings;
64420 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64421 values[n++] = leader->total_time_enabled +
64422 - atomic64_read(&leader->child_total_time_enabled);
64423 + atomic64_read_unchecked(&leader->child_total_time_enabled);
64424 }
64425 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64426 values[n++] = leader->total_time_running +
64427 - atomic64_read(&leader->child_total_time_running);
64428 + atomic64_read_unchecked(&leader->child_total_time_running);
64429 }
64430
64431 size = n * sizeof(u64);
64432 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64433 values[n++] = perf_event_read_value(event);
64434 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64435 values[n++] = event->total_time_enabled +
64436 - atomic64_read(&event->child_total_time_enabled);
64437 + atomic64_read_unchecked(&event->child_total_time_enabled);
64438 }
64439 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64440 values[n++] = event->total_time_running +
64441 - atomic64_read(&event->child_total_time_running);
64442 + atomic64_read_unchecked(&event->child_total_time_running);
64443 }
64444 if (read_format & PERF_FORMAT_ID)
64445 values[n++] = primary_event_id(event);
64446 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64447 static void perf_event_reset(struct perf_event *event)
64448 {
64449 (void)perf_event_read(event);
64450 - atomic64_set(&event->count, 0);
64451 + atomic64_set_unchecked(&event->count, 0);
64452 perf_event_update_userpage(event);
64453 }
64454
64455 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64456 ++userpg->lock;
64457 barrier();
64458 userpg->index = perf_event_index(event);
64459 - userpg->offset = atomic64_read(&event->count);
64460 + userpg->offset = atomic64_read_unchecked(&event->count);
64461 if (event->state == PERF_EVENT_STATE_ACTIVE)
64462 - userpg->offset -= atomic64_read(&event->hw.prev_count);
64463 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64464
64465 userpg->time_enabled = event->total_time_enabled +
64466 - atomic64_read(&event->child_total_time_enabled);
64467 + atomic64_read_unchecked(&event->child_total_time_enabled);
64468
64469 userpg->time_running = event->total_time_running +
64470 - atomic64_read(&event->child_total_time_running);
64471 + atomic64_read_unchecked(&event->child_total_time_running);
64472
64473 barrier();
64474 ++userpg->lock;
64475 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64476 u64 values[4];
64477 int n = 0;
64478
64479 - values[n++] = atomic64_read(&event->count);
64480 + values[n++] = atomic64_read_unchecked(&event->count);
64481 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64482 values[n++] = event->total_time_enabled +
64483 - atomic64_read(&event->child_total_time_enabled);
64484 + atomic64_read_unchecked(&event->child_total_time_enabled);
64485 }
64486 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64487 values[n++] = event->total_time_running +
64488 - atomic64_read(&event->child_total_time_running);
64489 + atomic64_read_unchecked(&event->child_total_time_running);
64490 }
64491 if (read_format & PERF_FORMAT_ID)
64492 values[n++] = primary_event_id(event);
64493 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64494 if (leader != event)
64495 leader->pmu->read(leader);
64496
64497 - values[n++] = atomic64_read(&leader->count);
64498 + values[n++] = atomic64_read_unchecked(&leader->count);
64499 if (read_format & PERF_FORMAT_ID)
64500 values[n++] = primary_event_id(leader);
64501
64502 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64503 if (sub != event)
64504 sub->pmu->read(sub);
64505
64506 - values[n++] = atomic64_read(&sub->count);
64507 + values[n++] = atomic64_read_unchecked(&sub->count);
64508 if (read_format & PERF_FORMAT_ID)
64509 values[n++] = primary_event_id(sub);
64510
64511 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64512 {
64513 struct hw_perf_event *hwc = &event->hw;
64514
64515 - atomic64_add(nr, &event->count);
64516 + atomic64_add_unchecked(nr, &event->count);
64517
64518 if (!hwc->sample_period)
64519 return;
64520 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64521 u64 now;
64522
64523 now = cpu_clock(cpu);
64524 - prev = atomic64_read(&event->hw.prev_count);
64525 - atomic64_set(&event->hw.prev_count, now);
64526 - atomic64_add(now - prev, &event->count);
64527 + prev = atomic64_read_unchecked(&event->hw.prev_count);
64528 + atomic64_set_unchecked(&event->hw.prev_count, now);
64529 + atomic64_add_unchecked(now - prev, &event->count);
64530 }
64531
64532 static int cpu_clock_perf_event_enable(struct perf_event *event)
64533 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64534 struct hw_perf_event *hwc = &event->hw;
64535 int cpu = raw_smp_processor_id();
64536
64537 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64538 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64539 perf_swevent_start_hrtimer(event);
64540
64541 return 0;
64542 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64543 u64 prev;
64544 s64 delta;
64545
64546 - prev = atomic64_xchg(&event->hw.prev_count, now);
64547 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64548 delta = now - prev;
64549 - atomic64_add(delta, &event->count);
64550 + atomic64_add_unchecked(delta, &event->count);
64551 }
64552
64553 static int task_clock_perf_event_enable(struct perf_event *event)
64554 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64555
64556 now = event->ctx->time;
64557
64558 - atomic64_set(&hwc->prev_count, now);
64559 + atomic64_set_unchecked(&hwc->prev_count, now);
64560
64561 perf_swevent_start_hrtimer(event);
64562
64563 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64564 event->parent = parent_event;
64565
64566 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64567 - event->id = atomic64_inc_return(&perf_event_id);
64568 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64569
64570 event->state = PERF_EVENT_STATE_INACTIVE;
64571
64572 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64573 if (child_event->attr.inherit_stat)
64574 perf_event_read_event(child_event, child);
64575
64576 - child_val = atomic64_read(&child_event->count);
64577 + child_val = atomic64_read_unchecked(&child_event->count);
64578
64579 /*
64580 * Add back the child's count to the parent's count:
64581 */
64582 - atomic64_add(child_val, &parent_event->count);
64583 - atomic64_add(child_event->total_time_enabled,
64584 + atomic64_add_unchecked(child_val, &parent_event->count);
64585 + atomic64_add_unchecked(child_event->total_time_enabled,
64586 &parent_event->child_total_time_enabled);
64587 - atomic64_add(child_event->total_time_running,
64588 + atomic64_add_unchecked(child_event->total_time_running,
64589 &parent_event->child_total_time_running);
64590
64591 /*
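
The kernel/perf_event.c changes above follow the PAX_REFCOUNT convention: counters that are statistics or ever-growing ids, where wrap-around is not a refcount bug (event->count, the child time totals, perf_event_id), move to the *_unchecked atomic variants, which skip the overflow detection applied to ordinary atomics. A rough, non-atomic userspace model of that distinction; the types and helpers below are stand-ins, not the PaX implementation.

/*
 * Userspace model of checked vs. unchecked 64-bit adds.  Plain, non-atomic
 * stand-ins -- the real atomic64_*_unchecked helpers do this atomically.
 */
#include <limits.h>
#include <stdio.h>

typedef struct { long long counter; } atomic64_model_t;
typedef struct { long long counter; } atomic64_unchecked_model_t;

/* Checked add: wrap-around is treated as a bug (the real check reports it). */
static void add_checked(long long v, atomic64_model_t *a)
{
    long long res;
    if (__builtin_add_overflow(a->counter, v, &res)) {
        fprintf(stderr, "overflow caught, counter left unchanged\n");
        return;
    }
    a->counter = res;
}

/* Unchecked add: two's-complement wrap is acceptable for statistics. */
static void add_unchecked(long long v, atomic64_unchecked_model_t *a)
{
    a->counter = (long long)((unsigned long long)a->counter +
                             (unsigned long long)v);
}

int main(void)
{
    atomic64_model_t refcount = { .counter = LLONG_MAX };
    atomic64_unchecked_model_t stat = { .counter = LLONG_MAX };

    add_checked(1, &refcount);   /* rejected */
    add_unchecked(1, &stat);     /* wraps, by design */
    printf("stat wrapped to %lld\n", stat.counter);
    return 0;
}
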
64592 diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64593 --- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64594 +++ linux-2.6.32.45/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
64595 @@ -33,6 +33,7 @@
64596 #include <linux/rculist.h>
64597 #include <linux/bootmem.h>
64598 #include <linux/hash.h>
64599 +#include <linux/security.h>
64600 #include <linux/pid_namespace.h>
64601 #include <linux/init_task.h>
64602 #include <linux/syscalls.h>
64603 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64604
64605 int pid_max = PID_MAX_DEFAULT;
64606
64607 -#define RESERVED_PIDS 300
64608 +#define RESERVED_PIDS 500
64609
64610 int pid_max_min = RESERVED_PIDS + 1;
64611 int pid_max_max = PID_MAX_LIMIT;
64612 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64613 */
64614 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64615 {
64616 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64617 + struct task_struct *task;
64618 +
64619 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64620 +
64621 + if (gr_pid_is_chrooted(task))
64622 + return NULL;
64623 +
64624 + return task;
64625 }
64626
64627 struct task_struct *find_task_by_vpid(pid_t vnr)
64628 @@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
64629 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64630 }
64631
64632 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64633 +{
64634 + struct task_struct *task;
64635 +
64636 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64637 +}
64638 +
64639 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64640 {
64641 struct pid *pid;
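
The kernel/pid.c hunk above makes the normal find_task_by_pid_ns() lookup hide tasks that gr_pid_is_chrooted() says the caller should not see, and adds find_task_by_vpid_unrestricted() for the few callers that need the raw lookup. A userspace sketch of that filtered/unfiltered split follows; is_hidden() is only a placeholder for the chroot visibility check, not its real logic.

/*
 * Filtered vs. unrestricted lookup wrappers over one underlying table.
 * is_hidden() is a stand-in predicate, not the grsecurity chroot check.
 */
#include <stddef.h>
#include <stdio.h>

struct task { int pid; int inside_callers_chroot; };

static struct task table[] = {
    { .pid = 1, .inside_callers_chroot = 0 },
    { .pid = 2, .inside_callers_chroot = 1 },
};

static struct task *raw_lookup(int pid)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].pid == pid)
            return &table[i];
    return NULL;
}

static int is_hidden(const struct task *t)      /* placeholder predicate */
{
    return t && !t->inside_callers_chroot;
}

static struct task *find_task(int pid)          /* filtered lookup */
{
    struct task *t = raw_lookup(pid);
    return is_hidden(t) ? NULL : t;
}

static struct task *find_task_unrestricted(int pid)
{
    return raw_lookup(pid);
}

int main(void)
{
    printf("find_task(1)              -> %p\n", (void *)find_task(1));
    printf("find_task_unrestricted(1) -> %p\n", (void *)find_task_unrestricted(1));
    return 0;
}
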
64642 diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64643 --- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64644 +++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64645 @@ -6,6 +6,7 @@
64646 #include <linux/posix-timers.h>
64647 #include <linux/errno.h>
64648 #include <linux/math64.h>
64649 +#include <linux/security.h>
64650 #include <asm/uaccess.h>
64651 #include <linux/kernel_stat.h>
64652 #include <trace/events/timer.h>
64653 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64654
64655 static __init int init_posix_cpu_timers(void)
64656 {
64657 - struct k_clock process = {
64658 + static struct k_clock process = {
64659 .clock_getres = process_cpu_clock_getres,
64660 .clock_get = process_cpu_clock_get,
64661 .clock_set = do_posix_clock_nosettime,
64662 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64663 .nsleep = process_cpu_nsleep,
64664 .nsleep_restart = process_cpu_nsleep_restart,
64665 };
64666 - struct k_clock thread = {
64667 + static struct k_clock thread = {
64668 .clock_getres = thread_cpu_clock_getres,
64669 .clock_get = thread_cpu_clock_get,
64670 .clock_set = do_posix_clock_nosettime,
64671 diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64672 --- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64673 +++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
64674 @@ -42,6 +42,7 @@
64675 #include <linux/compiler.h>
64676 #include <linux/idr.h>
64677 #include <linux/posix-timers.h>
64678 +#include <linux/grsecurity.h>
64679 #include <linux/syscalls.h>
64680 #include <linux/wait.h>
64681 #include <linux/workqueue.h>
64682 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64683 * which we beg off on and pass to do_sys_settimeofday().
64684 */
64685
64686 -static struct k_clock posix_clocks[MAX_CLOCKS];
64687 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64688
64689 /*
64690 * These ones are defined below.
64691 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64692 */
64693 #define CLOCK_DISPATCH(clock, call, arglist) \
64694 ((clock) < 0 ? posix_cpu_##call arglist : \
64695 - (posix_clocks[clock].call != NULL \
64696 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64697 + (posix_clocks[clock]->call != NULL \
64698 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64699
64700 /*
64701 * Default clock hook functions when the struct k_clock passed
64702 @@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64703 struct timespec *tp)
64704 {
64705 tp->tv_sec = 0;
64706 - tp->tv_nsec = posix_clocks[which_clock].res;
64707 + tp->tv_nsec = posix_clocks[which_clock]->res;
64708 return 0;
64709 }
64710
64711 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64712 return 0;
64713 if ((unsigned) which_clock >= MAX_CLOCKS)
64714 return 1;
64715 - if (posix_clocks[which_clock].clock_getres != NULL)
64716 + if (!posix_clocks[which_clock])
64717 return 0;
64718 - if (posix_clocks[which_clock].res != 0)
64719 + if (posix_clocks[which_clock]->clock_getres != NULL)
64720 + return 0;
64721 + if (posix_clocks[which_clock]->res != 0)
64722 return 0;
64723 return 1;
64724 }
64725 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64726 */
64727 static __init int init_posix_timers(void)
64728 {
64729 - struct k_clock clock_realtime = {
64730 + static struct k_clock clock_realtime = {
64731 .clock_getres = hrtimer_get_res,
64732 };
64733 - struct k_clock clock_monotonic = {
64734 + static struct k_clock clock_monotonic = {
64735 .clock_getres = hrtimer_get_res,
64736 .clock_get = posix_ktime_get_ts,
64737 .clock_set = do_posix_clock_nosettime,
64738 };
64739 - struct k_clock clock_monotonic_raw = {
64740 + static struct k_clock clock_monotonic_raw = {
64741 .clock_getres = hrtimer_get_res,
64742 .clock_get = posix_get_monotonic_raw,
64743 .clock_set = do_posix_clock_nosettime,
64744 .timer_create = no_timer_create,
64745 .nsleep = no_nsleep,
64746 };
64747 - struct k_clock clock_realtime_coarse = {
64748 + static struct k_clock clock_realtime_coarse = {
64749 .clock_getres = posix_get_coarse_res,
64750 .clock_get = posix_get_realtime_coarse,
64751 .clock_set = do_posix_clock_nosettime,
64752 .timer_create = no_timer_create,
64753 .nsleep = no_nsleep,
64754 };
64755 - struct k_clock clock_monotonic_coarse = {
64756 + static struct k_clock clock_monotonic_coarse = {
64757 .clock_getres = posix_get_coarse_res,
64758 .clock_get = posix_get_monotonic_coarse,
64759 .clock_set = do_posix_clock_nosettime,
64760 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64761 .nsleep = no_nsleep,
64762 };
64763
64764 + pax_track_stack();
64765 +
64766 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64767 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64768 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64769 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64770 return;
64771 }
64772
64773 - posix_clocks[clock_id] = *new_clock;
64774 + posix_clocks[clock_id] = new_clock;
64775 }
64776 EXPORT_SYMBOL_GPL(register_posix_clock);
64777
64778 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64779 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64780 return -EFAULT;
64781
64782 + /* Only the CLOCK_REALTIME clock can be set; all other clocks
64783 + have their clock_set fptr set to a nosettime dummy function.
64784 + CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
64785 + call common_clock_set, which calls do_sys_settimeofday, which
64786 + we hook.
64787 + */
64788 +
64789 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64790 }
64791
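
In the kernel/posix-timers.c hunks above, posix_clocks[] becomes an array of pointers and the k_clock instances in init_posix_timers()/init_posix_cpu_timers() become static, so register_posix_clock() can store an address instead of copying the structure; a non-static local would not survive past init. A minimal sketch of that register-by-pointer pattern, with invented names:

/*
 * Sketch (plain C, not kernel code) of the registration change above:
 * the clock table holds pointers, so registered instances must outlive
 * the init function -- hence the added "static".
 */
#include <stdio.h>

struct k_clock_sketch {
    int (*clock_getres)(void);
};

#define MAX_CLOCKS_SKETCH 4
static struct k_clock_sketch *clocks[MAX_CLOCKS_SKETCH];

static void register_clock(int id, struct k_clock_sketch *clk)
{
    if (id >= 0 && id < MAX_CLOCKS_SKETCH)
        clocks[id] = clk;            /* pointer stored, no copy */
}

static int dummy_getres(void) { return 1; }

static void init_clocks(void)
{
    /* Must be static: a plain local would be dead once we return. */
    static struct k_clock_sketch clock_realtime = {
        .clock_getres = dummy_getres,
    };
    register_clock(0, &clock_realtime);
}

int main(void)
{
    init_clocks();
    printf("getres -> %d\n", clocks[0]->clock_getres());
    return 0;
}
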
64792 diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64793 --- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64794 +++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64795 @@ -48,14 +48,14 @@ enum {
64796
64797 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64798
64799 -static struct platform_hibernation_ops *hibernation_ops;
64800 +static const struct platform_hibernation_ops *hibernation_ops;
64801
64802 /**
64803 * hibernation_set_ops - set the global hibernate operations
64804 * @ops: the hibernation operations to use in subsequent hibernation transitions
64805 */
64806
64807 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
64808 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64809 {
64810 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64811 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64812 diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64813 --- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64814 +++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64815 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64816 .enable_mask = SYSRQ_ENABLE_BOOT,
64817 };
64818
64819 -static int pm_sysrq_init(void)
64820 +static int __init pm_sysrq_init(void)
64821 {
64822 register_sysrq_key('o', &sysrq_poweroff_op);
64823 return 0;
64824 diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64825 --- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64826 +++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64827 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64828 struct timeval start, end;
64829 u64 elapsed_csecs64;
64830 unsigned int elapsed_csecs;
64831 + bool timedout = false;
64832
64833 do_gettimeofday(&start);
64834
64835 end_time = jiffies + TIMEOUT;
64836 do {
64837 todo = 0;
64838 + if (time_after(jiffies, end_time))
64839 + timedout = true;
64840 read_lock(&tasklist_lock);
64841 do_each_thread(g, p) {
64842 if (frozen(p) || !freezeable(p))
64843 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64844 * It is "frozen enough". If the task does wake
64845 * up, it will immediately call try_to_freeze.
64846 */
64847 - if (!task_is_stopped_or_traced(p) &&
64848 - !freezer_should_skip(p))
64849 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64850 todo++;
64851 + if (timedout) {
64852 + printk(KERN_ERR "Task refusing to freeze:\n");
64853 + sched_show_task(p);
64854 + }
64855 + }
64856 } while_each_thread(g, p);
64857 read_unlock(&tasklist_lock);
64858 yield(); /* Yield is okay here */
64859 - if (time_after(jiffies, end_time))
64860 - break;
64861 - } while (todo);
64862 + } while (todo && !timedout);
64863
64864 do_gettimeofday(&end);
64865 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
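
The kernel/power/process.c hunk above restructures try_to_freeze_tasks(): the timeout is latched in a timedout flag at the top of each pass instead of breaking out at the bottom, so the pass that notices the timeout still walks the list and prints every task that refused to freeze. The userspace loop below mirrors only that control flow, with a fake task list:

/*
 * Control-flow sketch of the freezer loop change; fake tasks, fake
 * freeze test.  The real code yields between passes instead of spinning.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_task { const char *name; bool frozen; };

int main(void)
{
    struct fake_task tasks[] = { { "worker", true }, { "stubborn", false } };
    time_t end_time = time(NULL) + 1;    /* one-second budget */
    bool timedout = false;
    int todo;

    do {
        todo = 0;
        if (time(NULL) > end_time)       /* latch the timeout up front */
            timedout = true;
        for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
            if (tasks[i].frozen)
                continue;
            todo++;
            if (timedout)                /* final pass names the offenders */
                fprintf(stderr, "task refusing to freeze: %s\n",
                        tasks[i].name);
        }
    } while (todo && !timedout);

    return todo ? 1 : 0;
}
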
64866 diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64867 --- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64868 +++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64869 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64870 [PM_SUSPEND_MEM] = "mem",
64871 };
64872
64873 -static struct platform_suspend_ops *suspend_ops;
64874 +static const struct platform_suspend_ops *suspend_ops;
64875
64876 /**
64877 * suspend_set_ops - Set the global suspend method table.
64878 * @ops: Pointer to ops structure.
64879 */
64880 -void suspend_set_ops(struct platform_suspend_ops *ops)
64881 +void suspend_set_ops(const struct platform_suspend_ops *ops)
64882 {
64883 mutex_lock(&pm_mutex);
64884 suspend_ops = ops;
64885 diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64886 --- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64887 +++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64888 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64889 char c;
64890 int error = 0;
64891
64892 +#ifdef CONFIG_GRKERNSEC_DMESG
64893 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64894 + return -EPERM;
64895 +#endif
64896 +
64897 error = security_syslog(type);
64898 if (error)
64899 return error;
64900 diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64901 --- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64902 +++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64903 @@ -39,7 +39,7 @@ struct profile_hit {
64904 /* Oprofile timer tick hook */
64905 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64906
64907 -static atomic_t *prof_buffer;
64908 +static atomic_unchecked_t *prof_buffer;
64909 static unsigned long prof_len, prof_shift;
64910
64911 int prof_on __read_mostly;
64912 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64913 hits[i].pc = 0;
64914 continue;
64915 }
64916 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64917 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64918 hits[i].hits = hits[i].pc = 0;
64919 }
64920 }
64921 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64922 * Add the current hit(s) and flush the write-queue out
64923 * to the global buffer:
64924 */
64925 - atomic_add(nr_hits, &prof_buffer[pc]);
64926 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64927 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64928 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64929 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64930 hits[i].pc = hits[i].hits = 0;
64931 }
64932 out:
64933 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64934 if (prof_on != type || !prof_buffer)
64935 return;
64936 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64937 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64938 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64939 }
64940 #endif /* !CONFIG_SMP */
64941 EXPORT_SYMBOL_GPL(profile_hits);
64942 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64943 return -EFAULT;
64944 buf++; p++; count--; read++;
64945 }
64946 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64947 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64948 if (copy_to_user(buf, (void *)pnt, count))
64949 return -EFAULT;
64950 read += count;
64951 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64952 }
64953 #endif
64954 profile_discard_flip_buffers();
64955 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64956 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64957 return count;
64958 }
64959
64960 diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64961 --- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64962 +++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64963 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64964 return ret;
64965 }
64966
64967 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64968 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64969 + unsigned int log)
64970 {
64971 const struct cred *cred = current_cred(), *tcred;
64972
64973 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64974 cred->gid != tcred->egid ||
64975 cred->gid != tcred->sgid ||
64976 cred->gid != tcred->gid) &&
64977 - !capable(CAP_SYS_PTRACE)) {
64978 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64979 + (log && !capable(CAP_SYS_PTRACE)))
64980 + ) {
64981 rcu_read_unlock();
64982 return -EPERM;
64983 }
64984 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64985 smp_rmb();
64986 if (task->mm)
64987 dumpable = get_dumpable(task->mm);
64988 - if (!dumpable && !capable(CAP_SYS_PTRACE))
64989 + if (!dumpable &&
64990 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64991 + (log && !capable(CAP_SYS_PTRACE))))
64992 return -EPERM;
64993
64994 return security_ptrace_access_check(task, mode);
64995 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64996 {
64997 int err;
64998 task_lock(task);
64999 - err = __ptrace_may_access(task, mode);
65000 + err = __ptrace_may_access(task, mode, 0);
65001 + task_unlock(task);
65002 + return !err;
65003 +}
65004 +
65005 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65006 +{
65007 + int err;
65008 + task_lock(task);
65009 + err = __ptrace_may_access(task, mode, 1);
65010 task_unlock(task);
65011 return !err;
65012 }
65013 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
65014 goto out;
65015
65016 task_lock(task);
65017 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65018 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65019 task_unlock(task);
65020 if (retval)
65021 goto unlock_creds;
65022 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
65023 goto unlock_tasklist;
65024
65025 task->ptrace = PT_PTRACED;
65026 - if (capable(CAP_SYS_PTRACE))
65027 + if (capable_nolog(CAP_SYS_PTRACE))
65028 task->ptrace |= PT_PTRACE_CAP;
65029
65030 __ptrace_link(task, current);
65031 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
65032 {
65033 int copied = 0;
65034
65035 + pax_track_stack();
65036 +
65037 while (len > 0) {
65038 char buf[128];
65039 int this_len, retval;
65040 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
65041 {
65042 int copied = 0;
65043
65044 + pax_track_stack();
65045 +
65046 while (len > 0) {
65047 char buf[128];
65048 int this_len, retval;
65049 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
65050 int ret = -EIO;
65051 siginfo_t siginfo;
65052
65053 + pax_track_stack();
65054 +
65055 switch (request) {
65056 case PTRACE_PEEKTEXT:
65057 case PTRACE_PEEKDATA:
65058 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
65059 ret = ptrace_setoptions(child, data);
65060 break;
65061 case PTRACE_GETEVENTMSG:
65062 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
65063 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
65064 break;
65065
65066 case PTRACE_GETSIGINFO:
65067 ret = ptrace_getsiginfo(child, &siginfo);
65068 if (!ret)
65069 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
65070 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
65071 &siginfo);
65072 break;
65073
65074 case PTRACE_SETSIGINFO:
65075 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
65076 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
65077 sizeof siginfo))
65078 ret = -EFAULT;
65079 else
65080 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65081 goto out;
65082 }
65083
65084 + if (gr_handle_ptrace(child, request)) {
65085 + ret = -EPERM;
65086 + goto out_put_task_struct;
65087 + }
65088 +
65089 if (request == PTRACE_ATTACH) {
65090 ret = ptrace_attach(child);
65091 /*
65092 * Some architectures need to do book-keeping after
65093 * a ptrace attach.
65094 */
65095 - if (!ret)
65096 + if (!ret) {
65097 arch_ptrace_attach(child);
65098 + gr_audit_ptrace(child);
65099 + }
65100 goto out_put_task_struct;
65101 }
65102
65103 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65104 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65105 if (copied != sizeof(tmp))
65106 return -EIO;
65107 - return put_user(tmp, (unsigned long __user *)data);
65108 + return put_user(tmp, (__force unsigned long __user *)data);
65109 }
65110
65111 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65112 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65113 siginfo_t siginfo;
65114 int ret;
65115
65116 + pax_track_stack();
65117 +
65118 switch (request) {
65119 case PTRACE_PEEKTEXT:
65120 case PTRACE_PEEKDATA:
65121 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65122 goto out;
65123 }
65124
65125 + if (gr_handle_ptrace(child, request)) {
65126 + ret = -EPERM;
65127 + goto out_put_task_struct;
65128 + }
65129 +
65130 if (request == PTRACE_ATTACH) {
65131 ret = ptrace_attach(child);
65132 /*
65133 * Some architectures need to do book-keeping after
65134 * a ptrace attach.
65135 */
65136 - if (!ret)
65137 + if (!ret) {
65138 arch_ptrace_attach(child);
65139 + gr_audit_ptrace(child);
65140 + }
65141 goto out_put_task_struct;
65142 }
65143
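
The kernel/ptrace.c hunks above thread a log flag through __ptrace_may_access(): the informational ptrace_may_access() wrapper passes 0 and ends up in the non-logging capability check, while an actual attach passes 1 and takes the audited capable() path. A stand-in sketch of that one-helper, two-behaviours split; the cap_check_* functions below are placeholders, not the kernel's capable()/capable_nolog():

/*
 * One permission helper, audited or silent depending on the caller.
 * Placeholder checks only; both always grant here for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static bool cap_check_logged(void)
{
    fprintf(stderr, "audit: capability use recorded\n");
    return true;                       /* pretend the caller is privileged */
}

static bool cap_check_silent(void)
{
    return true;                       /* same decision, no audit entry */
}

static int may_access(bool privileged_needed, bool log)
{
    if (!privileged_needed)
        return 0;
    if (log ? cap_check_logged() : cap_check_silent())
        return 0;
    return -1;                         /* -EPERM in the real code */
}

int main(void)
{
    /* Informational probe: don't generate an audit entry. */
    printf("probe:  %d\n", may_access(true, false));
    /* Actual attach attempt: log the capability use. */
    printf("attach: %d\n", may_access(true, true));
    return 0;
}
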
65144 diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
65145 --- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65146 +++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65147 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65148 { 0 };
65149 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65150 { 0 };
65151 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65152 -static atomic_t n_rcu_torture_alloc;
65153 -static atomic_t n_rcu_torture_alloc_fail;
65154 -static atomic_t n_rcu_torture_free;
65155 -static atomic_t n_rcu_torture_mberror;
65156 -static atomic_t n_rcu_torture_error;
65157 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65158 +static atomic_unchecked_t n_rcu_torture_alloc;
65159 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65160 +static atomic_unchecked_t n_rcu_torture_free;
65161 +static atomic_unchecked_t n_rcu_torture_mberror;
65162 +static atomic_unchecked_t n_rcu_torture_error;
65163 static long n_rcu_torture_timers;
65164 static struct list_head rcu_torture_removed;
65165 static cpumask_var_t shuffle_tmp_mask;
65166 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65167
65168 spin_lock_bh(&rcu_torture_lock);
65169 if (list_empty(&rcu_torture_freelist)) {
65170 - atomic_inc(&n_rcu_torture_alloc_fail);
65171 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65172 spin_unlock_bh(&rcu_torture_lock);
65173 return NULL;
65174 }
65175 - atomic_inc(&n_rcu_torture_alloc);
65176 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65177 p = rcu_torture_freelist.next;
65178 list_del_init(p);
65179 spin_unlock_bh(&rcu_torture_lock);
65180 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65181 static void
65182 rcu_torture_free(struct rcu_torture *p)
65183 {
65184 - atomic_inc(&n_rcu_torture_free);
65185 + atomic_inc_unchecked(&n_rcu_torture_free);
65186 spin_lock_bh(&rcu_torture_lock);
65187 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65188 spin_unlock_bh(&rcu_torture_lock);
65189 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65190 i = rp->rtort_pipe_count;
65191 if (i > RCU_TORTURE_PIPE_LEN)
65192 i = RCU_TORTURE_PIPE_LEN;
65193 - atomic_inc(&rcu_torture_wcount[i]);
65194 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65195 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65196 rp->rtort_mbtest = 0;
65197 rcu_torture_free(rp);
65198 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65199 i = rp->rtort_pipe_count;
65200 if (i > RCU_TORTURE_PIPE_LEN)
65201 i = RCU_TORTURE_PIPE_LEN;
65202 - atomic_inc(&rcu_torture_wcount[i]);
65203 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65204 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65205 rp->rtort_mbtest = 0;
65206 list_del(&rp->rtort_free);
65207 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65208 i = old_rp->rtort_pipe_count;
65209 if (i > RCU_TORTURE_PIPE_LEN)
65210 i = RCU_TORTURE_PIPE_LEN;
65211 - atomic_inc(&rcu_torture_wcount[i]);
65212 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65213 old_rp->rtort_pipe_count++;
65214 cur_ops->deferred_free(old_rp);
65215 }
65216 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65217 return;
65218 }
65219 if (p->rtort_mbtest == 0)
65220 - atomic_inc(&n_rcu_torture_mberror);
65221 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65222 spin_lock(&rand_lock);
65223 cur_ops->read_delay(&rand);
65224 n_rcu_torture_timers++;
65225 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65226 continue;
65227 }
65228 if (p->rtort_mbtest == 0)
65229 - atomic_inc(&n_rcu_torture_mberror);
65230 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65231 cur_ops->read_delay(&rand);
65232 preempt_disable();
65233 pipe_count = p->rtort_pipe_count;
65234 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65235 rcu_torture_current,
65236 rcu_torture_current_version,
65237 list_empty(&rcu_torture_freelist),
65238 - atomic_read(&n_rcu_torture_alloc),
65239 - atomic_read(&n_rcu_torture_alloc_fail),
65240 - atomic_read(&n_rcu_torture_free),
65241 - atomic_read(&n_rcu_torture_mberror),
65242 + atomic_read_unchecked(&n_rcu_torture_alloc),
65243 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65244 + atomic_read_unchecked(&n_rcu_torture_free),
65245 + atomic_read_unchecked(&n_rcu_torture_mberror),
65246 n_rcu_torture_timers);
65247 - if (atomic_read(&n_rcu_torture_mberror) != 0)
65248 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65249 cnt += sprintf(&page[cnt], " !!!");
65250 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65251 if (i > 1) {
65252 cnt += sprintf(&page[cnt], "!!! ");
65253 - atomic_inc(&n_rcu_torture_error);
65254 + atomic_inc_unchecked(&n_rcu_torture_error);
65255 WARN_ON_ONCE(1);
65256 }
65257 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65258 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65259 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65260 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65261 cnt += sprintf(&page[cnt], " %d",
65262 - atomic_read(&rcu_torture_wcount[i]));
65263 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65264 }
65265 cnt += sprintf(&page[cnt], "\n");
65266 if (cur_ops->stats)
65267 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65268
65269 if (cur_ops->cleanup)
65270 cur_ops->cleanup();
65271 - if (atomic_read(&n_rcu_torture_error))
65272 + if (atomic_read_unchecked(&n_rcu_torture_error))
65273 rcu_torture_print_module_parms("End of test: FAILURE");
65274 else
65275 rcu_torture_print_module_parms("End of test: SUCCESS");
65276 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65277
65278 rcu_torture_current = NULL;
65279 rcu_torture_current_version = 0;
65280 - atomic_set(&n_rcu_torture_alloc, 0);
65281 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65282 - atomic_set(&n_rcu_torture_free, 0);
65283 - atomic_set(&n_rcu_torture_mberror, 0);
65284 - atomic_set(&n_rcu_torture_error, 0);
65285 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65286 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65287 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65288 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65289 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65290 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65291 - atomic_set(&rcu_torture_wcount[i], 0);
65292 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65293 for_each_possible_cpu(cpu) {
65294 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65295 per_cpu(rcu_torture_count, cpu)[i] = 0;
65296 diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65297 --- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65298 +++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65299 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65300 /*
65301 * Do softirq processing for the current CPU.
65302 */
65303 -static void rcu_process_callbacks(struct softirq_action *unused)
65304 +static void rcu_process_callbacks(void)
65305 {
65306 /*
65307 * Memory references from any prior RCU read-side critical sections
65308 diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65309 --- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65310 +++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65311 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65312 */
65313 void __rcu_read_lock(void)
65314 {
65315 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65316 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65317 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65318 }
65319 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65320 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65321 struct task_struct *t = current;
65322
65323 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65324 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65325 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65326 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65327 rcu_read_unlock_special(t);
65328 }
65329 diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65330 --- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65331 +++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65332 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65333 unsigned int flags,
65334 int *nonpad_ret)
65335 {
65336 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65337 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65338 struct rchan_buf *rbuf = in->private_data;
65339 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65340 uint64_t pos = (uint64_t) *ppos;
65341 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65342 .ops = &relay_pipe_buf_ops,
65343 .spd_release = relay_page_release,
65344 };
65345 + ssize_t ret;
65346 +
65347 + pax_track_stack();
65348
65349 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65350 return 0;
65351 diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65352 --- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65353 +++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65354 @@ -132,8 +132,18 @@ static const struct file_operations proc
65355
65356 static int __init ioresources_init(void)
65357 {
65358 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65359 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65360 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65361 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65362 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65363 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65364 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65365 +#endif
65366 +#else
65367 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65368 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65369 +#endif
65370 return 0;
65371 }
65372 __initcall(ioresources_init);
65373 diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65374 --- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65375 +++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65376 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65377 */
65378 spin_lock_irqsave(&pendowner->pi_lock, flags);
65379
65380 - WARN_ON(!pendowner->pi_blocked_on);
65381 + BUG_ON(!pendowner->pi_blocked_on);
65382 WARN_ON(pendowner->pi_blocked_on != waiter);
65383 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65384
65385 diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65386 --- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65387 +++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65388 @@ -21,7 +21,7 @@
65389 #define MAX_RT_TEST_MUTEXES 8
65390
65391 static spinlock_t rttest_lock;
65392 -static atomic_t rttest_event;
65393 +static atomic_unchecked_t rttest_event;
65394
65395 struct test_thread_data {
65396 int opcode;
65397 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65398
65399 case RTTEST_LOCKCONT:
65400 td->mutexes[td->opdata] = 1;
65401 - td->event = atomic_add_return(1, &rttest_event);
65402 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65403 return 0;
65404
65405 case RTTEST_RESET:
65406 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65407 return 0;
65408
65409 case RTTEST_RESETEVENT:
65410 - atomic_set(&rttest_event, 0);
65411 + atomic_set_unchecked(&rttest_event, 0);
65412 return 0;
65413
65414 default:
65415 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65416 return ret;
65417
65418 td->mutexes[id] = 1;
65419 - td->event = atomic_add_return(1, &rttest_event);
65420 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65421 rt_mutex_lock(&mutexes[id]);
65422 - td->event = atomic_add_return(1, &rttest_event);
65423 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65424 td->mutexes[id] = 4;
65425 return 0;
65426
65427 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65428 return ret;
65429
65430 td->mutexes[id] = 1;
65431 - td->event = atomic_add_return(1, &rttest_event);
65432 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65433 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65434 - td->event = atomic_add_return(1, &rttest_event);
65435 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65436 td->mutexes[id] = ret ? 0 : 4;
65437 return ret ? -EINTR : 0;
65438
65439 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65440 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65441 return ret;
65442
65443 - td->event = atomic_add_return(1, &rttest_event);
65444 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65445 rt_mutex_unlock(&mutexes[id]);
65446 - td->event = atomic_add_return(1, &rttest_event);
65447 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65448 td->mutexes[id] = 0;
65449 return 0;
65450
65451 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65452 break;
65453
65454 td->mutexes[dat] = 2;
65455 - td->event = atomic_add_return(1, &rttest_event);
65456 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65457 break;
65458
65459 case RTTEST_LOCKBKL:
65460 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65461 return;
65462
65463 td->mutexes[dat] = 3;
65464 - td->event = atomic_add_return(1, &rttest_event);
65465 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65466 break;
65467
65468 case RTTEST_LOCKNOWAIT:
65469 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65470 return;
65471
65472 td->mutexes[dat] = 1;
65473 - td->event = atomic_add_return(1, &rttest_event);
65474 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65475 return;
65476
65477 case RTTEST_LOCKBKL:
65478 diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65479 --- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65480 +++ linux-2.6.32.45/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
65481 @@ -5043,7 +5043,7 @@ out:
65482 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65483 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65484 */
65485 -static void run_rebalance_domains(struct softirq_action *h)
65486 +static void run_rebalance_domains(void)
65487 {
65488 int this_cpu = smp_processor_id();
65489 struct rq *this_rq = cpu_rq(this_cpu);
65490 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
65491 struct rq *rq;
65492 int cpu;
65493
65494 + pax_track_stack();
65495 +
65496 need_resched:
65497 preempt_disable();
65498 cpu = smp_processor_id();
65499 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
65500 * Look out! "owner" is an entirely speculative pointer
65501 * access and not reliable.
65502 */
65503 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65504 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65505 {
65506 unsigned int cpu;
65507 struct rq *rq;
65508 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
65509 * DEBUG_PAGEALLOC could have unmapped it if
65510 * the mutex owner just released it and exited.
65511 */
65512 - if (probe_kernel_address(&owner->cpu, cpu))
65513 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65514 return 0;
65515 #else
65516 - cpu = owner->cpu;
65517 + cpu = task_thread_info(owner)->cpu;
65518 #endif
65519
65520 /*
65521 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
65522 /*
65523 * Is that owner really running on that cpu?
65524 */
65525 - if (task_thread_info(rq->curr) != owner || need_resched())
65526 + if (rq->curr != owner || need_resched())
65527 return 0;
65528
65529 cpu_relax();
65530 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
65531 /* convert nice value [19,-20] to rlimit style value [1,40] */
65532 int nice_rlim = 20 - nice;
65533
65534 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65535 +
65536 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65537 capable(CAP_SYS_NICE));
65538 }
65539 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65540 if (nice > 19)
65541 nice = 19;
65542
65543 - if (increment < 0 && !can_nice(current, nice))
65544 + if (increment < 0 && (!can_nice(current, nice) ||
65545 + gr_handle_chroot_nice()))
65546 return -EPERM;
65547
65548 retval = security_task_setnice(current, nice);
65549 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
65550 long power;
65551 int weight;
65552
65553 - WARN_ON(!sd || !sd->groups);
65554 + BUG_ON(!sd || !sd->groups);
65555
65556 if (cpu != group_first_cpu(sd->groups))
65557 return;
65558 diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65559 --- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65560 +++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65561 @@ -41,12 +41,12 @@
65562
65563 static struct kmem_cache *sigqueue_cachep;
65564
65565 -static void __user *sig_handler(struct task_struct *t, int sig)
65566 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65567 {
65568 return t->sighand->action[sig - 1].sa.sa_handler;
65569 }
65570
65571 -static int sig_handler_ignored(void __user *handler, int sig)
65572 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65573 {
65574 /* Is it explicitly or implicitly ignored? */
65575 return handler == SIG_IGN ||
65576 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65577 static int sig_task_ignored(struct task_struct *t, int sig,
65578 int from_ancestor_ns)
65579 {
65580 - void __user *handler;
65581 + __sighandler_t handler;
65582
65583 handler = sig_handler(t, sig);
65584
65585 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65586 */
65587 user = get_uid(__task_cred(t)->user);
65588 atomic_inc(&user->sigpending);
65589 +
65590 + if (!override_rlimit)
65591 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65592 if (override_rlimit ||
65593 atomic_read(&user->sigpending) <=
65594 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65595 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65596
65597 int unhandled_signal(struct task_struct *tsk, int sig)
65598 {
65599 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65600 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65601 if (is_global_init(tsk))
65602 return 1;
65603 if (handler != SIG_IGN && handler != SIG_DFL)
65604 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65605 }
65606 }
65607
65608 + /* allow glibc communication via tgkill to other threads in our
65609 + thread group */
65610 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65611 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65612 + && gr_handle_signal(t, sig))
65613 + return -EPERM;
65614 +
65615 return security_task_kill(t, info, sig, 0);
65616 }
65617
65618 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65619 return send_signal(sig, info, p, 1);
65620 }
65621
65622 -static int
65623 +int
65624 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65625 {
65626 return send_signal(sig, info, t, 0);
65627 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65628 unsigned long int flags;
65629 int ret, blocked, ignored;
65630 struct k_sigaction *action;
65631 + int is_unhandled = 0;
65632
65633 spin_lock_irqsave(&t->sighand->siglock, flags);
65634 action = &t->sighand->action[sig-1];
65635 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65636 }
65637 if (action->sa.sa_handler == SIG_DFL)
65638 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65639 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65640 + is_unhandled = 1;
65641 ret = specific_send_sig_info(sig, info, t);
65642 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65643
65644 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
65645 + normal operation */
65646 + if (is_unhandled) {
65647 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65648 + gr_handle_crash(t, sig);
65649 + }
65650 +
65651 return ret;
65652 }
65653
65654 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65655 {
65656 int ret = check_kill_permission(sig, info, p);
65657
65658 - if (!ret && sig)
65659 + if (!ret && sig) {
65660 ret = do_send_sig_info(sig, info, p, true);
65661 + if (!ret)
65662 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65663 + }
65664
65665 return ret;
65666 }
65667 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65668 {
65669 siginfo_t info;
65670
65671 + pax_track_stack();
65672 +
65673 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65674
65675 memset(&info, 0, sizeof info);
65676 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65677 int error = -ESRCH;
65678
65679 rcu_read_lock();
65680 - p = find_task_by_vpid(pid);
65681 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65682 + /* allow glibc communication via tgkill to other threads in our
65683 + thread group */
65684 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65685 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65686 + p = find_task_by_vpid_unrestricted(pid);
65687 + else
65688 +#endif
65689 + p = find_task_by_vpid(pid);
65690 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65691 error = check_kill_permission(sig, info, p);
65692 /*
65693 diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65694 --- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65695 +++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65696 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65697 }
65698 EXPORT_SYMBOL(smp_call_function);
65699
65700 -void ipi_call_lock(void)
65701 +void ipi_call_lock(void) __acquires(call_function.lock)
65702 {
65703 spin_lock(&call_function.lock);
65704 }
65705
65706 -void ipi_call_unlock(void)
65707 +void ipi_call_unlock(void) __releases(call_function.lock)
65708 {
65709 spin_unlock(&call_function.lock);
65710 }
65711
65712 -void ipi_call_lock_irq(void)
65713 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65714 {
65715 spin_lock_irq(&call_function.lock);
65716 }
65717
65718 -void ipi_call_unlock_irq(void)
65719 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65720 {
65721 spin_unlock_irq(&call_function.lock);
65722 }
65723 diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65724 --- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65725 +++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65726 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65727
65728 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65729
65730 -char *softirq_to_name[NR_SOFTIRQS] = {
65731 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65732 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65733 "TASKLET", "SCHED", "HRTIMER", "RCU"
65734 };
65735 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65736
65737 asmlinkage void __do_softirq(void)
65738 {
65739 - struct softirq_action *h;
65740 + const struct softirq_action *h;
65741 __u32 pending;
65742 int max_restart = MAX_SOFTIRQ_RESTART;
65743 int cpu;
65744 @@ -233,7 +233,7 @@ restart:
65745 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65746
65747 trace_softirq_entry(h, softirq_vec);
65748 - h->action(h);
65749 + h->action();
65750 trace_softirq_exit(h, softirq_vec);
65751 if (unlikely(prev_count != preempt_count())) {
65752 printk(KERN_ERR "huh, entered softirq %td %s %p"
65753 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65754 local_irq_restore(flags);
65755 }
65756
65757 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65758 +void open_softirq(int nr, void (*action)(void))
65759 {
65760 - softirq_vec[nr].action = action;
65761 + pax_open_kernel();
65762 + *(void **)&softirq_vec[nr].action = action;
65763 + pax_close_kernel();
65764 }
65765
65766 /*
65767 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65768
65769 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65770
65771 -static void tasklet_action(struct softirq_action *a)
65772 +static void tasklet_action(void)
65773 {
65774 struct tasklet_struct *list;
65775
65776 @@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65777 }
65778 }
65779
65780 -static void tasklet_hi_action(struct softirq_action *a)
65781 +static void tasklet_hi_action(void)
65782 {
65783 struct tasklet_struct *list;
65784
65785 diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65786 --- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65787 +++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65788 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65789 error = -EACCES;
65790 goto out;
65791 }
65792 +
65793 + if (gr_handle_chroot_setpriority(p, niceval)) {
65794 + error = -EACCES;
65795 + goto out;
65796 + }
65797 +
65798 no_nice = security_task_setnice(p, niceval);
65799 if (no_nice) {
65800 error = no_nice;
65801 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65802 !(user = find_user(who)))
65803 goto out_unlock; /* No processes for this user */
65804
65805 - do_each_thread(g, p)
65806 + do_each_thread(g, p) {
65807 if (__task_cred(p)->uid == who)
65808 error = set_one_prio(p, niceval, error);
65809 - while_each_thread(g, p);
65810 + } while_each_thread(g, p);
65811 if (who != cred->uid)
65812 free_uid(user); /* For find_user() */
65813 break;
65814 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65815 !(user = find_user(who)))
65816 goto out_unlock; /* No processes for this user */
65817
65818 - do_each_thread(g, p)
65819 + do_each_thread(g, p) {
65820 if (__task_cred(p)->uid == who) {
65821 niceval = 20 - task_nice(p);
65822 if (niceval > retval)
65823 retval = niceval;
65824 }
65825 - while_each_thread(g, p);
65826 + } while_each_thread(g, p);
65827 if (who != cred->uid)
65828 free_uid(user); /* for find_user() */
65829 break;
65830 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65831 goto error;
65832 }
65833
65834 + if (gr_check_group_change(new->gid, new->egid, -1))
65835 + goto error;
65836 +
65837 if (rgid != (gid_t) -1 ||
65838 (egid != (gid_t) -1 && egid != old->gid))
65839 new->sgid = new->egid;
65840 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65841 goto error;
65842
65843 retval = -EPERM;
65844 +
65845 + if (gr_check_group_change(gid, gid, gid))
65846 + goto error;
65847 +
65848 if (capable(CAP_SETGID))
65849 new->gid = new->egid = new->sgid = new->fsgid = gid;
65850 else if (gid == old->gid || gid == old->sgid)
65851 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65852 if (!new_user)
65853 return -EAGAIN;
65854
65855 + /*
65856 + * We don't fail in case of NPROC limit excess here because too many
65857 + * poorly written programs don't check set*uid() return code, assuming
65858 + * it never fails if called by root. We may still enforce NPROC limit
65859 + * for programs doing set*uid()+execve() by harmlessly deferring the
65860 + * failure to the execve() stage.
65861 + */
65862 if (atomic_read(&new_user->processes) >=
65863 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65864 - new_user != INIT_USER) {
65865 - free_uid(new_user);
65866 - return -EAGAIN;
65867 - }
65868 + new_user != INIT_USER)
65869 + current->flags |= PF_NPROC_EXCEEDED;
65870 + else
65871 + current->flags &= ~PF_NPROC_EXCEEDED;
65872
65873 free_uid(new->user);
65874 new->user = new_user;
65875 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65876 goto error;
65877 }
65878
65879 + if (gr_check_user_change(new->uid, new->euid, -1))
65880 + goto error;
65881 +
65882 if (new->uid != old->uid) {
65883 retval = set_user(new);
65884 if (retval < 0)
65885 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65886 goto error;
65887
65888 retval = -EPERM;
65889 +
65890 + if (gr_check_crash_uid(uid))
65891 + goto error;
65892 + if (gr_check_user_change(uid, uid, uid))
65893 + goto error;
65894 +
65895 if (capable(CAP_SETUID)) {
65896 new->suid = new->uid = uid;
65897 if (uid != old->uid) {
65898 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65899 goto error;
65900 }
65901
65902 + if (gr_check_user_change(ruid, euid, -1))
65903 + goto error;
65904 +
65905 if (ruid != (uid_t) -1) {
65906 new->uid = ruid;
65907 if (ruid != old->uid) {
65908 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65909 goto error;
65910 }
65911
65912 + if (gr_check_group_change(rgid, egid, -1))
65913 + goto error;
65914 +
65915 if (rgid != (gid_t) -1)
65916 new->gid = rgid;
65917 if (egid != (gid_t) -1)
65918 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65919 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65920 goto error;
65921
65922 + if (gr_check_user_change(-1, -1, uid))
65923 + goto error;
65924 +
65925 if (uid == old->uid || uid == old->euid ||
65926 uid == old->suid || uid == old->fsuid ||
65927 capable(CAP_SETUID)) {
65928 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65929 if (gid == old->gid || gid == old->egid ||
65930 gid == old->sgid || gid == old->fsgid ||
65931 capable(CAP_SETGID)) {
65932 + if (gr_check_group_change(-1, -1, gid))
65933 + goto error;
65934 +
65935 if (gid != old_fsgid) {
65936 new->fsgid = gid;
65937 goto change_okay;
65938 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65939 error = get_dumpable(me->mm);
65940 break;
65941 case PR_SET_DUMPABLE:
65942 - if (arg2 < 0 || arg2 > 1) {
65943 + if (arg2 > 1) {
65944 error = -EINVAL;
65945 break;
65946 }
65947 diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65948 --- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65949 +++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65950 @@ -63,6 +63,13 @@
65951 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65952
65953 #if defined(CONFIG_SYSCTL)
65954 +#include <linux/grsecurity.h>
65955 +#include <linux/grinternal.h>
65956 +
65957 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65958 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65959 + const int op);
65960 +extern int gr_handle_chroot_sysctl(const int op);
65961
65962 /* External variables not in a header file. */
65963 extern int C_A_D;
65964 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65965 static int proc_taint(struct ctl_table *table, int write,
65966 void __user *buffer, size_t *lenp, loff_t *ppos);
65967 #endif
65968 +extern ctl_table grsecurity_table[];
65969
65970 static struct ctl_table root_table[];
65971 static struct ctl_table_root sysctl_table_root;
65972 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65973 int sysctl_legacy_va_layout;
65974 #endif
65975
65976 +#ifdef CONFIG_PAX_SOFTMODE
65977 +static ctl_table pax_table[] = {
65978 + {
65979 + .ctl_name = CTL_UNNUMBERED,
65980 + .procname = "softmode",
65981 + .data = &pax_softmode,
65982 + .maxlen = sizeof(unsigned int),
65983 + .mode = 0600,
65984 + .proc_handler = &proc_dointvec,
65985 + },
65986 +
65987 + { .ctl_name = 0 }
65988 +};
65989 +#endif
65990 +
65991 extern int prove_locking;
65992 extern int lock_stat;
65993
65994 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65995 #endif
65996
65997 static struct ctl_table kern_table[] = {
65998 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65999 + {
66000 + .ctl_name = CTL_UNNUMBERED,
66001 + .procname = "grsecurity",
66002 + .mode = 0500,
66003 + .child = grsecurity_table,
66004 + },
66005 +#endif
66006 +
66007 +#ifdef CONFIG_PAX_SOFTMODE
66008 + {
66009 + .ctl_name = CTL_UNNUMBERED,
66010 + .procname = "pax",
66011 + .mode = 0500,
66012 + .child = pax_table,
66013 + },
66014 +#endif
66015 +
66016 {
66017 .ctl_name = CTL_UNNUMBERED,
66018 .procname = "sched_child_runs_first",
66019 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
66020 .data = &modprobe_path,
66021 .maxlen = KMOD_PATH_LEN,
66022 .mode = 0644,
66023 - .proc_handler = &proc_dostring,
66024 - .strategy = &sysctl_string,
66025 + .proc_handler = &proc_dostring_modpriv,
66026 + .strategy = &sysctl_string_modpriv,
66027 },
66028 {
66029 .ctl_name = CTL_UNNUMBERED,
66030 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
66031 .mode = 0644,
66032 .proc_handler = &proc_dointvec
66033 },
66034 + {
66035 + .procname = "heap_stack_gap",
66036 + .data = &sysctl_heap_stack_gap,
66037 + .maxlen = sizeof(sysctl_heap_stack_gap),
66038 + .mode = 0644,
66039 + .proc_handler = proc_doulongvec_minmax,
66040 + },
66041 #else
66042 {
66043 .ctl_name = CTL_UNNUMBERED,
66044 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
66045 return 0;
66046 }
66047
66048 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
66049 +
66050 static int parse_table(int __user *name, int nlen,
66051 void __user *oldval, size_t __user *oldlenp,
66052 void __user *newval, size_t newlen,
66053 @@ -1821,7 +1871,7 @@ repeat:
66054 if (n == table->ctl_name) {
66055 int error;
66056 if (table->child) {
66057 - if (sysctl_perm(root, table, MAY_EXEC))
66058 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
66059 return -EPERM;
66060 name++;
66061 nlen--;
66062 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
66063 int error;
66064 int mode;
66065
66066 + if (table->parent != NULL && table->parent->procname != NULL &&
66067 + table->procname != NULL &&
66068 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66069 + return -EACCES;
66070 + if (gr_handle_chroot_sysctl(op))
66071 + return -EACCES;
66072 + error = gr_handle_sysctl(table, op);
66073 + if (error)
66074 + return error;
66075 +
66076 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66077 + if (error)
66078 + return error;
66079 +
66080 + if (root->permissions)
66081 + mode = root->permissions(root, current->nsproxy, table);
66082 + else
66083 + mode = table->mode;
66084 +
66085 + return test_perm(mode, op);
66086 +}
66087 +
66088 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66089 +{
66090 + int error;
66091 + int mode;
66092 +
66093 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66094 if (error)
66095 return error;
66096 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66097 buffer, lenp, ppos);
66098 }
66099
66100 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66101 + void __user *buffer, size_t *lenp, loff_t *ppos)
66102 +{
66103 + if (write && !capable(CAP_SYS_MODULE))
66104 + return -EPERM;
66105 +
66106 + return _proc_do_string(table->data, table->maxlen, write,
66107 + buffer, lenp, ppos);
66108 +}
66109 +
66110
66111 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66112 int *valp,
66113 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66114 vleft = table->maxlen / sizeof(unsigned long);
66115 left = *lenp;
66116
66117 - for (; left && vleft--; i++, min++, max++, first=0) {
66118 + for (; left && vleft--; i++, first=0) {
66119 if (write) {
66120 while (left) {
66121 char c;
66122 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66123 return -ENOSYS;
66124 }
66125
66126 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66127 + void __user *buffer, size_t *lenp, loff_t *ppos)
66128 +{
66129 + return -ENOSYS;
66130 +}
66131 +
66132 int proc_dointvec(struct ctl_table *table, int write,
66133 void __user *buffer, size_t *lenp, loff_t *ppos)
66134 {
66135 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66136 return 1;
66137 }
66138
66139 +int sysctl_string_modpriv(struct ctl_table *table,
66140 + void __user *oldval, size_t __user *oldlenp,
66141 + void __user *newval, size_t newlen)
66142 +{
66143 + if (newval && newlen && !capable(CAP_SYS_MODULE))
66144 + return -EPERM;
66145 +
66146 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
66147 +}
66148 +
66149 /*
66150 * This function makes sure that all of the integers in the vector
66151 * are between the minimum and maximum values given in the arrays
66152 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66153 return -ENOSYS;
66154 }
66155
66156 +int sysctl_string_modpriv(struct ctl_table *table,
66157 + void __user *oldval, size_t __user *oldlenp,
66158 + void __user *newval, size_t newlen)
66159 +{
66160 + return -ENOSYS;
66161 +}
66162 +
66163 int sysctl_intvec(struct ctl_table *table,
66164 void __user *oldval, size_t __user *oldlenp,
66165 void __user *newval, size_t newlen)
66166 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66167 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66168 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66169 EXPORT_SYMBOL(proc_dostring);
66170 +EXPORT_SYMBOL(proc_dostring_modpriv);
66171 EXPORT_SYMBOL(proc_doulongvec_minmax);
66172 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66173 EXPORT_SYMBOL(register_sysctl_table);
66174 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66175 EXPORT_SYMBOL(sysctl_jiffies);
66176 EXPORT_SYMBOL(sysctl_ms_jiffies);
66177 EXPORT_SYMBOL(sysctl_string);
66178 +EXPORT_SYMBOL(sysctl_string_modpriv);
66179 EXPORT_SYMBOL(sysctl_data);
66180 EXPORT_SYMBOL(unregister_sysctl_table);
66181 diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
66182 --- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66183 +++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66184 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66185 } else {
66186 if ((table->strategy == sysctl_data) ||
66187 (table->strategy == sysctl_string) ||
66188 + (table->strategy == sysctl_string_modpriv) ||
66189 (table->strategy == sysctl_intvec) ||
66190 (table->strategy == sysctl_jiffies) ||
66191 (table->strategy == sysctl_ms_jiffies) ||
66192 (table->proc_handler == proc_dostring) ||
66193 + (table->proc_handler == proc_dostring_modpriv) ||
66194 (table->proc_handler == proc_dointvec) ||
66195 (table->proc_handler == proc_dointvec_minmax) ||
66196 (table->proc_handler == proc_dointvec_jiffies) ||
66197 diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
66198 --- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66199 +++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66200 @@ -26,9 +26,12 @@
66201 #include <linux/cgroup.h>
66202 #include <linux/fs.h>
66203 #include <linux/file.h>
66204 +#include <linux/grsecurity.h>
66205 #include <net/genetlink.h>
66206 #include <asm/atomic.h>
66207
66208 +extern int gr_is_taskstats_denied(int pid);
66209 +
66210 /*
66211 * Maximum length of a cpumask that can be specified in
66212 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66213 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66214 size_t size;
66215 cpumask_var_t mask;
66216
66217 + if (gr_is_taskstats_denied(current->pid))
66218 + return -EACCES;
66219 +
66220 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66221 return -ENOMEM;
66222
66223 diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66224 --- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66225 +++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66226 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66227 * then clear the broadcast bit.
66228 */
66229 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66230 - int cpu = smp_processor_id();
66231 + cpu = smp_processor_id();
66232
66233 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66234 tick_broadcast_clear_oneshot(cpu);
66235 diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66236 --- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66237 +++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66238 @@ -14,6 +14,7 @@
66239 #include <linux/init.h>
66240 #include <linux/mm.h>
66241 #include <linux/sched.h>
66242 +#include <linux/grsecurity.h>
66243 #include <linux/sysdev.h>
66244 #include <linux/clocksource.h>
66245 #include <linux/jiffies.h>
66246 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66247 */
66248 struct timespec ts = xtime;
66249 timespec_add_ns(&ts, nsec);
66250 - ACCESS_ONCE(xtime_cache) = ts;
66251 + ACCESS_ONCE_RW(xtime_cache) = ts;
66252 }
66253
66254 /* must hold xtime_lock */
66255 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66256 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66257 return -EINVAL;
66258
66259 + gr_log_timechange();
66260 +
66261 write_seqlock_irqsave(&xtime_lock, flags);
66262
66263 timekeeping_forward_now();
66264 diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66265 --- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66266 +++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66267 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66268
66269 static void print_name_offset(struct seq_file *m, void *sym)
66270 {
66271 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66272 + SEQ_printf(m, "<%p>", NULL);
66273 +#else
66274 char symname[KSYM_NAME_LEN];
66275
66276 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66277 SEQ_printf(m, "<%p>", sym);
66278 else
66279 SEQ_printf(m, "%s", symname);
66280 +#endif
66281 }
66282
66283 static void
66284 @@ -112,7 +116,11 @@ next_one:
66285 static void
66286 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66287 {
66288 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66289 + SEQ_printf(m, " .base: %p\n", NULL);
66290 +#else
66291 SEQ_printf(m, " .base: %p\n", base);
66292 +#endif
66293 SEQ_printf(m, " .index: %d\n",
66294 base->index);
66295 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66296 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66297 {
66298 struct proc_dir_entry *pe;
66299
66300 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66301 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66302 +#else
66303 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66304 +#endif
66305 if (!pe)
66306 return -ENOMEM;
66307 return 0;
66308 diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66309 --- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66310 +++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66311 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66312 static unsigned long nr_entries;
66313 static struct entry entries[MAX_ENTRIES];
66314
66315 -static atomic_t overflow_count;
66316 +static atomic_unchecked_t overflow_count;
66317
66318 /*
66319 * The entries are in a hash-table, for fast lookup:
66320 @@ -140,7 +140,7 @@ static void reset_entries(void)
66321 nr_entries = 0;
66322 memset(entries, 0, sizeof(entries));
66323 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66324 - atomic_set(&overflow_count, 0);
66325 + atomic_set_unchecked(&overflow_count, 0);
66326 }
66327
66328 static struct entry *alloc_entry(void)
66329 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66330 if (likely(entry))
66331 entry->count++;
66332 else
66333 - atomic_inc(&overflow_count);
66334 + atomic_inc_unchecked(&overflow_count);
66335
66336 out_unlock:
66337 spin_unlock_irqrestore(lock, flags);
66338 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66339
66340 static void print_name_offset(struct seq_file *m, unsigned long addr)
66341 {
66342 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66343 + seq_printf(m, "<%p>", NULL);
66344 +#else
66345 char symname[KSYM_NAME_LEN];
66346
66347 if (lookup_symbol_name(addr, symname) < 0)
66348 seq_printf(m, "<%p>", (void *)addr);
66349 else
66350 seq_printf(m, "%s", symname);
66351 +#endif
66352 }
66353
66354 static int tstats_show(struct seq_file *m, void *v)
66355 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66356
66357 seq_puts(m, "Timer Stats Version: v0.2\n");
66358 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66359 - if (atomic_read(&overflow_count))
66360 + if (atomic_read_unchecked(&overflow_count))
66361 seq_printf(m, "Overflow: %d entries\n",
66362 - atomic_read(&overflow_count));
66363 + atomic_read_unchecked(&overflow_count));
66364
66365 for (i = 0; i < nr_entries; i++) {
66366 entry = entries + i;
66367 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66368 {
66369 struct proc_dir_entry *pe;
66370
66371 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66372 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66373 +#else
66374 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66375 +#endif
66376 if (!pe)
66377 return -ENOMEM;
66378 return 0;
66379 diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66380 --- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66381 +++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66382 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66383 return error;
66384
66385 if (tz) {
66386 + /* we log in do_settimeofday called below, so don't log twice
66387 + */
66388 + if (!tv)
66389 + gr_log_timechange();
66390 +
66391 /* SMP safe, global irq locking makes it work. */
66392 sys_tz = *tz;
66393 update_vsyscall_tz();
66394 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66395 * Avoid unnecessary multiplications/divisions in the
66396 * two most common HZ cases:
66397 */
66398 -unsigned int inline jiffies_to_msecs(const unsigned long j)
66399 +inline unsigned int jiffies_to_msecs(const unsigned long j)
66400 {
66401 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66402 return (MSEC_PER_SEC / HZ) * j;
66403 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66404 }
66405 EXPORT_SYMBOL(jiffies_to_msecs);
66406
66407 -unsigned int inline jiffies_to_usecs(const unsigned long j)
66408 +inline unsigned int jiffies_to_usecs(const unsigned long j)
66409 {
66410 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66411 return (USEC_PER_SEC / HZ) * j;
66412 diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66413 --- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66414 +++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66415 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66416 /*
66417 * This function runs timers and the timer-tq in bottom half context.
66418 */
66419 -static void run_timer_softirq(struct softirq_action *h)
66420 +static void run_timer_softirq(void)
66421 {
66422 struct tvec_base *base = __get_cpu_var(tvec_bases);
66423
66424 diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66425 --- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66426 +++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66427 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66428 struct blk_trace *bt = filp->private_data;
66429 char buf[16];
66430
66431 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66432 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66433
66434 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66435 }
66436 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66437 return 1;
66438
66439 bt = buf->chan->private_data;
66440 - atomic_inc(&bt->dropped);
66441 + atomic_inc_unchecked(&bt->dropped);
66442 return 0;
66443 }
66444
66445 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66446
66447 bt->dir = dir;
66448 bt->dev = dev;
66449 - atomic_set(&bt->dropped, 0);
66450 + atomic_set_unchecked(&bt->dropped, 0);
66451
66452 ret = -EIO;
66453 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66454 diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66455 --- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66456 +++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66457 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66458
66459 ip = rec->ip;
66460
66461 + ret = ftrace_arch_code_modify_prepare();
66462 + FTRACE_WARN_ON(ret);
66463 + if (ret)
66464 + return 0;
66465 +
66466 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66467 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66468 if (ret) {
66469 ftrace_bug(ret, ip);
66470 rec->flags |= FTRACE_FL_FAILED;
66471 - return 0;
66472 }
66473 - return 1;
66474 + return ret ? 0 : 1;
66475 }
66476
66477 /*
66478 diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66479 --- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66480 +++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66481 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66482 * the reader page). But if the next page is a header page,
66483 * its flags will be non zero.
66484 */
66485 -static int inline
66486 +static inline int
66487 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66488 struct buffer_page *page, struct list_head *list)
66489 {
66490 diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66491 --- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66492 +++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66493 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66494 size_t rem;
66495 unsigned int i;
66496
66497 + pax_track_stack();
66498 +
66499 /* copy the tracer to avoid using a global lock all around */
66500 mutex_lock(&trace_types_lock);
66501 if (unlikely(old_tracer != current_trace && current_trace)) {
66502 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66503 int entries, size, i;
66504 size_t ret;
66505
66506 + pax_track_stack();
66507 +
66508 if (*ppos & (PAGE_SIZE - 1)) {
66509 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66510 return -EINVAL;
66511 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
66512 };
66513 #endif
66514
66515 -static struct dentry *d_tracer;
66516 -
66517 struct dentry *tracing_init_dentry(void)
66518 {
66519 + static struct dentry *d_tracer;
66520 static int once;
66521
66522 if (d_tracer)
66523 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66524 return d_tracer;
66525 }
66526
66527 -static struct dentry *d_percpu;
66528 -
66529 struct dentry *tracing_dentry_percpu(void)
66530 {
66531 + static struct dentry *d_percpu;
66532 static int once;
66533 struct dentry *d_tracer;
66534
66535 diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66536 --- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66537 +++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66538 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66539 * Modules must own their file_operations to keep up with
66540 * reference counting.
66541 */
66542 +
66543 struct ftrace_module_file_ops {
66544 struct list_head list;
66545 struct module *mod;
66546 - struct file_operations id;
66547 - struct file_operations enable;
66548 - struct file_operations format;
66549 - struct file_operations filter;
66550 };
66551
66552 static void remove_subsystem_dir(const char *name)
66553 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66554
66555 file_ops->mod = mod;
66556
66557 - file_ops->id = ftrace_event_id_fops;
66558 - file_ops->id.owner = mod;
66559 -
66560 - file_ops->enable = ftrace_enable_fops;
66561 - file_ops->enable.owner = mod;
66562 -
66563 - file_ops->filter = ftrace_event_filter_fops;
66564 - file_ops->filter.owner = mod;
66565 -
66566 - file_ops->format = ftrace_event_format_fops;
66567 - file_ops->format.owner = mod;
66568 + pax_open_kernel();
66569 + *(void **)&mod->trace_id.owner = mod;
66570 + *(void **)&mod->trace_enable.owner = mod;
66571 + *(void **)&mod->trace_filter.owner = mod;
66572 + *(void **)&mod->trace_format.owner = mod;
66573 + pax_close_kernel();
66574
66575 list_add(&file_ops->list, &ftrace_module_file_list);
66576
66577 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66578 call->mod = mod;
66579 list_add(&call->list, &ftrace_events);
66580 event_create_dir(call, d_events,
66581 - &file_ops->id, &file_ops->enable,
66582 - &file_ops->filter, &file_ops->format);
66583 + &mod->trace_id, &mod->trace_enable,
66584 + &mod->trace_filter, &mod->trace_format);
66585 }
66586 }
66587
66588 diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66589 --- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66590 +++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66591 @@ -23,7 +23,7 @@ struct header_iter {
66592 static struct trace_array *mmio_trace_array;
66593 static bool overrun_detected;
66594 static unsigned long prev_overruns;
66595 -static atomic_t dropped_count;
66596 +static atomic_unchecked_t dropped_count;
66597
66598 static void mmio_reset_data(struct trace_array *tr)
66599 {
66600 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66601
66602 static unsigned long count_overruns(struct trace_iterator *iter)
66603 {
66604 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66605 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66606 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66607
66608 if (over > prev_overruns)
66609 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66610 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66611 sizeof(*entry), 0, pc);
66612 if (!event) {
66613 - atomic_inc(&dropped_count);
66614 + atomic_inc_unchecked(&dropped_count);
66615 return;
66616 }
66617 entry = ring_buffer_event_data(event);
66618 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66619 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66620 sizeof(*entry), 0, pc);
66621 if (!event) {
66622 - atomic_inc(&dropped_count);
66623 + atomic_inc_unchecked(&dropped_count);
66624 return;
66625 }
66626 entry = ring_buffer_event_data(event);
66627 diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66628 --- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66629 +++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66630 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66631 return 0;
66632 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66633 if (!IS_ERR(p)) {
66634 - p = mangle_path(s->buffer + s->len, p, "\n");
66635 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66636 if (p) {
66637 s->len = p - s->buffer;
66638 return 1;
66639 diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66640 --- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66641 +++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66642 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66643 return;
66644
66645 /* we do not handle interrupt stacks yet */
66646 - if (!object_is_on_stack(&this_size))
66647 + if (!object_starts_on_stack(&this_size))
66648 return;
66649
66650 local_irq_save(flags);
66651 diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66652 --- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66653 +++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66654 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66655 int cpu;
66656 pid_t pid;
66657 /* Can be inserted from interrupt or user context, need to be atomic */
66658 - atomic_t inserted;
66659 + atomic_unchecked_t inserted;
66660 /*
66661 * Don't need to be atomic, works are serialized in a single workqueue thread
66662 * on a single CPU.
66663 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66664 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66665 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66666 if (node->pid == wq_thread->pid) {
66667 - atomic_inc(&node->inserted);
66668 + atomic_inc_unchecked(&node->inserted);
66669 goto found;
66670 }
66671 }
66672 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66673 tsk = get_pid_task(pid, PIDTYPE_PID);
66674 if (tsk) {
66675 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66676 - atomic_read(&cws->inserted), cws->executed,
66677 + atomic_read_unchecked(&cws->inserted), cws->executed,
66678 tsk->comm);
66679 put_task_struct(tsk);
66680 }
66681 diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66682 --- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66683 +++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66684 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66685 spin_lock_irq(&uidhash_lock);
66686 up = uid_hash_find(uid, hashent);
66687 if (up) {
66688 + put_user_ns(ns);
66689 key_put(new->uid_keyring);
66690 key_put(new->session_keyring);
66691 kmem_cache_free(uid_cachep, new);
66692 diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66693 --- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66694 +++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66695 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66696 return BUG_TRAP_TYPE_NONE;
66697
66698 bug = find_bug(bugaddr);
66699 + if (!bug)
66700 + return BUG_TRAP_TYPE_NONE;
66701
66702 printk(KERN_EMERG "------------[ cut here ]------------\n");
66703
66704 diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66705 --- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66706 +++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66707 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66708 if (limit > 4)
66709 return;
66710
66711 - is_on_stack = object_is_on_stack(addr);
66712 + is_on_stack = object_starts_on_stack(addr);
66713 if (is_on_stack == onstack)
66714 return;
66715
66716 diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66717 --- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66718 +++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66719 @@ -861,7 +861,7 @@ out:
66720
66721 static void check_for_stack(struct device *dev, void *addr)
66722 {
66723 - if (object_is_on_stack(addr))
66724 + if (object_starts_on_stack(addr))
66725 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66726 "stack [addr=%p]\n", addr);
66727 }
66728 diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66729 --- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66730 +++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66731 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66732 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66733
66734 /* if already at the top layer, we need to grow */
66735 - if (id >= 1 << (idp->layers * IDR_BITS)) {
66736 + if (id >= (1 << (idp->layers * IDR_BITS))) {
66737 *starting_id = id;
66738 return IDR_NEED_TO_GROW;
66739 }
66740 diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66741 --- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66742 +++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66743 @@ -266,7 +266,7 @@ static void free(void *where)
66744 malloc_ptr = free_mem_ptr;
66745 }
66746 #else
66747 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66748 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66749 #define free(a) kfree(a)
66750 #endif
66751
66752 diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66753 --- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66754 +++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66755 @@ -905,7 +905,7 @@ config LATENCYTOP
66756 select STACKTRACE
66757 select SCHEDSTATS
66758 select SCHED_DEBUG
66759 - depends on HAVE_LATENCYTOP_SUPPORT
66760 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66761 help
66762 Enable this option if you want to use the LatencyTOP tool
66763 to find out which userspace is blocking on what kernel operations.
66764 diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66765 --- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66766 +++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66767 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66768 return ret;
66769 }
66770
66771 -struct sysfs_ops kobj_sysfs_ops = {
66772 +const struct sysfs_ops kobj_sysfs_ops = {
66773 .show = kobj_attr_show,
66774 .store = kobj_attr_store,
66775 };
66776 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66777 * If the kset was not able to be created, NULL will be returned.
66778 */
66779 static struct kset *kset_create(const char *name,
66780 - struct kset_uevent_ops *uevent_ops,
66781 + const struct kset_uevent_ops *uevent_ops,
66782 struct kobject *parent_kobj)
66783 {
66784 struct kset *kset;
66785 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66786 * If the kset was not able to be created, NULL will be returned.
66787 */
66788 struct kset *kset_create_and_add(const char *name,
66789 - struct kset_uevent_ops *uevent_ops,
66790 + const struct kset_uevent_ops *uevent_ops,
66791 struct kobject *parent_kobj)
66792 {
66793 struct kset *kset;
66794 diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66795 --- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66796 +++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66797 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66798 const char *subsystem;
66799 struct kobject *top_kobj;
66800 struct kset *kset;
66801 - struct kset_uevent_ops *uevent_ops;
66802 + const struct kset_uevent_ops *uevent_ops;
66803 u64 seq;
66804 int i = 0;
66805 int retval = 0;
66806 diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66807 --- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66808 +++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66809 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66810 */
66811 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66812 {
66813 - WARN_ON(release == NULL);
66814 + BUG_ON(release == NULL);
66815 WARN_ON(release == (void (*)(struct kref *))kfree);
66816
66817 if (atomic_dec_and_test(&kref->refcount)) {
66818 diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66819 --- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66820 +++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66821 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66822 char *buf;
66823 int ret;
66824
66825 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66826 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66827 if (!buf)
66828 return -ENOMEM;
66829 memcpy(buf, s->from, s->to - s->from);
66830 diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66831 --- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66832 +++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66833 @@ -81,7 +81,7 @@ struct radix_tree_preload {
66834 int nr;
66835 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66836 };
66837 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66838 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66839
66840 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66841 {
66842 diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66843 --- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66844 +++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66845 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66846 */
66847 static inline u32 __seed(u32 x, u32 m)
66848 {
66849 - return (x < m) ? x + m : x;
66850 + return (x <= m) ? x + m + 1 : x;
66851 }
66852
66853 /**
66854 diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66855 --- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66856 +++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66857 @@ -16,6 +16,9 @@
66858 * - scnprintf and vscnprintf
66859 */
66860
66861 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66862 +#define __INCLUDED_BY_HIDESYM 1
66863 +#endif
66864 #include <stdarg.h>
66865 #include <linux/module.h>
66866 #include <linux/types.h>
66867 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66868 return buf;
66869 }
66870
66871 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66872 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66873 {
66874 int len, i;
66875
66876 if ((unsigned long)s < PAGE_SIZE)
66877 - s = "<NULL>";
66878 + s = "(null)";
66879
66880 len = strnlen(s, spec.precision);
66881
66882 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66883 unsigned long value = (unsigned long) ptr;
66884 #ifdef CONFIG_KALLSYMS
66885 char sym[KSYM_SYMBOL_LEN];
66886 - if (ext != 'f' && ext != 's')
66887 + if (ext != 'f' && ext != 's' && ext != 'a')
66888 sprint_symbol(sym, value);
66889 else
66890 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66891 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66892 * - 'f' For simple symbolic function names without offset
66893 * - 'S' For symbolic direct pointers with offset
66894 * - 's' For symbolic direct pointers without offset
66895 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66896 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66897 * - 'R' For a struct resource pointer, it prints the range of
66898 * addresses (not the name nor the flags)
66899 * - 'M' For a 6-byte MAC address, it prints the address in the
66900 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66901 struct printf_spec spec)
66902 {
66903 if (!ptr)
66904 - return string(buf, end, "(null)", spec);
66905 + return string(buf, end, "(nil)", spec);
66906
66907 switch (*fmt) {
66908 case 'F':
66909 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66910 case 's':
66911 /* Fallthrough */
66912 case 'S':
66913 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66914 + break;
66915 +#else
66916 + return symbol_string(buf, end, ptr, spec, *fmt);
66917 +#endif
66918 + case 'a':
66919 + /* Fallthrough */
66920 + case 'A':
66921 return symbol_string(buf, end, ptr, spec, *fmt);
66922 case 'R':
66923 return resource_string(buf, end, ptr, spec);
66924 @@ -1445,7 +1458,7 @@ do { \
66925 size_t len;
66926 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66927 || (unsigned long)save_str < PAGE_SIZE)
66928 - save_str = "<NULL>";
66929 + save_str = "(null)";
66930 len = strlen(save_str);
66931 if (str + len + 1 < end)
66932 memcpy(str, save_str, len + 1);
66933 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66934 typeof(type) value; \
66935 if (sizeof(type) == 8) { \
66936 args = PTR_ALIGN(args, sizeof(u32)); \
66937 - *(u32 *)&value = *(u32 *)args; \
66938 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66939 + *(u32 *)&value = *(const u32 *)args; \
66940 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66941 } else { \
66942 args = PTR_ALIGN(args, sizeof(type)); \
66943 - value = *(typeof(type) *)args; \
66944 + value = *(const typeof(type) *)args; \
66945 } \
66946 args += sizeof(type); \
66947 value; \
66948 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66949 const char *str_arg = args;
66950 size_t len = strlen(str_arg);
66951 args += len + 1;
66952 - str = string(str, end, (char *)str_arg, spec);
66953 + str = string(str, end, str_arg, spec);
66954 break;
66955 }
66956
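
The vsprintf.c hunks add 'A'/'a' as HIDESYM-aware siblings of the 'S'/'s' %p extensions: with CONFIG_GRKERNSEC_HIDESYM set, generic %pS/%ps callers fall through to plain pointer output, while explicitly approved callers (the kmemleak change further down switches to %pA) still get symbol names. The compilable userspace sketch below shows only that dispatch decision; DEMO_HIDESYM, resolve() and the fake symbol string are stand-ins, not kernel code.

#include <stdio.h>

#define DEMO_HIDESYM 1			/* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static const char *resolve(char ext)
{
	switch (ext) {
	case 's':
	case 'S':
#if DEMO_HIDESYM
		break;			/* hidden: caller only gets the raw pointer */
#else
		return "some_symbol+0x10/0x80";
#endif
	case 'a':
	case 'A':
		return "some_symbol+0x10/0x80";	/* approved caller keeps symbolization */
	}
	return NULL;
}

int main(void)
{
	const char *s = resolve('S');
	const char *a = resolve('A');

	printf("%%pS -> %s\n", s ? s : "(pointer value only)");
	printf("%%pA -> %s\n", a ? a : "(pointer value only)");
	return 0;
}
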
66957 diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66958 --- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66959 +++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66960 @@ -0,0 +1 @@
66961 +-grsec
66962 diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66963 --- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66964 +++ linux-2.6.32.45/Makefile 2011-08-16 20:42:28.000000000 -0400
66965 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66966
66967 HOSTCC = gcc
66968 HOSTCXX = g++
66969 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66970 -HOSTCXXFLAGS = -O2
66971 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66972 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66973 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66974
66975 # Decide whether to build built-in, modular, or both.
66976 # Normally, just do built-in.
66977 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66978 KBUILD_CPPFLAGS := -D__KERNEL__
66979
66980 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66981 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
66982 -fno-strict-aliasing -fno-common \
66983 -Werror-implicit-function-declaration \
66984 -Wno-format-security \
66985 -fno-delete-null-pointer-checks
66986 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66987 KBUILD_AFLAGS := -D__ASSEMBLY__
66988
66989 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66990 @@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66991 # Rules shared between *config targets and build targets
66992
66993 # Basic helpers built in scripts/
66994 -PHONY += scripts_basic
66995 -scripts_basic:
66996 +PHONY += scripts_basic gcc-plugins
66997 +scripts_basic: gcc-plugins
66998 $(Q)$(MAKE) $(build)=scripts/basic
66999
67000 # To avoid any implicit rule to kick in, define an empty command.
67001 @@ -403,7 +406,7 @@ endif
67002 # of make so .config is not included in this case either (for *config).
67003
67004 no-dot-config-targets := clean mrproper distclean \
67005 - cscope TAGS tags help %docs check% \
67006 + cscope gtags TAGS tags help %docs check% \
67007 include/linux/version.h headers_% \
67008 kernelrelease kernelversion
67009
67010 @@ -526,6 +529,25 @@ else
67011 KBUILD_CFLAGS += -O2
67012 endif
67013
67014 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
67015 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
67016 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
67017 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67018 +endif
67019 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
67020 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
67021 +gcc-plugins:
67022 + $(Q)$(MAKE) $(build)=tools/gcc
67023 +else
67024 +gcc-plugins:
67025 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67026 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
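(deleted)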
67027 +else
67028 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67029 +endif
67030 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67031 +endif
67032 +
67033 include $(srctree)/arch/$(SRCARCH)/Makefile
67034
67035 ifneq ($(CONFIG_FRAME_WARN),0)
67036 @@ -644,7 +666,7 @@ export mod_strip_cmd
67037
67038
67039 ifeq ($(KBUILD_EXTMOD),)
67040 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67041 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67042
67043 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67044 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67045 @@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
67046 endif
67047
67048 # prepare2 creates a makefile if using a separate output directory
67049 -prepare2: prepare3 outputmakefile
67050 +prepare2: prepare3 outputmakefile gcc-plugins
67051
67052 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
67053 include/asm include/config/auto.conf
67054 @@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
67055 include/linux/autoconf.h include/linux/version.h \
67056 include/linux/utsrelease.h \
67057 include/linux/bounds.h include/asm*/asm-offsets.h \
67058 - Module.symvers Module.markers tags TAGS cscope*
67059 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
67060
67061 # clean - Delete most, but leave enough to build external modules
67062 #
67063 @@ -1289,6 +1311,7 @@ help:
67064 @echo ' modules_prepare - Set up for building external modules'
67065 @echo ' tags/TAGS - Generate tags file for editors'
67066 @echo ' cscope - Generate cscope index'
67067 + @echo ' gtags - Generate GNU GLOBAL index'
67068 @echo ' kernelrelease - Output the release version string'
67069 @echo ' kernelversion - Output the version stored in Makefile'
67070 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
67071 @@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
67072 $(call cmd,rmdirs)
67073 $(call cmd,rmfiles)
67074 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
67075 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
67076 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
67077 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67078 -o -name '*.gcno' \) -type f -print | xargs rm -f
67079
67080 @@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67081 quiet_cmd_tags = GEN $@
67082 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67083
67084 -tags TAGS cscope: FORCE
67085 +tags TAGS cscope gtags: FORCE
67086 $(call cmd,tags)
67087
67088 # Scripts to check various things for consistency
67089 diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
67090 --- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67091 +++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
67092 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
67093 list_add_tail_rcu(&wb->list, &bdi->wb_list);
67094 spin_unlock(&bdi->wb_lock);
67095
67096 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
67097 + tsk->flags |= PF_SWAPWRITE;
67098 set_freezable();
67099
67100 /*
67101 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67102 * Add the default flusher task that gets created for any bdi
67103 * that has dirty data pending writeout
67104 */
67105 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67106 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67107 {
67108 if (!bdi_cap_writeback_dirty(bdi))
67109 return;
67110 diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
67111 --- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67112 +++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67113 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67114 struct address_space *mapping = file->f_mapping;
67115
67116 if (!mapping->a_ops->readpage)
67117 - return -ENOEXEC;
67118 + return -ENODEV;
67119 file_accessed(file);
67120 vma->vm_ops = &generic_file_vm_ops;
67121 vma->vm_flags |= VM_CAN_NONLINEAR;
67122 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67123 *pos = i_size_read(inode);
67124
67125 if (limit != RLIM_INFINITY) {
67126 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67127 if (*pos >= limit) {
67128 send_sig(SIGXFSZ, current, 0);
67129 return -EFBIG;
67130 diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
67131 --- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67132 +++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67133 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67134 retry:
67135 vma = find_vma(mm, start);
67136
67137 +#ifdef CONFIG_PAX_SEGMEXEC
67138 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67139 + goto out;
67140 +#endif
67141 +
67142 /*
67143 * Make sure the vma is shared, that it supports prefaulting,
67144 * and that the remapped range is valid and fully within
67145 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67146 /*
67147 * drop PG_Mlocked flag for over-mapped range
67148 */
67149 - unsigned int saved_flags = vma->vm_flags;
67150 + unsigned long saved_flags = vma->vm_flags;
67151 munlock_vma_pages_range(vma, start, start + size);
67152 vma->vm_flags = saved_flags;
67153 }
67154 diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
67155 --- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67156 +++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67157 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67158 * So no dangers, even with speculative execution.
67159 */
67160 page = pte_page(pkmap_page_table[i]);
67161 + pax_open_kernel();
67162 pte_clear(&init_mm, (unsigned long)page_address(page),
67163 &pkmap_page_table[i]);
67164 -
67165 + pax_close_kernel();
67166 set_page_address(page, NULL);
67167 need_flush = 1;
67168 }
67169 @@ -177,9 +178,11 @@ start:
67170 }
67171 }
67172 vaddr = PKMAP_ADDR(last_pkmap_nr);
67173 +
67174 + pax_open_kernel();
67175 set_pte_at(&init_mm, vaddr,
67176 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67177 -
67178 + pax_close_kernel();
67179 pkmap_count[last_pkmap_nr] = 1;
67180 set_page_address(page, (void *)vaddr);
67181
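
The pax_open_kernel()/pax_close_kernel() pairs added around the pkmap page-table updates follow the KERNEXEC convention that kernel page tables are normally write-protected, so a legitimate write has to be bracketed by a brief open and close. The sketch below only illustrates that bracketing discipline with a plain flag; demo_open_kernel(), demo_close_kernel() and demo_set_pte() are invented stand-ins, not the PaX primitives.

#include <stdio.h>

static int pgtable_writable;		/* stand-in for the protection state PaX toggles */

static void demo_open_kernel(void)  { pgtable_writable = 1; }
static void demo_close_kernel(void) { pgtable_writable = 0; }

static void demo_set_pte(unsigned long *pte, unsigned long val)
{
	if (!pgtable_writable) {
		printf("rejected: page tables are read-only here\n");
		return;
	}
	*pte = val;
}

int main(void)
{
	unsigned long pte = 0;

	demo_set_pte(&pte, 0x1000);	/* outside the bracket: refused */

	demo_open_kernel();
	demo_set_pte(&pte, 0x1000);	/* inside the bracket: allowed */
	demo_close_kernel();

	printf("pte = %#lx\n", pte);
	return 0;
}
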
67182 diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67183 --- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67184 +++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67185 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67186 return 1;
67187 }
67188
67189 +#ifdef CONFIG_PAX_SEGMEXEC
67190 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67191 +{
67192 + struct mm_struct *mm = vma->vm_mm;
67193 + struct vm_area_struct *vma_m;
67194 + unsigned long address_m;
67195 + pte_t *ptep_m;
67196 +
67197 + vma_m = pax_find_mirror_vma(vma);
67198 + if (!vma_m)
67199 + return;
67200 +
67201 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67202 + address_m = address + SEGMEXEC_TASK_SIZE;
67203 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67204 + get_page(page_m);
67205 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67206 +}
67207 +#endif
67208 +
67209 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67210 unsigned long address, pte_t *ptep, pte_t pte,
67211 struct page *pagecache_page)
67212 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
67213 huge_ptep_clear_flush(vma, address, ptep);
67214 set_huge_pte_at(mm, address, ptep,
67215 make_huge_pte(vma, new_page, 1));
67216 +
67217 +#ifdef CONFIG_PAX_SEGMEXEC
67218 + pax_mirror_huge_pte(vma, address, new_page);
67219 +#endif
67220 +
67221 /* Make the old page be freed below */
67222 new_page = old_page;
67223 }
67224 @@ -2135,6 +2160,10 @@ retry:
67225 && (vma->vm_flags & VM_SHARED)));
67226 set_huge_pte_at(mm, address, ptep, new_pte);
67227
67228 +#ifdef CONFIG_PAX_SEGMEXEC
67229 + pax_mirror_huge_pte(vma, address, page);
67230 +#endif
67231 +
67232 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67233 /* Optimization, do the COW without a second fault */
67234 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67235 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67236 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67237 struct hstate *h = hstate_vma(vma);
67238
67239 +#ifdef CONFIG_PAX_SEGMEXEC
67240 + struct vm_area_struct *vma_m;
67241 +
67242 + vma_m = pax_find_mirror_vma(vma);
67243 + if (vma_m) {
67244 + unsigned long address_m;
67245 +
67246 + if (vma->vm_start > vma_m->vm_start) {
67247 + address_m = address;
67248 + address -= SEGMEXEC_TASK_SIZE;
67249 + vma = vma_m;
67250 + h = hstate_vma(vma);
67251 + } else
67252 + address_m = address + SEGMEXEC_TASK_SIZE;
67253 +
67254 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67255 + return VM_FAULT_OOM;
67256 + address_m &= HPAGE_MASK;
67257 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67258 + }
67259 +#endif
67260 +
67261 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67262 if (!ptep)
67263 return VM_FAULT_OOM;
67264 diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67265 --- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67266 +++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67267 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67268 * in mm/page_alloc.c
67269 */
67270 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67271 +extern void free_compound_page(struct page *page);
67272 extern void prep_compound_page(struct page *page, unsigned long order);
67273
67274
67275 diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67276 --- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67277 +++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67278 @@ -228,7 +228,7 @@ config KSM
67279 config DEFAULT_MMAP_MIN_ADDR
67280 int "Low address space to protect from user allocation"
67281 depends on MMU
67282 - default 4096
67283 + default 65536
67284 help
67285 This is the portion of low virtual memory which should be protected
67286 from userspace allocation. Keeping a user from writing to low pages
67287 diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67288 --- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67289 +++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67290 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67291
67292 for (i = 0; i < object->trace_len; i++) {
67293 void *ptr = (void *)object->trace[i];
67294 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67295 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67296 }
67297 }
67298
67299 diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67300 --- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67301 +++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67302 @@ -14,7 +14,7 @@
67303 * Safely read from address @src to the buffer at @dst. If a kernel fault
67304 * happens, handle that and return -EFAULT.
67305 */
67306 -long probe_kernel_read(void *dst, void *src, size_t size)
67307 +long probe_kernel_read(void *dst, const void *src, size_t size)
67308 {
67309 long ret;
67310 mm_segment_t old_fs = get_fs();
67311 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67312 * Safely write to address @dst from the buffer at @src. If a kernel fault
67313 * happens, handle that and return -EFAULT.
67314 */
67315 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67316 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67317 {
67318 long ret;
67319 mm_segment_t old_fs = get_fs();
67320 diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67321 --- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67322 +++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67323 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67324 pgoff_t pgoff;
67325 unsigned long new_flags = vma->vm_flags;
67326
67327 +#ifdef CONFIG_PAX_SEGMEXEC
67328 + struct vm_area_struct *vma_m;
67329 +#endif
67330 +
67331 switch (behavior) {
67332 case MADV_NORMAL:
67333 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67334 @@ -103,6 +107,13 @@ success:
67335 /*
67336 * vm_flags is protected by the mmap_sem held in write mode.
67337 */
67338 +
67339 +#ifdef CONFIG_PAX_SEGMEXEC
67340 + vma_m = pax_find_mirror_vma(vma);
67341 + if (vma_m)
67342 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67343 +#endif
67344 +
67345 vma->vm_flags = new_flags;
67346
67347 out:
67348 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67349 struct vm_area_struct ** prev,
67350 unsigned long start, unsigned long end)
67351 {
67352 +
67353 +#ifdef CONFIG_PAX_SEGMEXEC
67354 + struct vm_area_struct *vma_m;
67355 +#endif
67356 +
67357 *prev = vma;
67358 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67359 return -EINVAL;
67360 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67361 zap_page_range(vma, start, end - start, &details);
67362 } else
67363 zap_page_range(vma, start, end - start, NULL);
67364 +
67365 +#ifdef CONFIG_PAX_SEGMEXEC
67366 + vma_m = pax_find_mirror_vma(vma);
67367 + if (vma_m) {
67368 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67369 + struct zap_details details = {
67370 + .nonlinear_vma = vma_m,
67371 + .last_index = ULONG_MAX,
67372 + };
67373 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67374 + } else
67375 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67376 + }
67377 +#endif
67378 +
67379 return 0;
67380 }
67381
67382 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67383 if (end < start)
67384 goto out;
67385
67386 +#ifdef CONFIG_PAX_SEGMEXEC
67387 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67388 + if (end > SEGMEXEC_TASK_SIZE)
67389 + goto out;
67390 + } else
67391 +#endif
67392 +
67393 + if (end > TASK_SIZE)
67394 + goto out;
67395 +
67396 error = 0;
67397 if (end == start)
67398 goto out;
67399 diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67400 --- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67401 +++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67402 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67403 return;
67404
67405 pmd = pmd_offset(pud, start);
67406 +
67407 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67408 pud_clear(pud);
67409 pmd_free_tlb(tlb, pmd, start);
67410 +#endif
67411 +
67412 }
67413
67414 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67415 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67416 if (end - 1 > ceiling - 1)
67417 return;
67418
67419 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67420 pud = pud_offset(pgd, start);
67421 pgd_clear(pgd);
67422 pud_free_tlb(tlb, pud, start);
67423 +#endif
67424 +
67425 }
67426
67427 /*
67428 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67429 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67430 i = 0;
67431
67432 - do {
67433 + while (nr_pages) {
67434 struct vm_area_struct *vma;
67435
67436 - vma = find_extend_vma(mm, start);
67437 + vma = find_vma(mm, start);
67438 if (!vma && in_gate_area(tsk, start)) {
67439 unsigned long pg = start & PAGE_MASK;
67440 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67441 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67442 continue;
67443 }
67444
67445 - if (!vma ||
67446 + if (!vma || start < vma->vm_start ||
67447 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67448 !(vm_flags & vma->vm_flags))
67449 return i ? : -EFAULT;
67450 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67451 start += PAGE_SIZE;
67452 nr_pages--;
67453 } while (nr_pages && start < vma->vm_end);
67454 - } while (nr_pages);
67455 + }
67456 return i;
67457 }
67458
67459 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67460 page_add_file_rmap(page);
67461 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67462
67463 +#ifdef CONFIG_PAX_SEGMEXEC
67464 + pax_mirror_file_pte(vma, addr, page, ptl);
67465 +#endif
67466 +
67467 retval = 0;
67468 pte_unmap_unlock(pte, ptl);
67469 return retval;
67470 @@ -1560,10 +1571,22 @@ out:
67471 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67472 struct page *page)
67473 {
67474 +
67475 +#ifdef CONFIG_PAX_SEGMEXEC
67476 + struct vm_area_struct *vma_m;
67477 +#endif
67478 +
67479 if (addr < vma->vm_start || addr >= vma->vm_end)
67480 return -EFAULT;
67481 if (!page_count(page))
67482 return -EINVAL;
67483 +
67484 +#ifdef CONFIG_PAX_SEGMEXEC
67485 + vma_m = pax_find_mirror_vma(vma);
67486 + if (vma_m)
67487 + vma_m->vm_flags |= VM_INSERTPAGE;
67488 +#endif
67489 +
67490 vma->vm_flags |= VM_INSERTPAGE;
67491 return insert_page(vma, addr, page, vma->vm_page_prot);
67492 }
67493 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67494 unsigned long pfn)
67495 {
67496 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67497 + BUG_ON(vma->vm_mirror);
67498
67499 if (addr < vma->vm_start || addr >= vma->vm_end)
67500 return -EFAULT;
67501 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67502 copy_user_highpage(dst, src, va, vma);
67503 }
67504
67505 +#ifdef CONFIG_PAX_SEGMEXEC
67506 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67507 +{
67508 + struct mm_struct *mm = vma->vm_mm;
67509 + spinlock_t *ptl;
67510 + pte_t *pte, entry;
67511 +
67512 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67513 + entry = *pte;
67514 + if (!pte_present(entry)) {
67515 + if (!pte_none(entry)) {
67516 + BUG_ON(pte_file(entry));
67517 + free_swap_and_cache(pte_to_swp_entry(entry));
67518 + pte_clear_not_present_full(mm, address, pte, 0);
67519 + }
67520 + } else {
67521 + struct page *page;
67522 +
67523 + flush_cache_page(vma, address, pte_pfn(entry));
67524 + entry = ptep_clear_flush(vma, address, pte);
67525 + BUG_ON(pte_dirty(entry));
67526 + page = vm_normal_page(vma, address, entry);
67527 + if (page) {
67528 + update_hiwater_rss(mm);
67529 + if (PageAnon(page))
67530 + dec_mm_counter(mm, anon_rss);
67531 + else
67532 + dec_mm_counter(mm, file_rss);
67533 + page_remove_rmap(page);
67534 + page_cache_release(page);
67535 + }
67536 + }
67537 + pte_unmap_unlock(pte, ptl);
67538 +}
67539 +
67540 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67541 + *
67542 + * the ptl of the lower mapped page is held on entry and is not released on exit
67543 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67544 + */
67545 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67546 +{
67547 + struct mm_struct *mm = vma->vm_mm;
67548 + unsigned long address_m;
67549 + spinlock_t *ptl_m;
67550 + struct vm_area_struct *vma_m;
67551 + pmd_t *pmd_m;
67552 + pte_t *pte_m, entry_m;
67553 +
67554 + BUG_ON(!page_m || !PageAnon(page_m));
67555 +
67556 + vma_m = pax_find_mirror_vma(vma);
67557 + if (!vma_m)
67558 + return;
67559 +
67560 + BUG_ON(!PageLocked(page_m));
67561 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67562 + address_m = address + SEGMEXEC_TASK_SIZE;
67563 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67564 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67565 + ptl_m = pte_lockptr(mm, pmd_m);
67566 + if (ptl != ptl_m) {
67567 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67568 + if (!pte_none(*pte_m))
67569 + goto out;
67570 + }
67571 +
67572 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67573 + page_cache_get(page_m);
67574 + page_add_anon_rmap(page_m, vma_m, address_m);
67575 + inc_mm_counter(mm, anon_rss);
67576 + set_pte_at(mm, address_m, pte_m, entry_m);
67577 + update_mmu_cache(vma_m, address_m, entry_m);
67578 +out:
67579 + if (ptl != ptl_m)
67580 + spin_unlock(ptl_m);
67581 + pte_unmap_nested(pte_m);
67582 + unlock_page(page_m);
67583 +}
67584 +
67585 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67586 +{
67587 + struct mm_struct *mm = vma->vm_mm;
67588 + unsigned long address_m;
67589 + spinlock_t *ptl_m;
67590 + struct vm_area_struct *vma_m;
67591 + pmd_t *pmd_m;
67592 + pte_t *pte_m, entry_m;
67593 +
67594 + BUG_ON(!page_m || PageAnon(page_m));
67595 +
67596 + vma_m = pax_find_mirror_vma(vma);
67597 + if (!vma_m)
67598 + return;
67599 +
67600 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67601 + address_m = address + SEGMEXEC_TASK_SIZE;
67602 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67603 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67604 + ptl_m = pte_lockptr(mm, pmd_m);
67605 + if (ptl != ptl_m) {
67606 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67607 + if (!pte_none(*pte_m))
67608 + goto out;
67609 + }
67610 +
67611 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67612 + page_cache_get(page_m);
67613 + page_add_file_rmap(page_m);
67614 + inc_mm_counter(mm, file_rss);
67615 + set_pte_at(mm, address_m, pte_m, entry_m);
67616 + update_mmu_cache(vma_m, address_m, entry_m);
67617 +out:
67618 + if (ptl != ptl_m)
67619 + spin_unlock(ptl_m);
67620 + pte_unmap_nested(pte_m);
67621 +}
67622 +
67623 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67624 +{
67625 + struct mm_struct *mm = vma->vm_mm;
67626 + unsigned long address_m;
67627 + spinlock_t *ptl_m;
67628 + struct vm_area_struct *vma_m;
67629 + pmd_t *pmd_m;
67630 + pte_t *pte_m, entry_m;
67631 +
67632 + vma_m = pax_find_mirror_vma(vma);
67633 + if (!vma_m)
67634 + return;
67635 +
67636 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67637 + address_m = address + SEGMEXEC_TASK_SIZE;
67638 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67639 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67640 + ptl_m = pte_lockptr(mm, pmd_m);
67641 + if (ptl != ptl_m) {
67642 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67643 + if (!pte_none(*pte_m))
67644 + goto out;
67645 + }
67646 +
67647 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67648 + set_pte_at(mm, address_m, pte_m, entry_m);
67649 +out:
67650 + if (ptl != ptl_m)
67651 + spin_unlock(ptl_m);
67652 + pte_unmap_nested(pte_m);
67653 +}
67654 +
67655 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67656 +{
67657 + struct page *page_m;
67658 + pte_t entry;
67659 +
67660 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67661 + goto out;
67662 +
67663 + entry = *pte;
67664 + page_m = vm_normal_page(vma, address, entry);
67665 + if (!page_m)
67666 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67667 + else if (PageAnon(page_m)) {
67668 + if (pax_find_mirror_vma(vma)) {
67669 + pte_unmap_unlock(pte, ptl);
67670 + lock_page(page_m);
67671 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67672 + if (pte_same(entry, *pte))
67673 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67674 + else
67675 + unlock_page(page_m);
67676 + }
67677 + } else
67678 + pax_mirror_file_pte(vma, address, page_m, ptl);
67679 +
67680 +out:
67681 + pte_unmap_unlock(pte, ptl);
67682 +}
67683 +#endif
67684 +
67685 /*
67686 * This routine handles present pages, when users try to write
67687 * to a shared page. It is done by copying the page to a new address
67688 @@ -2156,6 +2360,12 @@ gotten:
67689 */
67690 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67691 if (likely(pte_same(*page_table, orig_pte))) {
67692 +
67693 +#ifdef CONFIG_PAX_SEGMEXEC
67694 + if (pax_find_mirror_vma(vma))
67695 + BUG_ON(!trylock_page(new_page));
67696 +#endif
67697 +
67698 if (old_page) {
67699 if (!PageAnon(old_page)) {
67700 dec_mm_counter(mm, file_rss);
67701 @@ -2207,6 +2417,10 @@ gotten:
67702 page_remove_rmap(old_page);
67703 }
67704
67705 +#ifdef CONFIG_PAX_SEGMEXEC
67706 + pax_mirror_anon_pte(vma, address, new_page, ptl);
67707 +#endif
67708 +
67709 /* Free the old page.. */
67710 new_page = old_page;
67711 ret |= VM_FAULT_WRITE;
67712 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67713 swap_free(entry);
67714 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67715 try_to_free_swap(page);
67716 +
67717 +#ifdef CONFIG_PAX_SEGMEXEC
67718 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67719 +#endif
67720 +
67721 unlock_page(page);
67722
67723 if (flags & FAULT_FLAG_WRITE) {
67724 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67725
67726 /* No need to invalidate - it was non-present before */
67727 update_mmu_cache(vma, address, pte);
67728 +
67729 +#ifdef CONFIG_PAX_SEGMEXEC
67730 + pax_mirror_anon_pte(vma, address, page, ptl);
67731 +#endif
67732 +
67733 unlock:
67734 pte_unmap_unlock(page_table, ptl);
67735 out:
67736 @@ -2632,40 +2856,6 @@ out_release:
67737 }
67738
67739 /*
67740 - * This is like a special single-page "expand_{down|up}wards()",
67741 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
67742 - * doesn't hit another vma.
67743 - */
67744 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67745 -{
67746 - address &= PAGE_MASK;
67747 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67748 - struct vm_area_struct *prev = vma->vm_prev;
67749 -
67750 - /*
67751 - * Is there a mapping abutting this one below?
67752 - *
67753 - * That's only ok if it's the same stack mapping
67754 - * that has gotten split..
67755 - */
67756 - if (prev && prev->vm_end == address)
67757 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67758 -
67759 - expand_stack(vma, address - PAGE_SIZE);
67760 - }
67761 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67762 - struct vm_area_struct *next = vma->vm_next;
67763 -
67764 - /* As VM_GROWSDOWN but s/below/above/ */
67765 - if (next && next->vm_start == address + PAGE_SIZE)
67766 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67767 -
67768 - expand_upwards(vma, address + PAGE_SIZE);
67769 - }
67770 - return 0;
67771 -}
67772 -
67773 -/*
67774 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67775 * but allow concurrent faults), and pte mapped but not yet locked.
67776 * We return with mmap_sem still held, but pte unmapped and unlocked.
67777 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67778 unsigned long address, pte_t *page_table, pmd_t *pmd,
67779 unsigned int flags)
67780 {
67781 - struct page *page;
67782 + struct page *page = NULL;
67783 spinlock_t *ptl;
67784 pte_t entry;
67785
67786 - pte_unmap(page_table);
67787 -
67788 - /* Check if we need to add a guard page to the stack */
67789 - if (check_stack_guard_page(vma, address) < 0)
67790 - return VM_FAULT_SIGBUS;
67791 -
67792 - /* Use the zero-page for reads */
67793 if (!(flags & FAULT_FLAG_WRITE)) {
67794 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67795 vma->vm_page_prot));
67796 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67797 + ptl = pte_lockptr(mm, pmd);
67798 + spin_lock(ptl);
67799 if (!pte_none(*page_table))
67800 goto unlock;
67801 goto setpte;
67802 }
67803
67804 /* Allocate our own private page. */
67805 + pte_unmap(page_table);
67806 +
67807 if (unlikely(anon_vma_prepare(vma)))
67808 goto oom;
67809 page = alloc_zeroed_user_highpage_movable(vma, address);
67810 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67811 if (!pte_none(*page_table))
67812 goto release;
67813
67814 +#ifdef CONFIG_PAX_SEGMEXEC
67815 + if (pax_find_mirror_vma(vma))
67816 + BUG_ON(!trylock_page(page));
67817 +#endif
67818 +
67819 inc_mm_counter(mm, anon_rss);
67820 page_add_new_anon_rmap(page, vma, address);
67821 setpte:
67822 @@ -2720,6 +2911,12 @@ setpte:
67823
67824 /* No need to invalidate - it was non-present before */
67825 update_mmu_cache(vma, address, entry);
67826 +
67827 +#ifdef CONFIG_PAX_SEGMEXEC
67828 + if (page)
67829 + pax_mirror_anon_pte(vma, address, page, ptl);
67830 +#endif
67831 +
67832 unlock:
67833 pte_unmap_unlock(page_table, ptl);
67834 return 0;
67835 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67836 */
67837 /* Only go through if we didn't race with anybody else... */
67838 if (likely(pte_same(*page_table, orig_pte))) {
67839 +
67840 +#ifdef CONFIG_PAX_SEGMEXEC
67841 + if (anon && pax_find_mirror_vma(vma))
67842 + BUG_ON(!trylock_page(page));
67843 +#endif
67844 +
67845 flush_icache_page(vma, page);
67846 entry = mk_pte(page, vma->vm_page_prot);
67847 if (flags & FAULT_FLAG_WRITE)
67848 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67849
67850 /* no need to invalidate: a not-present page won't be cached */
67851 update_mmu_cache(vma, address, entry);
67852 +
67853 +#ifdef CONFIG_PAX_SEGMEXEC
67854 + if (anon)
67855 + pax_mirror_anon_pte(vma, address, page, ptl);
67856 + else
67857 + pax_mirror_file_pte(vma, address, page, ptl);
67858 +#endif
67859 +
67860 } else {
67861 if (charged)
67862 mem_cgroup_uncharge_page(page);
67863 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67864 if (flags & FAULT_FLAG_WRITE)
67865 flush_tlb_page(vma, address);
67866 }
67867 +
67868 +#ifdef CONFIG_PAX_SEGMEXEC
67869 + pax_mirror_pte(vma, address, pte, pmd, ptl);
67870 + return 0;
67871 +#endif
67872 +
67873 unlock:
67874 pte_unmap_unlock(pte, ptl);
67875 return 0;
67876 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67877 pmd_t *pmd;
67878 pte_t *pte;
67879
67880 +#ifdef CONFIG_PAX_SEGMEXEC
67881 + struct vm_area_struct *vma_m;
67882 +#endif
67883 +
67884 __set_current_state(TASK_RUNNING);
67885
67886 count_vm_event(PGFAULT);
67887 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67888 if (unlikely(is_vm_hugetlb_page(vma)))
67889 return hugetlb_fault(mm, vma, address, flags);
67890
67891 +#ifdef CONFIG_PAX_SEGMEXEC
67892 + vma_m = pax_find_mirror_vma(vma);
67893 + if (vma_m) {
67894 + unsigned long address_m;
67895 + pgd_t *pgd_m;
67896 + pud_t *pud_m;
67897 + pmd_t *pmd_m;
67898 +
67899 + if (vma->vm_start > vma_m->vm_start) {
67900 + address_m = address;
67901 + address -= SEGMEXEC_TASK_SIZE;
67902 + vma = vma_m;
67903 + } else
67904 + address_m = address + SEGMEXEC_TASK_SIZE;
67905 +
67906 + pgd_m = pgd_offset(mm, address_m);
67907 + pud_m = pud_alloc(mm, pgd_m, address_m);
67908 + if (!pud_m)
67909 + return VM_FAULT_OOM;
67910 + pmd_m = pmd_alloc(mm, pud_m, address_m);
67911 + if (!pmd_m)
67912 + return VM_FAULT_OOM;
67913 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67914 + return VM_FAULT_OOM;
67915 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67916 + }
67917 +#endif
67918 +
67919 pgd = pgd_offset(mm, address);
67920 pud = pud_alloc(mm, pgd, address);
67921 if (!pud)
67922 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67923 gate_vma.vm_start = FIXADDR_USER_START;
67924 gate_vma.vm_end = FIXADDR_USER_END;
67925 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67926 - gate_vma.vm_page_prot = __P101;
67927 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67928 /*
67929 * Make sure the vDSO gets into every core dump.
67930 * Dumping its contents makes post-mortem fully interpretable later
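
Most of the mm/memory.c additions revolve around one invariant: under SEGMEXEC every mapping in the lower half of the address space has a mirror at the same offset plus SEGMEXEC_TASK_SIZE, so the mirror PTE is always reached at address + SEGMEXEC_TASK_SIZE (hence the repeated BUG_ON(address >= SEGMEXEC_TASK_SIZE) checks). A tiny standalone illustration of that address arithmetic; the constant below is a stand-in meant to mimic half of a 3GB i386 task size, not a value taken from this patch.

#include <stdio.h>

#define DEMO_SEGMEXEC_TASK_SIZE 0x60000000UL	/* stand-in, roughly TASK_SIZE/2 on i386 */

static unsigned long mirror_address(unsigned long address)
{
	if (address >= DEMO_SEGMEXEC_TASK_SIZE)	/* mirrors exist only for the lower half */
		return 0;
	return address + DEMO_SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long addr = 0x08048000UL;	/* a typical i386 text address */

	printf("fault at %#lx -> mirror at %#lx\n", addr, mirror_address(addr));
	return 0;
}
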
67931 diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67932 --- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67933 +++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67934 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67935
67936 int sysctl_memory_failure_recovery __read_mostly = 1;
67937
67938 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67939 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67940
67941 /*
67942 * Send all the processes who have the page mapped an ``action optional''
67943 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67944 return 0;
67945 }
67946
67947 - atomic_long_add(1, &mce_bad_pages);
67948 + atomic_long_add_unchecked(1, &mce_bad_pages);
67949
67950 /*
67951 * We need/can do nothing about count=0 pages.
67952 diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67953 --- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67954 +++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67955 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67956 struct vm_area_struct *next;
67957 int err;
67958
67959 +#ifdef CONFIG_PAX_SEGMEXEC
67960 + struct vm_area_struct *vma_m;
67961 +#endif
67962 +
67963 err = 0;
67964 for (; vma && vma->vm_start < end; vma = next) {
67965 next = vma->vm_next;
67966 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67967 err = policy_vma(vma, new);
67968 if (err)
67969 break;
67970 +
67971 +#ifdef CONFIG_PAX_SEGMEXEC
67972 + vma_m = pax_find_mirror_vma(vma);
67973 + if (vma_m) {
67974 + err = policy_vma(vma_m, new);
67975 + if (err)
67976 + break;
67977 + }
67978 +#endif
67979 +
67980 }
67981 return err;
67982 }
67983 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67984
67985 if (end < start)
67986 return -EINVAL;
67987 +
67988 +#ifdef CONFIG_PAX_SEGMEXEC
67989 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67990 + if (end > SEGMEXEC_TASK_SIZE)
67991 + return -EINVAL;
67992 + } else
67993 +#endif
67994 +
67995 + if (end > TASK_SIZE)
67996 + return -EINVAL;
67997 +
67998 if (end == start)
67999 return 0;
68000
68001 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68002 if (!mm)
68003 return -EINVAL;
68004
68005 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68006 + if (mm != current->mm &&
68007 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68008 + err = -EPERM;
68009 + goto out;
68010 + }
68011 +#endif
68012 +
68013 /*
68014 * Check if this process has the right to modify the specified
68015 * process. The right exists if the process has administrative
68016 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68017 rcu_read_lock();
68018 tcred = __task_cred(task);
68019 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68020 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68021 - !capable(CAP_SYS_NICE)) {
68022 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68023 rcu_read_unlock();
68024 err = -EPERM;
68025 goto out;
68026 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
68027
68028 if (file) {
68029 seq_printf(m, " file=");
68030 - seq_path(m, &file->f_path, "\n\t= ");
68031 + seq_path(m, &file->f_path, "\n\t\\= ");
68032 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68033 seq_printf(m, " heap");
68034 } else if (vma->vm_start <= mm->start_stack &&
68035 diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
68036 --- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
68037 +++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
68038 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
68039 unsigned long chunk_start;
68040 int err;
68041
68042 + pax_track_stack();
68043 +
68044 task_nodes = cpuset_mems_allowed(task);
68045
68046 err = -ENOMEM;
68047 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68048 if (!mm)
68049 return -EINVAL;
68050
68051 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68052 + if (mm != current->mm &&
68053 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68054 + err = -EPERM;
68055 + goto out;
68056 + }
68057 +#endif
68058 +
68059 /*
68060 * Check if this process has the right to modify the specified
68061 * process. The right exists if the process has administrative
68062 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68063 rcu_read_lock();
68064 tcred = __task_cred(task);
68065 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68066 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68067 - !capable(CAP_SYS_NICE)) {
68068 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68069 rcu_read_unlock();
68070 err = -EPERM;
68071 goto out;
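
The identical credential hunks in mempolicy.c (sys_migrate_pages) and migrate.c (sys_move_pages) above drop the "cred->uid != tcred->uid" term from the denial test. Because the test is a conjunction of inequalities, removing a term makes denial easier: merely sharing a real uid with the target no longer passes. A compilable sketch of the two predicates follows; struct cred_demo and the sample uids are made up for illustration.

#include <stdio.h>

struct cred_demo { unsigned int uid, euid, suid; };

static int denied_old(struct cred_demo c, struct cred_demo t, int cap_sys_nice)
{
	return c.euid != t.suid && c.euid != t.uid &&
	       c.uid != t.suid && c.uid != t.uid && !cap_sys_nice;
}

static int denied_new(struct cred_demo c, struct cred_demo t, int cap_sys_nice)
{
	return c.euid != t.suid && c.euid != t.uid &&
	       c.uid != t.suid && !cap_sys_nice;
}

int main(void)
{
	/* caller shares only its real uid with the target task */
	struct cred_demo caller = { .uid = 1000, .euid = 2000, .suid = 2000 };
	struct cred_demo target = { .uid = 1000, .euid = 3000, .suid = 3000 };

	printf("old check: %s\n", denied_old(caller, target, 0) ? "denied" : "allowed");
	printf("new check: %s\n", denied_new(caller, target, 0) ? "denied" : "allowed");
	return 0;
}
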
68072 diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
68073 --- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
68074 +++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
68075 @@ -13,6 +13,7 @@
68076 #include <linux/pagemap.h>
68077 #include <linux/mempolicy.h>
68078 #include <linux/syscalls.h>
68079 +#include <linux/security.h>
68080 #include <linux/sched.h>
68081 #include <linux/module.h>
68082 #include <linux/rmap.h>
68083 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68084 }
68085 }
68086
68087 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68088 -{
68089 - return (vma->vm_flags & VM_GROWSDOWN) &&
68090 - (vma->vm_start == addr) &&
68091 - !vma_stack_continue(vma->vm_prev, addr);
68092 -}
68093 -
68094 /**
68095 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68096 * @vma: target vma
68097 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68098 if (vma->vm_flags & VM_WRITE)
68099 gup_flags |= FOLL_WRITE;
68100
68101 - /* We don't try to access the guard page of a stack vma */
68102 - if (stack_guard_page(vma, start)) {
68103 - addr += PAGE_SIZE;
68104 - nr_pages--;
68105 - }
68106 -
68107 while (nr_pages > 0) {
68108 int i;
68109
68110 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68111 {
68112 unsigned long nstart, end, tmp;
68113 struct vm_area_struct * vma, * prev;
68114 - int error;
68115 + int error = -EINVAL;
68116
68117 len = PAGE_ALIGN(len);
68118 end = start + len;
68119 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68120 return -EINVAL;
68121 if (end == start)
68122 return 0;
68123 + if (end > TASK_SIZE)
68124 + return -EINVAL;
68125 +
68126 vma = find_vma_prev(current->mm, start, &prev);
68127 if (!vma || vma->vm_start > start)
68128 return -ENOMEM;
68129 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68130 for (nstart = start ; ; ) {
68131 unsigned int newflags;
68132
68133 +#ifdef CONFIG_PAX_SEGMEXEC
68134 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68135 + break;
68136 +#endif
68137 +
68138 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68139
68140 newflags = vma->vm_flags | VM_LOCKED;
68141 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68142 lock_limit >>= PAGE_SHIFT;
68143
68144 /* check against resource limits */
68145 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68146 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68147 error = do_mlock(start, len, 1);
68148 up_write(&current->mm->mmap_sem);
68149 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68150 static int do_mlockall(int flags)
68151 {
68152 struct vm_area_struct * vma, * prev = NULL;
68153 - unsigned int def_flags = 0;
68154
68155 if (flags & MCL_FUTURE)
68156 - def_flags = VM_LOCKED;
68157 - current->mm->def_flags = def_flags;
68158 + current->mm->def_flags |= VM_LOCKED;
68159 + else
68160 + current->mm->def_flags &= ~VM_LOCKED;
68161 if (flags == MCL_FUTURE)
68162 goto out;
68163
68164 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68165 - unsigned int newflags;
68166 + unsigned long newflags;
68167 +
68168 +#ifdef CONFIG_PAX_SEGMEXEC
68169 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68170 + break;
68171 +#endif
68172
68173 + BUG_ON(vma->vm_end > TASK_SIZE);
68174 newflags = vma->vm_flags | VM_LOCKED;
68175 if (!(flags & MCL_CURRENT))
68176 newflags &= ~VM_LOCKED;
68177 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68178 lock_limit >>= PAGE_SHIFT;
68179
68180 ret = -ENOMEM;
68181 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68182 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68183 capable(CAP_IPC_LOCK))
68184 ret = do_mlockall(flags);
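
The do_mlockall() change above swaps a wholesale assignment of mm->def_flags for a targeted set/clear of just VM_LOCKED, so whatever other default flags the mm already carries survive an mlockall()/munlockall() call. A small self-contained before/after comparison; the flag values are illustrative stand-ins, not the kernel's.

#include <stdio.h>

#define DEMO_VM_LOCKED	0x00002000UL
#define DEMO_OTHER_FLAG	0x00100000UL	/* some pre-existing def_flags bit */

int main(void)
{
	unsigned long def_flags = DEMO_OTHER_FLAG;

	unsigned long old_way = DEMO_VM_LOCKED;			/* def_flags = VM_LOCKED  */
	unsigned long new_way = def_flags | DEMO_VM_LOCKED;	/* def_flags |= VM_LOCKED */

	printf("old: %#lx (other bit lost)\n", old_way);
	printf("new: %#lx (other bit kept)\n", new_way);
	return 0;
}
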
68185 diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68186 --- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68187 +++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68188 @@ -45,6 +45,16 @@
68189 #define arch_rebalance_pgtables(addr, len) (addr)
68190 #endif
68191
68192 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68193 +{
68194 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68195 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68196 + up_read(&mm->mmap_sem);
68197 + BUG();
68198 + }
68199 +#endif
68200 +}
68201 +
68202 static void unmap_region(struct mm_struct *mm,
68203 struct vm_area_struct *vma, struct vm_area_struct *prev,
68204 unsigned long start, unsigned long end);
68205 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68206 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68207 *
68208 */
68209 -pgprot_t protection_map[16] = {
68210 +pgprot_t protection_map[16] __read_only = {
68211 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68212 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68213 };
68214
68215 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68216 {
68217 - return __pgprot(pgprot_val(protection_map[vm_flags &
68218 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68219 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68220 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68221 +
68222 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68223 + if (!nx_enabled &&
68224 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68225 + (vm_flags & (VM_READ | VM_WRITE)))
68226 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68227 +#endif
68228 +
68229 + return prot;
68230 }
68231 EXPORT_SYMBOL(vm_get_page_prot);
68232
68233 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68234 int sysctl_overcommit_ratio = 50; /* default is 50% */
68235 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68236 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68237 struct percpu_counter vm_committed_as;
68238
68239 /*
68240 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68241 struct vm_area_struct *next = vma->vm_next;
68242
68243 might_sleep();
68244 + BUG_ON(vma->vm_mirror);
68245 if (vma->vm_ops && vma->vm_ops->close)
68246 vma->vm_ops->close(vma);
68247 if (vma->vm_file) {
68248 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68249 * not page aligned -Ram Gupta
68250 */
68251 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68252 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68253 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68254 (mm->end_data - mm->start_data) > rlim)
68255 goto out;
68256 @@ -704,6 +726,12 @@ static int
68257 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68258 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68259 {
68260 +
68261 +#ifdef CONFIG_PAX_SEGMEXEC
68262 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68263 + return 0;
68264 +#endif
68265 +
68266 if (is_mergeable_vma(vma, file, vm_flags) &&
68267 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68268 if (vma->vm_pgoff == vm_pgoff)
68269 @@ -723,6 +751,12 @@ static int
68270 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68271 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68272 {
68273 +
68274 +#ifdef CONFIG_PAX_SEGMEXEC
68275 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68276 + return 0;
68277 +#endif
68278 +
68279 if (is_mergeable_vma(vma, file, vm_flags) &&
68280 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68281 pgoff_t vm_pglen;
68282 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68283 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68284 struct vm_area_struct *prev, unsigned long addr,
68285 unsigned long end, unsigned long vm_flags,
68286 - struct anon_vma *anon_vma, struct file *file,
68287 + struct anon_vma *anon_vma, struct file *file,
68288 pgoff_t pgoff, struct mempolicy *policy)
68289 {
68290 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68291 struct vm_area_struct *area, *next;
68292
68293 +#ifdef CONFIG_PAX_SEGMEXEC
68294 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68295 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68296 +
68297 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68298 +#endif
68299 +
68300 /*
68301 * We later require that vma->vm_flags == vm_flags,
68302 * so this tests vma->vm_flags & VM_SPECIAL, too.
68303 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68304 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68305 next = next->vm_next;
68306
68307 +#ifdef CONFIG_PAX_SEGMEXEC
68308 + if (prev)
68309 + prev_m = pax_find_mirror_vma(prev);
68310 + if (area)
68311 + area_m = pax_find_mirror_vma(area);
68312 + if (next)
68313 + next_m = pax_find_mirror_vma(next);
68314 +#endif
68315 +
68316 /*
68317 * Can it merge with the predecessor?
68318 */
68319 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68320 /* cases 1, 6 */
68321 vma_adjust(prev, prev->vm_start,
68322 next->vm_end, prev->vm_pgoff, NULL);
68323 - } else /* cases 2, 5, 7 */
68324 +
68325 +#ifdef CONFIG_PAX_SEGMEXEC
68326 + if (prev_m)
68327 + vma_adjust(prev_m, prev_m->vm_start,
68328 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68329 +#endif
68330 +
68331 + } else { /* cases 2, 5, 7 */
68332 vma_adjust(prev, prev->vm_start,
68333 end, prev->vm_pgoff, NULL);
68334 +
68335 +#ifdef CONFIG_PAX_SEGMEXEC
68336 + if (prev_m)
68337 + vma_adjust(prev_m, prev_m->vm_start,
68338 + end_m, prev_m->vm_pgoff, NULL);
68339 +#endif
68340 +
68341 + }
68342 return prev;
68343 }
68344
68345 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68346 mpol_equal(policy, vma_policy(next)) &&
68347 can_vma_merge_before(next, vm_flags,
68348 anon_vma, file, pgoff+pglen)) {
68349 - if (prev && addr < prev->vm_end) /* case 4 */
68350 + if (prev && addr < prev->vm_end) { /* case 4 */
68351 vma_adjust(prev, prev->vm_start,
68352 addr, prev->vm_pgoff, NULL);
68353 - else /* cases 3, 8 */
68354 +
68355 +#ifdef CONFIG_PAX_SEGMEXEC
68356 + if (prev_m)
68357 + vma_adjust(prev_m, prev_m->vm_start,
68358 + addr_m, prev_m->vm_pgoff, NULL);
68359 +#endif
68360 +
68361 + } else { /* cases 3, 8 */
68362 vma_adjust(area, addr, next->vm_end,
68363 next->vm_pgoff - pglen, NULL);
68364 +
68365 +#ifdef CONFIG_PAX_SEGMEXEC
68366 + if (area_m)
68367 + vma_adjust(area_m, addr_m, next_m->vm_end,
68368 + next_m->vm_pgoff - pglen, NULL);
68369 +#endif
68370 +
68371 + }
68372 return area;
68373 }
68374
68375 @@ -898,14 +978,11 @@ none:
68376 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68377 struct file *file, long pages)
68378 {
68379 - const unsigned long stack_flags
68380 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68381 -
68382 if (file) {
68383 mm->shared_vm += pages;
68384 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68385 mm->exec_vm += pages;
68386 - } else if (flags & stack_flags)
68387 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68388 mm->stack_vm += pages;
68389 if (flags & (VM_RESERVED|VM_IO))
68390 mm->reserved_vm += pages;
68391 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68392 * (the exception is when the underlying filesystem is noexec
68393 * mounted, in which case we dont add PROT_EXEC.)
68394 */
68395 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68396 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68397 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68398 prot |= PROT_EXEC;
68399
68400 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68401 /* Obtain the address to map to. we verify (or select) it and ensure
68402 * that it represents a valid section of the address space.
68403 */
68404 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68405 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68406 if (addr & ~PAGE_MASK)
68407 return addr;
68408
68409 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68410 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68411 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68412
68413 +#ifdef CONFIG_PAX_MPROTECT
68414 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68415 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68416 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68417 + gr_log_rwxmmap(file);
68418 +
68419 +#ifdef CONFIG_PAX_EMUPLT
68420 + vm_flags &= ~VM_EXEC;
68421 +#else
68422 + return -EPERM;
68423 +#endif
68424 +
68425 + }
68426 +
68427 + if (!(vm_flags & VM_EXEC))
68428 + vm_flags &= ~VM_MAYEXEC;
68429 +#else
68430 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68431 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68432 +#endif
68433 + else
68434 + vm_flags &= ~VM_MAYWRITE;
68435 + }
68436 +#endif
68437 +
68438 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68439 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68440 + vm_flags &= ~VM_PAGEEXEC;
68441 +#endif
68442 +
68443 if (flags & MAP_LOCKED)
68444 if (!can_do_mlock())
68445 return -EPERM;
68446 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68447 locked += mm->locked_vm;
68448 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68449 lock_limit >>= PAGE_SHIFT;
68450 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68451 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68452 return -EAGAIN;
68453 }
68454 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68455 if (error)
68456 return error;
68457
68458 + if (!gr_acl_handle_mmap(file, prot))
68459 + return -EACCES;
68460 +
68461 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68462 }
68463 EXPORT_SYMBOL(do_mmap_pgoff);
68464 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68465 */
68466 int vma_wants_writenotify(struct vm_area_struct *vma)
68467 {
68468 - unsigned int vm_flags = vma->vm_flags;
68469 + unsigned long vm_flags = vma->vm_flags;
68470
68471 /* If it was private or non-writable, the write bit is already clear */
68472 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68473 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68474 return 0;
68475
68476 /* The backer wishes to know when pages are first written to? */
68477 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68478 unsigned long charged = 0;
68479 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68480
68481 +#ifdef CONFIG_PAX_SEGMEXEC
68482 + struct vm_area_struct *vma_m = NULL;
68483 +#endif
68484 +
68485 + /*
68486 + * mm->mmap_sem is required to protect against another thread
68487 + * changing the mappings in case we sleep.
68488 + */
68489 + verify_mm_writelocked(mm);
68490 +
68491 /* Clear old maps */
68492 error = -ENOMEM;
68493 -munmap_back:
68494 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68495 if (vma && vma->vm_start < addr + len) {
68496 if (do_munmap(mm, addr, len))
68497 return -ENOMEM;
68498 - goto munmap_back;
68499 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68500 + BUG_ON(vma && vma->vm_start < addr + len);
68501 }
68502
68503 /* Check against address space limit. */
68504 @@ -1173,6 +1294,16 @@ munmap_back:
68505 goto unacct_error;
68506 }
68507
68508 +#ifdef CONFIG_PAX_SEGMEXEC
68509 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68510 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68511 + if (!vma_m) {
68512 + error = -ENOMEM;
68513 + goto free_vma;
68514 + }
68515 + }
68516 +#endif
68517 +
68518 vma->vm_mm = mm;
68519 vma->vm_start = addr;
68520 vma->vm_end = addr + len;
68521 @@ -1195,6 +1326,19 @@ munmap_back:
68522 error = file->f_op->mmap(file, vma);
68523 if (error)
68524 goto unmap_and_free_vma;
68525 +
68526 +#ifdef CONFIG_PAX_SEGMEXEC
68527 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68528 + added_exe_file_vma(mm);
68529 +#endif
68530 +
68531 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68532 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68533 + vma->vm_flags |= VM_PAGEEXEC;
68534 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68535 + }
68536 +#endif
68537 +
68538 if (vm_flags & VM_EXECUTABLE)
68539 added_exe_file_vma(mm);
68540
68541 @@ -1218,6 +1362,11 @@ munmap_back:
68542 vma_link(mm, vma, prev, rb_link, rb_parent);
68543 file = vma->vm_file;
68544
68545 +#ifdef CONFIG_PAX_SEGMEXEC
68546 + if (vma_m)
68547 + pax_mirror_vma(vma_m, vma);
68548 +#endif
68549 +
68550 /* Once vma denies write, undo our temporary denial count */
68551 if (correct_wcount)
68552 atomic_inc(&inode->i_writecount);
68553 @@ -1226,6 +1375,7 @@ out:
68554
68555 mm->total_vm += len >> PAGE_SHIFT;
68556 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68557 + track_exec_limit(mm, addr, addr + len, vm_flags);
68558 if (vm_flags & VM_LOCKED) {
68559 /*
68560 * makes pages present; downgrades, drops, reacquires mmap_sem
68561 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68562 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68563 charged = 0;
68564 free_vma:
68565 +
68566 +#ifdef CONFIG_PAX_SEGMEXEC
68567 + if (vma_m)
68568 + kmem_cache_free(vm_area_cachep, vma_m);
68569 +#endif
68570 +
68571 kmem_cache_free(vm_area_cachep, vma);
68572 unacct_error:
68573 if (charged)
68574 @@ -1255,6 +1411,44 @@ unacct_error:
68575 return error;
68576 }
68577
68578 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68579 +{
68580 + if (!vma) {
68581 +#ifdef CONFIG_STACK_GROWSUP
68582 + if (addr > sysctl_heap_stack_gap)
68583 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68584 + else
68585 + vma = find_vma(current->mm, 0);
68586 + if (vma && (vma->vm_flags & VM_GROWSUP))
68587 + return false;
68588 +#endif
68589 + return true;
68590 + }
68591 +
68592 + if (addr + len > vma->vm_start)
68593 + return false;
68594 +
68595 + if (vma->vm_flags & VM_GROWSDOWN)
68596 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68597 +#ifdef CONFIG_STACK_GROWSUP
68598 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68599 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68600 +#endif
68601 +
68602 + return true;
68603 +}
68604 +
68605 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68606 +{
68607 + if (vma->vm_start < len)
68608 + return -ENOMEM;
68609 + if (!(vma->vm_flags & VM_GROWSDOWN))
68610 + return vma->vm_start - len;
68611 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68612 + return vma->vm_start - len - sysctl_heap_stack_gap;
68613 + return -ENOMEM;
68614 +}
68615 +
68616 /* Get an address range which is currently unmapped.
68617 * For shmat() with addr=0.
68618 *
68619 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68620 if (flags & MAP_FIXED)
68621 return addr;
68622
68623 +#ifdef CONFIG_PAX_RANDMMAP
68624 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68625 +#endif
68626 +
68627 if (addr) {
68628 addr = PAGE_ALIGN(addr);
68629 - vma = find_vma(mm, addr);
68630 - if (TASK_SIZE - len >= addr &&
68631 - (!vma || addr + len <= vma->vm_start))
68632 - return addr;
68633 + if (TASK_SIZE - len >= addr) {
68634 + vma = find_vma(mm, addr);
68635 + if (check_heap_stack_gap(vma, addr, len))
68636 + return addr;
68637 + }
68638 }
68639 if (len > mm->cached_hole_size) {
68640 - start_addr = addr = mm->free_area_cache;
68641 + start_addr = addr = mm->free_area_cache;
68642 } else {
68643 - start_addr = addr = TASK_UNMAPPED_BASE;
68644 - mm->cached_hole_size = 0;
68645 + start_addr = addr = mm->mmap_base;
68646 + mm->cached_hole_size = 0;
68647 }
68648
68649 full_search:
68650 @@ -1303,34 +1502,40 @@ full_search:
68651 * Start a new search - just in case we missed
68652 * some holes.
68653 */
68654 - if (start_addr != TASK_UNMAPPED_BASE) {
68655 - addr = TASK_UNMAPPED_BASE;
68656 - start_addr = addr;
68657 + if (start_addr != mm->mmap_base) {
68658 + start_addr = addr = mm->mmap_base;
68659 mm->cached_hole_size = 0;
68660 goto full_search;
68661 }
68662 return -ENOMEM;
68663 }
68664 - if (!vma || addr + len <= vma->vm_start) {
68665 - /*
68666 - * Remember the place where we stopped the search:
68667 - */
68668 - mm->free_area_cache = addr + len;
68669 - return addr;
68670 - }
68671 + if (check_heap_stack_gap(vma, addr, len))
68672 + break;
68673 if (addr + mm->cached_hole_size < vma->vm_start)
68674 mm->cached_hole_size = vma->vm_start - addr;
68675 addr = vma->vm_end;
68676 }
68677 +
68678 + /*
68679 + * Remember the place where we stopped the search:
68680 + */
68681 + mm->free_area_cache = addr + len;
68682 + return addr;
68683 }
68684 #endif
68685
68686 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68687 {
68688 +
68689 +#ifdef CONFIG_PAX_SEGMEXEC
68690 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68691 + return;
68692 +#endif
68693 +
68694 /*
68695 * Is this a new hole at the lowest possible address?
68696 */
68697 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68698 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68699 mm->free_area_cache = addr;
68700 mm->cached_hole_size = ~0UL;
68701 }
68702 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68703 {
68704 struct vm_area_struct *vma;
68705 struct mm_struct *mm = current->mm;
68706 - unsigned long addr = addr0;
68707 + unsigned long base = mm->mmap_base, addr = addr0;
68708
68709 /* requested length too big for entire address space */
68710 if (len > TASK_SIZE)
68711 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68712 if (flags & MAP_FIXED)
68713 return addr;
68714
68715 +#ifdef CONFIG_PAX_RANDMMAP
68716 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68717 +#endif
68718 +
68719 /* requesting a specific address */
68720 if (addr) {
68721 addr = PAGE_ALIGN(addr);
68722 - vma = find_vma(mm, addr);
68723 - if (TASK_SIZE - len >= addr &&
68724 - (!vma || addr + len <= vma->vm_start))
68725 - return addr;
68726 + if (TASK_SIZE - len >= addr) {
68727 + vma = find_vma(mm, addr);
68728 + if (check_heap_stack_gap(vma, addr, len))
68729 + return addr;
68730 + }
68731 }
68732
68733 /* check if free_area_cache is useful for us */
68734 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68735 /* make sure it can fit in the remaining address space */
68736 if (addr > len) {
68737 vma = find_vma(mm, addr-len);
68738 - if (!vma || addr <= vma->vm_start)
68739 + if (check_heap_stack_gap(vma, addr - len, len))
68740 /* remember the address as a hint for next time */
68741 return (mm->free_area_cache = addr-len);
68742 }
68743 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68744 * return with success:
68745 */
68746 vma = find_vma(mm, addr);
68747 - if (!vma || addr+len <= vma->vm_start)
68748 + if (check_heap_stack_gap(vma, addr, len))
68749 /* remember the address as a hint for next time */
68750 return (mm->free_area_cache = addr);
68751
68752 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68753 mm->cached_hole_size = vma->vm_start - addr;
68754
68755 /* try just below the current vma->vm_start */
68756 - addr = vma->vm_start-len;
68757 - } while (len < vma->vm_start);
68758 + addr = skip_heap_stack_gap(vma, len);
68759 + } while (!IS_ERR_VALUE(addr));
68760
68761 bottomup:
68762 /*
68763 @@ -1414,13 +1624,21 @@ bottomup:
68764 * can happen with large stack limits and large mmap()
68765 * allocations.
68766 */
68767 + mm->mmap_base = TASK_UNMAPPED_BASE;
68768 +
68769 +#ifdef CONFIG_PAX_RANDMMAP
68770 + if (mm->pax_flags & MF_PAX_RANDMMAP)
68771 + mm->mmap_base += mm->delta_mmap;
68772 +#endif
68773 +
68774 + mm->free_area_cache = mm->mmap_base;
68775 mm->cached_hole_size = ~0UL;
68776 - mm->free_area_cache = TASK_UNMAPPED_BASE;
68777 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68778 /*
68779 * Restore the topdown base:
68780 */
68781 - mm->free_area_cache = mm->mmap_base;
68782 + mm->mmap_base = base;
68783 + mm->free_area_cache = base;
68784 mm->cached_hole_size = ~0UL;
68785
68786 return addr;
68787 @@ -1429,6 +1647,12 @@ bottomup:
68788
68789 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68790 {
68791 +
68792 +#ifdef CONFIG_PAX_SEGMEXEC
68793 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68794 + return;
68795 +#endif
68796 +
68797 /*
68798 * Is this a new hole at the highest possible address?
68799 */
68800 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68801 mm->free_area_cache = addr;
68802
68803 /* dont allow allocations above current base */
68804 - if (mm->free_area_cache > mm->mmap_base)
68805 + if (mm->free_area_cache > mm->mmap_base) {
68806 mm->free_area_cache = mm->mmap_base;
68807 + mm->cached_hole_size = ~0UL;
68808 + }
68809 }
68810
68811 unsigned long
68812 @@ -1545,6 +1771,27 @@ out:
68813 return prev ? prev->vm_next : vma;
68814 }
68815
68816 +#ifdef CONFIG_PAX_SEGMEXEC
68817 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68818 +{
68819 + struct vm_area_struct *vma_m;
68820 +
68821 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68822 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68823 + BUG_ON(vma->vm_mirror);
68824 + return NULL;
68825 + }
68826 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68827 + vma_m = vma->vm_mirror;
68828 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68829 + BUG_ON(vma->vm_file != vma_m->vm_file);
68830 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68831 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68832 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68833 + return vma_m;
68834 +}
68835 +#endif
68836 +
68837 /*
68838 * Verify that the stack growth is acceptable and
68839 * update accounting. This is shared with both the
68840 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68841 return -ENOMEM;
68842
68843 /* Stack limit test */
68844 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
68845 if (size > rlim[RLIMIT_STACK].rlim_cur)
68846 return -ENOMEM;
68847
68848 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68849 unsigned long limit;
68850 locked = mm->locked_vm + grow;
68851 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68852 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68853 if (locked > limit && !capable(CAP_IPC_LOCK))
68854 return -ENOMEM;
68855 }
68856 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68857 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68858 * vma is the last one with address > vma->vm_end. Have to extend vma.
68859 */
68860 +#ifndef CONFIG_IA64
68861 +static
68862 +#endif
68863 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68864 {
68865 int error;
68866 + bool locknext;
68867
68868 if (!(vma->vm_flags & VM_GROWSUP))
68869 return -EFAULT;
68870
68871 + /* Also guard against wrapping around to address 0. */
68872 + if (address < PAGE_ALIGN(address+1))
68873 + address = PAGE_ALIGN(address+1);
68874 + else
68875 + return -ENOMEM;
68876 +
68877 /*
68878 * We must make sure the anon_vma is allocated
68879 * so that the anon_vma locking is not a noop.
68880 */
68881 if (unlikely(anon_vma_prepare(vma)))
68882 return -ENOMEM;
68883 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68884 + if (locknext && anon_vma_prepare(vma->vm_next))
68885 + return -ENOMEM;
68886 anon_vma_lock(vma);
68887 + if (locknext)
68888 + anon_vma_lock(vma->vm_next);
68889
68890 /*
68891 * vma->vm_start/vm_end cannot change under us because the caller
68892 * is required to hold the mmap_sem in read mode. We need the
68893 - * anon_vma lock to serialize against concurrent expand_stacks.
68894 - * Also guard against wrapping around to address 0.
68895 + * anon_vma locks to serialize against concurrent expand_stacks
68896 + * and expand_upwards.
68897 */
68898 - if (address < PAGE_ALIGN(address+4))
68899 - address = PAGE_ALIGN(address+4);
68900 - else {
68901 - anon_vma_unlock(vma);
68902 - return -ENOMEM;
68903 - }
68904 error = 0;
68905
68906 /* Somebody else might have raced and expanded it already */
68907 - if (address > vma->vm_end) {
68908 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68909 + error = -ENOMEM;
68910 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68911 unsigned long size, grow;
68912
68913 size = address - vma->vm_start;
68914 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68915 if (!error)
68916 vma->vm_end = address;
68917 }
68918 + if (locknext)
68919 + anon_vma_unlock(vma->vm_next);
68920 anon_vma_unlock(vma);
68921 return error;
68922 }
68923 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68924 unsigned long address)
68925 {
68926 int error;
68927 + bool lockprev = false;
68928 + struct vm_area_struct *prev;
68929
68930 /*
68931 * We must make sure the anon_vma is allocated
68932 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68933 if (error)
68934 return error;
68935
68936 + prev = vma->vm_prev;
68937 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68938 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68939 +#endif
68940 + if (lockprev && anon_vma_prepare(prev))
68941 + return -ENOMEM;
68942 + if (lockprev)
68943 + anon_vma_lock(prev);
68944 +
68945 anon_vma_lock(vma);
68946
68947 /*
68948 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68949 */
68950
68951 /* Somebody else might have raced and expanded it already */
68952 - if (address < vma->vm_start) {
68953 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68954 + error = -ENOMEM;
68955 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68956 unsigned long size, grow;
68957
68958 +#ifdef CONFIG_PAX_SEGMEXEC
68959 + struct vm_area_struct *vma_m;
68960 +
68961 + vma_m = pax_find_mirror_vma(vma);
68962 +#endif
68963 +
68964 size = vma->vm_end - address;
68965 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68966
68967 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68968 if (!error) {
68969 vma->vm_start = address;
68970 vma->vm_pgoff -= grow;
68971 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68972 +
68973 +#ifdef CONFIG_PAX_SEGMEXEC
68974 + if (vma_m) {
68975 + vma_m->vm_start -= grow << PAGE_SHIFT;
68976 + vma_m->vm_pgoff -= grow;
68977 + }
68978 +#endif
68979 +
68980 }
68981 }
68982 anon_vma_unlock(vma);
68983 + if (lockprev)
68984 + anon_vma_unlock(prev);
68985 return error;
68986 }
68987
68988 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68989 do {
68990 long nrpages = vma_pages(vma);
68991
68992 +#ifdef CONFIG_PAX_SEGMEXEC
68993 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68994 + vma = remove_vma(vma);
68995 + continue;
68996 + }
68997 +#endif
68998 +
68999 mm->total_vm -= nrpages;
69000 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69001 vma = remove_vma(vma);
69002 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69003 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69004 vma->vm_prev = NULL;
69005 do {
69006 +
69007 +#ifdef CONFIG_PAX_SEGMEXEC
69008 + if (vma->vm_mirror) {
69009 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69010 + vma->vm_mirror->vm_mirror = NULL;
69011 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69012 + vma->vm_mirror = NULL;
69013 + }
69014 +#endif
69015 +
69016 rb_erase(&vma->vm_rb, &mm->mm_rb);
69017 mm->map_count--;
69018 tail_vma = vma;
69019 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
69020 struct mempolicy *pol;
69021 struct vm_area_struct *new;
69022
69023 +#ifdef CONFIG_PAX_SEGMEXEC
69024 + struct vm_area_struct *vma_m, *new_m = NULL;
69025 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69026 +#endif
69027 +
69028 if (is_vm_hugetlb_page(vma) && (addr &
69029 ~(huge_page_mask(hstate_vma(vma)))))
69030 return -EINVAL;
69031
69032 +#ifdef CONFIG_PAX_SEGMEXEC
69033 + vma_m = pax_find_mirror_vma(vma);
69034 +
69035 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69036 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69037 + if (mm->map_count >= sysctl_max_map_count-1)
69038 + return -ENOMEM;
69039 + } else
69040 +#endif
69041 +
69042 if (mm->map_count >= sysctl_max_map_count)
69043 return -ENOMEM;
69044
69045 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
69046 if (!new)
69047 return -ENOMEM;
69048
69049 +#ifdef CONFIG_PAX_SEGMEXEC
69050 + if (vma_m) {
69051 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69052 + if (!new_m) {
69053 + kmem_cache_free(vm_area_cachep, new);
69054 + return -ENOMEM;
69055 + }
69056 + }
69057 +#endif
69058 +
69059 /* most fields are the same, copy all, and then fixup */
69060 *new = *vma;
69061
69062 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
69063 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69064 }
69065
69066 +#ifdef CONFIG_PAX_SEGMEXEC
69067 + if (vma_m) {
69068 + *new_m = *vma_m;
69069 + new_m->vm_mirror = new;
69070 + new->vm_mirror = new_m;
69071 +
69072 + if (new_below)
69073 + new_m->vm_end = addr_m;
69074 + else {
69075 + new_m->vm_start = addr_m;
69076 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69077 + }
69078 + }
69079 +#endif
69080 +
69081 pol = mpol_dup(vma_policy(vma));
69082 if (IS_ERR(pol)) {
69083 +
69084 +#ifdef CONFIG_PAX_SEGMEXEC
69085 + if (new_m)
69086 + kmem_cache_free(vm_area_cachep, new_m);
69087 +#endif
69088 +
69089 kmem_cache_free(vm_area_cachep, new);
69090 return PTR_ERR(pol);
69091 }
69092 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69093 else
69094 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69095
69096 +#ifdef CONFIG_PAX_SEGMEXEC
69097 + if (vma_m) {
69098 + mpol_get(pol);
69099 + vma_set_policy(new_m, pol);
69100 +
69101 + if (new_m->vm_file) {
69102 + get_file(new_m->vm_file);
69103 + if (vma_m->vm_flags & VM_EXECUTABLE)
69104 + added_exe_file_vma(mm);
69105 + }
69106 +
69107 + if (new_m->vm_ops && new_m->vm_ops->open)
69108 + new_m->vm_ops->open(new_m);
69109 +
69110 + if (new_below)
69111 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69112 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69113 + else
69114 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69115 + }
69116 +#endif
69117 +
69118 return 0;
69119 }
69120
69121 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69122 * work. This now handles partial unmappings.
69123 * Jeremy Fitzhardinge <jeremy@goop.org>
69124 */
69125 +#ifdef CONFIG_PAX_SEGMEXEC
69126 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69127 +{
69128 + int ret = __do_munmap(mm, start, len);
69129 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69130 + return ret;
69131 +
69132 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69133 +}
69134 +
69135 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69136 +#else
69137 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69138 +#endif
69139 {
69140 unsigned long end;
69141 struct vm_area_struct *vma, *prev, *last;
69142
69143 + /*
69144 + * mm->mmap_sem is required to protect against another thread
69145 + * changing the mappings in case we sleep.
69146 + */
69147 + verify_mm_writelocked(mm);
69148 +
69149 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69150 return -EINVAL;
69151
69152 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69153 /* Fix up all other VM information */
69154 remove_vma_list(mm, vma);
69155
69156 + track_exec_limit(mm, start, end, 0UL);
69157 +
69158 return 0;
69159 }
69160
69161 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69162
69163 profile_munmap(addr);
69164
69165 +#ifdef CONFIG_PAX_SEGMEXEC
69166 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69167 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69168 + return -EINVAL;
69169 +#endif
69170 +
69171 down_write(&mm->mmap_sem);
69172 ret = do_munmap(mm, addr, len);
69173 up_write(&mm->mmap_sem);
69174 return ret;
69175 }
69176
69177 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69178 -{
69179 -#ifdef CONFIG_DEBUG_VM
69180 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69181 - WARN_ON(1);
69182 - up_read(&mm->mmap_sem);
69183 - }
69184 -#endif
69185 -}
69186 -
69187 /*
69188 * this is really a simplified "do_mmap". it only handles
69189 * anonymous maps. eventually we may be able to do some
69190 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69191 struct rb_node ** rb_link, * rb_parent;
69192 pgoff_t pgoff = addr >> PAGE_SHIFT;
69193 int error;
69194 + unsigned long charged;
69195
69196 len = PAGE_ALIGN(len);
69197 if (!len)
69198 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69199
69200 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69201
69202 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69203 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69204 + flags &= ~VM_EXEC;
69205 +
69206 +#ifdef CONFIG_PAX_MPROTECT
69207 + if (mm->pax_flags & MF_PAX_MPROTECT)
69208 + flags &= ~VM_MAYEXEC;
69209 +#endif
69210 +
69211 + }
69212 +#endif
69213 +
69214 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69215 if (error & ~PAGE_MASK)
69216 return error;
69217
69218 + charged = len >> PAGE_SHIFT;
69219 +
69220 /*
69221 * mlock MCL_FUTURE?
69222 */
69223 if (mm->def_flags & VM_LOCKED) {
69224 unsigned long locked, lock_limit;
69225 - locked = len >> PAGE_SHIFT;
69226 + locked = charged;
69227 locked += mm->locked_vm;
69228 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69229 lock_limit >>= PAGE_SHIFT;
69230 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69231 /*
69232 * Clear old maps. this also does some error checking for us
69233 */
69234 - munmap_back:
69235 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69236 if (vma && vma->vm_start < addr + len) {
69237 if (do_munmap(mm, addr, len))
69238 return -ENOMEM;
69239 - goto munmap_back;
69240 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69241 + BUG_ON(vma && vma->vm_start < addr + len);
69242 }
69243
69244 /* Check against address space limits *after* clearing old maps... */
69245 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69246 + if (!may_expand_vm(mm, charged))
69247 return -ENOMEM;
69248
69249 if (mm->map_count > sysctl_max_map_count)
69250 return -ENOMEM;
69251
69252 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69253 + if (security_vm_enough_memory(charged))
69254 return -ENOMEM;
69255
69256 /* Can we just expand an old private anonymous mapping? */
69257 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69258 */
69259 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69260 if (!vma) {
69261 - vm_unacct_memory(len >> PAGE_SHIFT);
69262 + vm_unacct_memory(charged);
69263 return -ENOMEM;
69264 }
69265
69266 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69267 vma->vm_page_prot = vm_get_page_prot(flags);
69268 vma_link(mm, vma, prev, rb_link, rb_parent);
69269 out:
69270 - mm->total_vm += len >> PAGE_SHIFT;
69271 + mm->total_vm += charged;
69272 if (flags & VM_LOCKED) {
69273 if (!mlock_vma_pages_range(vma, addr, addr + len))
69274 - mm->locked_vm += (len >> PAGE_SHIFT);
69275 + mm->locked_vm += charged;
69276 }
69277 + track_exec_limit(mm, addr, addr + len, flags);
69278 return addr;
69279 }
69280
69281 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69282 * Walk the list again, actually closing and freeing it,
69283 * with preemption enabled, without holding any MM locks.
69284 */
69285 - while (vma)
69286 + while (vma) {
69287 + vma->vm_mirror = NULL;
69288 vma = remove_vma(vma);
69289 + }
69290
69291 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69292 }
69293 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69294 struct vm_area_struct * __vma, * prev;
69295 struct rb_node ** rb_link, * rb_parent;
69296
69297 +#ifdef CONFIG_PAX_SEGMEXEC
69298 + struct vm_area_struct *vma_m = NULL;
69299 +#endif
69300 +
69301 /*
69302 * The vm_pgoff of a purely anonymous vma should be irrelevant
69303 * until its first write fault, when page's anon_vma and index
69304 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69305 if ((vma->vm_flags & VM_ACCOUNT) &&
69306 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69307 return -ENOMEM;
69308 +
69309 +#ifdef CONFIG_PAX_SEGMEXEC
69310 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69311 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69312 + if (!vma_m)
69313 + return -ENOMEM;
69314 + }
69315 +#endif
69316 +
69317 vma_link(mm, vma, prev, rb_link, rb_parent);
69318 +
69319 +#ifdef CONFIG_PAX_SEGMEXEC
69320 + if (vma_m)
69321 + pax_mirror_vma(vma_m, vma);
69322 +#endif
69323 +
69324 return 0;
69325 }
69326
69327 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69328 struct rb_node **rb_link, *rb_parent;
69329 struct mempolicy *pol;
69330
69331 + BUG_ON(vma->vm_mirror);
69332 +
69333 /*
69334 * If anonymous vma has not yet been faulted, update new pgoff
69335 * to match new location, to increase its chance of merging.
69336 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69337 return new_vma;
69338 }
69339
69340 +#ifdef CONFIG_PAX_SEGMEXEC
69341 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69342 +{
69343 + struct vm_area_struct *prev_m;
69344 + struct rb_node **rb_link_m, *rb_parent_m;
69345 + struct mempolicy *pol_m;
69346 +
69347 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69348 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69349 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69350 + *vma_m = *vma;
69351 + pol_m = vma_policy(vma_m);
69352 + mpol_get(pol_m);
69353 + vma_set_policy(vma_m, pol_m);
69354 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69355 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69356 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69357 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69358 + if (vma_m->vm_file)
69359 + get_file(vma_m->vm_file);
69360 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69361 + vma_m->vm_ops->open(vma_m);
69362 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69363 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69364 + vma_m->vm_mirror = vma;
69365 + vma->vm_mirror = vma_m;
69366 +}
69367 +#endif
69368 +
69369 /*
69370 * Return true if the calling process may expand its vm space by the passed
69371 * number of pages
69372 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69373 unsigned long lim;
69374
69375 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69376 -
69377 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69378 if (cur + npages > lim)
69379 return 0;
69380 return 1;
69381 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69382 vma->vm_start = addr;
69383 vma->vm_end = addr + len;
69384
69385 +#ifdef CONFIG_PAX_MPROTECT
69386 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69387 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69388 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69389 + return -EPERM;
69390 + if (!(vm_flags & VM_EXEC))
69391 + vm_flags &= ~VM_MAYEXEC;
69392 +#else
69393 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69394 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69395 +#endif
69396 + else
69397 + vm_flags &= ~VM_MAYWRITE;
69398 + }
69399 +#endif
69400 +
69401 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69402 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69403
69404 diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69405 --- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69406 +++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69407 @@ -24,10 +24,16 @@
69408 #include <linux/mmu_notifier.h>
69409 #include <linux/migrate.h>
69410 #include <linux/perf_event.h>
69411 +
69412 +#ifdef CONFIG_PAX_MPROTECT
69413 +#include <linux/elf.h>
69414 +#endif
69415 +
69416 #include <asm/uaccess.h>
69417 #include <asm/pgtable.h>
69418 #include <asm/cacheflush.h>
69419 #include <asm/tlbflush.h>
69420 +#include <asm/mmu_context.h>
69421
69422 #ifndef pgprot_modify
69423 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69424 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
69425 flush_tlb_range(vma, start, end);
69426 }
69427
69428 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69429 +/* called while holding the mmap semaphore for writing except stack expansion */
69430 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69431 +{
69432 + unsigned long oldlimit, newlimit = 0UL;
69433 +
69434 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69435 + return;
69436 +
69437 + spin_lock(&mm->page_table_lock);
69438 + oldlimit = mm->context.user_cs_limit;
69439 + if ((prot & VM_EXEC) && oldlimit < end)
69440 + /* USER_CS limit moved up */
69441 + newlimit = end;
69442 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69443 + /* USER_CS limit moved down */
69444 + newlimit = start;
69445 +
69446 + if (newlimit) {
69447 + mm->context.user_cs_limit = newlimit;
69448 +
69449 +#ifdef CONFIG_SMP
69450 + wmb();
69451 + cpus_clear(mm->context.cpu_user_cs_mask);
69452 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69453 +#endif
69454 +
69455 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69456 + }
69457 + spin_unlock(&mm->page_table_lock);
69458 + if (newlimit == end) {
69459 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69460 +
69461 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69462 + if (is_vm_hugetlb_page(vma))
69463 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69464 + else
69465 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69466 + }
69467 +}
69468 +#endif
69469 +
69470 int
69471 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69472 unsigned long start, unsigned long end, unsigned long newflags)
69473 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69474 int error;
69475 int dirty_accountable = 0;
69476
69477 +#ifdef CONFIG_PAX_SEGMEXEC
69478 + struct vm_area_struct *vma_m = NULL;
69479 + unsigned long start_m, end_m;
69480 +
69481 + start_m = start + SEGMEXEC_TASK_SIZE;
69482 + end_m = end + SEGMEXEC_TASK_SIZE;
69483 +#endif
69484 +
69485 if (newflags == oldflags) {
69486 *pprev = vma;
69487 return 0;
69488 }
69489
69490 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69491 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69492 +
69493 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69494 + return -ENOMEM;
69495 +
69496 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69497 + return -ENOMEM;
69498 + }
69499 +
69500 /*
69501 * If we make a private mapping writable we increase our commit;
69502 * but (without finer accounting) cannot reduce our commit if we
69503 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69504 }
69505 }
69506
69507 +#ifdef CONFIG_PAX_SEGMEXEC
69508 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69509 + if (start != vma->vm_start) {
69510 + error = split_vma(mm, vma, start, 1);
69511 + if (error)
69512 + goto fail;
69513 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69514 + *pprev = (*pprev)->vm_next;
69515 + }
69516 +
69517 + if (end != vma->vm_end) {
69518 + error = split_vma(mm, vma, end, 0);
69519 + if (error)
69520 + goto fail;
69521 + }
69522 +
69523 + if (pax_find_mirror_vma(vma)) {
69524 + error = __do_munmap(mm, start_m, end_m - start_m);
69525 + if (error)
69526 + goto fail;
69527 + } else {
69528 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69529 + if (!vma_m) {
69530 + error = -ENOMEM;
69531 + goto fail;
69532 + }
69533 + vma->vm_flags = newflags;
69534 + pax_mirror_vma(vma_m, vma);
69535 + }
69536 + }
69537 +#endif
69538 +
69539 /*
69540 * First try to merge with previous and/or next vma.
69541 */
69542 @@ -195,9 +293,21 @@ success:
69543 * vm_flags and vm_page_prot are protected by the mmap_sem
69544 * held in write mode.
69545 */
69546 +
69547 +#ifdef CONFIG_PAX_SEGMEXEC
69548 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69549 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69550 +#endif
69551 +
69552 vma->vm_flags = newflags;
69553 +
69554 +#ifdef CONFIG_PAX_MPROTECT
69555 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69556 + mm->binfmt->handle_mprotect(vma, newflags);
69557 +#endif
69558 +
69559 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69560 - vm_get_page_prot(newflags));
69561 + vm_get_page_prot(vma->vm_flags));
69562
69563 if (vma_wants_writenotify(vma)) {
69564 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69565 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69566 end = start + len;
69567 if (end <= start)
69568 return -ENOMEM;
69569 +
69570 +#ifdef CONFIG_PAX_SEGMEXEC
69571 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69572 + if (end > SEGMEXEC_TASK_SIZE)
69573 + return -EINVAL;
69574 + } else
69575 +#endif
69576 +
69577 + if (end > TASK_SIZE)
69578 + return -EINVAL;
69579 +
69580 if (!arch_validate_prot(prot))
69581 return -EINVAL;
69582
69583 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69584 /*
69585 * Does the application expect PROT_READ to imply PROT_EXEC:
69586 */
69587 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69588 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69589 prot |= PROT_EXEC;
69590
69591 vm_flags = calc_vm_prot_bits(prot);
69592 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69593 if (start > vma->vm_start)
69594 prev = vma;
69595
69596 +#ifdef CONFIG_PAX_MPROTECT
69597 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69598 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69599 +#endif
69600 +
69601 for (nstart = start ; ; ) {
69602 unsigned long newflags;
69603
69604 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69605
69606 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69607 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69608 + if (prot & (PROT_WRITE | PROT_EXEC))
69609 + gr_log_rwxmprotect(vma->vm_file);
69610 +
69611 + error = -EACCES;
69612 + goto out;
69613 + }
69614 +
69615 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69616 error = -EACCES;
69617 goto out;
69618 }
69619 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69620 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69621 if (error)
69622 goto out;
69623 +
69624 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
69625 +
69626 nstart = tmp;
69627
69628 if (nstart < prev->vm_end)
69629 diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69630 --- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69631 +++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69632 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69633 continue;
69634 pte = ptep_clear_flush(vma, old_addr, old_pte);
69635 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69636 +
69637 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69638 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69639 + pte = pte_exprotect(pte);
69640 +#endif
69641 +
69642 set_pte_at(mm, new_addr, new_pte, pte);
69643 }
69644
69645 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69646 if (is_vm_hugetlb_page(vma))
69647 goto Einval;
69648
69649 +#ifdef CONFIG_PAX_SEGMEXEC
69650 + if (pax_find_mirror_vma(vma))
69651 + goto Einval;
69652 +#endif
69653 +
69654 /* We can't remap across vm area boundaries */
69655 if (old_len > vma->vm_end - addr)
69656 goto Efault;
69657 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69658 unsigned long ret = -EINVAL;
69659 unsigned long charged = 0;
69660 unsigned long map_flags;
69661 + unsigned long pax_task_size = TASK_SIZE;
69662
69663 if (new_addr & ~PAGE_MASK)
69664 goto out;
69665
69666 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69667 +#ifdef CONFIG_PAX_SEGMEXEC
69668 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69669 + pax_task_size = SEGMEXEC_TASK_SIZE;
69670 +#endif
69671 +
69672 + pax_task_size -= PAGE_SIZE;
69673 +
69674 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69675 goto out;
69676
69677 /* Check if the location we're moving into overlaps the
69678 * old location at all, and fail if it does.
69679 */
69680 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
69681 - goto out;
69682 -
69683 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
69684 + if (addr + old_len > new_addr && new_addr + new_len > addr)
69685 goto out;
69686
69687 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69688 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69689 struct vm_area_struct *vma;
69690 unsigned long ret = -EINVAL;
69691 unsigned long charged = 0;
69692 + unsigned long pax_task_size = TASK_SIZE;
69693
69694 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69695 goto out;
69696 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69697 if (!new_len)
69698 goto out;
69699
69700 +#ifdef CONFIG_PAX_SEGMEXEC
69701 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69702 + pax_task_size = SEGMEXEC_TASK_SIZE;
69703 +#endif
69704 +
69705 + pax_task_size -= PAGE_SIZE;
69706 +
69707 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69708 + old_len > pax_task_size || addr > pax_task_size-old_len)
69709 + goto out;
69710 +
69711 if (flags & MREMAP_FIXED) {
69712 if (flags & MREMAP_MAYMOVE)
69713 ret = mremap_to(addr, old_len, new_addr, new_len);
69714 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69715 addr + new_len);
69716 }
69717 ret = addr;
69718 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69719 goto out;
69720 }
69721 }
69722 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69723 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69724 if (ret)
69725 goto out;
69726 +
69727 + map_flags = vma->vm_flags;
69728 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69729 + if (!(ret & ~PAGE_MASK)) {
69730 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69731 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69732 + }
69733 }
69734 out:
69735 if (ret & ~PAGE_MASK)
69736 diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69737 --- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69738 +++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69739 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69740 int sysctl_overcommit_ratio = 50; /* default is 50% */
69741 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69742 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69743 -int heap_stack_gap = 0;
69744
69745 atomic_long_t mmap_pages_allocated;
69746
69747 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69748 EXPORT_SYMBOL(find_vma);
69749
69750 /*
69751 - * find a VMA
69752 - * - we don't extend stack VMAs under NOMMU conditions
69753 - */
69754 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69755 -{
69756 - return find_vma(mm, addr);
69757 -}
69758 -
69759 -/*
69760 * expand a stack to a given address
69761 * - not supported under NOMMU conditions
69762 */
69763 diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69764 --- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69765 +++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69766 @@ -289,7 +289,7 @@ out:
69767 * This usage means that zero-order pages may not be compound.
69768 */
69769
69770 -static void free_compound_page(struct page *page)
69771 +void free_compound_page(struct page *page)
69772 {
69773 __free_pages_ok(page, compound_order(page));
69774 }
69775 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69776 int bad = 0;
69777 int wasMlocked = __TestClearPageMlocked(page);
69778
69779 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69780 + unsigned long index = 1UL << order;
69781 +#endif
69782 +
69783 kmemcheck_free_shadow(page, order);
69784
69785 for (i = 0 ; i < (1 << order) ; ++i)
69786 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69787 debug_check_no_obj_freed(page_address(page),
69788 PAGE_SIZE << order);
69789 }
69790 +
69791 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69792 + for (; index; --index)
69793 + sanitize_highpage(page + index - 1);
69794 +#endif
69795 +
69796 arch_free_page(page, order);
69797 kernel_map_pages(page, 1 << order, 0);
69798
69799 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69800 arch_alloc_page(page, order);
69801 kernel_map_pages(page, 1 << order, 1);
69802
69803 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
69804 if (gfp_flags & __GFP_ZERO)
69805 prep_zero_page(page, order, gfp_flags);
69806 +#endif
69807
69808 if (order && (gfp_flags & __GFP_COMP))
69809 prep_compound_page(page, order);
69810 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69811 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69812 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69813 }
69814 +
69815 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69816 + sanitize_highpage(page);
69817 +#endif
69818 +
69819 arch_free_page(page, 0);
69820 kernel_map_pages(page, 1, 0);
69821
69822 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
69823 int cpu;
69824 struct zone *zone;
69825
69826 + pax_track_stack();
69827 +
69828 for_each_populated_zone(zone) {
69829 show_node(zone);
69830 printk("%s per-cpu:\n", zone->name);
69831 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69832 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69833 }
69834 #else
69835 -static void inline setup_usemap(struct pglist_data *pgdat,
69836 +static inline void setup_usemap(struct pglist_data *pgdat,
69837 struct zone *zone, unsigned long zonesize) {}
69838 #endif /* CONFIG_SPARSEMEM */
69839
69840 diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69841 --- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69842 +++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69843 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69844 static unsigned int pcpu_last_unit_cpu __read_mostly;
69845
69846 /* the address of the first chunk which starts with the kernel static area */
69847 -void *pcpu_base_addr __read_mostly;
69848 +void *pcpu_base_addr __read_only;
69849 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69850
69851 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69852 diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69853 --- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69854 +++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69855 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69856 /* page_table_lock to protect against threads */
69857 spin_lock(&mm->page_table_lock);
69858 if (likely(!vma->anon_vma)) {
69859 +
69860 +#ifdef CONFIG_PAX_SEGMEXEC
69861 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69862 +
69863 + if (vma_m) {
69864 + BUG_ON(vma_m->anon_vma);
69865 + vma_m->anon_vma = anon_vma;
69866 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69867 + }
69868 +#endif
69869 +
69870 vma->anon_vma = anon_vma;
69871 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69872 allocated = NULL;
69873 diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69874 --- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69875 +++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69876 @@ -31,7 +31,7 @@
69877 #include <linux/swap.h>
69878 #include <linux/ima.h>
69879
69880 -static struct vfsmount *shm_mnt;
69881 +struct vfsmount *shm_mnt;
69882
69883 #ifdef CONFIG_SHMEM
69884 /*
69885 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69886 goto unlock;
69887 }
69888 entry = shmem_swp_entry(info, index, NULL);
69889 + if (!entry)
69890 + goto unlock;
69891 if (entry->val) {
69892 /*
69893 * The more uptodate page coming down from a stacked
69894 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69895 struct vm_area_struct pvma;
69896 struct page *page;
69897
69898 + pax_track_stack();
69899 +
69900 spol = mpol_cond_copy(&mpol,
69901 mpol_shared_policy_lookup(&info->policy, idx));
69902
69903 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69904
69905 info = SHMEM_I(inode);
69906 inode->i_size = len-1;
69907 - if (len <= (char *)inode - (char *)info) {
69908 + if (len <= (char *)inode - (char *)info && len <= 64) {
69909 /* do it inline */
69910 memcpy(info, symname, len);
69911 inode->i_op = &shmem_symlink_inline_operations;
69912 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69913 int err = -ENOMEM;
69914
69915 /* Round up to L1_CACHE_BYTES to resist false sharing */
69916 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69917 - L1_CACHE_BYTES), GFP_KERNEL);
69918 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69919 if (!sbinfo)
69920 return -ENOMEM;
69921
69922 diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69923 --- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69924 +++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69925 @@ -174,7 +174,7 @@
69926
69927 /* Legal flag mask for kmem_cache_create(). */
69928 #if DEBUG
69929 -# define CREATE_MASK (SLAB_RED_ZONE | \
69930 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69931 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69932 SLAB_CACHE_DMA | \
69933 SLAB_STORE_USER | \
69934 @@ -182,7 +182,7 @@
69935 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69936 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69937 #else
69938 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69939 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69940 SLAB_CACHE_DMA | \
69941 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69942 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69943 @@ -308,7 +308,7 @@ struct kmem_list3 {
69944 * Need this for bootstrapping a per node allocator.
69945 */
69946 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69947 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69948 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69949 #define CACHE_CACHE 0
69950 #define SIZE_AC MAX_NUMNODES
69951 #define SIZE_L3 (2 * MAX_NUMNODES)
69952 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69953 if ((x)->max_freeable < i) \
69954 (x)->max_freeable = i; \
69955 } while (0)
69956 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69957 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69958 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69959 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69960 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69961 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69962 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69963 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69964 #else
69965 #define STATS_INC_ACTIVE(x) do { } while (0)
69966 #define STATS_DEC_ACTIVE(x) do { } while (0)
69967 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69968 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69969 */
69970 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69971 - const struct slab *slab, void *obj)
69972 + const struct slab *slab, const void *obj)
69973 {
69974 u32 offset = (obj - slab->s_mem);
69975 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69976 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69977 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69978 sizes[INDEX_AC].cs_size,
69979 ARCH_KMALLOC_MINALIGN,
69980 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69981 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69982 NULL);
69983
69984 if (INDEX_AC != INDEX_L3) {
69985 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69986 kmem_cache_create(names[INDEX_L3].name,
69987 sizes[INDEX_L3].cs_size,
69988 ARCH_KMALLOC_MINALIGN,
69989 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69990 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69991 NULL);
69992 }
69993
69994 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69995 sizes->cs_cachep = kmem_cache_create(names->name,
69996 sizes->cs_size,
69997 ARCH_KMALLOC_MINALIGN,
69998 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69999 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70000 NULL);
70001 }
70002 #ifdef CONFIG_ZONE_DMA
70003 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
70004 }
70005 /* cpu stats */
70006 {
70007 - unsigned long allochit = atomic_read(&cachep->allochit);
70008 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70009 - unsigned long freehit = atomic_read(&cachep->freehit);
70010 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70011 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70012 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70013 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70014 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70015
70016 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70017 allochit, allocmiss, freehit, freemiss);
70018 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
70019
70020 static int __init slab_proc_init(void)
70021 {
70022 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70023 + mode_t gr_mode = S_IRUGO;
70024 +
70025 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70026 + gr_mode = S_IRUSR;
70027 +#endif
70028 +
70029 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70030 #ifdef CONFIG_DEBUG_SLAB_LEAK
70031 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70032 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70033 #endif
70034 return 0;
70035 }
70036 module_init(slab_proc_init);
70037 #endif
70038
70039 +void check_object_size(const void *ptr, unsigned long n, bool to)
70040 +{
70041 +
70042 +#ifdef CONFIG_PAX_USERCOPY
70043 + struct page *page;
70044 + struct kmem_cache *cachep = NULL;
70045 + struct slab *slabp;
70046 + unsigned int objnr;
70047 + unsigned long offset;
70048 +
70049 + if (!n)
70050 + return;
70051 +
70052 + if (ZERO_OR_NULL_PTR(ptr))
70053 + goto report;
70054 +
70055 + if (!virt_addr_valid(ptr))
70056 + return;
70057 +
70058 + page = virt_to_head_page(ptr);
70059 +
70060 + if (!PageSlab(page)) {
70061 + if (object_is_on_stack(ptr, n) == -1)
70062 + goto report;
70063 + return;
70064 + }
70065 +
70066 + cachep = page_get_cache(page);
70067 + if (!(cachep->flags & SLAB_USERCOPY))
70068 + goto report;
70069 +
70070 + slabp = page_get_slab(page);
70071 + objnr = obj_to_index(cachep, slabp, ptr);
70072 + BUG_ON(objnr >= cachep->num);
70073 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70074 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70075 + return;
70076 +
70077 +report:
70078 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70079 +#endif
70080 +
70081 +}
70082 +EXPORT_SYMBOL(check_object_size);
70083 +
70084 /**
70085 * ksize - get the actual amount of memory allocated for a given object
70086 * @objp: Pointer to the object
70087 diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
70088 --- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70089 +++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70090 @@ -29,7 +29,7 @@
70091 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70092 * alloc_pages() directly, allocating compound pages so the page order
70093 * does not have to be separately tracked, and also stores the exact
70094 - * allocation size in page->private so that it can be used to accurately
70095 + * allocation size in slob_page->size so that it can be used to accurately
70096 * provide ksize(). These objects are detected in kfree() because slob_page()
70097 * is false for them.
70098 *
70099 @@ -58,6 +58,7 @@
70100 */
70101
70102 #include <linux/kernel.h>
70103 +#include <linux/sched.h>
70104 #include <linux/slab.h>
70105 #include <linux/mm.h>
70106 #include <linux/swap.h> /* struct reclaim_state */
70107 @@ -100,7 +101,8 @@ struct slob_page {
70108 unsigned long flags; /* mandatory */
70109 atomic_t _count; /* mandatory */
70110 slobidx_t units; /* free units left in page */
70111 - unsigned long pad[2];
70112 + unsigned long pad[1];
70113 + unsigned long size; /* size when >=PAGE_SIZE */
70114 slob_t *free; /* first free slob_t in page */
70115 struct list_head list; /* linked list of free pages */
70116 };
70117 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70118 */
70119 static inline int is_slob_page(struct slob_page *sp)
70120 {
70121 - return PageSlab((struct page *)sp);
70122 + return PageSlab((struct page *)sp) && !sp->size;
70123 }
70124
70125 static inline void set_slob_page(struct slob_page *sp)
70126 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70127
70128 static inline struct slob_page *slob_page(const void *addr)
70129 {
70130 - return (struct slob_page *)virt_to_page(addr);
70131 + return (struct slob_page *)virt_to_head_page(addr);
70132 }
70133
70134 /*
70135 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70136 /*
70137 * Return the size of a slob block.
70138 */
70139 -static slobidx_t slob_units(slob_t *s)
70140 +static slobidx_t slob_units(const slob_t *s)
70141 {
70142 if (s->units > 0)
70143 return s->units;
70144 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70145 /*
70146 * Return the next free slob block pointer after this one.
70147 */
70148 -static slob_t *slob_next(slob_t *s)
70149 +static slob_t *slob_next(const slob_t *s)
70150 {
70151 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70152 slobidx_t next;
70153 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70154 /*
70155 * Returns true if s is the last free block in its page.
70156 */
70157 -static int slob_last(slob_t *s)
70158 +static int slob_last(const slob_t *s)
70159 {
70160 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70161 }
70162 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70163 if (!page)
70164 return NULL;
70165
70166 + set_slob_page(page);
70167 return page_address(page);
70168 }
70169
70170 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70171 if (!b)
70172 return NULL;
70173 sp = slob_page(b);
70174 - set_slob_page(sp);
70175
70176 spin_lock_irqsave(&slob_lock, flags);
70177 sp->units = SLOB_UNITS(PAGE_SIZE);
70178 sp->free = b;
70179 + sp->size = 0;
70180 INIT_LIST_HEAD(&sp->list);
70181 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70182 set_slob_page_free(sp, slob_list);
70183 @@ -475,10 +478,9 @@ out:
70184 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70185 #endif
70186
70187 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70188 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70189 {
70190 - unsigned int *m;
70191 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70192 + slob_t *m;
70193 void *ret;
70194
70195 lockdep_trace_alloc(gfp);
70196 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70197
70198 if (!m)
70199 return NULL;
70200 - *m = size;
70201 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70202 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70203 + m[0].units = size;
70204 + m[1].units = align;
70205 ret = (void *)m + align;
70206
70207 trace_kmalloc_node(_RET_IP_, ret,
70208 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70209
70210 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70211 if (ret) {
70212 - struct page *page;
70213 - page = virt_to_page(ret);
70214 - page->private = size;
70215 + struct slob_page *sp;
70216 + sp = slob_page(ret);
70217 + sp->size = size;
70218 }
70219
70220 trace_kmalloc_node(_RET_IP_, ret,
70221 size, PAGE_SIZE << order, gfp, node);
70222 }
70223
70224 - kmemleak_alloc(ret, size, 1, gfp);
70225 + return ret;
70226 +}
70227 +
70228 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70229 +{
70230 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70231 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70232 +
70233 + if (!ZERO_OR_NULL_PTR(ret))
70234 + kmemleak_alloc(ret, size, 1, gfp);
70235 return ret;
70236 }
70237 EXPORT_SYMBOL(__kmalloc_node);
70238 @@ -528,13 +542,88 @@ void kfree(const void *block)
70239 sp = slob_page(block);
70240 if (is_slob_page(sp)) {
70241 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70242 - unsigned int *m = (unsigned int *)(block - align);
70243 - slob_free(m, *m + align);
70244 - } else
70245 + slob_t *m = (slob_t *)(block - align);
70246 + slob_free(m, m[0].units + align);
70247 + } else {
70248 + clear_slob_page(sp);
70249 + free_slob_page(sp);
70250 + sp->size = 0;
70251 put_page(&sp->page);
70252 + }
70253 }
70254 EXPORT_SYMBOL(kfree);
70255
70256 +void check_object_size(const void *ptr, unsigned long n, bool to)
70257 +{
70258 +
70259 +#ifdef CONFIG_PAX_USERCOPY
70260 + struct slob_page *sp;
70261 + const slob_t *free;
70262 + const void *base;
70263 + unsigned long flags;
70264 +
70265 + if (!n)
70266 + return;
70267 +
70268 + if (ZERO_OR_NULL_PTR(ptr))
70269 + goto report;
70270 +
70271 + if (!virt_addr_valid(ptr))
70272 + return;
70273 +
70274 + sp = slob_page(ptr);
70275 + if (!PageSlab((struct page*)sp)) {
70276 + if (object_is_on_stack(ptr, n) == -1)
70277 + goto report;
70278 + return;
70279 + }
70280 +
70281 + if (sp->size) {
70282 + base = page_address(&sp->page);
70283 + if (base <= ptr && n <= sp->size - (ptr - base))
70284 + return;
70285 + goto report;
70286 + }
70287 +
70288 + /* some tricky double walking to find the chunk */
70289 + spin_lock_irqsave(&slob_lock, flags);
70290 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70291 + free = sp->free;
70292 +
70293 + while (!slob_last(free) && (void *)free <= ptr) {
70294 + base = free + slob_units(free);
70295 + free = slob_next(free);
70296 + }
70297 +
70298 + while (base < (void *)free) {
70299 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70300 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70301 + int offset;
70302 +
70303 + if (ptr < base + align)
70304 + break;
70305 +
70306 + offset = ptr - base - align;
70307 + if (offset >= m) {
70308 + base += size;
70309 + continue;
70310 + }
70311 +
70312 + if (n > m - offset)
70313 + break;
70314 +
70315 + spin_unlock_irqrestore(&slob_lock, flags);
70316 + return;
70317 + }
70318 +
70319 + spin_unlock_irqrestore(&slob_lock, flags);
70320 +report:
70321 + pax_report_usercopy(ptr, n, to, NULL);
70322 +#endif
70323 +
70324 +}
70325 +EXPORT_SYMBOL(check_object_size);
70326 +
70327 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70328 size_t ksize(const void *block)
70329 {
70330 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
70331 sp = slob_page(block);
70332 if (is_slob_page(sp)) {
70333 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70334 - unsigned int *m = (unsigned int *)(block - align);
70335 - return SLOB_UNITS(*m) * SLOB_UNIT;
70336 + slob_t *m = (slob_t *)(block - align);
70337 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70338 } else
70339 - return sp->page.private;
70340 + return sp->size;
70341 }
70342 EXPORT_SYMBOL(ksize);
70343
70344 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70345 {
70346 struct kmem_cache *c;
70347
70348 +#ifdef CONFIG_PAX_USERCOPY
70349 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70350 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70351 +#else
70352 c = slob_alloc(sizeof(struct kmem_cache),
70353 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70354 +#endif
70355
70356 if (c) {
70357 c->name = name;
70358 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70359 {
70360 void *b;
70361
70362 +#ifdef CONFIG_PAX_USERCOPY
70363 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70364 +#else
70365 if (c->size < PAGE_SIZE) {
70366 b = slob_alloc(c->size, flags, c->align, node);
70367 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70368 SLOB_UNITS(c->size) * SLOB_UNIT,
70369 flags, node);
70370 } else {
70371 + struct slob_page *sp;
70372 +
70373 b = slob_new_pages(flags, get_order(c->size), node);
70374 + sp = slob_page(b);
70375 + sp->size = c->size;
70376 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70377 PAGE_SIZE << get_order(c->size),
70378 flags, node);
70379 }
70380 +#endif
70381
70382 if (c->ctor)
70383 c->ctor(b);
70384 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70385
70386 static void __kmem_cache_free(void *b, int size)
70387 {
70388 - if (size < PAGE_SIZE)
70389 + struct slob_page *sp = slob_page(b);
70390 +
70391 + if (is_slob_page(sp))
70392 slob_free(b, size);
70393 - else
70394 + else {
70395 + clear_slob_page(sp);
70396 + free_slob_page(sp);
70397 + sp->size = 0;
70398 slob_free_pages(b, get_order(size));
70399 + }
70400 }
70401
70402 static void kmem_rcu_free(struct rcu_head *head)
70403 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70404
70405 void kmem_cache_free(struct kmem_cache *c, void *b)
70406 {
70407 + int size = c->size;
70408 +
70409 +#ifdef CONFIG_PAX_USERCOPY
70410 + if (size + c->align < PAGE_SIZE) {
70411 + size += c->align;
70412 + b -= c->align;
70413 + }
70414 +#endif
70415 +
70416 kmemleak_free_recursive(b, c->flags);
70417 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70418 struct slob_rcu *slob_rcu;
70419 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70420 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70421 INIT_RCU_HEAD(&slob_rcu->head);
70422 - slob_rcu->size = c->size;
70423 + slob_rcu->size = size;
70424 call_rcu(&slob_rcu->head, kmem_rcu_free);
70425 } else {
70426 - __kmem_cache_free(b, c->size);
70427 + __kmem_cache_free(b, size);
70428 }
70429
70430 +#ifdef CONFIG_PAX_USERCOPY
70431 + trace_kfree(_RET_IP_, b);
70432 +#else
70433 trace_kmem_cache_free(_RET_IP_, b);
70434 +#endif
70435 +
70436 }
70437 EXPORT_SYMBOL(kmem_cache_free);
70438
70439 diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70440 --- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70441 +++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70442 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
70443 if (!t->addr)
70444 return;
70445
70446 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70447 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70448 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70449 }
70450
70451 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70452
70453 page = virt_to_head_page(x);
70454
70455 + BUG_ON(!PageSlab(page));
70456 +
70457 slab_free(s, page, x, _RET_IP_);
70458
70459 trace_kmem_cache_free(_RET_IP_, x);
70460 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
70461 * Merge control. If this is set then no merging of slab caches will occur.
70462 * (Could be removed. This was introduced to pacify the merge skeptics.)
70463 */
70464 -static int slub_nomerge;
70465 +static int slub_nomerge = 1;
70466
70467 /*
70468 * Calculate the order of allocation given an slab object size.
70469 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70470 * list to avoid pounding the page allocator excessively.
70471 */
70472 set_min_partial(s, ilog2(s->size));
70473 - s->refcount = 1;
70474 + atomic_set(&s->refcount, 1);
70475 #ifdef CONFIG_NUMA
70476 s->remote_node_defrag_ratio = 1000;
70477 #endif
70478 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70479 void kmem_cache_destroy(struct kmem_cache *s)
70480 {
70481 down_write(&slub_lock);
70482 - s->refcount--;
70483 - if (!s->refcount) {
70484 + if (atomic_dec_and_test(&s->refcount)) {
70485 list_del(&s->list);
70486 up_write(&slub_lock);
70487 if (kmem_cache_close(s)) {
70488 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70489 __setup("slub_nomerge", setup_slub_nomerge);
70490
70491 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70492 - const char *name, int size, gfp_t gfp_flags)
70493 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70494 {
70495 - unsigned int flags = 0;
70496 -
70497 if (gfp_flags & SLUB_DMA)
70498 - flags = SLAB_CACHE_DMA;
70499 + flags |= SLAB_CACHE_DMA;
70500
70501 /*
70502 * This function is called with IRQs disabled during early-boot on
70503 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70504 EXPORT_SYMBOL(__kmalloc_node);
70505 #endif
70506
70507 +void check_object_size(const void *ptr, unsigned long n, bool to)
70508 +{
70509 +
70510 +#ifdef CONFIG_PAX_USERCOPY
70511 + struct page *page;
70512 + struct kmem_cache *s = NULL;
70513 + unsigned long offset;
70514 +
70515 + if (!n)
70516 + return;
70517 +
70518 + if (ZERO_OR_NULL_PTR(ptr))
70519 + goto report;
70520 +
70521 + if (!virt_addr_valid(ptr))
70522 + return;
70523 +
70524 + page = get_object_page(ptr);
70525 +
70526 + if (!page) {
70527 + if (object_is_on_stack(ptr, n) == -1)
70528 + goto report;
70529 + return;
70530 + }
70531 +
70532 + s = page->slab;
70533 + if (!(s->flags & SLAB_USERCOPY))
70534 + goto report;
70535 +
70536 + offset = (ptr - page_address(page)) % s->size;
70537 + if (offset <= s->objsize && n <= s->objsize - offset)
70538 + return;
70539 +
70540 +report:
70541 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70542 +#endif
70543 +
70544 +}
70545 +EXPORT_SYMBOL(check_object_size);
70546 +
70547 size_t ksize(const void *object)
70548 {
70549 struct page *page;
70550 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70551 * kmem_cache_open for slab_state == DOWN.
70552 */
70553 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70554 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
70555 - kmalloc_caches[0].refcount = -1;
70556 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70557 + atomic_set(&kmalloc_caches[0].refcount, -1);
70558 caches++;
70559
70560 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70561 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70562 /* Caches that are not of the two-to-the-power-of size */
70563 if (KMALLOC_MIN_SIZE <= 32) {
70564 create_kmalloc_cache(&kmalloc_caches[1],
70565 - "kmalloc-96", 96, GFP_NOWAIT);
70566 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70567 caches++;
70568 }
70569 if (KMALLOC_MIN_SIZE <= 64) {
70570 create_kmalloc_cache(&kmalloc_caches[2],
70571 - "kmalloc-192", 192, GFP_NOWAIT);
70572 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70573 caches++;
70574 }
70575
70576 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70577 create_kmalloc_cache(&kmalloc_caches[i],
70578 - "kmalloc", 1 << i, GFP_NOWAIT);
70579 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70580 caches++;
70581 }
70582
70583 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70584 /*
70585 * We may have set a slab to be unmergeable during bootstrap.
70586 */
70587 - if (s->refcount < 0)
70588 + if (atomic_read(&s->refcount) < 0)
70589 return 1;
70590
70591 return 0;
70592 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70593 if (s) {
70594 int cpu;
70595
70596 - s->refcount++;
70597 + atomic_inc(&s->refcount);
70598 /*
70599 * Adjust the object sizes so that we clear
70600 * the complete object on kzalloc.
70601 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70602
70603 if (sysfs_slab_alias(s, name)) {
70604 down_write(&slub_lock);
70605 - s->refcount--;
70606 + atomic_dec(&s->refcount);
70607 up_write(&slub_lock);
70608 goto err;
70609 }
70610 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70611
70612 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70613 {
70614 - return sprintf(buf, "%d\n", s->refcount - 1);
70615 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70616 }
70617 SLAB_ATTR_RO(aliases);
70618
70619 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70620 kfree(s);
70621 }
70622
70623 -static struct sysfs_ops slab_sysfs_ops = {
70624 +static const struct sysfs_ops slab_sysfs_ops = {
70625 .show = slab_attr_show,
70626 .store = slab_attr_store,
70627 };
70628 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70629 return 0;
70630 }
70631
70632 -static struct kset_uevent_ops slab_uevent_ops = {
70633 +static const struct kset_uevent_ops slab_uevent_ops = {
70634 .filter = uevent_filter,
70635 };
70636
70637 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
70638
70639 static int __init slab_proc_init(void)
70640 {
70641 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70642 + mode_t gr_mode = S_IRUGO;
70643 +
70644 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70645 + gr_mode = S_IRUSR;
70646 +#endif
70647 +
70648 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70649 return 0;
70650 }
70651 module_init(slab_proc_init);
70652 diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70653 --- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70654 +++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70655 @@ -30,6 +30,7 @@
70656 #include <linux/notifier.h>
70657 #include <linux/backing-dev.h>
70658 #include <linux/memcontrol.h>
70659 +#include <linux/hugetlb.h>
70660
70661 #include "internal.h"
70662
70663 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70664 compound_page_dtor *dtor;
70665
70666 dtor = get_compound_page_dtor(page);
70667 + if (!PageHuge(page))
70668 + BUG_ON(dtor != free_compound_page);
70669 (*dtor)(page);
70670 }
70671 }
70672 diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70673 --- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70674 +++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70675 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70676 void arch_pick_mmap_layout(struct mm_struct *mm)
70677 {
70678 mm->mmap_base = TASK_UNMAPPED_BASE;
70679 +
70680 +#ifdef CONFIG_PAX_RANDMMAP
70681 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70682 + mm->mmap_base += mm->delta_mmap;
70683 +#endif
70684 +
70685 mm->get_unmapped_area = arch_get_unmapped_area;
70686 mm->unmap_area = arch_unmap_area;
70687 }
70688 diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70689 --- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70690 +++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70691 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70692
70693 pte = pte_offset_kernel(pmd, addr);
70694 do {
70695 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70696 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70697 +
70698 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70699 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70700 + BUG_ON(!pte_exec(*pte));
70701 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70702 + continue;
70703 + }
70704 +#endif
70705 +
70706 + {
70707 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70708 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70709 + }
70710 } while (pte++, addr += PAGE_SIZE, addr != end);
70711 }
70712
70713 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70714 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70715 {
70716 pte_t *pte;
70717 + int ret = -ENOMEM;
70718
70719 /*
70720 * nr is a running index into the array which helps higher level
70721 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70722 pte = pte_alloc_kernel(pmd, addr);
70723 if (!pte)
70724 return -ENOMEM;
70725 +
70726 + pax_open_kernel();
70727 do {
70728 struct page *page = pages[*nr];
70729
70730 - if (WARN_ON(!pte_none(*pte)))
70731 - return -EBUSY;
70732 - if (WARN_ON(!page))
70733 - return -ENOMEM;
70734 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70735 + if (!(pgprot_val(prot) & _PAGE_NX))
70736 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70737 + else
70738 +#endif
70739 +
70740 + if (WARN_ON(!pte_none(*pte))) {
70741 + ret = -EBUSY;
70742 + goto out;
70743 + }
70744 + if (WARN_ON(!page)) {
70745 + ret = -ENOMEM;
70746 + goto out;
70747 + }
70748 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70749 (*nr)++;
70750 } while (pte++, addr += PAGE_SIZE, addr != end);
70751 - return 0;
70752 + ret = 0;
70753 +out:
70754 + pax_close_kernel();
70755 + return ret;
70756 }
70757
70758 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70759 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70760 * and fall back on vmalloc() if that fails. Others
70761 * just put it in the vmalloc space.
70762 */
70763 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70764 +#ifdef CONFIG_MODULES
70765 +#ifdef MODULES_VADDR
70766 unsigned long addr = (unsigned long)x;
70767 if (addr >= MODULES_VADDR && addr < MODULES_END)
70768 return 1;
70769 #endif
70770 +
70771 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70772 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70773 + return 1;
70774 +#endif
70775 +
70776 +#endif
70777 +
70778 return is_vmalloc_addr(x);
70779 }
70780
70781 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70782
70783 if (!pgd_none(*pgd)) {
70784 pud_t *pud = pud_offset(pgd, addr);
70785 +#ifdef CONFIG_X86
70786 + if (!pud_large(*pud))
70787 +#endif
70788 if (!pud_none(*pud)) {
70789 pmd_t *pmd = pmd_offset(pud, addr);
70790 +#ifdef CONFIG_X86
70791 + if (!pmd_large(*pmd))
70792 +#endif
70793 if (!pmd_none(*pmd)) {
70794 pte_t *ptep, pte;
70795
70796 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70797 struct rb_node *tmp;
70798
70799 while (*p) {
70800 - struct vmap_area *tmp;
70801 + struct vmap_area *varea;
70802
70803 parent = *p;
70804 - tmp = rb_entry(parent, struct vmap_area, rb_node);
70805 - if (va->va_start < tmp->va_end)
70806 + varea = rb_entry(parent, struct vmap_area, rb_node);
70807 + if (va->va_start < varea->va_end)
70808 p = &(*p)->rb_left;
70809 - else if (va->va_end > tmp->va_start)
70810 + else if (va->va_end > varea->va_start)
70811 p = &(*p)->rb_right;
70812 else
70813 BUG();
70814 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70815 struct vm_struct *area;
70816
70817 BUG_ON(in_interrupt());
70818 +
70819 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70820 + if (flags & VM_KERNEXEC) {
70821 + if (start != VMALLOC_START || end != VMALLOC_END)
70822 + return NULL;
70823 + start = (unsigned long)MODULES_EXEC_VADDR;
70824 + end = (unsigned long)MODULES_EXEC_END;
70825 + }
70826 +#endif
70827 +
70828 if (flags & VM_IOREMAP) {
70829 int bit = fls(size);
70830
70831 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70832 if (count > totalram_pages)
70833 return NULL;
70834
70835 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70836 + if (!(pgprot_val(prot) & _PAGE_NX))
70837 + flags |= VM_KERNEXEC;
70838 +#endif
70839 +
70840 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70841 __builtin_return_address(0));
70842 if (!area)
70843 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70844 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70845 return NULL;
70846
70847 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70848 + if (!(pgprot_val(prot) & _PAGE_NX))
70849 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70850 + node, gfp_mask, caller);
70851 + else
70852 +#endif
70853 +
70854 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70855 VMALLOC_END, node, gfp_mask, caller);
70856
70857 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70858 return addr;
70859 }
70860
70861 +#undef __vmalloc
70862 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70863 {
70864 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70865 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70866 * For tight control over page level allocator and protection flags
70867 * use __vmalloc() instead.
70868 */
70869 +#undef vmalloc
70870 void *vmalloc(unsigned long size)
70871 {
70872 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70873 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70874 * The resulting memory area is zeroed so it can be mapped to userspace
70875 * without leaking data.
70876 */
70877 +#undef vmalloc_user
70878 void *vmalloc_user(unsigned long size)
70879 {
70880 struct vm_struct *area;
70881 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70882 * For tight control over page level allocator and protection flags
70883 * use __vmalloc() instead.
70884 */
70885 +#undef vmalloc_node
70886 void *vmalloc_node(unsigned long size, int node)
70887 {
70888 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70889 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70890 * For tight control over page level allocator and protection flags
70891 * use __vmalloc() instead.
70892 */
70893 -
70894 +#undef vmalloc_exec
70895 void *vmalloc_exec(unsigned long size)
70896 {
70897 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70898 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70899 -1, __builtin_return_address(0));
70900 }
70901
70902 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70903 * Allocate enough 32bit PA addressable pages to cover @size from the
70904 * page level allocator and map them into contiguous kernel virtual space.
70905 */
70906 +#undef vmalloc_32
70907 void *vmalloc_32(unsigned long size)
70908 {
70909 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70910 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70911 * The resulting memory area is 32bit addressable and zeroed so it can be
70912 * mapped to userspace without leaking data.
70913 */
70914 +#undef vmalloc_32_user
70915 void *vmalloc_32_user(unsigned long size)
70916 {
70917 struct vm_struct *area;
70918 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70919 unsigned long uaddr = vma->vm_start;
70920 unsigned long usize = vma->vm_end - vma->vm_start;
70921
70922 + BUG_ON(vma->vm_mirror);
70923 +
70924 if ((PAGE_SIZE-1) & (unsigned long)addr)
70925 return -EINVAL;
70926
70927 diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70928 --- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70929 +++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70930 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70931 *
70932 * vm_stat contains the global counters
70933 */
70934 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70935 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70936 EXPORT_SYMBOL(vm_stat);
70937
70938 #ifdef CONFIG_SMP
70939 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70940 v = p->vm_stat_diff[i];
70941 p->vm_stat_diff[i] = 0;
70942 local_irq_restore(flags);
70943 - atomic_long_add(v, &zone->vm_stat[i]);
70944 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70945 global_diff[i] += v;
70946 #ifdef CONFIG_NUMA
70947 /* 3 seconds idle till flush */
70948 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70949
70950 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70951 if (global_diff[i])
70952 - atomic_long_add(global_diff[i], &vm_stat[i]);
70953 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70954 }
70955
70956 #endif
70957 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70958 start_cpu_timer(cpu);
70959 #endif
70960 #ifdef CONFIG_PROC_FS
70961 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70962 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70963 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70964 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70965 + {
70966 + mode_t gr_mode = S_IRUGO;
70967 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70968 + gr_mode = S_IRUSR;
70969 +#endif
70970 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70971 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70972 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70973 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70974 +#else
70975 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70976 +#endif
70977 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70978 + }
70979 #endif
70980 return 0;
70981 }
70982 diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70983 --- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70984 +++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70985 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70986 err = -EPERM;
70987 if (!capable(CAP_NET_ADMIN))
70988 break;
70989 - if ((args.u.name_type >= 0) &&
70990 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70991 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70992 struct vlan_net *vn;
70993
70994 vn = net_generic(net, vlan_net_id);
70995 diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70996 --- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70997 +++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70998 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70999 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71000 return 1;
71001 atm_return(vcc,truesize);
71002 - atomic_inc(&vcc->stats->rx_drop);
71003 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71004 return 0;
71005 }
71006
71007 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
71008 }
71009 }
71010 atm_return(vcc,guess);
71011 - atomic_inc(&vcc->stats->rx_drop);
71012 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71013 return NULL;
71014 }
71015
71016 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
71017
71018 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71019 {
71020 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71021 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71022 __SONET_ITEMS
71023 #undef __HANDLE_ITEM
71024 }
71025 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
71026
71027 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71028 {
71029 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
71030 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71031 __SONET_ITEMS
71032 #undef __HANDLE_ITEM
71033 }
71034 diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
71035 --- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
71036 +++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
71037 @@ -48,7 +48,7 @@ struct lane2_ops {
71038 const u8 *tlvs, u32 sizeoftlvs);
71039 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71040 const u8 *tlvs, u32 sizeoftlvs);
71041 -};
71042 +} __no_const;
71043
71044 /*
71045 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71046 diff -urNp linux-2.6.32.45/net/atm/mpc.c linux-2.6.32.45/net/atm/mpc.c
71047 --- linux-2.6.32.45/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
71048 +++ linux-2.6.32.45/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
71049 @@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
71050 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
71051 else {
71052 mpc->old_ops = dev->netdev_ops;
71053 - mpc->new_ops = *mpc->old_ops;
71054 - mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71055 + memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
71056 + *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71057 dev->netdev_ops = &mpc->new_ops;
71058 }
71059 }
71060 diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
71061 --- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
71062 +++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
71063 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
71064 struct timeval now;
71065 struct k_message msg;
71066
71067 + pax_track_stack();
71068 +
71069 do_gettimeofday(&now);
71070
71071 write_lock_irq(&client->egress_lock);
71072 diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
71073 --- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
71074 +++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
71075 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
71076 const struct k_atm_aal_stats *stats)
71077 {
71078 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71079 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71080 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71081 - atomic_read(&stats->rx_drop));
71082 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71083 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71084 + atomic_read_unchecked(&stats->rx_drop));
71085 }
71086
71087 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71088 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71089 {
71090 struct sock *sk = sk_atm(vcc);
71091
71092 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71093 + seq_printf(seq, "%p ", NULL);
71094 +#else
71095 seq_printf(seq, "%p ", vcc);
71096 +#endif
71097 +
71098 if (!vcc->dev)
71099 seq_printf(seq, "Unassigned ");
71100 else
71101 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71102 {
71103 if (!vcc->dev)
71104 seq_printf(seq, sizeof(void *) == 4 ?
71105 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71106 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71107 +#else
71108 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71109 +#endif
71110 else
71111 seq_printf(seq, "%3d %3d %5d ",
71112 vcc->dev->number, vcc->vpi, vcc->vci);
71113 diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
71114 --- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71115 +++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71116 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71117 static void copy_aal_stats(struct k_atm_aal_stats *from,
71118 struct atm_aal_stats *to)
71119 {
71120 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71121 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71122 __AAL_STAT_ITEMS
71123 #undef __HANDLE_ITEM
71124 }
71125 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71126 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71127 struct atm_aal_stats *to)
71128 {
71129 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71130 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71131 __AAL_STAT_ITEMS
71132 #undef __HANDLE_ITEM
71133 }
71134 diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
71135 --- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71136 +++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71137 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71138 err = -ENOTCONN;
71139 break;
71140 }
71141 -
71142 + memset(&cinfo, 0, sizeof(cinfo));
71143 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71144 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71145
71146 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71147
71148 /* Reject if config buffer is too small. */
71149 len = cmd_len - sizeof(*req);
71150 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71151 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71152 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71153 l2cap_build_conf_rsp(sk, rsp,
71154 L2CAP_CONF_REJECT, flags), rsp);
71155 diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
71156 --- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71157 +++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71158 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71159
71160 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71161
71162 + memset(&cinfo, 0, sizeof(cinfo));
71163 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71164 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71165
71166 diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
71167 --- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71168 +++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71169 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71170
71171 #ifdef CONFIG_SYSFS
71172 /* br_sysfs_if.c */
71173 -extern struct sysfs_ops brport_sysfs_ops;
71174 +extern const struct sysfs_ops brport_sysfs_ops;
71175 extern int br_sysfs_addif(struct net_bridge_port *p);
71176
71177 /* br_sysfs_br.c */
71178 diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
71179 --- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71180 +++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71181 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71182 char *envp[] = { NULL };
71183
71184 if (br->stp_enabled == BR_USER_STP) {
71185 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71186 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71187 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71188 br->dev->name, r);
71189
71190 diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71191 --- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71192 +++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71193 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71194 return ret;
71195 }
71196
71197 -struct sysfs_ops brport_sysfs_ops = {
71198 +const struct sysfs_ops brport_sysfs_ops = {
71199 .show = brport_show,
71200 .store = brport_store,
71201 };
71202 diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71203 --- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71204 +++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71205 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71206 unsigned int entries_size, nentries;
71207 char *entries;
71208
71209 + pax_track_stack();
71210 +
71211 if (cmd == EBT_SO_GET_ENTRIES) {
71212 entries_size = t->private->entries_size;
71213 nentries = t->private->nentries;
71214 diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71215 --- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71216 +++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71217 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71218 struct bcm_sock *bo = bcm_sk(sk);
71219 struct bcm_op *op;
71220
71221 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71222 + seq_printf(m, ">>> socket %p", NULL);
71223 + seq_printf(m, " / sk %p", NULL);
71224 + seq_printf(m, " / bo %p", NULL);
71225 +#else
71226 seq_printf(m, ">>> socket %p", sk->sk_socket);
71227 seq_printf(m, " / sk %p", sk);
71228 seq_printf(m, " / bo %p", bo);
71229 +#endif
71230 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71231 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71232 seq_printf(m, " <<<\n");
71233 diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71234 --- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71235 +++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71236 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71237 if (no_module && capable(CAP_NET_ADMIN))
71238 no_module = request_module("netdev-%s", name);
71239 if (no_module && capable(CAP_SYS_MODULE)) {
71240 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71241 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71242 +#else
71243 if (!request_module("%s", name))
71244 pr_err("Loading kernel module for a network device "
71245 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71246 "instead\n", name);
71247 +#endif
71248 }
71249 }
71250 EXPORT_SYMBOL(dev_load);
71251 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71252
71253 struct dev_gso_cb {
71254 void (*destructor)(struct sk_buff *skb);
71255 -};
71256 +} __no_const;
71257
71258 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71259
71260 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71261 }
71262 EXPORT_SYMBOL(netif_rx_ni);
71263
71264 -static void net_tx_action(struct softirq_action *h)
71265 +static void net_tx_action(void)
71266 {
71267 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71268
71269 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71270 EXPORT_SYMBOL(netif_napi_del);
71271
71272
71273 -static void net_rx_action(struct softirq_action *h)
71274 +static void net_rx_action(void)
71275 {
71276 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71277 unsigned long time_limit = jiffies + 2;
71278 diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71279 --- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71280 +++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71281 @@ -35,11 +35,11 @@ struct flow_cache_entry {
71282 atomic_t *object_ref;
71283 };
71284
71285 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71286 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71287
71288 static u32 flow_hash_shift;
71289 #define flow_hash_size (1 << flow_hash_shift)
71290 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71291 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71292
71293 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71294
71295 @@ -52,7 +52,7 @@ struct flow_percpu_info {
71296 u32 hash_rnd;
71297 int count;
71298 };
71299 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71300 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71301
71302 #define flow_hash_rnd_recalc(cpu) \
71303 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71304 @@ -69,7 +69,7 @@ struct flow_flush_info {
71305 atomic_t cpuleft;
71306 struct completion completion;
71307 };
71308 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71309 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71310
71311 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71312
71313 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71314 if (fle->family == family &&
71315 fle->dir == dir &&
71316 flow_key_compare(key, &fle->key) == 0) {
71317 - if (fle->genid == atomic_read(&flow_cache_genid)) {
71318 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71319 void *ret = fle->object;
71320
71321 if (ret)
71322 @@ -228,7 +228,7 @@ nocache:
71323 err = resolver(net, key, family, dir, &obj, &obj_ref);
71324
71325 if (fle && !err) {
71326 - fle->genid = atomic_read(&flow_cache_genid);
71327 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71328
71329 if (fle->object)
71330 atomic_dec(fle->object_ref);
71331 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71332
71333 fle = flow_table(cpu)[i];
71334 for (; fle; fle = fle->next) {
71335 - unsigned genid = atomic_read(&flow_cache_genid);
71336 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71337
71338 if (!fle->object || fle->genid == genid)
71339 continue;
71340 diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71341 --- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71342 +++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71343 @@ -57,7 +57,7 @@ struct rtnl_link
71344 {
71345 rtnl_doit_func doit;
71346 rtnl_dumpit_func dumpit;
71347 -};
71348 +} __no_const;
71349
71350 static DEFINE_MUTEX(rtnl_mutex);
71351
71352 diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71353 --- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71354 +++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71355 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71356 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71357
71358 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71359 - __be16 dport)
71360 + __be16 dport)
71361 {
71362 u32 secret[MD5_MESSAGE_BYTES / 4];
71363 u32 hash[MD5_DIGEST_WORDS];
71364 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71365 secret[i] = net_secret[i];
71366
71367 md5_transform(hash, secret);
71368 -
71369 return hash[0];
71370 }
71371 #endif
71372 diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71373 --- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71374 +++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71375 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71376 struct sk_buff *frag_iter;
71377 struct sock *sk = skb->sk;
71378
71379 + pax_track_stack();
71380 +
71381 /*
71382 * __skb_splice_bits() only fails if the output has no room left,
71383 * so no point in going over the frag_list for the error case.
71384 diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71385 --- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71386 +++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71387 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71388 break;
71389
71390 case SO_PEERCRED:
71391 + {
71392 + struct ucred peercred;
71393 if (len > sizeof(sk->sk_peercred))
71394 len = sizeof(sk->sk_peercred);
71395 - if (copy_to_user(optval, &sk->sk_peercred, len))
71396 + peercred = sk->sk_peercred;
71397 + if (copy_to_user(optval, &peercred, len))
71398 return -EFAULT;
71399 goto lenout;
71400 + }
71401
71402 case SO_PEERNAME:
71403 {
71404 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71405 */
71406 smp_wmb();
71407 atomic_set(&sk->sk_refcnt, 1);
71408 - atomic_set(&sk->sk_drops, 0);
71409 + atomic_set_unchecked(&sk->sk_drops, 0);
71410 }
71411 EXPORT_SYMBOL(sock_init_data);
71412
71413 diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71414 --- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71415 +++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71416 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71417
71418 if (len > *lenp) len = *lenp;
71419
71420 - if (copy_to_user(buffer, addr, len))
71421 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
71422 return -EFAULT;
71423
71424 *lenp = len;
71425 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71426
71427 if (len > *lenp) len = *lenp;
71428
71429 - if (copy_to_user(buffer, devname, len))
71430 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
71431 return -EFAULT;
71432
71433 *lenp = len;
71434 diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71435 --- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71436 +++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71437 @@ -4,7 +4,7 @@
71438
71439 config ECONET
71440 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71441 - depends on EXPERIMENTAL && INET
71442 + depends on EXPERIMENTAL && INET && BROKEN
71443 ---help---
71444 Econet is a fairly old and slow networking protocol mainly used by
71445 Acorn computers to access file and print servers. It uses native
71446 diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71447 --- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71448 +++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71449 @@ -318,7 +318,7 @@ out:
71450 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71451 {
71452 if (sock_queue_rcv_skb(sk, skb) < 0) {
71453 - atomic_inc(&sk->sk_drops);
71454 + atomic_inc_unchecked(&sk->sk_drops);
71455 kfree_skb(skb);
71456 return NET_RX_DROP;
71457 }
71458 diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71459 --- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71460 +++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71461 @@ -206,7 +206,7 @@ out:
71462 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71463 {
71464 if (sock_queue_rcv_skb(sk, skb) < 0) {
71465 - atomic_inc(&sk->sk_drops);
71466 + atomic_inc_unchecked(&sk->sk_drops);
71467 kfree_skb(skb);
71468 return NET_RX_DROP;
71469 }
71470 diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71471 --- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71472 +++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71473 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71474 r->idiag_retrans = 0;
71475
71476 r->id.idiag_if = sk->sk_bound_dev_if;
71477 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71478 + r->id.idiag_cookie[0] = 0;
71479 + r->id.idiag_cookie[1] = 0;
71480 +#else
71481 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71482 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71483 +#endif
71484
71485 r->id.idiag_sport = inet->sport;
71486 r->id.idiag_dport = inet->dport;
71487 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71488 r->idiag_family = tw->tw_family;
71489 r->idiag_retrans = 0;
71490 r->id.idiag_if = tw->tw_bound_dev_if;
71491 +
71492 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71493 + r->id.idiag_cookie[0] = 0;
71494 + r->id.idiag_cookie[1] = 0;
71495 +#else
71496 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71497 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71498 +#endif
71499 +
71500 r->id.idiag_sport = tw->tw_sport;
71501 r->id.idiag_dport = tw->tw_dport;
71502 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71503 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71504 if (sk == NULL)
71505 goto unlock;
71506
71507 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71508 err = -ESTALE;
71509 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71510 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71511 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71512 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71513 goto out;
71514 +#endif
71515
71516 err = -ENOMEM;
71517 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71518 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71519 r->idiag_retrans = req->retrans;
71520
71521 r->id.idiag_if = sk->sk_bound_dev_if;
71522 +
71523 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71524 + r->id.idiag_cookie[0] = 0;
71525 + r->id.idiag_cookie[1] = 0;
71526 +#else
71527 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71528 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71529 +#endif
71530
71531 tmo = req->expires - jiffies;
71532 if (tmo < 0)
71533 diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71534 --- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71535 +++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71536 @@ -18,12 +18,15 @@
71537 #include <linux/sched.h>
71538 #include <linux/slab.h>
71539 #include <linux/wait.h>
71540 +#include <linux/security.h>
71541
71542 #include <net/inet_connection_sock.h>
71543 #include <net/inet_hashtables.h>
71544 #include <net/secure_seq.h>
71545 #include <net/ip.h>
71546
71547 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71548 +
71549 /*
71550 * Allocate and initialize a new local port bind bucket.
71551 * The bindhash mutex for snum's hash chain must be held here.
71552 @@ -491,6 +494,8 @@ ok:
71553 }
71554 spin_unlock(&head->lock);
71555
71556 + gr_update_task_in_ip_table(current, inet_sk(sk));
71557 +
71558 if (tw) {
71559 inet_twsk_deschedule(tw, death_row);
71560 inet_twsk_put(tw);
71561 diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71562 --- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71563 +++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71564 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71565 struct inet_peer *p, *n;
71566 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71567
71568 + pax_track_stack();
71569 +
71570 /* Look up for the address quickly. */
71571 read_lock_bh(&peer_pool_lock);
71572 p = lookup(daddr, NULL);
71573 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71574 return NULL;
71575 n->v4daddr = daddr;
71576 atomic_set(&n->refcnt, 1);
71577 - atomic_set(&n->rid, 0);
71578 + atomic_set_unchecked(&n->rid, 0);
71579 n->ip_id_count = secure_ip_id(daddr);
71580 n->tcp_ts_stamp = 0;
71581
71582 diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71583 --- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71584 +++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71585 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71586 return 0;
71587
71588 start = qp->rid;
71589 - end = atomic_inc_return(&peer->rid);
71590 + end = atomic_inc_return_unchecked(&peer->rid);
71591 qp->rid = end;
71592
71593 rc = qp->q.fragments && (end - start) > max;
71594 diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71595 --- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71596 +++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71597 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71598 int val;
71599 int len;
71600
71601 + pax_track_stack();
71602 +
71603 if (level != SOL_IP)
71604 return -EOPNOTSUPP;
71605
71606 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71607 --- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71608 +++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71609 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71610 private = &tmp;
71611 }
71612 #endif
71613 + memset(&info, 0, sizeof(info));
71614 info.valid_hooks = t->valid_hooks;
71615 memcpy(info.hook_entry, private->hook_entry,
71616 sizeof(info.hook_entry));
71617 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71618 --- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71619 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71620 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71621 private = &tmp;
71622 }
71623 #endif
71624 + memset(&info, 0, sizeof(info));
71625 info.valid_hooks = t->valid_hooks;
71626 memcpy(info.hook_entry, private->hook_entry,
71627 sizeof(info.hook_entry));
71628 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71629 --- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71630 +++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71631 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71632
71633 *len = 0;
71634
71635 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71636 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71637 if (*octets == NULL) {
71638 if (net_ratelimit())
71639 printk("OOM in bsalg (%d)\n", __LINE__);
71640 diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71641 --- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71642 +++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71643 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71644 /* Charge it to the socket. */
71645
71646 if (sock_queue_rcv_skb(sk, skb) < 0) {
71647 - atomic_inc(&sk->sk_drops);
71648 + atomic_inc_unchecked(&sk->sk_drops);
71649 kfree_skb(skb);
71650 return NET_RX_DROP;
71651 }
71652 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71653 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71654 {
71655 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71656 - atomic_inc(&sk->sk_drops);
71657 + atomic_inc_unchecked(&sk->sk_drops);
71658 kfree_skb(skb);
71659 return NET_RX_DROP;
71660 }
71661 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71662
71663 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71664 {
71665 + struct icmp_filter filter;
71666 +
71667 + if (optlen < 0)
71668 + return -EINVAL;
71669 if (optlen > sizeof(struct icmp_filter))
71670 optlen = sizeof(struct icmp_filter);
71671 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71672 + if (copy_from_user(&filter, optval, optlen))
71673 return -EFAULT;
71674 + raw_sk(sk)->filter = filter;
71675 +
71676 return 0;
71677 }
71678
71679 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71680 {
71681 int len, ret = -EFAULT;
71682 + struct icmp_filter filter;
71683
71684 if (get_user(len, optlen))
71685 goto out;
71686 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71687 if (len > sizeof(struct icmp_filter))
71688 len = sizeof(struct icmp_filter);
71689 ret = -EFAULT;
71690 - if (put_user(len, optlen) ||
71691 - copy_to_user(optval, &raw_sk(sk)->filter, len))
71692 + filter = raw_sk(sk)->filter;
71693 + if (put_user(len, optlen) || len > sizeof filter ||
71694 + copy_to_user(optval, &filter, len))
71695 goto out;
71696 ret = 0;
71697 out: return ret;
71698 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71699 sk_wmem_alloc_get(sp),
71700 sk_rmem_alloc_get(sp),
71701 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71702 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71703 + atomic_read(&sp->sk_refcnt),
71704 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71705 + NULL,
71706 +#else
71707 + sp,
71708 +#endif
71709 + atomic_read_unchecked(&sp->sk_drops));
71710 }
71711
71712 static int raw_seq_show(struct seq_file *seq, void *v)
71713 diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71714 --- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71715 +++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71716 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71717
71718 static inline int rt_genid(struct net *net)
71719 {
71720 - return atomic_read(&net->ipv4.rt_genid);
71721 + return atomic_read_unchecked(&net->ipv4.rt_genid);
71722 }
71723
71724 #ifdef CONFIG_PROC_FS
71725 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71726 unsigned char shuffle;
71727
71728 get_random_bytes(&shuffle, sizeof(shuffle));
71729 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71730 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71731 }
71732
71733 /*
71734 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71735
71736 static __net_init int rt_secret_timer_init(struct net *net)
71737 {
71738 - atomic_set(&net->ipv4.rt_genid,
71739 + atomic_set_unchecked(&net->ipv4.rt_genid,
71740 (int) ((num_physpages ^ (num_physpages>>8)) ^
71741 (jiffies ^ (jiffies >> 7))));
71742
71743 diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71744 --- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71745 +++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71746 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71747 int val;
71748 int err = 0;
71749
71750 + pax_track_stack();
71751 +
71752 /* This is a string value all the others are int's */
71753 if (optname == TCP_CONGESTION) {
71754 char name[TCP_CA_NAME_MAX];
71755 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71756 struct tcp_sock *tp = tcp_sk(sk);
71757 int val, len;
71758
71759 + pax_track_stack();
71760 +
71761 if (get_user(len, optlen))
71762 return -EFAULT;
71763
71764 diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71765 --- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71766 +++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
71767 @@ -85,6 +85,9 @@
71768 int sysctl_tcp_tw_reuse __read_mostly;
71769 int sysctl_tcp_low_latency __read_mostly;
71770
71771 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71772 +extern int grsec_enable_blackhole;
71773 +#endif
71774
71775 #ifdef CONFIG_TCP_MD5SIG
71776 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71777 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71778 return 0;
71779
71780 reset:
71781 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71782 + if (!grsec_enable_blackhole)
71783 +#endif
71784 tcp_v4_send_reset(rsk, skb);
71785 discard:
71786 kfree_skb(skb);
71787 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71788 TCP_SKB_CB(skb)->sacked = 0;
71789
71790 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71791 - if (!sk)
71792 + if (!sk) {
71793 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71794 + ret = 1;
71795 +#endif
71796 goto no_tcp_socket;
71797 + }
71798
71799 process:
71800 - if (sk->sk_state == TCP_TIME_WAIT)
71801 + if (sk->sk_state == TCP_TIME_WAIT) {
71802 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71803 + ret = 2;
71804 +#endif
71805 goto do_time_wait;
71806 + }
71807
71808 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71809 goto discard_and_relse;
71810 @@ -1651,6 +1665,10 @@ no_tcp_socket:
71811 bad_packet:
71812 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71813 } else {
71814 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71815 + if (!grsec_enable_blackhole || (ret == 1 &&
71816 + (skb->dev->flags & IFF_LOOPBACK)))
71817 +#endif
71818 tcp_v4_send_reset(NULL, skb);
71819 }
71820
71821 @@ -2195,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
71822 int rc = 0;
71823 struct proc_dir_entry *p;
71824
71825 - afinfo->seq_fops.open = tcp_seq_open;
71826 - afinfo->seq_fops.read = seq_read;
71827 - afinfo->seq_fops.llseek = seq_lseek;
71828 - afinfo->seq_fops.release = seq_release_net;
71829 -
71830 - afinfo->seq_ops.start = tcp_seq_start;
71831 - afinfo->seq_ops.next = tcp_seq_next;
71832 - afinfo->seq_ops.stop = tcp_seq_stop;
71833 + *(void **)&afinfo->seq_fops.open = tcp_seq_open;
71834 + *(void **)&afinfo->seq_fops.read = seq_read;
71835 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
71836 + *(void **)&afinfo->seq_fops.release = seq_release_net;
71837 +
71838 + *(void **)&afinfo->seq_ops.start = tcp_seq_start;
71839 + *(void **)&afinfo->seq_ops.next = tcp_seq_next;
71840 + *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
71841
71842 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
71843 &afinfo->seq_fops, afinfo);
71844 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71845 0, /* non standard timer */
71846 0, /* open_requests have no inode */
71847 atomic_read(&sk->sk_refcnt),
71848 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71849 + NULL,
71850 +#else
71851 req,
71852 +#endif
71853 len);
71854 }
71855
71856 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71857 sock_i_uid(sk),
71858 icsk->icsk_probes_out,
71859 sock_i_ino(sk),
71860 - atomic_read(&sk->sk_refcnt), sk,
71861 + atomic_read(&sk->sk_refcnt),
71862 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71863 + NULL,
71864 +#else
71865 + sk,
71866 +#endif
71867 jiffies_to_clock_t(icsk->icsk_rto),
71868 jiffies_to_clock_t(icsk->icsk_ack.ato),
71869 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71870 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71871 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71872 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71873 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71874 - atomic_read(&tw->tw_refcnt), tw, len);
71875 + atomic_read(&tw->tw_refcnt),
71876 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71877 + NULL,
71878 +#else
71879 + tw,
71880 +#endif
71881 + len);
71882 }
71883
71884 #define TMPSZ 150
71885 diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71886 --- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71887 +++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71888 @@ -26,6 +26,10 @@
71889 #include <net/inet_common.h>
71890 #include <net/xfrm.h>
71891
71892 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71893 +extern int grsec_enable_blackhole;
71894 +#endif
71895 +
71896 #ifdef CONFIG_SYSCTL
71897 #define SYNC_INIT 0 /* let the user enable it */
71898 #else
71899 @@ -672,6 +676,10 @@ listen_overflow:
71900
71901 embryonic_reset:
71902 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71903 +
71904 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71905 + if (!grsec_enable_blackhole)
71906 +#endif
71907 if (!(flg & TCP_FLAG_RST))
71908 req->rsk_ops->send_reset(sk, skb);
71909
71910 diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71911 --- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71912 +++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71913 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71914 __u8 *md5_hash_location;
71915 int mss;
71916
71917 + pax_track_stack();
71918 +
71919 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71920 if (skb == NULL)
71921 return NULL;
71922 diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71923 --- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71924 +++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71925 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71926 if (cnt + width >= len)
71927 break;
71928
71929 - if (copy_to_user(buf + cnt, tbuf, width))
71930 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71931 return -EFAULT;
71932 cnt += width;
71933 }
71934 diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71935 --- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71936 +++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71937 @@ -21,6 +21,10 @@
71938 #include <linux/module.h>
71939 #include <net/tcp.h>
71940
71941 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71942 +extern int grsec_lastack_retries;
71943 +#endif
71944 +
71945 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71946 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71947 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71948 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71949 }
71950 }
71951
71952 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71953 + if ((sk->sk_state == TCP_LAST_ACK) &&
71954 + (grsec_lastack_retries > 0) &&
71955 + (grsec_lastack_retries < retry_until))
71956 + retry_until = grsec_lastack_retries;
71957 +#endif
71958 +
71959 if (retransmits_timed_out(sk, retry_until)) {
71960 /* Has it gone just too far? */
71961 tcp_write_err(sk);
71962 diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71963 --- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71964 +++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
71965 @@ -86,6 +86,7 @@
71966 #include <linux/types.h>
71967 #include <linux/fcntl.h>
71968 #include <linux/module.h>
71969 +#include <linux/security.h>
71970 #include <linux/socket.h>
71971 #include <linux/sockios.h>
71972 #include <linux/igmp.h>
71973 @@ -106,6 +107,10 @@
71974 #include <net/xfrm.h>
71975 #include "udp_impl.h"
71976
71977 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71978 +extern int grsec_enable_blackhole;
71979 +#endif
71980 +
71981 struct udp_table udp_table;
71982 EXPORT_SYMBOL(udp_table);
71983
71984 @@ -371,6 +376,9 @@ found:
71985 return s;
71986 }
71987
71988 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71989 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71990 +
71991 /*
71992 * This routine is called by the ICMP module when it gets some
71993 * sort of error condition. If err < 0 then the socket should
71994 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71995 dport = usin->sin_port;
71996 if (dport == 0)
71997 return -EINVAL;
71998 +
71999 + err = gr_search_udp_sendmsg(sk, usin);
72000 + if (err)
72001 + return err;
72002 } else {
72003 if (sk->sk_state != TCP_ESTABLISHED)
72004 return -EDESTADDRREQ;
72005 +
72006 + err = gr_search_udp_sendmsg(sk, NULL);
72007 + if (err)
72008 + return err;
72009 +
72010 daddr = inet->daddr;
72011 dport = inet->dport;
72012 /* Open fast path for connected socket.
72013 @@ -945,6 +962,10 @@ try_again:
72014 if (!skb)
72015 goto out;
72016
72017 + err = gr_search_udp_recvmsg(sk, skb);
72018 + if (err)
72019 + goto out_free;
72020 +
72021 ulen = skb->len - sizeof(struct udphdr);
72022 copied = len;
72023 if (copied > ulen)
72024 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
72025 if (rc == -ENOMEM) {
72026 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72027 is_udplite);
72028 - atomic_inc(&sk->sk_drops);
72029 + atomic_inc_unchecked(&sk->sk_drops);
72030 }
72031 goto drop;
72032 }
72033 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72034 goto csum_error;
72035
72036 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72037 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72038 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72039 +#endif
72040 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72041
72042 /*
72043 @@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
72044 struct proc_dir_entry *p;
72045 int rc = 0;
72046
72047 - afinfo->seq_fops.open = udp_seq_open;
72048 - afinfo->seq_fops.read = seq_read;
72049 - afinfo->seq_fops.llseek = seq_lseek;
72050 - afinfo->seq_fops.release = seq_release_net;
72051 -
72052 - afinfo->seq_ops.start = udp_seq_start;
72053 - afinfo->seq_ops.next = udp_seq_next;
72054 - afinfo->seq_ops.stop = udp_seq_stop;
72055 + *(void **)&afinfo->seq_fops.open = udp_seq_open;
72056 + *(void **)&afinfo->seq_fops.read = seq_read;
72057 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72058 + *(void **)&afinfo->seq_fops.release = seq_release_net;
72059 +
72060 + *(void **)&afinfo->seq_ops.start = udp_seq_start;
72061 + *(void **)&afinfo->seq_ops.next = udp_seq_next;
72062 + *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
72063
72064 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72065 &afinfo->seq_fops, afinfo);
72066 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
72067 sk_wmem_alloc_get(sp),
72068 sk_rmem_alloc_get(sp),
72069 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72070 - atomic_read(&sp->sk_refcnt), sp,
72071 - atomic_read(&sp->sk_drops), len);
72072 + atomic_read(&sp->sk_refcnt),
72073 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72074 + NULL,
72075 +#else
72076 + sp,
72077 +#endif
72078 + atomic_read_unchecked(&sp->sk_drops), len);
72079 }
72080
72081 int udp4_seq_show(struct seq_file *seq, void *v)
72082 diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
72083 --- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72084 +++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72085 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72086 #ifdef CONFIG_XFRM
72087 {
72088 struct rt6_info *rt = (struct rt6_info *)dst;
72089 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72090 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72091 }
72092 #endif
72093 }
72094 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72095 #ifdef CONFIG_XFRM
72096 if (dst) {
72097 struct rt6_info *rt = (struct rt6_info *)dst;
72098 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72099 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72100 sk->sk_dst_cache = NULL;
72101 dst_release(dst);
72102 dst = NULL;
72103 diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
72104 --- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
72105 +++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72106 @@ -119,7 +119,7 @@ out:
72107 }
72108 EXPORT_SYMBOL(__inet6_lookup_established);
72109
72110 -static int inline compute_score(struct sock *sk, struct net *net,
72111 +static inline int compute_score(struct sock *sk, struct net *net,
72112 const unsigned short hnum,
72113 const struct in6_addr *daddr,
72114 const int dif)
72115 diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
72116 --- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72117 +++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72118 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72119 int val, valbool;
72120 int retv = -ENOPROTOOPT;
72121
72122 + pax_track_stack();
72123 +
72124 if (optval == NULL)
72125 val=0;
72126 else {
72127 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72128 int len;
72129 int val;
72130
72131 + pax_track_stack();
72132 +
72133 if (ip6_mroute_opt(optname))
72134 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72135
72136 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
72137 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72138 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72139 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72140 private = &tmp;
72141 }
72142 #endif
72143 + memset(&info, 0, sizeof(info));
72144 info.valid_hooks = t->valid_hooks;
72145 memcpy(info.hook_entry, private->hook_entry,
72146 sizeof(info.hook_entry));
72147 diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
72148 --- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72149 +++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
72150 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72151 {
72152 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72153 skb_checksum_complete(skb)) {
72154 - atomic_inc(&sk->sk_drops);
72155 + atomic_inc_unchecked(&sk->sk_drops);
72156 kfree_skb(skb);
72157 return NET_RX_DROP;
72158 }
72159
72160 /* Charge it to the socket. */
72161 if (sock_queue_rcv_skb(sk,skb)<0) {
72162 - atomic_inc(&sk->sk_drops);
72163 + atomic_inc_unchecked(&sk->sk_drops);
72164 kfree_skb(skb);
72165 return NET_RX_DROP;
72166 }
72167 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72168 struct raw6_sock *rp = raw6_sk(sk);
72169
72170 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72171 - atomic_inc(&sk->sk_drops);
72172 + atomic_inc_unchecked(&sk->sk_drops);
72173 kfree_skb(skb);
72174 return NET_RX_DROP;
72175 }
72176 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72177
72178 if (inet->hdrincl) {
72179 if (skb_checksum_complete(skb)) {
72180 - atomic_inc(&sk->sk_drops);
72181 + atomic_inc_unchecked(&sk->sk_drops);
72182 kfree_skb(skb);
72183 return NET_RX_DROP;
72184 }
72185 @@ -518,7 +518,7 @@ csum_copy_err:
72186 as some normal condition.
72187 */
72188 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72189 - atomic_inc(&sk->sk_drops);
72190 + atomic_inc_unchecked(&sk->sk_drops);
72191 goto out;
72192 }
72193
72194 @@ -600,7 +600,7 @@ out:
72195 return err;
72196 }
72197
72198 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72199 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72200 struct flowi *fl, struct rt6_info *rt,
72201 unsigned int flags)
72202 {
72203 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72204 u16 proto;
72205 int err;
72206
72207 + pax_track_stack();
72208 +
72209 /* Rough check on arithmetic overflow,
72210 better check is made in ip6_append_data().
72211 */
72212 @@ -916,12 +918,17 @@ do_confirm:
72213 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72214 char __user *optval, int optlen)
72215 {
72216 + struct icmp6_filter filter;
72217 +
72218 switch (optname) {
72219 case ICMPV6_FILTER:
72220 + if (optlen < 0)
72221 + return -EINVAL;
72222 if (optlen > sizeof(struct icmp6_filter))
72223 optlen = sizeof(struct icmp6_filter);
72224 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72225 + if (copy_from_user(&filter, optval, optlen))
72226 return -EFAULT;
72227 + raw6_sk(sk)->filter = filter;
72228 return 0;
72229 default:
72230 return -ENOPROTOOPT;
72231 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72232 char __user *optval, int __user *optlen)
72233 {
72234 int len;
72235 + struct icmp6_filter filter;
72236
72237 switch (optname) {
72238 case ICMPV6_FILTER:
72239 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72240 len = sizeof(struct icmp6_filter);
72241 if (put_user(len, optlen))
72242 return -EFAULT;
72243 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72244 + filter = raw6_sk(sk)->filter;
72245 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
72246 return -EFAULT;
72247 return 0;
72248 default:
72249 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72250 0, 0L, 0,
72251 sock_i_uid(sp), 0,
72252 sock_i_ino(sp),
72253 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72254 + atomic_read(&sp->sk_refcnt),
72255 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72256 + NULL,
72257 +#else
72258 + sp,
72259 +#endif
72260 + atomic_read_unchecked(&sp->sk_drops));
72261 }
72262
72263 static int raw6_seq_show(struct seq_file *seq, void *v)
72264 diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72265 --- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72266 +++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72267 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72268 }
72269 #endif
72270
72271 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72272 +extern int grsec_enable_blackhole;
72273 +#endif
72274 +
72275 static void tcp_v6_hash(struct sock *sk)
72276 {
72277 if (sk->sk_state != TCP_CLOSE) {
72278 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72279 return 0;
72280
72281 reset:
72282 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72283 + if (!grsec_enable_blackhole)
72284 +#endif
72285 tcp_v6_send_reset(sk, skb);
72286 discard:
72287 if (opt_skb)
72288 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72289 TCP_SKB_CB(skb)->sacked = 0;
72290
72291 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72292 - if (!sk)
72293 + if (!sk) {
72294 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72295 + ret = 1;
72296 +#endif
72297 goto no_tcp_socket;
72298 + }
72299
72300 process:
72301 - if (sk->sk_state == TCP_TIME_WAIT)
72302 + if (sk->sk_state == TCP_TIME_WAIT) {
72303 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72304 + ret = 2;
72305 +#endif
72306 goto do_time_wait;
72307 + }
72308
72309 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72310 goto discard_and_relse;
72311 @@ -1701,6 +1716,10 @@ no_tcp_socket:
72312 bad_packet:
72313 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72314 } else {
72315 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72316 + if (!grsec_enable_blackhole || (ret == 1 &&
72317 + (skb->dev->flags & IFF_LOOPBACK)))
72318 +#endif
72319 tcp_v6_send_reset(NULL, skb);
72320 }
72321
72322 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72323 uid,
72324 0, /* non standard timer */
72325 0, /* open_requests have no inode */
72326 - 0, req);
72327 + 0,
72328 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72329 + NULL
72330 +#else
72331 + req
72332 +#endif
72333 + );
72334 }
72335
72336 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72337 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72338 sock_i_uid(sp),
72339 icsk->icsk_probes_out,
72340 sock_i_ino(sp),
72341 - atomic_read(&sp->sk_refcnt), sp,
72342 + atomic_read(&sp->sk_refcnt),
72343 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72344 + NULL,
72345 +#else
72346 + sp,
72347 +#endif
72348 jiffies_to_clock_t(icsk->icsk_rto),
72349 jiffies_to_clock_t(icsk->icsk_ack.ato),
72350 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72351 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72352 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72353 tw->tw_substate, 0, 0,
72354 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72355 - atomic_read(&tw->tw_refcnt), tw);
72356 + atomic_read(&tw->tw_refcnt),
72357 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72358 + NULL
72359 +#else
72360 + tw
72361 +#endif
72362 + );
72363 }
72364
72365 static int tcp6_seq_show(struct seq_file *seq, void *v)
72366 diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72367 --- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72368 +++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72369 @@ -49,6 +49,10 @@
72370 #include <linux/seq_file.h>
72371 #include "udp_impl.h"
72372
72373 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72374 +extern int grsec_enable_blackhole;
72375 +#endif
72376 +
72377 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72378 {
72379 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72380 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72381 if (rc == -ENOMEM) {
72382 UDP6_INC_STATS_BH(sock_net(sk),
72383 UDP_MIB_RCVBUFERRORS, is_udplite);
72384 - atomic_inc(&sk->sk_drops);
72385 + atomic_inc_unchecked(&sk->sk_drops);
72386 }
72387 goto drop;
72388 }
72389 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72390 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72391 proto == IPPROTO_UDPLITE);
72392
72393 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72394 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72395 +#endif
72396 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72397
72398 kfree_skb(skb);
72399 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72400 0, 0L, 0,
72401 sock_i_uid(sp), 0,
72402 sock_i_ino(sp),
72403 - atomic_read(&sp->sk_refcnt), sp,
72404 - atomic_read(&sp->sk_drops));
72405 + atomic_read(&sp->sk_refcnt),
72406 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72407 + NULL,
72408 +#else
72409 + sp,
72410 +#endif
72411 + atomic_read_unchecked(&sp->sk_drops));
72412 }
72413
72414 int udp6_seq_show(struct seq_file *seq, void *v)
72415 diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72416 --- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72417 +++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72418 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72419 add_wait_queue(&self->open_wait, &wait);
72420
72421 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72422 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72423 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72424
72425 /* As far as I can see, we protect open_count - Jean II */
72426 spin_lock_irqsave(&self->spinlock, flags);
72427 if (!tty_hung_up_p(filp)) {
72428 extra_count = 1;
72429 - self->open_count--;
72430 + local_dec(&self->open_count);
72431 }
72432 spin_unlock_irqrestore(&self->spinlock, flags);
72433 - self->blocked_open++;
72434 + local_inc(&self->blocked_open);
72435
72436 while (1) {
72437 if (tty->termios->c_cflag & CBAUD) {
72438 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72439 }
72440
72441 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72442 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72443 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72444
72445 schedule();
72446 }
72447 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72448 if (extra_count) {
72449 /* ++ is not atomic, so this should be protected - Jean II */
72450 spin_lock_irqsave(&self->spinlock, flags);
72451 - self->open_count++;
72452 + local_inc(&self->open_count);
72453 spin_unlock_irqrestore(&self->spinlock, flags);
72454 }
72455 - self->blocked_open--;
72456 + local_dec(&self->blocked_open);
72457
72458 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72459 - __FILE__,__LINE__, tty->driver->name, self->open_count);
72460 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72461
72462 if (!retval)
72463 self->flags |= ASYNC_NORMAL_ACTIVE;
72464 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72465 }
72466 /* ++ is not atomic, so this should be protected - Jean II */
72467 spin_lock_irqsave(&self->spinlock, flags);
72468 - self->open_count++;
72469 + local_inc(&self->open_count);
72470
72471 tty->driver_data = self;
72472 self->tty = tty;
72473 spin_unlock_irqrestore(&self->spinlock, flags);
72474
72475 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72476 - self->line, self->open_count);
72477 + self->line, local_read(&self->open_count));
72478
72479 /* Not really used by us, but lets do it anyway */
72480 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72481 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72482 return;
72483 }
72484
72485 - if ((tty->count == 1) && (self->open_count != 1)) {
72486 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72487 /*
72488 * Uh, oh. tty->count is 1, which means that the tty
72489 * structure will be freed. state->count should always
72490 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72491 */
72492 IRDA_DEBUG(0, "%s(), bad serial port count; "
72493 "tty->count is 1, state->count is %d\n", __func__ ,
72494 - self->open_count);
72495 - self->open_count = 1;
72496 + local_read(&self->open_count));
72497 + local_set(&self->open_count, 1);
72498 }
72499
72500 - if (--self->open_count < 0) {
72501 + if (local_dec_return(&self->open_count) < 0) {
72502 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72503 - __func__, self->line, self->open_count);
72504 - self->open_count = 0;
72505 + __func__, self->line, local_read(&self->open_count));
72506 + local_set(&self->open_count, 0);
72507 }
72508 - if (self->open_count) {
72509 + if (local_read(&self->open_count)) {
72510 spin_unlock_irqrestore(&self->spinlock, flags);
72511
72512 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72513 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72514 tty->closing = 0;
72515 self->tty = NULL;
72516
72517 - if (self->blocked_open) {
72518 + if (local_read(&self->blocked_open)) {
72519 if (self->close_delay)
72520 schedule_timeout_interruptible(self->close_delay);
72521 wake_up_interruptible(&self->open_wait);
72522 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72523 spin_lock_irqsave(&self->spinlock, flags);
72524 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72525 self->tty = NULL;
72526 - self->open_count = 0;
72527 + local_set(&self->open_count, 0);
72528 spin_unlock_irqrestore(&self->spinlock, flags);
72529
72530 wake_up_interruptible(&self->open_wait);
72531 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72532 seq_putc(m, '\n');
72533
72534 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72535 - seq_printf(m, "Open count: %d\n", self->open_count);
72536 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72537 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72538 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72539
72540 diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72541 --- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72542 +++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72543 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72544
72545 write_lock_bh(&iucv_sk_list.lock);
72546
72547 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72548 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72549 while (__iucv_get_sock_by_name(name)) {
72550 sprintf(name, "%08x",
72551 - atomic_inc_return(&iucv_sk_list.autobind_name));
72552 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72553 }
72554
72555 write_unlock_bh(&iucv_sk_list.lock);
72556 diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72557 --- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72558 +++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72559 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72560 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72561 struct xfrm_kmaddress k;
72562
72563 + pax_track_stack();
72564 +
72565 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72566 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72567 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72568 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72569 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72570 else
72571 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72572 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72573 + NULL,
72574 +#else
72575 s,
72576 +#endif
72577 atomic_read(&s->sk_refcnt),
72578 sk_rmem_alloc_get(s),
72579 sk_wmem_alloc_get(s),
72580 diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72581 --- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72582 +++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72583 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72584 goto out;
72585
72586 lapb->dev = dev;
72587 - lapb->callbacks = *callbacks;
72588 + lapb->callbacks = callbacks;
72589
72590 __lapb_insert_cb(lapb);
72591
72592 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72593
72594 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72595 {
72596 - if (lapb->callbacks.connect_confirmation)
72597 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
72598 + if (lapb->callbacks->connect_confirmation)
72599 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
72600 }
72601
72602 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72603 {
72604 - if (lapb->callbacks.connect_indication)
72605 - lapb->callbacks.connect_indication(lapb->dev, reason);
72606 + if (lapb->callbacks->connect_indication)
72607 + lapb->callbacks->connect_indication(lapb->dev, reason);
72608 }
72609
72610 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72611 {
72612 - if (lapb->callbacks.disconnect_confirmation)
72613 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72614 + if (lapb->callbacks->disconnect_confirmation)
72615 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72616 }
72617
72618 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72619 {
72620 - if (lapb->callbacks.disconnect_indication)
72621 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
72622 + if (lapb->callbacks->disconnect_indication)
72623 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
72624 }
72625
72626 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72627 {
72628 - if (lapb->callbacks.data_indication)
72629 - return lapb->callbacks.data_indication(lapb->dev, skb);
72630 + if (lapb->callbacks->data_indication)
72631 + return lapb->callbacks->data_indication(lapb->dev, skb);
72632
72633 kfree_skb(skb);
72634 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72635 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72636 {
72637 int used = 0;
72638
72639 - if (lapb->callbacks.data_transmit) {
72640 - lapb->callbacks.data_transmit(lapb->dev, skb);
72641 + if (lapb->callbacks->data_transmit) {
72642 + lapb->callbacks->data_transmit(lapb->dev, skb);
72643 used = 1;
72644 }
72645
72646 diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72647 --- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72648 +++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72649 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72650 return err;
72651 }
72652
72653 -struct cfg80211_ops mac80211_config_ops = {
72654 +const struct cfg80211_ops mac80211_config_ops = {
72655 .add_virtual_intf = ieee80211_add_iface,
72656 .del_virtual_intf = ieee80211_del_iface,
72657 .change_virtual_intf = ieee80211_change_iface,
72658 diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72659 --- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72660 +++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72661 @@ -4,6 +4,6 @@
72662 #ifndef __CFG_H
72663 #define __CFG_H
72664
72665 -extern struct cfg80211_ops mac80211_config_ops;
72666 +extern const struct cfg80211_ops mac80211_config_ops;
72667
72668 #endif /* __CFG_H */
72669 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72670 --- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72671 +++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72672 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72673 size_t count, loff_t *ppos)
72674 {
72675 struct ieee80211_key *key = file->private_data;
72676 - int i, res, bufsize = 2 * key->conf.keylen + 2;
72677 + int i, bufsize = 2 * key->conf.keylen + 2;
72678 char *buf = kmalloc(bufsize, GFP_KERNEL);
72679 char *p = buf;
72680 + ssize_t res;
72681 +
72682 + if (buf == NULL)
72683 + return -ENOMEM;
72684
72685 for (i = 0; i < key->conf.keylen; i++)
72686 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72687 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72688 --- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72689 +++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72690 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72691 int i;
72692 struct sta_info *sta = file->private_data;
72693
72694 + pax_track_stack();
72695 +
72696 spin_lock_bh(&sta->lock);
72697 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72698 sta->ampdu_mlme.dialog_token_allocator + 1);
72699 diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72700 --- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72701 +++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72702 @@ -25,6 +25,7 @@
72703 #include <linux/etherdevice.h>
72704 #include <net/cfg80211.h>
72705 #include <net/mac80211.h>
72706 +#include <asm/local.h>
72707 #include "key.h"
72708 #include "sta_info.h"
72709
72710 @@ -635,7 +636,7 @@ struct ieee80211_local {
72711 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72712 spinlock_t queue_stop_reason_lock;
72713
72714 - int open_count;
72715 + local_t open_count;
72716 int monitors, cooked_mntrs;
72717 /* number of interfaces with corresponding FIF_ flags */
72718 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72719 diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72720 --- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72721 +++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72722 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72723 break;
72724 }
72725
72726 - if (local->open_count == 0) {
72727 + if (local_read(&local->open_count) == 0) {
72728 res = drv_start(local);
72729 if (res)
72730 goto err_del_bss;
72731 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72732 * Validate the MAC address for this device.
72733 */
72734 if (!is_valid_ether_addr(dev->dev_addr)) {
72735 - if (!local->open_count)
72736 + if (!local_read(&local->open_count))
72737 drv_stop(local);
72738 return -EADDRNOTAVAIL;
72739 }
72740 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72741
72742 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72743
72744 - local->open_count++;
72745 + local_inc(&local->open_count);
72746 if (hw_reconf_flags) {
72747 ieee80211_hw_config(local, hw_reconf_flags);
72748 /*
72749 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72750 err_del_interface:
72751 drv_remove_interface(local, &conf);
72752 err_stop:
72753 - if (!local->open_count)
72754 + if (!local_read(&local->open_count))
72755 drv_stop(local);
72756 err_del_bss:
72757 sdata->bss = NULL;
72758 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72759 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72760 }
72761
72762 - local->open_count--;
72763 + local_dec(&local->open_count);
72764
72765 switch (sdata->vif.type) {
72766 case NL80211_IFTYPE_AP_VLAN:
72767 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72768
72769 ieee80211_recalc_ps(local, -1);
72770
72771 - if (local->open_count == 0) {
72772 + if (local_read(&local->open_count) == 0) {
72773 ieee80211_clear_tx_pending(local);
72774 ieee80211_stop_device(local);
72775
72776 diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72777 --- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72778 +++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72779 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72780 local->hw.conf.power_level = power;
72781 }
72782
72783 - if (changed && local->open_count) {
72784 + if (changed && local_read(&local->open_count)) {
72785 ret = drv_config(local, changed);
72786 /*
72787 * Goal:
72788 diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72789 --- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72790 +++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72791 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72792 bool have_higher_than_11mbit = false, newsta = false;
72793 u16 ap_ht_cap_flags;
72794
72795 + pax_track_stack();
72796 +
72797 /*
72798 * AssocResp and ReassocResp have identical structure, so process both
72799 * of them in this function.
72800 diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72801 --- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72802 +++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72803 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72804 }
72805
72806 /* stop hardware - this must stop RX */
72807 - if (local->open_count)
72808 + if (local_read(&local->open_count))
72809 ieee80211_stop_device(local);
72810
72811 local->suspended = true;
72812 diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72813 --- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72814 +++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72815 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72816 struct rate_control_ref *ref, *old;
72817
72818 ASSERT_RTNL();
72819 - if (local->open_count)
72820 + if (local_read(&local->open_count))
72821 return -EBUSY;
72822
72823 ref = rate_control_alloc(name, local);
72824 diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72825 --- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72826 +++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72827 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72828 return cpu_to_le16(dur);
72829 }
72830
72831 -static int inline is_ieee80211_device(struct ieee80211_local *local,
72832 +static inline int is_ieee80211_device(struct ieee80211_local *local,
72833 struct net_device *dev)
72834 {
72835 return local == wdev_priv(dev->ieee80211_ptr);
72836 diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72837 --- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72838 +++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72839 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72840 local->resuming = true;
72841
72842 /* restart hardware */
72843 - if (local->open_count) {
72844 + if (local_read(&local->open_count)) {
72845 /*
72846 * Upon resume hardware can sometimes be goofy due to
72847 * various platform / driver / bus issues, so restarting
72848 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72849 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72850 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72851 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
72852 .open = ip_vs_app_open,
72853 .read = seq_read,
72854 .llseek = seq_lseek,
72855 - .release = seq_release,
72856 + .release = seq_release_net,
72857 };
72858 #endif
72859
72860 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72861 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72862 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72863 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72864 /* if the connection is not template and is created
72865 * by sync, preserve the activity flag.
72866 */
72867 - cp->flags |= atomic_read(&dest->conn_flags) &
72868 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72869 (~IP_VS_CONN_F_INACTIVE);
72870 else
72871 - cp->flags |= atomic_read(&dest->conn_flags);
72872 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72873 cp->dest = dest;
72874
72875 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72876 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72877 atomic_set(&cp->refcnt, 1);
72878
72879 atomic_set(&cp->n_control, 0);
72880 - atomic_set(&cp->in_pkts, 0);
72881 + atomic_set_unchecked(&cp->in_pkts, 0);
72882
72883 atomic_inc(&ip_vs_conn_count);
72884 if (flags & IP_VS_CONN_F_NO_CPORT)
72885 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
72886 .open = ip_vs_conn_open,
72887 .read = seq_read,
72888 .llseek = seq_lseek,
72889 - .release = seq_release,
72890 + .release = seq_release_net,
72891 };
72892
72893 static const char *ip_vs_origin_name(unsigned flags)
72894 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
72895 .open = ip_vs_conn_sync_open,
72896 .read = seq_read,
72897 .llseek = seq_lseek,
72898 - .release = seq_release,
72899 + .release = seq_release_net,
72900 };
72901
72902 #endif
72903 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72904
72905 /* Don't drop the entry if its number of incoming packets is not
72906 located in [0, 8] */
72907 - i = atomic_read(&cp->in_pkts);
72908 + i = atomic_read_unchecked(&cp->in_pkts);
72909 if (i > 8 || i < 0) return 0;
72910
72911 if (!todrop_rate[i]) return 0;
72912 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72913 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72914 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72915 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72916 ret = cp->packet_xmit(skb, cp, pp);
72917 /* do not touch skb anymore */
72918
72919 - atomic_inc(&cp->in_pkts);
72920 + atomic_inc_unchecked(&cp->in_pkts);
72921 ip_vs_conn_put(cp);
72922 return ret;
72923 }
72924 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72925 * Sync connection if it is about to close to
72926 * encorage the standby servers to update the connections timeout
72927 */
72928 - pkts = atomic_add_return(1, &cp->in_pkts);
72929 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72930 if (af == AF_INET &&
72931 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72932 (((cp->protocol != IPPROTO_TCP ||
72933 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72934 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72935 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72936 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72937 ip_vs_rs_hash(dest);
72938 write_unlock_bh(&__ip_vs_rs_lock);
72939 }
72940 - atomic_set(&dest->conn_flags, conn_flags);
72941 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
72942
72943 /* bind the service */
72944 if (!dest->svc) {
72945 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72946 " %-7s %-6d %-10d %-10d\n",
72947 &dest->addr.in6,
72948 ntohs(dest->port),
72949 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72950 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72951 atomic_read(&dest->weight),
72952 atomic_read(&dest->activeconns),
72953 atomic_read(&dest->inactconns));
72954 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72955 "%-7s %-6d %-10d %-10d\n",
72956 ntohl(dest->addr.ip),
72957 ntohs(dest->port),
72958 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72959 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72960 atomic_read(&dest->weight),
72961 atomic_read(&dest->activeconns),
72962 atomic_read(&dest->inactconns));
72963 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72964 .open = ip_vs_info_open,
72965 .read = seq_read,
72966 .llseek = seq_lseek,
72967 - .release = seq_release_private,
72968 + .release = seq_release_net,
72969 };
72970
72971 #endif
72972 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72973 .open = ip_vs_stats_seq_open,
72974 .read = seq_read,
72975 .llseek = seq_lseek,
72976 - .release = single_release,
72977 + .release = single_release_net,
72978 };
72979
72980 #endif
72981 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72982
72983 entry.addr = dest->addr.ip;
72984 entry.port = dest->port;
72985 - entry.conn_flags = atomic_read(&dest->conn_flags);
72986 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72987 entry.weight = atomic_read(&dest->weight);
72988 entry.u_threshold = dest->u_threshold;
72989 entry.l_threshold = dest->l_threshold;
72990 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72991 unsigned char arg[128];
72992 int ret = 0;
72993
72994 + pax_track_stack();
72995 +
72996 if (!capable(CAP_NET_ADMIN))
72997 return -EPERM;
72998
72999 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
73000 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73001
73002 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73003 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73004 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73005 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73006 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73007 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73008 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
73009 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
73010 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
73011 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
73012
73013 if (opt)
73014 memcpy(&cp->in_seq, opt, sizeof(*opt));
73015 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73016 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73017 cp->state = state;
73018 cp->old_state = cp->state;
73019 /*
73020 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
73021 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
73022 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
73023 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73024 else
73025 rc = NF_ACCEPT;
73026 /* do not touch skb anymore */
73027 - atomic_inc(&cp->in_pkts);
73028 + atomic_inc_unchecked(&cp->in_pkts);
73029 goto out;
73030 }
73031
73032 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73033 else
73034 rc = NF_ACCEPT;
73035 /* do not touch skb anymore */
73036 - atomic_inc(&cp->in_pkts);
73037 + atomic_inc_unchecked(&cp->in_pkts);
73038 goto out;
73039 }
73040
73041 diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
73042 --- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
73043 +++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
73044 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
73045
73046 To compile it as a module, choose M here. If unsure, say N.
73047
73048 +config NETFILTER_XT_MATCH_GRADM
73049 + tristate '"gradm" match support'
73050 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73051 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73052 + ---help---
73053 + The gradm match allows matching on whether grsecurity RBAC is enabled.
73054 + It is useful when iptables rules are applied early on bootup to
73055 + prevent connections to the machine (except from a trusted host)
73056 + while the RBAC system is disabled.
73057 +
73058 config NETFILTER_XT_MATCH_HASHLIMIT
73059 tristate '"hashlimit" match support'
73060 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73061 diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
73062 --- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
73063 +++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
73064 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
73065 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73066 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73067 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73068 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73069 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73070 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73071 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73072 diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
73073 --- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
73074 +++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
73075 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
73076 static int
73077 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73078 struct nf_conntrack_tuple *tuple,
73079 - enum ctattr_tuple type, u_int8_t l3num)
73080 + enum ctattr_type type, u_int8_t l3num)
73081 {
73082 struct nlattr *tb[CTA_TUPLE_MAX+1];
73083 int err;
73084 diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
73085 --- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73086 +++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73087 @@ -68,7 +68,7 @@ struct nfulnl_instance {
73088 };
73089
73090 static DEFINE_RWLOCK(instances_lock);
73091 -static atomic_t global_seq;
73092 +static atomic_unchecked_t global_seq;
73093
73094 #define INSTANCE_BUCKETS 16
73095 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73096 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73097 /* global sequence number */
73098 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73099 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73100 - htonl(atomic_inc_return(&global_seq)));
73101 + htonl(atomic_inc_return_unchecked(&global_seq)));
73102
73103 if (data_len) {
73104 struct nlattr *nla;
73105 diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
73106 --- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73107 +++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73108 @@ -0,0 +1,51 @@
73109 +/*
73110 + * gradm match for netfilter
73111 + * Copyright © Zbigniew Krzystolik, 2010
73112 + *
73113 + * This program is free software; you can redistribute it and/or modify
73114 + * it under the terms of the GNU General Public License; either version
73115 + * 2 or 3 as published by the Free Software Foundation.
73116 + */
73117 +#include <linux/module.h>
73118 +#include <linux/moduleparam.h>
73119 +#include <linux/skbuff.h>
73120 +#include <linux/netfilter/x_tables.h>
73121 +#include <linux/grsecurity.h>
73122 +#include <linux/netfilter/xt_gradm.h>
73123 +
73124 +static bool
73125 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
73126 +{
73127 + const struct xt_gradm_mtinfo *info = par->matchinfo;
73128 + bool retval = false;
73129 + if (gr_acl_is_enabled())
73130 + retval = true;
73131 + return retval ^ info->invflags;
73132 +}
73133 +
73134 +static struct xt_match gradm_mt_reg __read_mostly = {
73135 + .name = "gradm",
73136 + .revision = 0,
73137 + .family = NFPROTO_UNSPEC,
73138 + .match = gradm_mt,
73139 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
73140 + .me = THIS_MODULE,
73141 +};
73142 +
73143 +static int __init gradm_mt_init(void)
73144 +{
73145 + return xt_register_match(&gradm_mt_reg);
73146 +}
73147 +
73148 +static void __exit gradm_mt_exit(void)
73149 +{
73150 + xt_unregister_match(&gradm_mt_reg);
73151 +}
73152 +
73153 +module_init(gradm_mt_init);
73154 +module_exit(gradm_mt_exit);
73155 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
73156 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
73157 +MODULE_LICENSE("GPL");
73158 +MODULE_ALIAS("ipt_gradm");
73159 +MODULE_ALIAS("ip6t_gradm");
73160 diff -urNp linux-2.6.32.45/net/netlink/af_netlink.c linux-2.6.32.45/net/netlink/af_netlink.c
73161 --- linux-2.6.32.45/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
73162 +++ linux-2.6.32.45/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
73163 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
73164 sk->sk_error_report(sk);
73165 }
73166 }
73167 - atomic_inc(&sk->sk_drops);
73168 + atomic_inc_unchecked(&sk->sk_drops);
73169 }
73170
73171 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
73172 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
73173 struct netlink_sock *nlk = nlk_sk(s);
73174
73175 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
73176 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73177 + NULL,
73178 +#else
73179 s,
73180 +#endif
73181 s->sk_protocol,
73182 nlk->pid,
73183 nlk->groups ? (u32)nlk->groups[0] : 0,
73184 sk_rmem_alloc_get(s),
73185 sk_wmem_alloc_get(s),
73186 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73187 + NULL,
73188 +#else
73189 nlk->cb,
73190 +#endif
73191 atomic_read(&s->sk_refcnt),
73192 - atomic_read(&s->sk_drops)
73193 + atomic_read_unchecked(&s->sk_drops)
73194 );
73195
73196 }
73197 diff -urNp linux-2.6.32.45/net/netrom/af_netrom.c linux-2.6.32.45/net/netrom/af_netrom.c
73198 --- linux-2.6.32.45/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
73199 +++ linux-2.6.32.45/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
73200 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
73201 struct sock *sk = sock->sk;
73202 struct nr_sock *nr = nr_sk(sk);
73203
73204 + memset(sax, 0, sizeof(*sax));
73205 lock_sock(sk);
73206 if (peer != 0) {
73207 if (sk->sk_state != TCP_ESTABLISHED) {
73208 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
73209 *uaddr_len = sizeof(struct full_sockaddr_ax25);
73210 } else {
73211 sax->fsa_ax25.sax25_family = AF_NETROM;
73212 - sax->fsa_ax25.sax25_ndigis = 0;
73213 sax->fsa_ax25.sax25_call = nr->source_addr;
73214 *uaddr_len = sizeof(struct sockaddr_ax25);
73215 }
73216 diff -urNp linux-2.6.32.45/net/packet/af_packet.c linux-2.6.32.45/net/packet/af_packet.c
73217 --- linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
73218 +++ linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
73219 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
73220
73221 seq_printf(seq,
73222 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
73223 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73224 + NULL,
73225 +#else
73226 s,
73227 +#endif
73228 atomic_read(&s->sk_refcnt),
73229 s->sk_type,
73230 ntohs(po->num),
73231 diff -urNp linux-2.6.32.45/net/phonet/af_phonet.c linux-2.6.32.45/net/phonet/af_phonet.c
73232 --- linux-2.6.32.45/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
73233 +++ linux-2.6.32.45/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
73234 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73235 {
73236 struct phonet_protocol *pp;
73237
73238 - if (protocol >= PHONET_NPROTO)
73239 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73240 return NULL;
73241
73242 spin_lock(&proto_tab_lock);
73243 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
73244 {
73245 int err = 0;
73246
73247 - if (protocol >= PHONET_NPROTO)
73248 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73249 return -EINVAL;
73250
73251 err = proto_register(pp->prot, 1);
73252 diff -urNp linux-2.6.32.45/net/phonet/datagram.c linux-2.6.32.45/net/phonet/datagram.c
73253 --- linux-2.6.32.45/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
73254 +++ linux-2.6.32.45/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
73255 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
73256 if (err < 0) {
73257 kfree_skb(skb);
73258 if (err == -ENOMEM)
73259 - atomic_inc(&sk->sk_drops);
73260 + atomic_inc_unchecked(&sk->sk_drops);
73261 }
73262 return err ? NET_RX_DROP : NET_RX_SUCCESS;
73263 }
73264 diff -urNp linux-2.6.32.45/net/phonet/pep.c linux-2.6.32.45/net/phonet/pep.c
73265 --- linux-2.6.32.45/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
73266 +++ linux-2.6.32.45/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
73267 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
73268
73269 case PNS_PEP_CTRL_REQ:
73270 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
73271 - atomic_inc(&sk->sk_drops);
73272 + atomic_inc_unchecked(&sk->sk_drops);
73273 break;
73274 }
73275 __skb_pull(skb, 4);
73276 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
73277 if (!err)
73278 return 0;
73279 if (err == -ENOMEM)
73280 - atomic_inc(&sk->sk_drops);
73281 + atomic_inc_unchecked(&sk->sk_drops);
73282 break;
73283 }
73284
73285 if (pn->rx_credits == 0) {
73286 - atomic_inc(&sk->sk_drops);
73287 + atomic_inc_unchecked(&sk->sk_drops);
73288 err = -ENOBUFS;
73289 break;
73290 }
73291 diff -urNp linux-2.6.32.45/net/phonet/socket.c linux-2.6.32.45/net/phonet/socket.c
73292 --- linux-2.6.32.45/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
73293 +++ linux-2.6.32.45/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
73294 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
73295 sk->sk_state,
73296 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
73297 sock_i_uid(sk), sock_i_ino(sk),
73298 - atomic_read(&sk->sk_refcnt), sk,
73299 - atomic_read(&sk->sk_drops), &len);
73300 + atomic_read(&sk->sk_refcnt),
73301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73302 + NULL,
73303 +#else
73304 + sk,
73305 +#endif
73306 + atomic_read_unchecked(&sk->sk_drops), &len);
73307 }
73308 seq_printf(seq, "%*s\n", 127 - len, "");
73309 return 0;
73310 diff -urNp linux-2.6.32.45/net/rds/cong.c linux-2.6.32.45/net/rds/cong.c
73311 --- linux-2.6.32.45/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
73312 +++ linux-2.6.32.45/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
73313 @@ -77,7 +77,7 @@
73314 * finds that the saved generation number is smaller than the global generation
73315 * number, it wakes up the process.
73316 */
73317 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
73318 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
73319
73320 /*
73321 * Congestion monitoring
73322 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
73323 rdsdebug("waking map %p for %pI4\n",
73324 map, &map->m_addr);
73325 rds_stats_inc(s_cong_update_received);
73326 - atomic_inc(&rds_cong_generation);
73327 + atomic_inc_unchecked(&rds_cong_generation);
73328 if (waitqueue_active(&map->m_waitq))
73329 wake_up(&map->m_waitq);
73330 if (waitqueue_active(&rds_poll_waitq))
73331 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
73332
73333 int rds_cong_updated_since(unsigned long *recent)
73334 {
73335 - unsigned long gen = atomic_read(&rds_cong_generation);
73336 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
73337
73338 if (likely(*recent == gen))
73339 return 0;
73340 diff -urNp linux-2.6.32.45/net/rds/iw_rdma.c linux-2.6.32.45/net/rds/iw_rdma.c
73341 --- linux-2.6.32.45/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
73342 +++ linux-2.6.32.45/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
73343 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
73344 struct rdma_cm_id *pcm_id;
73345 int rc;
73346
73347 + pax_track_stack();
73348 +
73349 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
73350 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
73351
73352 diff -urNp linux-2.6.32.45/net/rds/Kconfig linux-2.6.32.45/net/rds/Kconfig
73353 --- linux-2.6.32.45/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
73354 +++ linux-2.6.32.45/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
73355 @@ -1,7 +1,7 @@
73356
73357 config RDS
73358 tristate "The RDS Protocol (EXPERIMENTAL)"
73359 - depends on INET && EXPERIMENTAL
73360 + depends on INET && EXPERIMENTAL && BROKEN
73361 ---help---
73362 The RDS (Reliable Datagram Sockets) protocol provides reliable,
73363 sequenced delivery of datagrams over Infiniband, iWARP,
73364 diff -urNp linux-2.6.32.45/net/rxrpc/af_rxrpc.c linux-2.6.32.45/net/rxrpc/af_rxrpc.c
73365 --- linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
73366 +++ linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
73367 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
73368 __be32 rxrpc_epoch;
73369
73370 /* current debugging ID */
73371 -atomic_t rxrpc_debug_id;
73372 +atomic_unchecked_t rxrpc_debug_id;
73373
73374 /* count of skbs currently in use */
73375 atomic_t rxrpc_n_skbs;
73376 diff -urNp linux-2.6.32.45/net/rxrpc/ar-ack.c linux-2.6.32.45/net/rxrpc/ar-ack.c
73377 --- linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
73378 +++ linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
73379 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
73380
73381 _enter("{%d,%d,%d,%d},",
73382 call->acks_hard, call->acks_unacked,
73383 - atomic_read(&call->sequence),
73384 + atomic_read_unchecked(&call->sequence),
73385 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
73386
73387 stop = 0;
73388 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
73389
73390 /* each Tx packet has a new serial number */
73391 sp->hdr.serial =
73392 - htonl(atomic_inc_return(&call->conn->serial));
73393 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
73394
73395 hdr = (struct rxrpc_header *) txb->head;
73396 hdr->serial = sp->hdr.serial;
73397 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
73398 */
73399 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
73400 {
73401 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
73402 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
73403 }
73404
73405 /*
73406 @@ -627,7 +627,7 @@ process_further:
73407
73408 latest = ntohl(sp->hdr.serial);
73409 hard = ntohl(ack.firstPacket);
73410 - tx = atomic_read(&call->sequence);
73411 + tx = atomic_read_unchecked(&call->sequence);
73412
73413 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73414 latest,
73415 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
73416 u32 abort_code = RX_PROTOCOL_ERROR;
73417 u8 *acks = NULL;
73418
73419 + pax_track_stack();
73420 +
73421 //printk("\n--------------------\n");
73422 _enter("{%d,%s,%lx} [%lu]",
73423 call->debug_id, rxrpc_call_states[call->state], call->events,
73424 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
73425 goto maybe_reschedule;
73426
73427 send_ACK_with_skew:
73428 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
73429 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
73430 ntohl(ack.serial));
73431 send_ACK:
73432 mtu = call->conn->trans->peer->if_mtu;
73433 @@ -1171,7 +1173,7 @@ send_ACK:
73434 ackinfo.rxMTU = htonl(5692);
73435 ackinfo.jumbo_max = htonl(4);
73436
73437 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73438 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73439 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73440 ntohl(hdr.serial),
73441 ntohs(ack.maxSkew),
73442 @@ -1189,7 +1191,7 @@ send_ACK:
73443 send_message:
73444 _debug("send message");
73445
73446 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73447 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73448 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
73449 send_message_2:
73450
73451 diff -urNp linux-2.6.32.45/net/rxrpc/ar-call.c linux-2.6.32.45/net/rxrpc/ar-call.c
73452 --- linux-2.6.32.45/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
73453 +++ linux-2.6.32.45/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
73454 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
73455 spin_lock_init(&call->lock);
73456 rwlock_init(&call->state_lock);
73457 atomic_set(&call->usage, 1);
73458 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
73459 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73460 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
73461
73462 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
73463 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connection.c linux-2.6.32.45/net/rxrpc/ar-connection.c
73464 --- linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
73465 +++ linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
73466 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
73467 rwlock_init(&conn->lock);
73468 spin_lock_init(&conn->state_lock);
73469 atomic_set(&conn->usage, 1);
73470 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
73471 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73472 conn->avail_calls = RXRPC_MAXCALLS;
73473 conn->size_align = 4;
73474 conn->header_size = sizeof(struct rxrpc_header);
73475 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connevent.c linux-2.6.32.45/net/rxrpc/ar-connevent.c
73476 --- linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
73477 +++ linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
73478 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
73479
73480 len = iov[0].iov_len + iov[1].iov_len;
73481
73482 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73483 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73484 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
73485
73486 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73487 diff -urNp linux-2.6.32.45/net/rxrpc/ar-input.c linux-2.6.32.45/net/rxrpc/ar-input.c
73488 --- linux-2.6.32.45/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
73489 +++ linux-2.6.32.45/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
73490 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
73491 /* track the latest serial number on this connection for ACK packet
73492 * information */
73493 serial = ntohl(sp->hdr.serial);
73494 - hi_serial = atomic_read(&call->conn->hi_serial);
73495 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
73496 while (serial > hi_serial)
73497 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
73498 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
73499 serial);
73500
73501 /* request ACK generation for any ACK or DATA packet that requests
73502 diff -urNp linux-2.6.32.45/net/rxrpc/ar-internal.h linux-2.6.32.45/net/rxrpc/ar-internal.h
73503 --- linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
73504 +++ linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
73505 @@ -272,8 +272,8 @@ struct rxrpc_connection {
73506 int error; /* error code for local abort */
73507 int debug_id; /* debug ID for printks */
73508 unsigned call_counter; /* call ID counter */
73509 - atomic_t serial; /* packet serial number counter */
73510 - atomic_t hi_serial; /* highest serial number received */
73511 + atomic_unchecked_t serial; /* packet serial number counter */
73512 + atomic_unchecked_t hi_serial; /* highest serial number received */
73513 u8 avail_calls; /* number of calls available */
73514 u8 size_align; /* data size alignment (for security) */
73515 u8 header_size; /* rxrpc + security header size */
73516 @@ -346,7 +346,7 @@ struct rxrpc_call {
73517 spinlock_t lock;
73518 rwlock_t state_lock; /* lock for state transition */
73519 atomic_t usage;
73520 - atomic_t sequence; /* Tx data packet sequence counter */
73521 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
73522 u32 abort_code; /* local/remote abort code */
73523 enum { /* current state of call */
73524 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
73525 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
73526 */
73527 extern atomic_t rxrpc_n_skbs;
73528 extern __be32 rxrpc_epoch;
73529 -extern atomic_t rxrpc_debug_id;
73530 +extern atomic_unchecked_t rxrpc_debug_id;
73531 extern struct workqueue_struct *rxrpc_workqueue;
73532
73533 /*
73534 diff -urNp linux-2.6.32.45/net/rxrpc/ar-key.c linux-2.6.32.45/net/rxrpc/ar-key.c
73535 --- linux-2.6.32.45/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
73536 +++ linux-2.6.32.45/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
73537 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
73538 return ret;
73539
73540 plen -= sizeof(*token);
73541 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73542 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73543 if (!token)
73544 return -ENOMEM;
73545
73546 - token->kad = kmalloc(plen, GFP_KERNEL);
73547 + token->kad = kzalloc(plen, GFP_KERNEL);
73548 if (!token->kad) {
73549 kfree(token);
73550 return -ENOMEM;
73551 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
73552 goto error;
73553
73554 ret = -ENOMEM;
73555 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73556 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73557 if (!token)
73558 goto error;
73559 - token->kad = kmalloc(plen, GFP_KERNEL);
73560 + token->kad = kzalloc(plen, GFP_KERNEL);
73561 if (!token->kad)
73562 goto error_free;
73563
73564 diff -urNp linux-2.6.32.45/net/rxrpc/ar-local.c linux-2.6.32.45/net/rxrpc/ar-local.c
73565 --- linux-2.6.32.45/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
73566 +++ linux-2.6.32.45/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
73567 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
73568 spin_lock_init(&local->lock);
73569 rwlock_init(&local->services_lock);
73570 atomic_set(&local->usage, 1);
73571 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
73572 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73573 memcpy(&local->srx, srx, sizeof(*srx));
73574 }
73575
73576 diff -urNp linux-2.6.32.45/net/rxrpc/ar-output.c linux-2.6.32.45/net/rxrpc/ar-output.c
73577 --- linux-2.6.32.45/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
73578 +++ linux-2.6.32.45/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
73579 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
73580 sp->hdr.cid = call->cid;
73581 sp->hdr.callNumber = call->call_id;
73582 sp->hdr.seq =
73583 - htonl(atomic_inc_return(&call->sequence));
73584 + htonl(atomic_inc_return_unchecked(&call->sequence));
73585 sp->hdr.serial =
73586 - htonl(atomic_inc_return(&conn->serial));
73587 + htonl(atomic_inc_return_unchecked(&conn->serial));
73588 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
73589 sp->hdr.userStatus = 0;
73590 sp->hdr.securityIndex = conn->security_ix;
73591 diff -urNp linux-2.6.32.45/net/rxrpc/ar-peer.c linux-2.6.32.45/net/rxrpc/ar-peer.c
73592 --- linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
73593 +++ linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
73594 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
73595 INIT_LIST_HEAD(&peer->error_targets);
73596 spin_lock_init(&peer->lock);
73597 atomic_set(&peer->usage, 1);
73598 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
73599 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73600 memcpy(&peer->srx, srx, sizeof(*srx));
73601
73602 rxrpc_assess_MTU_size(peer);
73603 diff -urNp linux-2.6.32.45/net/rxrpc/ar-proc.c linux-2.6.32.45/net/rxrpc/ar-proc.c
73604 --- linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
73605 +++ linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
73606 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
73607 atomic_read(&conn->usage),
73608 rxrpc_conn_states[conn->state],
73609 key_serial(conn->key),
73610 - atomic_read(&conn->serial),
73611 - atomic_read(&conn->hi_serial));
73612 + atomic_read_unchecked(&conn->serial),
73613 + atomic_read_unchecked(&conn->hi_serial));
73614
73615 return 0;
73616 }
73617 diff -urNp linux-2.6.32.45/net/rxrpc/ar-transport.c linux-2.6.32.45/net/rxrpc/ar-transport.c
73618 --- linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
73619 +++ linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
73620 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
73621 spin_lock_init(&trans->client_lock);
73622 rwlock_init(&trans->conn_lock);
73623 atomic_set(&trans->usage, 1);
73624 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
73625 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73626
73627 if (peer->srx.transport.family == AF_INET) {
73628 switch (peer->srx.transport_type) {
73629 diff -urNp linux-2.6.32.45/net/rxrpc/rxkad.c linux-2.6.32.45/net/rxrpc/rxkad.c
73630 --- linux-2.6.32.45/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
73631 +++ linux-2.6.32.45/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
73632 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
73633 u16 check;
73634 int nsg;
73635
73636 + pax_track_stack();
73637 +
73638 sp = rxrpc_skb(skb);
73639
73640 _enter("");
73641 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
73642 u16 check;
73643 int nsg;
73644
73645 + pax_track_stack();
73646 +
73647 _enter("");
73648
73649 sp = rxrpc_skb(skb);
73650 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
73651
73652 len = iov[0].iov_len + iov[1].iov_len;
73653
73654 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73655 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73656 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
73657
73658 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73659 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
73660
73661 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
73662
73663 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
73664 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73665 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
73666
73667 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
73668 diff -urNp linux-2.6.32.45/net/sctp/proc.c linux-2.6.32.45/net/sctp/proc.c
73669 --- linux-2.6.32.45/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
73670 +++ linux-2.6.32.45/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
73671 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
73672 sctp_for_each_hentry(epb, node, &head->chain) {
73673 ep = sctp_ep(epb);
73674 sk = epb->sk;
73675 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
73676 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
73677 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73678 + NULL, NULL,
73679 +#else
73680 + ep, sk,
73681 +#endif
73682 sctp_sk(sk)->type, sk->sk_state, hash,
73683 epb->bind_addr.port,
73684 sock_i_uid(sk), sock_i_ino(sk));
73685 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
73686 seq_printf(seq,
73687 "%8p %8p %-3d %-3d %-2d %-4d "
73688 "%4d %8d %8d %7d %5lu %-5d %5d ",
73689 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
73690 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73691 + NULL, NULL,
73692 +#else
73693 + assoc, sk,
73694 +#endif
73695 + sctp_sk(sk)->type, sk->sk_state,
73696 assoc->state, hash,
73697 assoc->assoc_id,
73698 assoc->sndbuf_used,
73699 diff -urNp linux-2.6.32.45/net/sctp/socket.c linux-2.6.32.45/net/sctp/socket.c
73700 --- linux-2.6.32.45/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
73701 +++ linux-2.6.32.45/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
73702 @@ -5802,7 +5802,6 @@ pp_found:
73703 */
73704 int reuse = sk->sk_reuse;
73705 struct sock *sk2;
73706 - struct hlist_node *node;
73707
73708 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
73709 if (pp->fastreuse && sk->sk_reuse &&
73710 diff -urNp linux-2.6.32.45/net/socket.c linux-2.6.32.45/net/socket.c
73711 --- linux-2.6.32.45/net/socket.c 2011-03-27 14:31:47.000000000 -0400
73712 +++ linux-2.6.32.45/net/socket.c 2011-05-16 21:46:57.000000000 -0400
73713 @@ -87,6 +87,7 @@
73714 #include <linux/wireless.h>
73715 #include <linux/nsproxy.h>
73716 #include <linux/magic.h>
73717 +#include <linux/in.h>
73718
73719 #include <asm/uaccess.h>
73720 #include <asm/unistd.h>
73721 @@ -97,6 +98,21 @@
73722 #include <net/sock.h>
73723 #include <linux/netfilter.h>
73724
73725 +extern void gr_attach_curr_ip(const struct sock *sk);
73726 +extern int gr_handle_sock_all(const int family, const int type,
73727 + const int protocol);
73728 +extern int gr_handle_sock_server(const struct sockaddr *sck);
73729 +extern int gr_handle_sock_server_other(const struct sock *sck);
73730 +extern int gr_handle_sock_client(const struct sockaddr *sck);
73731 +extern int gr_search_connect(struct socket * sock,
73732 + struct sockaddr_in * addr);
73733 +extern int gr_search_bind(struct socket * sock,
73734 + struct sockaddr_in * addr);
73735 +extern int gr_search_listen(struct socket * sock);
73736 +extern int gr_search_accept(struct socket * sock);
73737 +extern int gr_search_socket(const int domain, const int type,
73738 + const int protocol);
73739 +
73740 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
73741 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
73742 unsigned long nr_segs, loff_t pos);
73743 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
73744 mnt);
73745 }
73746
73747 -static struct vfsmount *sock_mnt __read_mostly;
73748 +struct vfsmount *sock_mnt __read_mostly;
73749
73750 static struct file_system_type sock_fs_type = {
73751 .name = "sockfs",
73752 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
73753 return -EAFNOSUPPORT;
73754 if (type < 0 || type >= SOCK_MAX)
73755 return -EINVAL;
73756 + if (protocol < 0)
73757 + return -EINVAL;
73758
73759 /* Compatibility.
73760
73761 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
73762 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
73763 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
73764
73765 + if(!gr_search_socket(family, type, protocol)) {
73766 + retval = -EACCES;
73767 + goto out;
73768 + }
73769 +
73770 + if (gr_handle_sock_all(family, type, protocol)) {
73771 + retval = -EACCES;
73772 + goto out;
73773 + }
73774 +
73775 retval = sock_create(family, type, protocol, &sock);
73776 if (retval < 0)
73777 goto out;
73778 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73779 if (sock) {
73780 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
73781 if (err >= 0) {
73782 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
73783 + err = -EACCES;
73784 + goto error;
73785 + }
73786 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
73787 + if (err)
73788 + goto error;
73789 +
73790 err = security_socket_bind(sock,
73791 (struct sockaddr *)&address,
73792 addrlen);
73793 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73794 (struct sockaddr *)
73795 &address, addrlen);
73796 }
73797 +error:
73798 fput_light(sock->file, fput_needed);
73799 }
73800 return err;
73801 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
73802 if ((unsigned)backlog > somaxconn)
73803 backlog = somaxconn;
73804
73805 + if (gr_handle_sock_server_other(sock->sk)) {
73806 + err = -EPERM;
73807 + goto error;
73808 + }
73809 +
73810 + err = gr_search_listen(sock);
73811 + if (err)
73812 + goto error;
73813 +
73814 err = security_socket_listen(sock, backlog);
73815 if (!err)
73816 err = sock->ops->listen(sock, backlog);
73817
73818 +error:
73819 fput_light(sock->file, fput_needed);
73820 }
73821 return err;
73822 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73823 newsock->type = sock->type;
73824 newsock->ops = sock->ops;
73825
73826 + if (gr_handle_sock_server_other(sock->sk)) {
73827 + err = -EPERM;
73828 + sock_release(newsock);
73829 + goto out_put;
73830 + }
73831 +
73832 + err = gr_search_accept(sock);
73833 + if (err) {
73834 + sock_release(newsock);
73835 + goto out_put;
73836 + }
73837 +
73838 /*
73839 * We don't need try_module_get here, as the listening socket (sock)
73840 * has the protocol module (sock->ops->owner) held.
73841 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73842 fd_install(newfd, newfile);
73843 err = newfd;
73844
73845 + gr_attach_curr_ip(newsock->sk);
73846 +
73847 out_put:
73848 fput_light(sock->file, fput_needed);
73849 out:
73850 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73851 int, addrlen)
73852 {
73853 struct socket *sock;
73854 + struct sockaddr *sck;
73855 struct sockaddr_storage address;
73856 int err, fput_needed;
73857
73858 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73859 if (err < 0)
73860 goto out_put;
73861
73862 + sck = (struct sockaddr *)&address;
73863 +
73864 + if (gr_handle_sock_client(sck)) {
73865 + err = -EACCES;
73866 + goto out_put;
73867 + }
73868 +
73869 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
73870 + if (err)
73871 + goto out_put;
73872 +
73873 err =
73874 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
73875 if (err)
73876 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
73877 int err, ctl_len, iov_size, total_len;
73878 int fput_needed;
73879
73880 + pax_track_stack();
73881 +
73882 err = -EFAULT;
73883 if (MSG_CMSG_COMPAT & flags) {
73884 if (get_compat_msghdr(&msg_sys, msg_compat))
73885 diff -urNp linux-2.6.32.45/net/sunrpc/sched.c linux-2.6.32.45/net/sunrpc/sched.c
73886 --- linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:35:30.000000000 -0400
73887 +++ linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:34:01.000000000 -0400
73888 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
73889 #ifdef RPC_DEBUG
73890 static void rpc_task_set_debuginfo(struct rpc_task *task)
73891 {
73892 - static atomic_t rpc_pid;
73893 + static atomic_unchecked_t rpc_pid;
73894
73895 task->tk_magic = RPC_TASK_MAGIC_ID;
73896 - task->tk_pid = atomic_inc_return(&rpc_pid);
73897 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
73898 }
73899 #else
73900 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
73901 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c
73902 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
73903 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
73904 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
73905 static unsigned int min_max_inline = 4096;
73906 static unsigned int max_max_inline = 65536;
73907
73908 -atomic_t rdma_stat_recv;
73909 -atomic_t rdma_stat_read;
73910 -atomic_t rdma_stat_write;
73911 -atomic_t rdma_stat_sq_starve;
73912 -atomic_t rdma_stat_rq_starve;
73913 -atomic_t rdma_stat_rq_poll;
73914 -atomic_t rdma_stat_rq_prod;
73915 -atomic_t rdma_stat_sq_poll;
73916 -atomic_t rdma_stat_sq_prod;
73917 +atomic_unchecked_t rdma_stat_recv;
73918 +atomic_unchecked_t rdma_stat_read;
73919 +atomic_unchecked_t rdma_stat_write;
73920 +atomic_unchecked_t rdma_stat_sq_starve;
73921 +atomic_unchecked_t rdma_stat_rq_starve;
73922 +atomic_unchecked_t rdma_stat_rq_poll;
73923 +atomic_unchecked_t rdma_stat_rq_prod;
73924 +atomic_unchecked_t rdma_stat_sq_poll;
73925 +atomic_unchecked_t rdma_stat_sq_prod;
73926
73927 /* Temporary NFS request map and context caches */
73928 struct kmem_cache *svc_rdma_map_cachep;
73929 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
73930 len -= *ppos;
73931 if (len > *lenp)
73932 len = *lenp;
73933 - if (len && copy_to_user(buffer, str_buf, len))
73934 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
73935 return -EFAULT;
73936 *lenp = len;
73937 *ppos += len;
73938 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
73939 {
73940 .procname = "rdma_stat_read",
73941 .data = &rdma_stat_read,
73942 - .maxlen = sizeof(atomic_t),
73943 + .maxlen = sizeof(atomic_unchecked_t),
73944 .mode = 0644,
73945 .proc_handler = &read_reset_stat,
73946 },
73947 {
73948 .procname = "rdma_stat_recv",
73949 .data = &rdma_stat_recv,
73950 - .maxlen = sizeof(atomic_t),
73951 + .maxlen = sizeof(atomic_unchecked_t),
73952 .mode = 0644,
73953 .proc_handler = &read_reset_stat,
73954 },
73955 {
73956 .procname = "rdma_stat_write",
73957 .data = &rdma_stat_write,
73958 - .maxlen = sizeof(atomic_t),
73959 + .maxlen = sizeof(atomic_unchecked_t),
73960 .mode = 0644,
73961 .proc_handler = &read_reset_stat,
73962 },
73963 {
73964 .procname = "rdma_stat_sq_starve",
73965 .data = &rdma_stat_sq_starve,
73966 - .maxlen = sizeof(atomic_t),
73967 + .maxlen = sizeof(atomic_unchecked_t),
73968 .mode = 0644,
73969 .proc_handler = &read_reset_stat,
73970 },
73971 {
73972 .procname = "rdma_stat_rq_starve",
73973 .data = &rdma_stat_rq_starve,
73974 - .maxlen = sizeof(atomic_t),
73975 + .maxlen = sizeof(atomic_unchecked_t),
73976 .mode = 0644,
73977 .proc_handler = &read_reset_stat,
73978 },
73979 {
73980 .procname = "rdma_stat_rq_poll",
73981 .data = &rdma_stat_rq_poll,
73982 - .maxlen = sizeof(atomic_t),
73983 + .maxlen = sizeof(atomic_unchecked_t),
73984 .mode = 0644,
73985 .proc_handler = &read_reset_stat,
73986 },
73987 {
73988 .procname = "rdma_stat_rq_prod",
73989 .data = &rdma_stat_rq_prod,
73990 - .maxlen = sizeof(atomic_t),
73991 + .maxlen = sizeof(atomic_unchecked_t),
73992 .mode = 0644,
73993 .proc_handler = &read_reset_stat,
73994 },
73995 {
73996 .procname = "rdma_stat_sq_poll",
73997 .data = &rdma_stat_sq_poll,
73998 - .maxlen = sizeof(atomic_t),
73999 + .maxlen = sizeof(atomic_unchecked_t),
74000 .mode = 0644,
74001 .proc_handler = &read_reset_stat,
74002 },
74003 {
74004 .procname = "rdma_stat_sq_prod",
74005 .data = &rdma_stat_sq_prod,
74006 - .maxlen = sizeof(atomic_t),
74007 + .maxlen = sizeof(atomic_unchecked_t),
74008 .mode = 0644,
74009 .proc_handler = &read_reset_stat,
74010 },
74011 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
74012 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
74013 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
74014 @@ -495,7 +495,7 @@ next_sge:
74015 svc_rdma_put_context(ctxt, 0);
74016 goto out;
74017 }
74018 - atomic_inc(&rdma_stat_read);
74019 + atomic_inc_unchecked(&rdma_stat_read);
74020
74021 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
74022 chl_map->ch[ch_no].count -= read_wr.num_sge;
74023 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74024 dto_q);
74025 list_del_init(&ctxt->dto_q);
74026 } else {
74027 - atomic_inc(&rdma_stat_rq_starve);
74028 + atomic_inc_unchecked(&rdma_stat_rq_starve);
74029 clear_bit(XPT_DATA, &xprt->xpt_flags);
74030 ctxt = NULL;
74031 }
74032 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74033 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
74034 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
74035 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
74036 - atomic_inc(&rdma_stat_recv);
74037 + atomic_inc_unchecked(&rdma_stat_recv);
74038
74039 /* Build up the XDR from the receive buffers. */
74040 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
74041 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c
74042 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
74043 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
74044 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
74045 write_wr.wr.rdma.remote_addr = to;
74046
74047 /* Post It */
74048 - atomic_inc(&rdma_stat_write);
74049 + atomic_inc_unchecked(&rdma_stat_write);
74050 if (svc_rdma_send(xprt, &write_wr))
74051 goto err;
74052 return 0;
74053 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c
74054 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
74055 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
74056 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
74057 return;
74058
74059 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
74060 - atomic_inc(&rdma_stat_rq_poll);
74061 + atomic_inc_unchecked(&rdma_stat_rq_poll);
74062
74063 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
74064 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
74065 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
74066 }
74067
74068 if (ctxt)
74069 - atomic_inc(&rdma_stat_rq_prod);
74070 + atomic_inc_unchecked(&rdma_stat_rq_prod);
74071
74072 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
74073 /*
74074 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
74075 return;
74076
74077 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
74078 - atomic_inc(&rdma_stat_sq_poll);
74079 + atomic_inc_unchecked(&rdma_stat_sq_poll);
74080 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
74081 if (wc.status != IB_WC_SUCCESS)
74082 /* Close the transport */
74083 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
74084 }
74085
74086 if (ctxt)
74087 - atomic_inc(&rdma_stat_sq_prod);
74088 + atomic_inc_unchecked(&rdma_stat_sq_prod);
74089 }
74090
74091 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
74092 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
74093 spin_lock_bh(&xprt->sc_lock);
74094 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
74095 spin_unlock_bh(&xprt->sc_lock);
74096 - atomic_inc(&rdma_stat_sq_starve);
74097 + atomic_inc_unchecked(&rdma_stat_sq_starve);
74098
74099 /* See if we can opportunistically reap SQ WR to make room */
74100 sq_cq_reap(xprt);
74101 diff -urNp linux-2.6.32.45/net/sysctl_net.c linux-2.6.32.45/net/sysctl_net.c
74102 --- linux-2.6.32.45/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
74103 +++ linux-2.6.32.45/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
74104 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
74105 struct ctl_table *table)
74106 {
74107 /* Allow network administrator to have same access as root. */
74108 - if (capable(CAP_NET_ADMIN)) {
74109 + if (capable_nolog(CAP_NET_ADMIN)) {
74110 int mode = (table->mode >> 6) & 7;
74111 return (mode << 6) | (mode << 3) | mode;
74112 }
74113 diff -urNp linux-2.6.32.45/net/unix/af_unix.c linux-2.6.32.45/net/unix/af_unix.c
74114 --- linux-2.6.32.45/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
74115 +++ linux-2.6.32.45/net/unix/af_unix.c 2011-07-18 18:17:33.000000000 -0400
74116 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
74117 err = -ECONNREFUSED;
74118 if (!S_ISSOCK(inode->i_mode))
74119 goto put_fail;
74120 +
74121 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
74122 + err = -EACCES;
74123 + goto put_fail;
74124 + }
74125 +
74126 u = unix_find_socket_byinode(net, inode);
74127 if (!u)
74128 goto put_fail;
74129 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
74130 if (u) {
74131 struct dentry *dentry;
74132 dentry = unix_sk(u)->dentry;
74133 +
74134 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
74135 + err = -EPERM;
74136 + sock_put(u);
74137 + goto fail;
74138 + }
74139 +
74140 if (dentry)
74141 touch_atime(unix_sk(u)->mnt, dentry);
74142 } else
74143 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
74144 err = security_path_mknod(&nd.path, dentry, mode, 0);
74145 if (err)
74146 goto out_mknod_drop_write;
74147 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
74148 + err = -EACCES;
74149 + goto out_mknod_drop_write;
74150 + }
74151 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
74152 out_mknod_drop_write:
74153 mnt_drop_write(nd.path.mnt);
74154 if (err)
74155 goto out_mknod_dput;
74156 +
74157 + gr_handle_create(dentry, nd.path.mnt);
74158 +
74159 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
74160 dput(nd.path.dentry);
74161 nd.path.dentry = dentry;
74162 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file
74163 unix_state_lock(s);
74164
74165 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
74166 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74167 + NULL,
74168 +#else
74169 s,
74170 +#endif
74171 atomic_read(&s->sk_refcnt),
74172 0,
74173 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
74174 diff -urNp linux-2.6.32.45/net/wireless/core.c linux-2.6.32.45/net/wireless/core.c
74175 --- linux-2.6.32.45/net/wireless/core.c 2011-03-27 14:31:47.000000000 -0400
74176 +++ linux-2.6.32.45/net/wireless/core.c 2011-08-05 20:33:55.000000000 -0400
74177 @@ -367,7 +367,7 @@ struct wiphy *wiphy_new(const struct cfg
74178
74179 wiphy_net_set(&rdev->wiphy, &init_net);
74180
74181 - rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74182 + *(void **)&rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74183 rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
74184 &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
74185 &rdev->rfkill_ops, rdev);
74186 @@ -505,7 +505,7 @@ void wiphy_rfkill_start_polling(struct w
74187
74188 if (!rdev->ops->rfkill_poll)
74189 return;
74190 - rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74191 + *(void **)&rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74192 rfkill_resume_polling(rdev->rfkill);
74193 }
74194 EXPORT_SYMBOL(wiphy_rfkill_start_polling);
74195 diff -urNp linux-2.6.32.45/net/wireless/wext.c linux-2.6.32.45/net/wireless/wext.c
74196 --- linux-2.6.32.45/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
74197 +++ linux-2.6.32.45/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
74198 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
74199 */
74200
74201 /* Support for very large requests */
74202 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
74203 - (user_length > descr->max_tokens)) {
74204 + if (user_length > descr->max_tokens) {
74205 /* Allow userspace to GET more than max so
74206 * we can support any size GET requests.
74207 * There is still a limit : -ENOMEM.
74208 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
74209 }
74210 }
74211
74212 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
74213 - /*
74214 - * If this is a GET, but not NOMAX, it means that the extra
74215 - * data is not bounded by userspace, but by max_tokens. Thus
74216 - * set the length to max_tokens. This matches the extra data
74217 - * allocation.
74218 - * The driver should fill it with the number of tokens it
74219 - * provided, and it may check iwp->length rather than having
74220 - * knowledge of max_tokens. If the driver doesn't change the
74221 - * iwp->length, this ioctl just copies back max_token tokens
74222 - * filled with zeroes. Hopefully the driver isn't claiming
74223 - * them to be valid data.
74224 - */
74225 - iwp->length = descr->max_tokens;
74226 - }
74227 -
74228 err = handler(dev, info, (union iwreq_data *) iwp, extra);
74229
74230 iwp->length += essid_compat;
74231 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_policy.c linux-2.6.32.45/net/xfrm/xfrm_policy.c
74232 --- linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
74233 +++ linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
74234 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
74235 hlist_add_head(&policy->bydst, chain);
74236 xfrm_pol_hold(policy);
74237 net->xfrm.policy_count[dir]++;
74238 - atomic_inc(&flow_cache_genid);
74239 + atomic_inc_unchecked(&flow_cache_genid);
74240 if (delpol)
74241 __xfrm_policy_unlink(delpol, dir);
74242 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74243 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
74244 write_unlock_bh(&xfrm_policy_lock);
74245
74246 if (ret && delete) {
74247 - atomic_inc(&flow_cache_genid);
74248 + atomic_inc_unchecked(&flow_cache_genid);
74249 xfrm_policy_kill(ret);
74250 }
74251 return ret;
74252 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
74253 write_unlock_bh(&xfrm_policy_lock);
74254
74255 if (ret && delete) {
74256 - atomic_inc(&flow_cache_genid);
74257 + atomic_inc_unchecked(&flow_cache_genid);
74258 xfrm_policy_kill(ret);
74259 }
74260 return ret;
74261 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
74262 }
74263
74264 }
74265 - atomic_inc(&flow_cache_genid);
74266 + atomic_inc_unchecked(&flow_cache_genid);
74267 out:
74268 write_unlock_bh(&xfrm_policy_lock);
74269 return err;
74270 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
74271 write_unlock_bh(&xfrm_policy_lock);
74272 if (pol) {
74273 if (dir < XFRM_POLICY_MAX)
74274 - atomic_inc(&flow_cache_genid);
74275 + atomic_inc_unchecked(&flow_cache_genid);
74276 xfrm_policy_kill(pol);
74277 return 0;
74278 }
74279 @@ -1477,7 +1477,7 @@ free_dst:
74280 goto out;
74281 }
74282
74283 -static int inline
74284 +static inline int
74285 xfrm_dst_alloc_copy(void **target, void *src, int size)
74286 {
74287 if (!*target) {
74288 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
74289 return 0;
74290 }
74291
74292 -static int inline
74293 +static inline int
74294 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
74295 {
74296 #ifdef CONFIG_XFRM_SUB_POLICY
74297 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
74298 #endif
74299 }
74300
74301 -static int inline
74302 +static inline int
74303 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
74304 {
74305 #ifdef CONFIG_XFRM_SUB_POLICY
74306 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
74307 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
74308
74309 restart:
74310 - genid = atomic_read(&flow_cache_genid);
74311 + genid = atomic_read_unchecked(&flow_cache_genid);
74312 policy = NULL;
74313 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
74314 pols[pi] = NULL;
74315 @@ -1680,7 +1680,7 @@ restart:
74316 goto error;
74317 }
74318 if (nx == -EAGAIN ||
74319 - genid != atomic_read(&flow_cache_genid)) {
74320 + genid != atomic_read_unchecked(&flow_cache_genid)) {
74321 xfrm_pols_put(pols, npols);
74322 goto restart;
74323 }
74324 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_user.c linux-2.6.32.45/net/xfrm/xfrm_user.c
74325 --- linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
74326 +++ linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
74327 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
74328 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
74329 int i;
74330
74331 + pax_track_stack();
74332 +
74333 if (xp->xfrm_nr == 0)
74334 return 0;
74335
74336 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
74337 int err;
74338 int n = 0;
74339
74340 + pax_track_stack();
74341 +
74342 if (attrs[XFRMA_MIGRATE] == NULL)
74343 return -EINVAL;
74344
74345 diff -urNp linux-2.6.32.45/samples/kobject/kset-example.c linux-2.6.32.45/samples/kobject/kset-example.c
74346 --- linux-2.6.32.45/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
74347 +++ linux-2.6.32.45/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
74348 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
74349 }
74350
74351 /* Our custom sysfs_ops that we will associate with our ktype later on */
74352 -static struct sysfs_ops foo_sysfs_ops = {
74353 +static const struct sysfs_ops foo_sysfs_ops = {
74354 .show = foo_attr_show,
74355 .store = foo_attr_store,
74356 };
74357 diff -urNp linux-2.6.32.45/scripts/basic/fixdep.c linux-2.6.32.45/scripts/basic/fixdep.c
74358 --- linux-2.6.32.45/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
74359 +++ linux-2.6.32.45/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
74360 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
74361
74362 static void parse_config_file(char *map, size_t len)
74363 {
74364 - int *end = (int *) (map + len);
74365 + unsigned int *end = (unsigned int *) (map + len);
74366 /* start at +1, so that p can never be < map */
74367 - int *m = (int *) map + 1;
74368 + unsigned int *m = (unsigned int *) map + 1;
74369 char *p, *q;
74370
74371 for (; m < end; m++) {
74372 @@ -371,7 +371,7 @@ static void print_deps(void)
74373 static void traps(void)
74374 {
74375 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
74376 - int *p = (int *)test;
74377 + unsigned int *p = (unsigned int *)test;
74378
74379 if (*p != INT_CONF) {
74380 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
74381 diff -urNp linux-2.6.32.45/scripts/gcc-plugin.sh linux-2.6.32.45/scripts/gcc-plugin.sh
74382 --- linux-2.6.32.45/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
74383 +++ linux-2.6.32.45/scripts/gcc-plugin.sh 2011-08-05 20:33:55.000000000 -0400
74384 @@ -0,0 +1,3 @@
74385 +#!/bin/sh
74386 +
74387 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
74388 diff -urNp linux-2.6.32.45/scripts/Makefile.build linux-2.6.32.45/scripts/Makefile.build
74389 --- linux-2.6.32.45/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
74390 +++ linux-2.6.32.45/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
74391 @@ -59,7 +59,7 @@ endif
74392 endif
74393
74394 # Do not include host rules unless needed
74395 -ifneq ($(hostprogs-y)$(hostprogs-m),)
74396 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
74397 include scripts/Makefile.host
74398 endif
74399
74400 diff -urNp linux-2.6.32.45/scripts/Makefile.clean linux-2.6.32.45/scripts/Makefile.clean
74401 --- linux-2.6.32.45/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
74402 +++ linux-2.6.32.45/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
74403 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
74404 __clean-files := $(extra-y) $(always) \
74405 $(targets) $(clean-files) \
74406 $(host-progs) \
74407 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
74408 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
74409 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
74410
74411 # as clean-files is given relative to the current directory, this adds
74412 # a $(obj) prefix, except for absolute paths
74413 diff -urNp linux-2.6.32.45/scripts/Makefile.host linux-2.6.32.45/scripts/Makefile.host
74414 --- linux-2.6.32.45/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
74415 +++ linux-2.6.32.45/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
74416 @@ -31,6 +31,7 @@
74417 # Note: Shared libraries consisting of C++ files are not supported
74418
74419 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
74420 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
74421
74422 # C code
74423 # Executables compiled from a single .c file
74424 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
74425 # Shared libaries (only .c supported)
74426 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
74427 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
74428 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
74429 # Remove .so files from "xxx-objs"
74430 host-cobjs := $(filter-out %.so,$(host-cobjs))
74431
74432 diff -urNp linux-2.6.32.45/scripts/mod/file2alias.c linux-2.6.32.45/scripts/mod/file2alias.c
74433 --- linux-2.6.32.45/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
74434 +++ linux-2.6.32.45/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
74435 @@ -72,7 +72,7 @@ static void device_id_check(const char *
74436 unsigned long size, unsigned long id_size,
74437 void *symval)
74438 {
74439 - int i;
74440 + unsigned int i;
74441
74442 if (size % id_size || size < id_size) {
74443 if (cross_build != 0)
74444 @@ -102,7 +102,7 @@ static void device_id_check(const char *
74445 /* USB is special because the bcdDevice can be matched against a numeric range */
74446 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
74447 static void do_usb_entry(struct usb_device_id *id,
74448 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
74449 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
74450 unsigned char range_lo, unsigned char range_hi,
74451 struct module *mod)
74452 {
74453 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
74454 for (i = 0; i < count; i++) {
74455 const char *id = (char *)devs[i].id;
74456 char acpi_id[sizeof(devs[0].id)];
74457 - int j;
74458 + unsigned int j;
74459
74460 buf_printf(&mod->dev_table_buf,
74461 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74462 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
74463
74464 for (j = 0; j < PNP_MAX_DEVICES; j++) {
74465 const char *id = (char *)card->devs[j].id;
74466 - int i2, j2;
74467 + unsigned int i2, j2;
74468 int dup = 0;
74469
74470 if (!id[0])
74471 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
74472 /* add an individual alias for every device entry */
74473 if (!dup) {
74474 char acpi_id[sizeof(card->devs[0].id)];
74475 - int k;
74476 + unsigned int k;
74477
74478 buf_printf(&mod->dev_table_buf,
74479 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74480 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
74481 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
74482 char *alias)
74483 {
74484 - int i, j;
74485 + unsigned int i, j;
74486
74487 sprintf(alias, "dmi*");
74488
74489 diff -urNp linux-2.6.32.45/scripts/mod/modpost.c linux-2.6.32.45/scripts/mod/modpost.c
74490 --- linux-2.6.32.45/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
74491 +++ linux-2.6.32.45/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
74492 @@ -835,6 +835,7 @@ enum mismatch {
74493 INIT_TO_EXIT,
74494 EXIT_TO_INIT,
74495 EXPORT_TO_INIT_EXIT,
74496 + DATA_TO_TEXT
74497 };
74498
74499 struct sectioncheck {
74500 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
74501 .fromsec = { "__ksymtab*", NULL },
74502 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
74503 .mismatch = EXPORT_TO_INIT_EXIT
74504 +},
74505 +/* Do not reference code from writable data */
74506 +{
74507 + .fromsec = { DATA_SECTIONS, NULL },
74508 + .tosec = { TEXT_SECTIONS, NULL },
74509 + .mismatch = DATA_TO_TEXT
74510 }
74511 };
74512
74513 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
74514 continue;
74515 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
74516 continue;
74517 - if (sym->st_value == addr)
74518 - return sym;
74519 /* Find a symbol nearby - addr are maybe negative */
74520 d = sym->st_value - addr;
74521 + if (d == 0)
74522 + return sym;
74523 if (d < 0)
74524 d = addr - sym->st_value;
74525 if (d < distance) {
74526 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
74527 "Fix this by removing the %sannotation of %s "
74528 "or drop the export.\n",
74529 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
74530 + case DATA_TO_TEXT:
74531 +/*
74532 + fprintf(stderr,
74533 + "The variable %s references\n"
74534 + "the %s %s%s%s\n",
74535 + fromsym, to, sec2annotation(tosec), tosym, to_p);
74536 +*/
74537 + break;
74538 case NO_MISMATCH:
74539 /* To get warnings on missing members */
74540 break;
74541 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
74542 static void check_sec_ref(struct module *mod, const char *modname,
74543 struct elf_info *elf)
74544 {
74545 - int i;
74546 + unsigned int i;
74547 Elf_Shdr *sechdrs = elf->sechdrs;
74548
74549 /* Walk through all sections */
74550 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
74551 va_end(ap);
74552 }
74553
74554 -void buf_write(struct buffer *buf, const char *s, int len)
74555 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
74556 {
74557 if (buf->size - buf->pos < len) {
74558 buf->size += len + SZ;
74559 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
74560 if (fstat(fileno(file), &st) < 0)
74561 goto close_write;
74562
74563 - if (st.st_size != b->pos)
74564 + if (st.st_size != (off_t)b->pos)
74565 goto close_write;
74566
74567 tmp = NOFAIL(malloc(b->pos));
74568 diff -urNp linux-2.6.32.45/scripts/mod/modpost.h linux-2.6.32.45/scripts/mod/modpost.h
74569 --- linux-2.6.32.45/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
74570 +++ linux-2.6.32.45/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
74571 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
74572
74573 struct buffer {
74574 char *p;
74575 - int pos;
74576 - int size;
74577 + unsigned int pos;
74578 + unsigned int size;
74579 };
74580
74581 void __attribute__((format(printf, 2, 3)))
74582 buf_printf(struct buffer *buf, const char *fmt, ...);
74583
74584 void
74585 -buf_write(struct buffer *buf, const char *s, int len);
74586 +buf_write(struct buffer *buf, const char *s, unsigned int len);
74587
74588 struct module {
74589 struct module *next;
74590 diff -urNp linux-2.6.32.45/scripts/mod/sumversion.c linux-2.6.32.45/scripts/mod/sumversion.c
74591 --- linux-2.6.32.45/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
74592 +++ linux-2.6.32.45/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
74593 @@ -455,7 +455,7 @@ static void write_version(const char *fi
74594 goto out;
74595 }
74596
74597 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
74598 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
74599 warn("writing sum in %s failed: %s\n",
74600 filename, strerror(errno));
74601 goto out;
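
Several of the modpost and sumversion changes above follow one theme: loop counters, buffer positions and length arguments are switched to unsigned types, and comparisons against signed values such as write()'s ssize_t return or stat()'s off_t gain explicit casts so both operands share a signedness. A small userspace sketch of why the cast in write_version() matters is shown below; it is illustrative only and assumes the typical case where size_t and ssize_t have the same rank.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

int main(void)
{
        ssize_t ret = -1;                  /* e.g. a failed write() */
        size_t want = strlen("sum") + 1;

        /* mixed comparison: ret is converted to size_t, so -1 turns
           into a huge unsigned value; the test still fails, but only
           via the conversion, and the compiler warns about it */
        if (ret != want)
                printf("mismatch (unsigned comparison)\n");

        /* the patch's form: cast the expected length instead, so the
           comparison stays in signed arithmetic */
        if (ret != (ssize_t)want)
                printf("mismatch (signed comparison)\n");
        return 0;
}
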
74602 diff -urNp linux-2.6.32.45/scripts/package/mkspec linux-2.6.32.45/scripts/package/mkspec
74603 --- linux-2.6.32.45/scripts/package/mkspec 2011-03-27 14:31:47.000000000 -0400
74604 +++ linux-2.6.32.45/scripts/package/mkspec 2011-07-19 18:19:12.000000000 -0400
74605 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM
74606 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
74607 echo "%endif"
74608
74609 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
74610 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
74611 echo "%ifarch ia64"
74612 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
74613 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
74614 diff -urNp linux-2.6.32.45/scripts/pnmtologo.c linux-2.6.32.45/scripts/pnmtologo.c
74615 --- linux-2.6.32.45/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
74616 +++ linux-2.6.32.45/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
74617 @@ -237,14 +237,14 @@ static void write_header(void)
74618 fprintf(out, " * Linux logo %s\n", logoname);
74619 fputs(" */\n\n", out);
74620 fputs("#include <linux/linux_logo.h>\n\n", out);
74621 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
74622 + fprintf(out, "static unsigned char %s_data[] = {\n",
74623 logoname);
74624 }
74625
74626 static void write_footer(void)
74627 {
74628 fputs("\n};\n\n", out);
74629 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
74630 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
74631 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
74632 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
74633 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
74634 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
74635 fputs("\n};\n\n", out);
74636
74637 /* write logo clut */
74638 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
74639 + fprintf(out, "static unsigned char %s_clut[] = {\n",
74640 logoname);
74641 write_hex_cnt = 0;
74642 for (i = 0; i < logo_clutsize; i++) {
74643 diff -urNp linux-2.6.32.45/scripts/tags.sh linux-2.6.32.45/scripts/tags.sh
74644 --- linux-2.6.32.45/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
74645 +++ linux-2.6.32.45/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
74646 @@ -93,6 +93,11 @@ docscope()
74647 cscope -b -f cscope.out
74648 }
74649
74650 +dogtags()
74651 +{
74652 + all_sources | gtags -f -
74653 +}
74654 +
74655 exuberant()
74656 {
74657 all_sources | xargs $1 -a \
74658 @@ -164,6 +169,10 @@ case "$1" in
74659 docscope
74660 ;;
74661
74662 + "gtags")
74663 + dogtags
74664 + ;;
74665 +
74666 "tags")
74667 rm -f tags
74668 xtags ctags
74669 diff -urNp linux-2.6.32.45/security/capability.c linux-2.6.32.45/security/capability.c
74670 --- linux-2.6.32.45/security/capability.c 2011-03-27 14:31:47.000000000 -0400
74671 +++ linux-2.6.32.45/security/capability.c 2011-04-17 15:56:46.000000000 -0400
74672 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
74673 }
74674 #endif /* CONFIG_AUDIT */
74675
74676 -struct security_operations default_security_ops = {
74677 +struct security_operations default_security_ops __read_only = {
74678 .name = "default",
74679 };
74680
74681 diff -urNp linux-2.6.32.45/security/commoncap.c linux-2.6.32.45/security/commoncap.c
74682 --- linux-2.6.32.45/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
74683 +++ linux-2.6.32.45/security/commoncap.c 2011-08-17 19:22:13.000000000 -0400
74684 @@ -27,7 +27,7 @@
74685 #include <linux/sched.h>
74686 #include <linux/prctl.h>
74687 #include <linux/securebits.h>
74688 -
74689 +#include <net/sock.h>
74690 /*
74691 * If a non-root user executes a setuid-root binary in
74692 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
74693 @@ -50,9 +50,18 @@ static void warn_setuid_and_fcaps_mixed(
74694 }
74695 }
74696
74697 +#ifdef CONFIG_NET
74698 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
74699 +#endif
74700 +
74701 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
74702 {
74703 +#ifdef CONFIG_NET
74704 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
74705 +#else
74706 NETLINK_CB(skb).eff_cap = current_cap();
74707 +#endif
74708 +
74709 return 0;
74710 }
74711
74712 @@ -582,6 +591,9 @@ int cap_bprm_secureexec(struct linux_bin
74713 {
74714 const struct cred *cred = current_cred();
74715
74716 + if (gr_acl_enable_at_secure())
74717 + return 1;
74718 +
74719 if (cred->uid != 0) {
74720 if (bprm->cap_effective)
74721 return 1;
74722 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_api.c linux-2.6.32.45/security/integrity/ima/ima_api.c
74723 --- linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
74724 +++ linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
74725 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
74726 int result;
74727
74728 /* can overflow, only indicator */
74729 - atomic_long_inc(&ima_htable.violations);
74730 + atomic_long_inc_unchecked(&ima_htable.violations);
74731
74732 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
74733 if (!entry) {
74734 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_fs.c linux-2.6.32.45/security/integrity/ima/ima_fs.c
74735 --- linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
74736 +++ linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
74737 @@ -27,12 +27,12 @@
74738 static int valid_policy = 1;
74739 #define TMPBUFLEN 12
74740 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
74741 - loff_t *ppos, atomic_long_t *val)
74742 + loff_t *ppos, atomic_long_unchecked_t *val)
74743 {
74744 char tmpbuf[TMPBUFLEN];
74745 ssize_t len;
74746
74747 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
74748 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
74749 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
74750 }
74751
74752 diff -urNp linux-2.6.32.45/security/integrity/ima/ima.h linux-2.6.32.45/security/integrity/ima/ima.h
74753 --- linux-2.6.32.45/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
74754 +++ linux-2.6.32.45/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
74755 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
74756 extern spinlock_t ima_queue_lock;
74757
74758 struct ima_h_table {
74759 - atomic_long_t len; /* number of stored measurements in the list */
74760 - atomic_long_t violations;
74761 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
74762 + atomic_long_unchecked_t violations;
74763 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
74764 };
74765 extern struct ima_h_table ima_htable;
74766 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_queue.c linux-2.6.32.45/security/integrity/ima/ima_queue.c
74767 --- linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
74768 +++ linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
74769 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
74770 INIT_LIST_HEAD(&qe->later);
74771 list_add_tail_rcu(&qe->later, &ima_measurements);
74772
74773 - atomic_long_inc(&ima_htable.len);
74774 + atomic_long_inc_unchecked(&ima_htable.len);
74775 key = ima_hash_key(entry->digest);
74776 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
74777 return 0;
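
The IMA counters above are statistics that the surrounding code already annotates as "can overflow, only indicator", so the patch moves them to the PaX-specific atomic_long_unchecked_t type and its *_unchecked accessors; with PAX_REFCOUNT enabled, an overflow of a regular atomic_long_t would otherwise be treated as a possible refcount exploit. A condensed sketch of the resulting pattern, using only the accessors visible in the hunks above and assuming the PaX headers that define them:

/* sketch: counters that are allowed to wrap use the _unchecked API
   so PAX_REFCOUNT does not flag the wrap as an attack */
static atomic_long_unchecked_t violations;

static void note_violation(void)
{
        atomic_long_inc_unchecked(&violations);          /* may wrap */
}

static long violation_count(void)
{
        return atomic_long_read_unchecked(&violations);
}
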
74778 diff -urNp linux-2.6.32.45/security/Kconfig linux-2.6.32.45/security/Kconfig
74779 --- linux-2.6.32.45/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
74780 +++ linux-2.6.32.45/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
74781 @@ -4,6 +4,555 @@
74782
74783 menu "Security options"
74784
74785 +source grsecurity/Kconfig
74786 +
74787 +menu "PaX"
74788 +
74789 + config ARCH_TRACK_EXEC_LIMIT
74790 + bool
74791 +
74792 + config PAX_PER_CPU_PGD
74793 + bool
74794 +
74795 + config TASK_SIZE_MAX_SHIFT
74796 + int
74797 + depends on X86_64
74798 + default 47 if !PAX_PER_CPU_PGD
74799 + default 42 if PAX_PER_CPU_PGD
74800 +
74801 + config PAX_ENABLE_PAE
74802 + bool
74803 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
74804 +
74805 +config PAX
74806 + bool "Enable various PaX features"
74807 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
74808 + help
74809 + This allows you to enable various PaX features. PaX adds
74810 + intrusion prevention mechanisms to the kernel that reduce
74811 + the risks posed by exploitable memory corruption bugs.
74812 +
74813 +menu "PaX Control"
74814 + depends on PAX
74815 +
74816 +config PAX_SOFTMODE
74817 + bool 'Support soft mode'
74818 + select PAX_PT_PAX_FLAGS
74819 + help
74820 + Enabling this option will allow you to run PaX in soft mode, that
74821 + is, PaX features will not be enforced by default, only on executables
74822 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
74823 + is the only way to mark executables for soft mode use.
74824 +
74825 + Soft mode can be activated by using the "pax_softmode=1" kernel command
74826 + line option on boot. Furthermore you can control various PaX features
74827 + at runtime via the entries in /proc/sys/kernel/pax.
74828 +
74829 +config PAX_EI_PAX
74830 + bool 'Use legacy ELF header marking'
74831 + help
74832 + Enabling this option will allow you to control PaX features on
74833 + a per executable basis via the 'chpax' utility available at
74834 + http://pax.grsecurity.net/. The control flags will be read from
74835 + an otherwise reserved part of the ELF header. This marking has
74836 + numerous drawbacks (no support for soft mode, and the toolchain does
74837 + not know about the non-standard use of the ELF header); it has
74838 + therefore been deprecated in favour of PT_PAX_FLAGS support.
74839 +
74840 + Note that if you enable PT_PAX_FLAGS marking support as well,
74841 + the PT_PAX_FLAGS marks will override the legacy EI_PAX marks.
74842 +
74843 +config PAX_PT_PAX_FLAGS
74844 + bool 'Use ELF program header marking'
74845 + help
74846 + Enabling this option will allow you to control PaX features on
74847 + a per executable basis via the 'paxctl' utility available at
74848 + http://pax.grsecurity.net/. The control flags will be read from
74849 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
74850 + has the benefits of supporting both soft mode and being fully
74851 + integrated into the toolchain (the binutils patch is available
74852 + from http://pax.grsecurity.net).
74853 +
74854 + If your toolchain does not support PT_PAX_FLAGS markings,
74855 + you can create one in most cases with 'paxctl -C'.
74856 +
74857 + Note that if you enable the legacy EI_PAX marking support as well,
74858 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
74859 +
74860 +choice
74861 + prompt 'MAC system integration'
74862 + default PAX_HAVE_ACL_FLAGS
74863 + help
74864 + Mandatory Access Control systems have the option of controlling
74865 + PaX flags on a per executable basis, choose the method supported
74866 + by your particular system.
74867 +
74868 + - "none": if your MAC system does not interact with PaX,
74869 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
74870 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
74871 +
74872 + NOTE: this option is for developers/integrators only.
74873 +
74874 + config PAX_NO_ACL_FLAGS
74875 + bool 'none'
74876 +
74877 + config PAX_HAVE_ACL_FLAGS
74878 + bool 'direct'
74879 +
74880 + config PAX_HOOK_ACL_FLAGS
74881 + bool 'hook'
74882 +endchoice
74883 +
74884 +endmenu
74885 +
74886 +menu "Non-executable pages"
74887 + depends on PAX
74888 +
74889 +config PAX_NOEXEC
74890 + bool "Enforce non-executable pages"
74891 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
74892 + help
74893 + By design some architectures do not allow memory pages to be
74894 + protected against execution, or, even if they do, Linux does not
74895 + make use of this feature. In practice this means that if a page
74896 + is readable (such as the stack or heap), it is also executable.
74897 +
74898 + There is a well known exploit technique that makes use of this
74899 + fact and a common programming mistake where an attacker can
74900 + introduce code of his choice somewhere in the attacked program's
74901 + memory (typically the stack or the heap) and then execute it.
74902 +
74903 + If the attacked program was running with different (typically
74904 + higher) privileges than those of the attacker, then he can elevate
74905 + his own privilege level (e.g. get a root shell, write to files to
74906 + which he does not otherwise have write access, etc).
74907 +
74908 + Enabling this option will let you choose from various features
74909 + that prevent the injection and execution of 'foreign' code in
74910 + a program.
74911 +
74912 + This will also break programs that rely on the old behaviour and
74913 + expect that dynamically allocated memory via the malloc() family
74914 + of functions is executable (which it is not). Notable examples
74915 + are the XFree86 4.x server, the java runtime and wine.
74916 +
74917 +config PAX_PAGEEXEC
74918 + bool "Paging based non-executable pages"
74919 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
74920 + select S390_SWITCH_AMODE if S390
74921 + select S390_EXEC_PROTECT if S390
74922 + select ARCH_TRACK_EXEC_LIMIT if X86_32
74923 + help
74924 + This implementation is based on the paging feature of the CPU.
74925 + On i386 without hardware non-executable bit support there is a
74926 + variable but usually low performance impact; however, on Intel's
74927 + P4 core based CPUs it is very high, so you should not enable this
74928 + for kernels meant to be used on such CPUs.
74929 +
74930 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
74931 + with hardware non-executable bit support there is no performance
74932 + impact, on ppc the impact is negligible.
74933 +
74934 + Note that several architectures require various emulations due to
74935 + badly designed userland ABIs; this causes a performance impact
74936 + that will disappear as soon as userland is fixed. For example, ppc
74937 + userland MUST have been built with secure-plt by a recent toolchain.
74938 +
74939 +config PAX_SEGMEXEC
74940 + bool "Segmentation based non-executable pages"
74941 + depends on PAX_NOEXEC && X86_32
74942 + help
74943 + This implementation is based on the segmentation feature of the
74944 + CPU and has a very small performance impact, however applications
74945 + will be limited to a 1.5 GB address space instead of the normal
74946 + 3 GB.
74947 +
74948 +config PAX_EMUTRAMP
74949 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
74950 + default y if PARISC
74951 + help
74952 + There are some programs and libraries that for one reason or
74953 + another attempt to execute special small code snippets from
74954 + non-executable memory pages. Most notable examples are the
74955 + signal handler return code generated by the kernel itself and
74956 + the GCC trampolines.
74957 +
74958 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
74959 + such programs will no longer work under your kernel.
74960 +
74961 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
74962 + utilities to enable trampoline emulation for the affected programs
74963 + yet still have the protection provided by the non-executable pages.
74964 +
74965 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
74966 + your system will not even boot.
74967 +
74968 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
74969 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
74970 + for the affected files.
74971 +
74972 + NOTE: enabling this feature *may* open up a loophole in the
74973 + protection provided by non-executable pages that an attacker
74974 + could abuse. Therefore the best solution is to not have any
74975 + files on your system that would require this option. This can
74976 + be achieved by not using libc5 (which relies on the kernel
74977 + signal handler return code) and not using or rewriting programs
74978 + that make use of the nested function implementation of GCC.
74979 + Skilled users can just fix GCC itself so that it implements
74980 + nested function calls in a way that does not interfere with PaX.
74981 +
74982 +config PAX_EMUSIGRT
74983 + bool "Automatically emulate sigreturn trampolines"
74984 + depends on PAX_EMUTRAMP && PARISC
74985 + default y
74986 + help
74987 + Enabling this option will have the kernel automatically detect
74988 + and emulate signal return trampolines executing on the stack
74989 + that would otherwise lead to task termination.
74990 +
74991 + This solution is intended as a temporary one for users with
74992 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
74993 + Modula-3 runtime, etc) or executables linked to such, basically
74994 + everything that does not specify its own SA_RESTORER function in
74995 + normal executable memory like glibc 2.1+ does.
74996 +
74997 + On parisc you MUST enable this option, otherwise your system will
74998 + not even boot.
74999 +
75000 + NOTE: this feature cannot be disabled on a per executable basis
75001 + and since it *does* open up a loophole in the protection provided
75002 + by non-executable pages, the best solution is to not have any
75003 + files on your system that would require this option.
75004 +
75005 +config PAX_MPROTECT
75006 + bool "Restrict mprotect()"
75007 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
75008 + help
75009 + Enabling this option will prevent programs from
75010 + - changing the executable status of memory pages that were
75011 + not originally created as executable,
75012 + - making read-only executable pages writable again,
75013 + - creating executable pages from anonymous memory,
75014 + - making read-only-after-relocations (RELRO) data pages writable again.
75015 +
75016 + You should say Y here to complete the protection provided by
75017 + the enforcement of non-executable pages.
75018 +
75019 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75020 + this feature on a per file basis.
75021 +
75022 +config PAX_MPROTECT_COMPAT
75023 + bool "Use legacy/compat protection demoting (read help)"
75024 + depends on PAX_MPROTECT
75025 + default n
75026 + help
75027 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
75028 + by sending the proper error code to the application. For some broken
75029 + userland, this can cause problems with Python or other applications. The
75030 + current implementation however allows for applications like clamav to
75031 + detect if JIT compilation/execution is allowed and to fall back gracefully
75032 + to an interpreter-based mode if it does not. While we encourage everyone
75033 + to use the current implementation as-is and push upstream to fix broken
75034 + userland (note that the RWX logging option can assist with this), in some
75035 + environments this may not be possible. Having to disable MPROTECT
75036 + completely on certain binaries reduces the security benefit of PaX,
75037 + so this option is provided for those environments to revert to the old
75038 + behavior.
75039 +
75040 +config PAX_ELFRELOCS
75041 + bool "Allow ELF text relocations (read help)"
75042 + depends on PAX_MPROTECT
75043 + default n
75044 + help
75045 + Non-executable pages and mprotect() restrictions are effective
75046 + in preventing the introduction of new executable code into an
75047 + attacked task's address space. There remain only two avenues
75048 + for this kind of attack: if the attacker can execute already
75049 + existing code in the attacked task then he can either have it
75050 + create and mmap() a file containing his code or have it mmap()
75051 + an already existing ELF library that does not have position
75052 + independent code in it and use mprotect() on it to make it
75053 + writable and copy his code there. While protecting against
75054 + the former approach is beyond PaX, the latter can be prevented
75055 + by having only PIC ELF libraries on one's system (which do not
75056 + need to relocate their code). If you are sure this is your case,
75057 + as is the case with all modern Linux distributions, then leave
75058 + this option disabled. You should say 'n' here.
75059 +
75060 +config PAX_ETEXECRELOCS
75061 + bool "Allow ELF ET_EXEC text relocations"
75062 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
75063 + select PAX_ELFRELOCS
75064 + default y
75065 + help
75066 + On some architectures there are incorrectly created applications
75067 + that require text relocations and would not work without enabling
75068 + this option. If you are an alpha, ia64 or parisc user, you should
75069 + enable this option and disable it once you have made sure that
75070 + none of your applications need it.
75071 +
75072 +config PAX_EMUPLT
75073 + bool "Automatically emulate ELF PLT"
75074 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
75075 + default y
75076 + help
75077 + Enabling this option will have the kernel automatically detect
75078 + and emulate the Procedure Linkage Table entries in ELF files.
75079 + On some architectures such entries are in writable memory, and
75080 + become non-executable, leading to task termination. Therefore
75081 + it is mandatory that you enable this option on alpha, parisc,
75082 + sparc and sparc64, otherwise your system would not even boot.
75083 +
75084 + NOTE: this feature *does* open up a loophole in the protection
75085 + provided by the non-executable pages, therefore the proper
75086 + solution is to modify the toolchain to produce a PLT that does
75087 + not need to be writable.
75088 +
75089 +config PAX_DLRESOLVE
75090 + bool 'Emulate old glibc resolver stub'
75091 + depends on PAX_EMUPLT && SPARC
75092 + default n
75093 + help
75094 + This option is needed if userland has an old glibc (before 2.4)
75095 + that puts a 'save' instruction into the runtime generated resolver
75096 + stub that needs special emulation.
75097 +
75098 +config PAX_KERNEXEC
75099 + bool "Enforce non-executable kernel pages"
75100 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
75101 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
75102 + help
75103 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
75104 + that is, enabling this option will make it harder to inject
75105 + and execute 'foreign' code in kernel memory itself.
75106 +
75107 + Note that on x86_64 kernels there is a known regression when
75108 + this feature and KVM/VMX are both enabled in the host kernel.
75109 +
75110 +config PAX_KERNEXEC_MODULE_TEXT
75111 + int "Minimum amount of memory reserved for module code"
75112 + default "4"
75113 + depends on PAX_KERNEXEC && X86_32 && MODULES
75114 + help
75115 + Due to implementation details the kernel must reserve a fixed
75116 + amount of memory for module code at compile time that cannot be
75117 + changed at runtime. Here you can specify the minimum amount
75118 + in MB that will be reserved. Due to the same implementation
75119 + details this size will always be rounded up to the next 2/4 MB
75120 + boundary (depends on PAE) so the actually available memory for
75121 + module code will usually be more than this minimum.
75122 +
75123 + The default 4 MB should be enough for most users but if you have
75124 + an excessive number of modules (e.g., most distribution configs
75125 + compile many drivers as modules) or use huge modules such as
75126 + nvidia's kernel driver, you will need to adjust this amount.
75127 + A good rule of thumb is to look at your currently loaded kernel
75128 + modules and add up their sizes.
75129 +
75130 +endmenu
75131 +
75132 +menu "Address Space Layout Randomization"
75133 + depends on PAX
75134 +
75135 +config PAX_ASLR
75136 + bool "Address Space Layout Randomization"
75137 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
75138 + help
75139 + Many if not most exploit techniques rely on the knowledge of
75140 + certain addresses in the attacked program. The following options
75141 + will allow the kernel to apply a certain amount of randomization
75142 + to specific parts of the program, thereby forcing an attacker to
75143 + guess them in most cases. Any failed guess will most likely crash
75144 + the attacked program, which allows the kernel to detect such attempts
75145 + and react to them. PaX itself provides no reaction mechanisms;
75146 + instead it is strongly encouraged that you make use of Nergal's
75147 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
75148 + (http://www.grsecurity.net/) built-in crash detection features or
75149 + develop one yourself.
75150 +
75151 + By saying Y here you can choose to randomize the following areas:
75152 + - top of the task's kernel stack
75153 + - top of the task's userland stack
75154 + - base address for mmap() requests that do not specify one
75155 + (this includes all libraries)
75156 + - base address of the main executable
75157 +
75158 + It is strongly recommended to say Y here as address space layout
75159 + randomization has negligible impact on performance yet it provides
75160 + a very effective protection.
75161 +
75162 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75163 + this feature on a per file basis.
75164 +
75165 +config PAX_RANDKSTACK
75166 + bool "Randomize kernel stack base"
75167 + depends on PAX_ASLR && X86_TSC && X86
75168 + help
75169 + By saying Y here the kernel will randomize every task's kernel
75170 + stack on every system call. This will not only force an attacker
75171 + to guess it but also prevent him from making use of possible
75172 + leaked information about it.
75173 +
75174 + Since the kernel stack is a rather scarce resource, randomization
75175 + may cause unexpected stack overflows, therefore you should very
75176 + carefully test your system. Note that once enabled in the kernel
75177 + configuration, this feature cannot be disabled on a per file basis.
75178 +
75179 +config PAX_RANDUSTACK
75180 + bool "Randomize user stack base"
75181 + depends on PAX_ASLR
75182 + help
75183 + By saying Y here the kernel will randomize every task's userland
75184 + stack. The randomization is done in two steps, where the second
75185 + one may shift the top of the stack by a large amount and thereby
75186 + cause problems for programs that want to use lots of memory (more
75187 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
75188 + For this reason the second step can be controlled by 'chpax' or
75189 + 'paxctl' on a per file basis.
75190 +
75191 +config PAX_RANDMMAP
75192 + bool "Randomize mmap() base"
75193 + depends on PAX_ASLR
75194 + help
75195 + By saying Y here the kernel will use a randomized base address for
75196 + mmap() requests that do not specify one themselves. As a result
75197 + all dynamically loaded libraries will appear at random addresses
75198 + and therefore be harder to exploit by a technique where an attacker
75199 + attempts to execute library code for his purposes (e.g. spawn a
75200 + shell from an exploited program that is running at an elevated
75201 + privilege level).
75202 +
75203 + Furthermore, if a program is relinked as a dynamic ELF file, its
75204 + base address will be randomized as well, completing the full
75205 + randomization of the address space layout. Attacking such programs
75206 + becomes a guessing game. You can find an example of doing this at
75207 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
75208 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
75209 +
75210 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
75211 + feature on a per file basis.
75212 +
75213 +endmenu
75214 +
75215 +menu "Miscellaneous hardening features"
75216 +
75217 +config PAX_MEMORY_SANITIZE
75218 + bool "Sanitize all freed memory"
75219 + help
75220 + By saying Y here the kernel will erase memory pages as soon as they
75221 + are freed. This in turn reduces the lifetime of data stored in the
75222 + pages, making it less likely that sensitive information such as
75223 + passwords, cryptographic secrets, etc stay in memory for too long.
75224 +
75225 + This is especially useful for programs whose runtime is short;
75226 + long-lived processes and the kernel itself benefit from this as long
75227 + as they operate on whole memory pages and ensure timely freeing of
75228 + pages that may hold sensitive information.
75229 +
75230 + The tradeoff is a performance impact: on a single CPU system kernel
75231 + compilation sees a 3% slowdown; other systems and workloads may vary,
75232 + and you are advised to test this feature on your expected workload
75233 + before deploying it.
75234 +
75235 + Note that this feature does not protect data stored in live pages,
75236 + e.g., process memory swapped to disk may stay there for a long time.
75237 +
75238 +config PAX_MEMORY_STACKLEAK
75239 + bool "Sanitize kernel stack"
75240 + depends on X86
75241 + help
75242 + By saying Y here the kernel will erase the kernel stack before it
75243 + returns from a system call. This in turn reduces the information
75244 + that a kernel stack leak bug can reveal.
75245 +
75246 + Note that such a bug can still leak information that was put on
75247 + the stack by the current system call (the one eventually triggering
75248 + the bug) but traces of earlier system calls on the kernel stack
75249 + cannot leak anymore.
75250 +
75251 + The tradeoff is a performance impact: on a single CPU system kernel
75252 + compilation sees a 1% slowdown; other systems and workloads may vary,
75253 + and you are advised to test this feature on your expected workload
75254 + before deploying it.
75255 +
75256 + Note: full support for this feature requires gcc with plugin support
75257 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
75258 + is not supported). Using older gcc versions means that functions
75259 + with large enough stack frames may leave uninitialized memory behind
75260 + that may be exposed to a later syscall leaking the stack.
75261 +
75262 +config PAX_MEMORY_UDEREF
75263 + bool "Prevent invalid userland pointer dereference"
75264 + depends on X86 && !UML_X86 && !XEN
75265 + select PAX_PER_CPU_PGD if X86_64
75266 + help
75267 + By saying Y here the kernel will be prevented from dereferencing
75268 + userland pointers in contexts where the kernel expects only kernel
75269 + pointers. This is both a useful runtime debugging feature and a
75270 + security measure that prevents exploiting a class of kernel bugs.
75271 +
75272 + The tradeoff is that some virtualization solutions may experience
75273 + a huge slowdown, and therefore you should not enable this feature
75274 + for kernels meant to run in such environments. Whether a given VM
75275 + solution is affected or not is best determined by simply trying it
75276 + out: the performance impact will be obvious right on boot as this
75277 + mechanism engages from very early on. A good rule of thumb is that
75278 + VMs running on CPUs without hardware virtualization support (i.e.,
75279 + the majority of IA-32 CPUs) will likely experience the slowdown.
75280 +
75281 +config PAX_REFCOUNT
75282 + bool "Prevent various kernel object reference counter overflows"
75283 + depends on GRKERNSEC && (X86 || SPARC64)
75284 + help
75285 + By saying Y here the kernel will detect and prevent overflowing
75286 + various (but not all) kinds of object reference counters. Such
75287 + overflows can normally occur due to bugs only and are often, if
75288 + not always, exploitable.
75289 +
75290 + The tradeoff is that data structures protected by an overflowed
75291 + refcount will never be freed and therefore will leak memory. Note
75292 + that this leak also happens even without this protection but in
75293 + that case the overflow can eventually trigger the freeing of the
75294 + data structure while it is still being used elsewhere, resulting
75295 + in the exploitable situation that this feature prevents.
75296 +
75297 + Since this has a negligible performance impact, you should enable
75298 + this feature.
75299 +
75300 +config PAX_USERCOPY
75301 + bool "Harden heap object copies between kernel and userland"
75302 + depends on X86 || PPC || SPARC || ARM
75303 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
75304 + help
75305 + By saying Y here the kernel will enforce the size of heap objects
75306 + when they are copied in either direction between the kernel and
75307 + userland, even if only a part of the heap object is copied.
75308 +
75309 + Specifically, this checking prevents information leaking from the
75310 + kernel heap during kernel to userland copies (if the kernel heap
75311 + object is otherwise fully initialized) and prevents kernel heap
75312 + overflows during userland to kernel copies.
75313 +
75314 + Note that the current implementation provides the strictest bounds
75315 + checks for the SLUB allocator.
75316 +
75317 + Enabling this option also enables per-slab-cache protection against
75318 + data in a given cache being copied into or out of it via userland
75319 + accessors. Though the whitelist of regions will be reduced over
75320 + time, it notably protects important data structures like task structs.
75321 +
75322 +
75324 + restrict copies into and out of the kernel stack to local variables
75325 + within a single frame.
75326 +
75327 + Since this has a negligible performance impact, you should enable
75328 + this feature.
75329 +
75330 +endmenu
75331 +
75332 +endmenu
75333 +
75334 config KEYS
75335 bool "Enable access key retention support"
75336 help
75337 @@ -146,7 +695,7 @@ config INTEL_TXT
75338 config LSM_MMAP_MIN_ADDR
75339 int "Low address space for LSM to protect from user allocation"
75340 depends on SECURITY && SECURITY_SELINUX
75341 - default 65536
75342 + default 32768
75343 help
75344 This is the portion of low virtual memory which should be protected
75345 from userspace allocation. Keeping a user from writing to low pages
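
Among the Kconfig entries added above, PAX_MPROTECT is the one most directly visible to userland: requests that would make previously non-executable memory executable are rejected. A small userspace sketch of the behaviour the help text describes is shown below; it is illustrative only, and the error reporting is kept generic since the help text does not name a specific error code.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* anonymous mapping created without PROT_EXEC */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* with PAX_MPROTECT active this is expected to fail, because it
           would make a page executable that was not created executable */
        if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
                perror("mprotect");

        munmap(p, 4096);
        return 0;
}
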
75346 diff -urNp linux-2.6.32.45/security/keys/keyring.c linux-2.6.32.45/security/keys/keyring.c
75347 --- linux-2.6.32.45/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
75348 +++ linux-2.6.32.45/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
75349 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
75350 ret = -EFAULT;
75351
75352 for (loop = 0; loop < klist->nkeys; loop++) {
75353 + key_serial_t serial;
75354 key = klist->keys[loop];
75355 + serial = key->serial;
75356
75357 tmp = sizeof(key_serial_t);
75358 if (tmp > buflen)
75359 tmp = buflen;
75360
75361 - if (copy_to_user(buffer,
75362 - &key->serial,
75363 - tmp) != 0)
75364 + if (copy_to_user(buffer, &serial, tmp))
75365 goto error;
75366
75367 buflen -= tmp;
75368 diff -urNp linux-2.6.32.45/security/min_addr.c linux-2.6.32.45/security/min_addr.c
75369 --- linux-2.6.32.45/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
75370 +++ linux-2.6.32.45/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
75371 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
75372 */
75373 static void update_mmap_min_addr(void)
75374 {
75375 +#ifndef SPARC
75376 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
75377 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
75378 mmap_min_addr = dac_mmap_min_addr;
75379 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
75380 #else
75381 mmap_min_addr = dac_mmap_min_addr;
75382 #endif
75383 +#endif
75384 }
75385
75386 /*
75387 diff -urNp linux-2.6.32.45/security/root_plug.c linux-2.6.32.45/security/root_plug.c
75388 --- linux-2.6.32.45/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
75389 +++ linux-2.6.32.45/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
75390 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
75391 return 0;
75392 }
75393
75394 -static struct security_operations rootplug_security_ops = {
75395 +static struct security_operations rootplug_security_ops __read_only = {
75396 .bprm_check_security = rootplug_bprm_check_security,
75397 };
75398
75399 diff -urNp linux-2.6.32.45/security/security.c linux-2.6.32.45/security/security.c
75400 --- linux-2.6.32.45/security/security.c 2011-03-27 14:31:47.000000000 -0400
75401 +++ linux-2.6.32.45/security/security.c 2011-04-17 15:56:46.000000000 -0400
75402 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
75403 extern struct security_operations default_security_ops;
75404 extern void security_fixup_ops(struct security_operations *ops);
75405
75406 -struct security_operations *security_ops; /* Initialized to NULL */
75407 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
75408
75409 static inline int verify(struct security_operations *ops)
75410 {
75411 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
75412 * If there is already a security module registered with the kernel,
75413 * an error will be returned. Otherwise %0 is returned on success.
75414 */
75415 -int register_security(struct security_operations *ops)
75416 +int __init register_security(struct security_operations *ops)
75417 {
75418 if (verify(ops)) {
75419 printk(KERN_DEBUG "%s could not verify "
75420 diff -urNp linux-2.6.32.45/security/selinux/hooks.c linux-2.6.32.45/security/selinux/hooks.c
75421 --- linux-2.6.32.45/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
75422 +++ linux-2.6.32.45/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
75423 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
75424 * Minimal support for a secondary security module,
75425 * just to allow the use of the capability module.
75426 */
75427 -static struct security_operations *secondary_ops;
75428 +static struct security_operations *secondary_ops __read_only;
75429
75430 /* Lists of inode and superblock security structures initialized
75431 before the policy was loaded. */
75432 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
75433
75434 #endif
75435
75436 -static struct security_operations selinux_ops = {
75437 +static struct security_operations selinux_ops __read_only = {
75438 .name = "selinux",
75439
75440 .ptrace_access_check = selinux_ptrace_access_check,
75441 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
75442 avc_disable();
75443
75444 /* Reset security_ops to the secondary module, dummy or capability. */
75445 + pax_open_kernel();
75446 security_ops = secondary_ops;
75447 + pax_close_kernel();
75448
75449 /* Unregister netfilter hooks. */
75450 selinux_nf_ip_exit();
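
The security_operations changes above follow one pattern: the ops structures and the security_ops/secondary_ops pointers are marked __read_only, and the single legitimate runtime update in selinux_disable() is bracketed by pax_open_kernel()/pax_close_kernel(). A condensed sketch of that pattern, assuming the PaX helpers and the __read_only attribute provided elsewhere in the patch:

/* sketch of the __read_only update idiom used above */
static struct security_operations *security_ops __read_only;

static void reset_to_secondary(struct security_operations *secondary_ops)
{
        pax_open_kernel();              /* temporarily allow the write */
        security_ops = secondary_ops;   /* the one sanctioned update   */
        pax_close_kernel();             /* back to read-only           */
}
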
75451 diff -urNp linux-2.6.32.45/security/selinux/include/xfrm.h linux-2.6.32.45/security/selinux/include/xfrm.h
75452 --- linux-2.6.32.45/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
75453 +++ linux-2.6.32.45/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
75454 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
75455
75456 static inline void selinux_xfrm_notify_policyload(void)
75457 {
75458 - atomic_inc(&flow_cache_genid);
75459 + atomic_inc_unchecked(&flow_cache_genid);
75460 }
75461 #else
75462 static inline int selinux_xfrm_enabled(void)
75463 diff -urNp linux-2.6.32.45/security/selinux/ss/services.c linux-2.6.32.45/security/selinux/ss/services.c
75464 --- linux-2.6.32.45/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
75465 +++ linux-2.6.32.45/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
75466 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
75467 int rc = 0;
75468 struct policy_file file = { data, len }, *fp = &file;
75469
75470 + pax_track_stack();
75471 +
75472 if (!ss_initialized) {
75473 avtab_cache_init();
75474 if (policydb_read(&policydb, fp)) {
75475 diff -urNp linux-2.6.32.45/security/smack/smack_lsm.c linux-2.6.32.45/security/smack/smack_lsm.c
75476 --- linux-2.6.32.45/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
75477 +++ linux-2.6.32.45/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
75478 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
75479 return 0;
75480 }
75481
75482 -struct security_operations smack_ops = {
75483 +struct security_operations smack_ops __read_only = {
75484 .name = "smack",
75485
75486 .ptrace_access_check = smack_ptrace_access_check,
75487 diff -urNp linux-2.6.32.45/security/tomoyo/tomoyo.c linux-2.6.32.45/security/tomoyo/tomoyo.c
75488 --- linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
75489 +++ linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
75490 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
75491 * tomoyo_security_ops is a "struct security_operations" which is used for
75492 * registering TOMOYO.
75493 */
75494 -static struct security_operations tomoyo_security_ops = {
75495 +static struct security_operations tomoyo_security_ops __read_only = {
75496 .name = "tomoyo",
75497 .cred_alloc_blank = tomoyo_cred_alloc_blank,
75498 .cred_prepare = tomoyo_cred_prepare,
75499 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.c linux-2.6.32.45/sound/aoa/codecs/onyx.c
75500 --- linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
75501 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
75502 @@ -53,7 +53,7 @@ struct onyx {
75503 spdif_locked:1,
75504 analog_locked:1,
75505 original_mute:2;
75506 - int open_count;
75507 + local_t open_count;
75508 struct codec_info *codec_info;
75509
75510 /* mutex serializes concurrent access to the device
75511 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
75512 struct onyx *onyx = cii->codec_data;
75513
75514 mutex_lock(&onyx->mutex);
75515 - onyx->open_count++;
75516 + local_inc(&onyx->open_count);
75517 mutex_unlock(&onyx->mutex);
75518
75519 return 0;
75520 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
75521 struct onyx *onyx = cii->codec_data;
75522
75523 mutex_lock(&onyx->mutex);
75524 - onyx->open_count--;
75525 - if (!onyx->open_count)
75526 + if (local_dec_and_test(&onyx->open_count))
75527 onyx->spdif_locked = onyx->analog_locked = 0;
75528 mutex_unlock(&onyx->mutex);
75529
75530 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.h linux-2.6.32.45/sound/aoa/codecs/onyx.h
75531 --- linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
75532 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
75533 @@ -11,6 +11,7 @@
75534 #include <linux/i2c.h>
75535 #include <asm/pmac_low_i2c.h>
75536 #include <asm/prom.h>
75537 +#include <asm/local.h>
75538
75539 /* PCM3052 register definitions */
75540
75541 diff -urNp linux-2.6.32.45/sound/core/seq/seq_device.c linux-2.6.32.45/sound/core/seq/seq_device.c
75542 --- linux-2.6.32.45/sound/core/seq/seq_device.c 2011-03-27 14:31:47.000000000 -0400
75543 +++ linux-2.6.32.45/sound/core/seq/seq_device.c 2011-08-05 20:33:55.000000000 -0400
75544 @@ -63,7 +63,7 @@ struct ops_list {
75545 int argsize; /* argument size */
75546
75547 /* operators */
75548 - struct snd_seq_dev_ops ops;
75549 + struct snd_seq_dev_ops *ops;
75550
75551 /* registred devices */
75552 struct list_head dev_list; /* list of devices */
75553 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
75554
75555 mutex_lock(&ops->reg_mutex);
75556 /* copy driver operators */
75557 - ops->ops = *entry;
75558 + ops->ops = entry;
75559 ops->driver |= DRIVER_LOADED;
75560 ops->argsize = argsize;
75561
75562 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
75563 dev->name, ops->id, ops->argsize, dev->argsize);
75564 return -EINVAL;
75565 }
75566 - if (ops->ops.init_device(dev) >= 0) {
75567 + if (ops->ops->init_device(dev) >= 0) {
75568 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
75569 ops->num_init_devices++;
75570 } else {
75571 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
75572 dev->name, ops->id, ops->argsize, dev->argsize);
75573 return -EINVAL;
75574 }
75575 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
75576 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
75577 dev->status = SNDRV_SEQ_DEVICE_FREE;
75578 dev->driver_data = NULL;
75579 ops->num_init_devices--;
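
The seq_device change above stops copying the driver's snd_seq_dev_ops into the writable ops_list and keeps a pointer to the registered entry instead, so the callback table itself can stay in read-only memory; call sites switch from ops->ops.init_device() to ops->ops->init_device(). A stripped-down sketch of the before/after shape, with simplified illustrative types rather than the real ALSA definitions:

/* illustrative, simplified types */
struct dev_ops {
        int (*init_device)(void *dev);
};

struct ops_list {
        const struct dev_ops *ops;       /* was: struct dev_ops ops;      */
};

static void register_driver(struct ops_list *l, const struct dev_ops *entry)
{
        l->ops = entry;                  /* was: l->ops = *entry;         */
}

static int init_device(struct ops_list *l, void *dev)
{
        return l->ops->init_device(dev); /* was: l->ops.init_device(dev)  */
}
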
75580 diff -urNp linux-2.6.32.45/sound/drivers/mts64.c linux-2.6.32.45/sound/drivers/mts64.c
75581 --- linux-2.6.32.45/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
75582 +++ linux-2.6.32.45/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
75583 @@ -27,6 +27,7 @@
75584 #include <sound/initval.h>
75585 #include <sound/rawmidi.h>
75586 #include <sound/control.h>
75587 +#include <asm/local.h>
75588
75589 #define CARD_NAME "Miditerminal 4140"
75590 #define DRIVER_NAME "MTS64"
75591 @@ -65,7 +66,7 @@ struct mts64 {
75592 struct pardevice *pardev;
75593 int pardev_claimed;
75594
75595 - int open_count;
75596 + local_t open_count;
75597 int current_midi_output_port;
75598 int current_midi_input_port;
75599 u8 mode[MTS64_NUM_INPUT_PORTS];
75600 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
75601 {
75602 struct mts64 *mts = substream->rmidi->private_data;
75603
75604 - if (mts->open_count == 0) {
75605 + if (local_read(&mts->open_count) == 0) {
75606 /* We don't need a spinlock here, because this is just called
75607 if the device has not been opened before.
75608 So there aren't any IRQs from the device */
75609 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
75610
75611 msleep(50);
75612 }
75613 - ++(mts->open_count);
75614 + local_inc(&mts->open_count);
75615
75616 return 0;
75617 }
75618 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
75619 struct mts64 *mts = substream->rmidi->private_data;
75620 unsigned long flags;
75621
75622 - --(mts->open_count);
75623 - if (mts->open_count == 0) {
75624 + if (local_dec_return(&mts->open_count) == 0) {
75625 /* We need the spinlock_irqsave here because we can still
75626 have IRQs at this point */
75627 spin_lock_irqsave(&mts->lock, flags);
75628 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
75629
75630 msleep(500);
75631
75632 - } else if (mts->open_count < 0)
75633 - mts->open_count = 0;
75634 + } else if (local_read(&mts->open_count) < 0)
75635 + local_set(&mts->open_count, 0);
75636
75637 return 0;
75638 }
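
The open_count fields in the onyx and mts64 drivers above are converted from plain int to local_t, so increments and decrements go through local_inc()/local_dec_and_test()/local_dec_return() instead of bare ++/--. A minimal sketch of the resulting open/close pattern, built only from the accessors that appear in the hunks:

#include <asm/local.h>

struct dev_state {
        local_t open_count;             /* was a plain int */
};

static void dev_open(struct dev_state *s)
{
        local_inc(&s->open_count);
}

static void dev_close(struct dev_state *s)
{
        /* local_dec_and_test() returns true when the count reaches zero */
        if (local_dec_and_test(&s->open_count))
                ;                       /* last close: drop shared state here */
}
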
75639 diff -urNp linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c
75640 --- linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-03-27 14:31:47.000000000 -0400
75641 +++ linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:33:55.000000000 -0400
75642 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
75643 MODULE_DESCRIPTION("OPL4 driver");
75644 MODULE_LICENSE("GPL");
75645
75646 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
75647 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
75648 {
75649 int timeout = 10;
75650 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
75651 diff -urNp linux-2.6.32.45/sound/drivers/portman2x4.c linux-2.6.32.45/sound/drivers/portman2x4.c
75652 --- linux-2.6.32.45/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
75653 +++ linux-2.6.32.45/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
75654 @@ -46,6 +46,7 @@
75655 #include <sound/initval.h>
75656 #include <sound/rawmidi.h>
75657 #include <sound/control.h>
75658 +#include <asm/local.h>
75659
75660 #define CARD_NAME "Portman 2x4"
75661 #define DRIVER_NAME "portman"
75662 @@ -83,7 +84,7 @@ struct portman {
75663 struct pardevice *pardev;
75664 int pardev_claimed;
75665
75666 - int open_count;
75667 + local_t open_count;
75668 int mode[PORTMAN_NUM_INPUT_PORTS];
75669 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
75670 };
75671 diff -urNp linux-2.6.32.45/sound/isa/cmi8330.c linux-2.6.32.45/sound/isa/cmi8330.c
75672 --- linux-2.6.32.45/sound/isa/cmi8330.c 2011-03-27 14:31:47.000000000 -0400
75673 +++ linux-2.6.32.45/sound/isa/cmi8330.c 2011-08-05 20:33:55.000000000 -0400
75674 @@ -455,16 +455,16 @@ static int __devinit snd_cmi8330_pcm(str
75675
75676 /* SB16 */
75677 ops = snd_sb16dsp_get_pcm_ops(CMI_SB_STREAM);
75678 - chip->streams[CMI_SB_STREAM].ops = *ops;
75679 + memcpy((void *)&chip->streams[CMI_SB_STREAM].ops, ops, sizeof(*ops));
75680 chip->streams[CMI_SB_STREAM].open = ops->open;
75681 - chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
75682 + *(void **)&chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
75683 chip->streams[CMI_SB_STREAM].private_data = chip->sb;
75684
75685 /* AD1848 */
75686 ops = snd_wss_get_pcm_ops(CMI_AD_STREAM);
75687 - chip->streams[CMI_AD_STREAM].ops = *ops;
75688 + memcpy((void *)&chip->streams[CMI_AD_STREAM].ops, ops, sizeof(*ops));
75689 chip->streams[CMI_AD_STREAM].open = ops->open;
75690 - chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
75691 + *(void **)&chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
75692 chip->streams[CMI_AD_STREAM].private_data = chip->wss;
75693
75694 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK].ops);
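
The cmi8330 hunk above shows how one-time initialisation of an ops table is rewritten once such tables are treated as read-only by the rest of the patch: the template is copied with memcpy() through a (void *) cast, and the single callback override is written through *(void **)& rather than by plain member assignment. A reduced sketch of that idiom, with illustrative names and a simplified ops type:

#include <string.h>

/* illustrative, simplified ops type */
struct stream_ops {
        int (*open)(void);
};

static int my_open(void)
{
        return 0;
}

static void setup_stream(struct stream_ops *dst, const struct stream_ops *tmpl)
{
        memcpy((void *)dst, tmpl, sizeof(*dst)); /* copy the template ops */
        *(void **)&dst->open = my_open;          /* override one callback */
}
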
75695 diff -urNp linux-2.6.32.45/sound/oss/sb_audio.c linux-2.6.32.45/sound/oss/sb_audio.c
75696 --- linux-2.6.32.45/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
75697 +++ linux-2.6.32.45/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
75698 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
75699 buf16 = (signed short *)(localbuf + localoffs);
75700 while (c)
75701 {
75702 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75703 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75704 if (copy_from_user(lbuf8,
75705 userbuf+useroffs + p,
75706 locallen))
75707 diff -urNp linux-2.6.32.45/sound/oss/swarm_cs4297a.c linux-2.6.32.45/sound/oss/swarm_cs4297a.c
75708 --- linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
75709 +++ linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
75710 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
75711 {
75712 struct cs4297a_state *s;
75713 u32 pwr, id;
75714 - mm_segment_t fs;
75715 int rval;
75716 #ifndef CONFIG_BCM_CS4297A_CSWARM
75717 u64 cfg;
75718 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
75719 if (!rval) {
75720 char *sb1250_duart_present;
75721
75722 +#if 0
75723 + mm_segment_t fs;
75724 fs = get_fs();
75725 set_fs(KERNEL_DS);
75726 -#if 0
75727 val = SOUND_MASK_LINE;
75728 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
75729 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
75730 val = initvol[i].vol;
75731 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
75732 }
75733 + set_fs(fs);
75734 // cs4297a_write_ac97(s, 0x18, 0x0808);
75735 #else
75736 // cs4297a_write_ac97(s, 0x5e, 0x180);
75737 cs4297a_write_ac97(s, 0x02, 0x0808);
75738 cs4297a_write_ac97(s, 0x18, 0x0808);
75739 #endif
75740 - set_fs(fs);
75741
75742 list_add(&s->list, &cs4297a_devs);
75743
75744 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_codec.c linux-2.6.32.45/sound/pci/ac97/ac97_codec.c
75745 --- linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
75746 +++ linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
75747 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
75748 }
75749
75750 /* build_ops to do nothing */
75751 -static struct snd_ac97_build_ops null_build_ops;
75752 +static const struct snd_ac97_build_ops null_build_ops;
75753
75754 #ifdef CONFIG_SND_AC97_POWER_SAVE
75755 static void do_update_power(struct work_struct *work)
75756 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_patch.c linux-2.6.32.45/sound/pci/ac97/ac97_patch.c
75757 --- linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
75758 +++ linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
75759 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
75760 return 0;
75761 }
75762
75763 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75764 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75765 .build_spdif = patch_yamaha_ymf743_build_spdif,
75766 .build_3d = patch_yamaha_ymf7x3_3d,
75767 };
75768 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
75769 return 0;
75770 }
75771
75772 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75773 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75774 .build_3d = patch_yamaha_ymf7x3_3d,
75775 .build_post_spdif = patch_yamaha_ymf753_post_spdif
75776 };
75777 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
75778 return 0;
75779 }
75780
75781 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75782 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75783 .build_specific = patch_wolfson_wm9703_specific,
75784 };
75785
75786 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
75787 return 0;
75788 }
75789
75790 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75791 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75792 .build_specific = patch_wolfson_wm9704_specific,
75793 };
75794
75795 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
75796 return 0;
75797 }
75798
75799 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75800 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75801 .build_specific = patch_wolfson_wm9705_specific,
75802 };
75803
75804 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
75805 return 0;
75806 }
75807
75808 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75809 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75810 .build_specific = patch_wolfson_wm9711_specific,
75811 };
75812
75813 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
75814 }
75815 #endif
75816
75817 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75818 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75819 .build_specific = patch_wolfson_wm9713_specific,
75820 .build_3d = patch_wolfson_wm9713_3d,
75821 #ifdef CONFIG_PM
75822 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
75823 return 0;
75824 }
75825
75826 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75827 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75828 .build_3d = patch_sigmatel_stac9700_3d,
75829 .build_specific = patch_sigmatel_stac97xx_specific
75830 };
75831 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
75832 return patch_sigmatel_stac97xx_specific(ac97);
75833 }
75834
75835 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75836 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75837 .build_3d = patch_sigmatel_stac9708_3d,
75838 .build_specific = patch_sigmatel_stac9708_specific
75839 };
75840 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
75841 return 0;
75842 }
75843
75844 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75845 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75846 .build_3d = patch_sigmatel_stac9700_3d,
75847 .build_specific = patch_sigmatel_stac9758_specific
75848 };
75849 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
75850 return 0;
75851 }
75852
75853 -static struct snd_ac97_build_ops patch_cirrus_ops = {
75854 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
75855 .build_spdif = patch_cirrus_build_spdif
75856 };
75857
75858 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
75859 return 0;
75860 }
75861
75862 -static struct snd_ac97_build_ops patch_conexant_ops = {
75863 +static const struct snd_ac97_build_ops patch_conexant_ops = {
75864 .build_spdif = patch_conexant_build_spdif
75865 };
75866
75867 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
75868 }
75869 }
75870
75871 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
75872 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
75873 #ifdef CONFIG_PM
75874 .resume = ad18xx_resume
75875 #endif
75876 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
75877 return 0;
75878 }
75879
75880 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
75881 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
75882 .build_specific = &patch_ad1885_specific,
75883 #ifdef CONFIG_PM
75884 .resume = ad18xx_resume
75885 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
75886 return 0;
75887 }
75888
75889 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
75890 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
75891 .build_specific = &patch_ad1886_specific,
75892 #ifdef CONFIG_PM
75893 .resume = ad18xx_resume
75894 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
75895 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75896 }
75897
75898 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75899 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75900 .build_post_spdif = patch_ad198x_post_spdif,
75901 .build_specific = patch_ad1981a_specific,
75902 #ifdef CONFIG_PM
75903 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
75904 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75905 }
75906
75907 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75908 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75909 .build_post_spdif = patch_ad198x_post_spdif,
75910 .build_specific = patch_ad1981b_specific,
75911 #ifdef CONFIG_PM
75912 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
75913 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
75914 }
75915
75916 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
75917 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
75918 .build_post_spdif = patch_ad198x_post_spdif,
75919 .build_specific = patch_ad1888_specific,
75920 #ifdef CONFIG_PM
75921 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
75922 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
75923 }
75924
75925 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
75926 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
75927 .build_post_spdif = patch_ad198x_post_spdif,
75928 .build_specific = patch_ad1980_specific,
75929 #ifdef CONFIG_PM
75930 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
75931 ARRAY_SIZE(snd_ac97_ad1985_controls));
75932 }
75933
75934 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
75935 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
75936 .build_post_spdif = patch_ad198x_post_spdif,
75937 .build_specific = patch_ad1985_specific,
75938 #ifdef CONFIG_PM
75939 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
75940 ARRAY_SIZE(snd_ac97_ad1985_controls));
75941 }
75942
75943 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
75944 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
75945 .build_post_spdif = patch_ad198x_post_spdif,
75946 .build_specific = patch_ad1986_specific,
75947 #ifdef CONFIG_PM
75948 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
75949 return 0;
75950 }
75951
75952 -static struct snd_ac97_build_ops patch_alc650_ops = {
75953 +static const struct snd_ac97_build_ops patch_alc650_ops = {
75954 .build_specific = patch_alc650_specific,
75955 .update_jacks = alc650_update_jacks
75956 };
75957 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
75958 return 0;
75959 }
75960
75961 -static struct snd_ac97_build_ops patch_alc655_ops = {
75962 +static const struct snd_ac97_build_ops patch_alc655_ops = {
75963 .build_specific = patch_alc655_specific,
75964 .update_jacks = alc655_update_jacks
75965 };
75966 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
75967 return 0;
75968 }
75969
75970 -static struct snd_ac97_build_ops patch_alc850_ops = {
75971 +static const struct snd_ac97_build_ops patch_alc850_ops = {
75972 .build_specific = patch_alc850_specific,
75973 .update_jacks = alc850_update_jacks
75974 };
75975 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
75976 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
75977 }
75978
75979 -static struct snd_ac97_build_ops patch_cm9738_ops = {
75980 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
75981 .build_specific = patch_cm9738_specific,
75982 .update_jacks = cm9738_update_jacks
75983 };
75984 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
75985 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
75986 }
75987
75988 -static struct snd_ac97_build_ops patch_cm9739_ops = {
75989 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
75990 .build_specific = patch_cm9739_specific,
75991 .build_post_spdif = patch_cm9739_post_spdif,
75992 .update_jacks = cm9739_update_jacks
75993 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
75994 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
75995 }
75996
75997 -static struct snd_ac97_build_ops patch_cm9761_ops = {
75998 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
75999 .build_specific = patch_cm9761_specific,
76000 .build_post_spdif = patch_cm9761_post_spdif,
76001 .update_jacks = cm9761_update_jacks
76002 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
76003 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
76004 }
76005
76006 -static struct snd_ac97_build_ops patch_cm9780_ops = {
76007 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
76008 .build_specific = patch_cm9780_specific,
76009 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
76010 };
76011 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
76012 return 0;
76013 }
76014
76015 -static struct snd_ac97_build_ops patch_vt1616_ops = {
76016 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
76017 .build_specific = patch_vt1616_specific
76018 };
76019
76020 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
76021 return 0;
76022 }
76023
76024 -static struct snd_ac97_build_ops patch_it2646_ops = {
76025 +static const struct snd_ac97_build_ops patch_it2646_ops = {
76026 .build_specific = patch_it2646_specific,
76027 .update_jacks = it2646_update_jacks
76028 };
76029 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
76030 return 0;
76031 }
76032
76033 -static struct snd_ac97_build_ops patch_si3036_ops = {
76034 +static const struct snd_ac97_build_ops patch_si3036_ops = {
76035 .build_specific = patch_si3036_specific,
76036 };
76037
76038 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
76039 return 0;
76040 }
76041
76042 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
76043 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
76044 .build_specific = patch_ucb1400_specific,
76045 };
76046
76047 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_codec.h linux-2.6.32.45/sound/pci/hda/hda_codec.h
76048 --- linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-03-27 14:31:47.000000000 -0400
76049 +++ linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-08-05 20:33:55.000000000 -0400
76050 @@ -580,7 +580,7 @@ struct hda_bus_ops {
76051 /* notify power-up/down from codec to controller */
76052 void (*pm_notify)(struct hda_bus *bus);
76053 #endif
76054 -};
76055 +} __no_const;
76056
76057 /* template to pass to the bus constructor */
76058 struct hda_bus_template {
76059 @@ -705,7 +705,7 @@ struct hda_pcm_ops {
76060 struct snd_pcm_substream *substream);
76061 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
76062 struct snd_pcm_substream *substream);
76063 -};
76064 +} __no_const;
76065
76066 /* PCM information for each substream */
76067 struct hda_pcm_stream {
76068 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_generic.c linux-2.6.32.45/sound/pci/hda/hda_generic.c
76069 --- linux-2.6.32.45/sound/pci/hda/hda_generic.c 2011-03-27 14:31:47.000000000 -0400
76070 +++ linux-2.6.32.45/sound/pci/hda/hda_generic.c 2011-08-05 20:33:55.000000000 -0400
76071 @@ -1097,7 +1097,7 @@ int snd_hda_parse_generic_codec(struct h
76072 (err = parse_output(codec)) < 0)
76073 goto error;
76074
76075 - codec->patch_ops = generic_patch_ops;
76076 + memcpy((void *)&codec->patch_ops, &generic_patch_ops, sizeof(generic_patch_ops));
76077
76078 return 0;
76079
76080 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_analog.c linux-2.6.32.45/sound/pci/hda/patch_analog.c
76081 --- linux-2.6.32.45/sound/pci/hda/patch_analog.c 2011-03-27 14:31:47.000000000 -0400
76082 +++ linux-2.6.32.45/sound/pci/hda/patch_analog.c 2011-08-05 20:33:55.000000000 -0400
76083 @@ -1069,7 +1069,7 @@ static int patch_ad1986a(struct hda_code
76084 #endif
76085 spec->vmaster_nid = 0x1b;
76086
76087 - codec->patch_ops = ad198x_patch_ops;
76088 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76089
76090 /* override some parameters */
76091 board_config = snd_hda_check_board_config(codec, AD1986A_MODELS,
76092 @@ -1120,8 +1120,8 @@ static int patch_ad1986a(struct hda_code
76093 if (!is_jack_available(codec, 0x25))
76094 spec->multiout.dig_out_nid = 0;
76095 spec->input_mux = &ad1986a_automic_capture_source;
76096 - codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76097 - codec->patch_ops.init = ad1986a_automic_init;
76098 + *(void **)&codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76099 + *(void **)&codec->patch_ops.init = ad1986a_automic_init;
76100 break;
76101 case AD1986A_SAMSUNG_P50:
76102 spec->num_mixers = 2;
76103 @@ -1137,8 +1137,8 @@ static int patch_ad1986a(struct hda_code
76104 if (!is_jack_available(codec, 0x25))
76105 spec->multiout.dig_out_nid = 0;
76106 spec->input_mux = &ad1986a_automic_capture_source;
76107 - codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76108 - codec->patch_ops.init = ad1986a_samsung_p50_init;
76109 + *(void **)&codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76110 + *(void **)&codec->patch_ops.init = ad1986a_samsung_p50_init;
76111 break;
76112 case AD1986A_LAPTOP_AUTOMUTE:
76113 spec->num_mixers = 3;
76114 @@ -1154,8 +1154,8 @@ static int patch_ad1986a(struct hda_code
76115 if (!is_jack_available(codec, 0x25))
76116 spec->multiout.dig_out_nid = 0;
76117 spec->input_mux = &ad1986a_laptop_eapd_capture_source;
76118 - codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76119 - codec->patch_ops.init = ad1986a_hp_init;
76120 + *(void **)&codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76121 + *(void **)&codec->patch_ops.init = ad1986a_hp_init;
76122 /* Lenovo N100 seems to report the reversed bit
76123 * for HP jack-sensing
76124 */
76125 @@ -1363,7 +1363,7 @@ static int patch_ad1983(struct hda_codec
76126 #endif
76127 spec->vmaster_nid = 0x05;
76128
76129 - codec->patch_ops = ad198x_patch_ops;
76130 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76131
76132 return 0;
76133 }
76134 @@ -1769,7 +1769,7 @@ static int patch_ad1981(struct hda_codec
76135 #endif
76136 spec->vmaster_nid = 0x05;
76137
76138 - codec->patch_ops = ad198x_patch_ops;
76139 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76140
76141 /* override some parameters */
76142 board_config = snd_hda_check_board_config(codec, AD1981_MODELS,
76143 @@ -1783,8 +1783,8 @@ static int patch_ad1981(struct hda_codec
76144 spec->multiout.dig_out_nid = 0;
76145 spec->input_mux = &ad1981_hp_capture_source;
76146
76147 - codec->patch_ops.init = ad1981_hp_init;
76148 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76149 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76150 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76151 break;
76152 case AD1981_THINKPAD:
76153 spec->mixers[0] = ad1981_thinkpad_mixers;
76154 @@ -1805,8 +1805,8 @@ static int patch_ad1981(struct hda_codec
76155 spec->init_verbs[1] = ad1981_toshiba_init_verbs;
76156 spec->multiout.dig_out_nid = 0;
76157 spec->input_mux = &ad1981_hp_capture_source;
76158 - codec->patch_ops.init = ad1981_hp_init;
76159 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76160 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76161 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76162 break;
76163 }
76164 return 0;
76165 @@ -3096,14 +3096,14 @@ static int patch_ad1988(struct hda_codec
76166 if (spec->dig_in_nid && codec->vendor_id < 0x11d4989a)
76167 spec->mixers[spec->num_mixers++] = ad1988_spdif_in_mixers;
76168
76169 - codec->patch_ops = ad198x_patch_ops;
76170 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76171 switch (board_config) {
76172 case AD1988_AUTO:
76173 - codec->patch_ops.init = ad1988_auto_init;
76174 + *(void **)&codec->patch_ops.init = ad1988_auto_init;
76175 break;
76176 case AD1988_LAPTOP:
76177 case AD1988_LAPTOP_DIG:
76178 - codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76179 + *(void **)&codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76180 break;
76181 }
76182 #ifdef CONFIG_SND_HDA_POWER_SAVE
76183 @@ -3321,7 +3321,7 @@ static int patch_ad1884(struct hda_codec
76184 /* we need to cover all playback volumes */
76185 spec->slave_vols = ad1884_slave_vols;
76186
76187 - codec->patch_ops = ad198x_patch_ops;
76188 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76189
76190 return 0;
76191 }
76192 @@ -3529,7 +3529,7 @@ static int patch_ad1984(struct hda_codec
76193 case AD1984_BASIC:
76194 /* additional digital mics */
76195 spec->mixers[spec->num_mixers++] = ad1984_dmic_mixers;
76196 - codec->patch_ops.build_pcms = ad1984_build_pcms;
76197 + *(void **)&codec->patch_ops.build_pcms = ad1984_build_pcms;
76198 break;
76199 case AD1984_THINKPAD:
76200 spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
76201 @@ -4229,7 +4229,7 @@ static int patch_ad1884a(struct hda_code
76202 #ifdef CONFIG_SND_HDA_POWER_SAVE
76203 spec->loopback.amplist = ad1884a_loopbacks;
76204 #endif
76205 - codec->patch_ops = ad198x_patch_ops;
76206 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76207
76208 /* override some parameters */
76209 board_config = snd_hda_check_board_config(codec, AD1884A_MODELS,
76210 @@ -4240,8 +4240,8 @@ static int patch_ad1884a(struct hda_code
76211 spec->mixers[0] = ad1884a_laptop_mixers;
76212 spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
76213 spec->multiout.dig_out_nid = 0;
76214 - codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76215 - codec->patch_ops.init = ad1884a_laptop_init;
76216 + *(void **)&codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76217 + *(void **)&codec->patch_ops.init = ad1884a_laptop_init;
76218 /* set the upper-limit for mixer amp to 0dB for avoiding the
76219 * possible damage by overloading
76220 */
76221 @@ -4255,8 +4255,8 @@ static int patch_ad1884a(struct hda_code
76222 spec->mixers[0] = ad1884a_mobile_mixers;
76223 spec->init_verbs[0] = ad1884a_mobile_verbs;
76224 spec->multiout.dig_out_nid = 0;
76225 - codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76226 - codec->patch_ops.init = ad1884a_hp_init;
76227 + *(void **)&codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76228 + *(void **)&codec->patch_ops.init = ad1884a_hp_init;
76229 /* set the upper-limit for mixer amp to 0dB for avoiding the
76230 * possible damage by overloading
76231 */
76232 @@ -4272,15 +4272,15 @@ static int patch_ad1884a(struct hda_code
76233 ad1984a_thinkpad_verbs;
76234 spec->multiout.dig_out_nid = 0;
76235 spec->input_mux = &ad1984a_thinkpad_capture_source;
76236 - codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76237 - codec->patch_ops.init = ad1984a_thinkpad_init;
76238 + *(void **)&codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76239 + *(void **)&codec->patch_ops.init = ad1984a_thinkpad_init;
76240 break;
76241 case AD1984A_TOUCHSMART:
76242 spec->mixers[0] = ad1984a_touchsmart_mixers;
76243 spec->init_verbs[0] = ad1984a_touchsmart_verbs;
76244 spec->multiout.dig_out_nid = 0;
76245 - codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76246 - codec->patch_ops.init = ad1984a_touchsmart_init;
76247 + *(void **)&codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76248 + *(void **)&codec->patch_ops.init = ad1984a_touchsmart_init;
76249 /* set the upper-limit for mixer amp to 0dB for avoiding the
76250 * possible damage by overloading
76251 */
76252 @@ -4607,7 +4607,7 @@ static int patch_ad1882(struct hda_codec
76253 #endif
76254 spec->vmaster_nid = 0x04;
76255
76256 - codec->patch_ops = ad198x_patch_ops;
76257 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76258
76259 /* override some parameters */
76260 board_config = snd_hda_check_board_config(codec, AD1882_MODELS,
76261 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c
76262 --- linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-03-27 14:31:47.000000000 -0400
76263 +++ linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-08-05 20:33:55.000000000 -0400
76264 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_code
76265 */
76266 spec->multiout.dig_out_nid = CVT_NID;
76267
76268 - codec->patch_ops = atihdmi_patch_ops;
76269 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
76270
76271 return 0;
76272 }
76273 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_ca0110.c linux-2.6.32.45/sound/pci/hda/patch_ca0110.c
76274 --- linux-2.6.32.45/sound/pci/hda/patch_ca0110.c 2011-03-27 14:31:47.000000000 -0400
76275 +++ linux-2.6.32.45/sound/pci/hda/patch_ca0110.c 2011-08-05 20:33:55.000000000 -0400
76276 @@ -525,7 +525,7 @@ static int patch_ca0110(struct hda_codec
76277 if (err < 0)
76278 goto error;
76279
76280 - codec->patch_ops = ca0110_patch_ops;
76281 + memcpy((void *)&codec->patch_ops, &ca0110_patch_ops, sizeof(ca0110_patch_ops));
76282
76283 return 0;
76284
76285 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_cirrus.c linux-2.6.32.45/sound/pci/hda/patch_cirrus.c
76286 --- linux-2.6.32.45/sound/pci/hda/patch_cirrus.c 2011-05-10 22:12:02.000000000 -0400
76287 +++ linux-2.6.32.45/sound/pci/hda/patch_cirrus.c 2011-08-05 20:33:55.000000000 -0400
76288 @@ -1191,7 +1191,7 @@ static int patch_cs420x(struct hda_codec
76289 if (err < 0)
76290 goto error;
76291
76292 - codec->patch_ops = cs_patch_ops;
76293 + memcpy((void *)&codec->patch_ops, &cs_patch_ops, sizeof(cs_patch_ops));
76294
76295 return 0;
76296
76297 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_cmedia.c linux-2.6.32.45/sound/pci/hda/patch_cmedia.c
76298 --- linux-2.6.32.45/sound/pci/hda/patch_cmedia.c 2011-03-27 14:31:47.000000000 -0400
76299 +++ linux-2.6.32.45/sound/pci/hda/patch_cmedia.c 2011-08-05 20:33:55.000000000 -0400
76300 @@ -728,7 +728,7 @@ static int patch_cmi9880(struct hda_code
76301
76302 spec->adc_nids = cmi9880_adc_nids;
76303
76304 - codec->patch_ops = cmi9880_patch_ops;
76305 + memcpy((void *)&codec->patch_ops, &cmi9880_patch_ops, sizeof(cmi9880_patch_ops));
76306
76307 return 0;
76308 }
76309 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_conexant.c linux-2.6.32.45/sound/pci/hda/patch_conexant.c
76310 --- linux-2.6.32.45/sound/pci/hda/patch_conexant.c 2011-03-27 14:31:47.000000000 -0400
76311 +++ linux-2.6.32.45/sound/pci/hda/patch_conexant.c 2011-08-05 20:33:55.000000000 -0400
76312 @@ -1119,55 +1119,55 @@ static int patch_cxt5045(struct hda_code
76313 spec->channel_mode = cxt5045_modes,
76314
76315
76316 - codec->patch_ops = conexant_patch_ops;
76317 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76318
76319 board_config = snd_hda_check_board_config(codec, CXT5045_MODELS,
76320 cxt5045_models,
76321 cxt5045_cfg_tbl);
76322 switch (board_config) {
76323 case CXT5045_LAPTOP_HPSENSE:
76324 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76325 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76326 spec->input_mux = &cxt5045_capture_source;
76327 spec->num_init_verbs = 2;
76328 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76329 spec->mixers[0] = cxt5045_mixers;
76330 - codec->patch_ops.init = cxt5045_init;
76331 + *(void **)&codec->patch_ops.init = cxt5045_init;
76332 break;
76333 case CXT5045_LAPTOP_MICSENSE:
76334 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76335 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76336 spec->input_mux = &cxt5045_capture_source;
76337 spec->num_init_verbs = 2;
76338 spec->init_verbs[1] = cxt5045_mic_sense_init_verbs;
76339 spec->mixers[0] = cxt5045_mixers;
76340 - codec->patch_ops.init = cxt5045_init;
76341 + *(void **)&codec->patch_ops.init = cxt5045_init;
76342 break;
76343 default:
76344 case CXT5045_LAPTOP_HPMICSENSE:
76345 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76346 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76347 spec->input_mux = &cxt5045_capture_source;
76348 spec->num_init_verbs = 3;
76349 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76350 spec->init_verbs[2] = cxt5045_mic_sense_init_verbs;
76351 spec->mixers[0] = cxt5045_mixers;
76352 - codec->patch_ops.init = cxt5045_init;
76353 + *(void **)&codec->patch_ops.init = cxt5045_init;
76354 break;
76355 case CXT5045_BENQ:
76356 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76357 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76358 spec->input_mux = &cxt5045_capture_source_benq;
76359 spec->num_init_verbs = 1;
76360 spec->init_verbs[0] = cxt5045_benq_init_verbs;
76361 spec->mixers[0] = cxt5045_mixers;
76362 spec->mixers[1] = cxt5045_benq_mixers;
76363 spec->num_mixers = 2;
76364 - codec->patch_ops.init = cxt5045_init;
76365 + *(void **)&codec->patch_ops.init = cxt5045_init;
76366 break;
76367 case CXT5045_LAPTOP_HP530:
76368 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76369 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
76370 spec->input_mux = &cxt5045_capture_source_hp530;
76371 spec->num_init_verbs = 2;
76372 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
76373 spec->mixers[0] = cxt5045_mixers_hp530;
76374 - codec->patch_ops.init = cxt5045_init;
76375 + *(void **)&codec->patch_ops.init = cxt5045_init;
76376 break;
76377 #ifdef CONFIG_SND_DEBUG
76378 case CXT5045_TEST:
76379 @@ -1556,7 +1556,7 @@ static int patch_cxt5047(struct hda_code
76380 spec->num_channel_mode = ARRAY_SIZE(cxt5047_modes),
76381 spec->channel_mode = cxt5047_modes,
76382
76383 - codec->patch_ops = conexant_patch_ops;
76384 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76385
76386 board_config = snd_hda_check_board_config(codec, CXT5047_MODELS,
76387 cxt5047_models,
76388 @@ -1565,13 +1565,13 @@ static int patch_cxt5047(struct hda_code
76389 case CXT5047_LAPTOP:
76390 spec->num_mixers = 2;
76391 spec->mixers[1] = cxt5047_hp_spk_mixers;
76392 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76393 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76394 break;
76395 case CXT5047_LAPTOP_HP:
76396 spec->num_mixers = 2;
76397 spec->mixers[1] = cxt5047_hp_only_mixers;
76398 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76399 - codec->patch_ops.init = cxt5047_hp_init;
76400 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76401 + *(void **)&codec->patch_ops.init = cxt5047_hp_init;
76402 break;
76403 case CXT5047_LAPTOP_EAPD:
76404 spec->input_mux = &cxt5047_toshiba_capture_source;
76405 @@ -1579,14 +1579,14 @@ static int patch_cxt5047(struct hda_code
76406 spec->mixers[1] = cxt5047_hp_spk_mixers;
76407 spec->num_init_verbs = 2;
76408 spec->init_verbs[1] = cxt5047_toshiba_init_verbs;
76409 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76410 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76411 break;
76412 #ifdef CONFIG_SND_DEBUG
76413 case CXT5047_TEST:
76414 spec->input_mux = &cxt5047_test_capture_source;
76415 spec->mixers[0] = cxt5047_test_mixer;
76416 spec->init_verbs[0] = cxt5047_test_init_verbs;
76417 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76418 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
76419 #endif
76420 }
76421 spec->vmaster_nid = 0x13;
76422 @@ -1904,8 +1904,8 @@ static int patch_cxt5051(struct hda_code
76423 codec->spec = spec;
76424 codec->pin_amp_workaround = 1;
76425
76426 - codec->patch_ops = conexant_patch_ops;
76427 - codec->patch_ops.init = cxt5051_init;
76428 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76429 + *(void **)&codec->patch_ops.init = cxt5051_init;
76430
76431 spec->multiout.max_channels = 2;
76432 spec->multiout.num_dacs = ARRAY_SIZE(cxt5051_dac_nids);
76433 @@ -1923,7 +1923,7 @@ static int patch_cxt5051(struct hda_code
76434 spec->cur_adc = 0;
76435 spec->cur_adc_idx = 0;
76436
76437 - codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
76438 + *(void **)&codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
76439
76440 board_config = snd_hda_check_board_config(codec, CXT5051_MODELS,
76441 cxt5051_models,
76442 @@ -2372,8 +2372,8 @@ static int patch_cxt5066(struct hda_code
76443 return -ENOMEM;
76444 codec->spec = spec;
76445
76446 - codec->patch_ops = conexant_patch_ops;
76447 - codec->patch_ops.init = cxt5066_init;
76448 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
76449 + *(void **)&codec->patch_ops.init = cxt5066_init;
76450
76451 spec->dell_automute = 0;
76452 spec->multiout.max_channels = 2;
76453 @@ -2413,7 +2413,7 @@ static int patch_cxt5066(struct hda_code
76454 spec->dell_automute = 1;
76455 break;
76456 case CXT5066_OLPC_XO_1_5:
76457 - codec->patch_ops.unsol_event = cxt5066_unsol_event;
76458 + *(void **)&codec->patch_ops.unsol_event = cxt5066_unsol_event;
76459 spec->init_verbs[0] = cxt5066_init_verbs_olpc;
76460 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
76461 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
76462 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c
76463 --- linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
76464 +++ linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-08-05 20:33:55.000000000 -0400
76465 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
76466 cp_ready);
76467
76468 /* TODO */
76469 - if (cp_state)
76470 - ;
76471 - if (cp_ready)
76472 - ;
76473 + if (cp_state) {
76474 + }
76475 + if (cp_ready) {
76476 + }
76477 }
76478
76479
76480 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hd
76481 spec->multiout.dig_out_nid = cvt_nid;
76482
76483 codec->spec = spec;
76484 - codec->patch_ops = intel_hdmi_patch_ops;
76485 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
76486
76487 snd_hda_eld_proc_new(codec, &spec->sink_eld);
76488
76489 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c
76490 --- linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-03-27 14:31:47.000000000 -0400
76491 +++ linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-08-05 20:33:55.000000000 -0400
76492 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_c
76493 spec->multiout.max_channels = 8;
76494 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
76495
76496 - codec->patch_ops = nvhdmi_patch_ops_8ch;
76497 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
76498
76499 return 0;
76500 }
76501 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_c
76502 spec->multiout.max_channels = 2;
76503 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
76504
76505 - codec->patch_ops = nvhdmi_patch_ops_2ch;
76506 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
76507
76508 return 0;
76509 }
76510 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_realtek.c linux-2.6.32.45/sound/pci/hda/patch_realtek.c
76511 --- linux-2.6.32.45/sound/pci/hda/patch_realtek.c 2011-06-25 12:55:35.000000000 -0400
76512 +++ linux-2.6.32.45/sound/pci/hda/patch_realtek.c 2011-08-05 20:33:55.000000000 -0400
76513 @@ -4856,7 +4856,7 @@ static int patch_alc880(struct hda_codec
76514
76515 spec->vmaster_nid = 0x0c;
76516
76517 - codec->patch_ops = alc_patch_ops;
76518 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76519 if (board_config == ALC880_AUTO)
76520 spec->init_hook = alc880_auto_init;
76521 #ifdef CONFIG_SND_HDA_POWER_SAVE
76522 @@ -6479,7 +6479,7 @@ static int patch_alc260(struct hda_codec
76523
76524 spec->vmaster_nid = 0x08;
76525
76526 - codec->patch_ops = alc_patch_ops;
76527 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76528 if (board_config == ALC260_AUTO)
76529 spec->init_hook = alc260_auto_init;
76530 #ifdef CONFIG_SND_HDA_POWER_SAVE
76531 @@ -9997,7 +9997,7 @@ static int patch_alc882(struct hda_codec
76532
76533 spec->vmaster_nid = 0x0c;
76534
76535 - codec->patch_ops = alc_patch_ops;
76536 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76537 if (board_config == ALC882_AUTO)
76538 spec->init_hook = alc882_auto_init;
76539 #ifdef CONFIG_SND_HDA_POWER_SAVE
76540 @@ -11871,7 +11871,7 @@ static int patch_alc262(struct hda_codec
76541
76542 spec->vmaster_nid = 0x0c;
76543
76544 - codec->patch_ops = alc_patch_ops;
76545 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76546 if (board_config == ALC262_AUTO)
76547 spec->init_hook = alc262_auto_init;
76548 #ifdef CONFIG_SND_HDA_POWER_SAVE
76549 @@ -12950,7 +12950,7 @@ static int patch_alc268(struct hda_codec
76550
76551 spec->vmaster_nid = 0x02;
76552
76553 - codec->patch_ops = alc_patch_ops;
76554 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76555 if (board_config == ALC268_AUTO)
76556 spec->init_hook = alc268_auto_init;
76557
76558 @@ -13636,7 +13636,7 @@ static int patch_alc269(struct hda_codec
76559
76560 spec->vmaster_nid = 0x02;
76561
76562 - codec->patch_ops = alc_patch_ops;
76563 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76564 if (board_config == ALC269_AUTO)
76565 spec->init_hook = alc269_auto_init;
76566 #ifdef CONFIG_SND_HDA_POWER_SAVE
76567 @@ -14741,7 +14741,7 @@ static int patch_alc861(struct hda_codec
76568
76569 spec->vmaster_nid = 0x03;
76570
76571 - codec->patch_ops = alc_patch_ops;
76572 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76573 if (board_config == ALC861_AUTO)
76574 spec->init_hook = alc861_auto_init;
76575 #ifdef CONFIG_SND_HDA_POWER_SAVE
76576 @@ -15727,7 +15727,7 @@ static int patch_alc861vd(struct hda_cod
76577
76578 spec->vmaster_nid = 0x02;
76579
76580 - codec->patch_ops = alc_patch_ops;
76581 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76582
76583 if (board_config == ALC861VD_AUTO)
76584 spec->init_hook = alc861vd_auto_init;
76585 @@ -17652,7 +17652,7 @@ static int patch_alc662(struct hda_codec
76586
76587 spec->vmaster_nid = 0x02;
76588
76589 - codec->patch_ops = alc_patch_ops;
76590 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
76591 if (board_config == ALC662_AUTO)
76592 spec->init_hook = alc662_auto_init;
76593 #ifdef CONFIG_SND_HDA_POWER_SAVE
76594 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_si3054.c linux-2.6.32.45/sound/pci/hda/patch_si3054.c
76595 --- linux-2.6.32.45/sound/pci/hda/patch_si3054.c 2011-03-27 14:31:47.000000000 -0400
76596 +++ linux-2.6.32.45/sound/pci/hda/patch_si3054.c 2011-08-05 20:33:55.000000000 -0400
76597 @@ -275,7 +275,7 @@ static int patch_si3054(struct hda_codec
76598 if (spec == NULL)
76599 return -ENOMEM;
76600 codec->spec = spec;
76601 - codec->patch_ops = si3054_patch_ops;
76602 + memcpy((void *)&codec->patch_ops, &si3054_patch_ops, sizeof(si3054_patch_ops));
76603 return 0;
76604 }
76605
76606 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c
76607 --- linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-06-25 12:55:35.000000000 -0400
76608 +++ linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-08-05 20:33:55.000000000 -0400
76609 @@ -4899,7 +4899,7 @@ static int patch_stac9200(struct hda_cod
76610 if (spec->board_config == STAC_9200_PANASONIC)
76611 spec->hp_detect = 0;
76612
76613 - codec->patch_ops = stac92xx_patch_ops;
76614 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76615
76616 return 0;
76617 }
76618 @@ -4981,7 +4981,7 @@ static int patch_stac925x(struct hda_cod
76619 return err;
76620 }
76621
76622 - codec->patch_ops = stac92xx_patch_ops;
76623 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76624
76625 return 0;
76626 }
76627 @@ -5125,7 +5125,7 @@ again:
76628 if (spec->board_config == STAC_92HD73XX_NO_JD)
76629 spec->hp_detect = 0;
76630
76631 - codec->patch_ops = stac92xx_patch_ops;
76632 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76633
76634 codec->proc_widget_hook = stac92hd7x_proc_hook;
76635
76636 @@ -5220,7 +5220,7 @@ again:
76637 snd_hda_codec_write_cache(codec, nid, 0,
76638 AC_VERB_SET_CONNECT_SEL, num_dacs);
76639
76640 - codec->patch_ops = stac92xx_patch_ops;
76641 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76642
76643 codec->proc_widget_hook = stac92hd_proc_hook;
76644
76645 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hd
76646 return -ENOMEM;
76647
76648 codec->spec = spec;
76649 - codec->patch_ops = stac92xx_patch_ops;
76650 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76651 spec->num_pins = STAC92HD71BXX_NUM_PINS;
76652 switch (codec->vendor_id) {
76653 case 0x111d76b6:
76654 @@ -5515,7 +5515,7 @@ again:
76655 spec->gpio_dir |= spec->gpio_led;
76656 spec->gpio_data |= spec->gpio_led;
76657 /* register check_power_status callback. */
76658 - codec->patch_ops.check_power_status =
76659 + *(void **)&codec->patch_ops.check_power_status =
76660 stac92xx_hp_check_power_status;
76661 }
76662 #endif
76663 @@ -5634,7 +5634,7 @@ static int patch_stac922x(struct hda_cod
76664 return err;
76665 }
76666
76667 - codec->patch_ops = stac92xx_patch_ops;
76668 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76669
76670 /* Fix Mux capture level; max to 2 */
76671 snd_hda_override_amp_caps(codec, 0x12, HDA_OUTPUT,
76672 @@ -5757,7 +5757,7 @@ static int patch_stac927x(struct hda_cod
76673 return err;
76674 }
76675
76676 - codec->patch_ops = stac92xx_patch_ops;
76677 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76678
76679 codec->proc_widget_hook = stac927x_proc_hook;
76680
76681 @@ -5880,7 +5880,7 @@ static int patch_stac9205(struct hda_cod
76682 return err;
76683 }
76684
76685 - codec->patch_ops = stac92xx_patch_ops;
76686 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76687
76688 codec->proc_widget_hook = stac9205_proc_hook;
76689
76690 @@ -5974,7 +5974,7 @@ static int patch_stac9872(struct hda_cod
76691 return -EINVAL;
76692 }
76693 spec->input_mux = &spec->private_imux;
76694 - codec->patch_ops = stac92xx_patch_ops;
76695 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
76696 return 0;
76697 }
76698
76699 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_via.c linux-2.6.32.45/sound/pci/hda/patch_via.c
76700 --- linux-2.6.32.45/sound/pci/hda/patch_via.c 2011-03-27 14:31:47.000000000 -0400
76701 +++ linux-2.6.32.45/sound/pci/hda/patch_via.c 2011-08-05 20:33:55.000000000 -0400
76702 @@ -1399,9 +1399,9 @@ static int patch_vt1708(struct hda_codec
76703 spec->num_mixers++;
76704 }
76705
76706 - codec->patch_ops = via_patch_ops;
76707 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76708
76709 - codec->patch_ops.init = via_auto_init;
76710 + *(void **)&codec->patch_ops.init = via_auto_init;
76711 #ifdef CONFIG_SND_HDA_POWER_SAVE
76712 spec->loopback.amplist = vt1708_loopbacks;
76713 #endif
76714 @@ -1870,10 +1870,10 @@ static int patch_vt1709_10ch(struct hda_
76715 spec->num_mixers++;
76716 }
76717
76718 - codec->patch_ops = via_patch_ops;
76719 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76720
76721 - codec->patch_ops.init = via_auto_init;
76722 - codec->patch_ops.unsol_event = via_unsol_event;
76723 + *(void **)&codec->patch_ops.init = via_auto_init;
76724 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76725 #ifdef CONFIG_SND_HDA_POWER_SAVE
76726 spec->loopback.amplist = vt1709_loopbacks;
76727 #endif
76728 @@ -1964,10 +1964,10 @@ static int patch_vt1709_6ch(struct hda_c
76729 spec->num_mixers++;
76730 }
76731
76732 - codec->patch_ops = via_patch_ops;
76733 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76734
76735 - codec->patch_ops.init = via_auto_init;
76736 - codec->patch_ops.unsol_event = via_unsol_event;
76737 + *(void **)&codec->patch_ops.init = via_auto_init;
76738 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76739 #ifdef CONFIG_SND_HDA_POWER_SAVE
76740 spec->loopback.amplist = vt1709_loopbacks;
76741 #endif
76742 @@ -2418,10 +2418,10 @@ static int patch_vt1708B_8ch(struct hda_
76743 spec->num_mixers++;
76744 }
76745
76746 - codec->patch_ops = via_patch_ops;
76747 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76748
76749 - codec->patch_ops.init = via_auto_init;
76750 - codec->patch_ops.unsol_event = via_unsol_event;
76751 + *(void **)&codec->patch_ops.init = via_auto_init;
76752 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76753 #ifdef CONFIG_SND_HDA_POWER_SAVE
76754 spec->loopback.amplist = vt1708B_loopbacks;
76755 #endif
76756 @@ -2470,10 +2470,10 @@ static int patch_vt1708B_4ch(struct hda_
76757 spec->num_mixers++;
76758 }
76759
76760 - codec->patch_ops = via_patch_ops;
76761 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76762
76763 - codec->patch_ops.init = via_auto_init;
76764 - codec->patch_ops.unsol_event = via_unsol_event;
76765 + *(void **)&codec->patch_ops.init = via_auto_init;
76766 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76767 #ifdef CONFIG_SND_HDA_POWER_SAVE
76768 spec->loopback.amplist = vt1708B_loopbacks;
76769 #endif
76770 @@ -2905,10 +2905,10 @@ static int patch_vt1708S(struct hda_code
76771 spec->num_mixers++;
76772 }
76773
76774 - codec->patch_ops = via_patch_ops;
76775 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76776
76777 - codec->patch_ops.init = via_auto_init;
76778 - codec->patch_ops.unsol_event = via_unsol_event;
76779 + *(void **)&codec->patch_ops.init = via_auto_init;
76780 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76781 #ifdef CONFIG_SND_HDA_POWER_SAVE
76782 spec->loopback.amplist = vt1708S_loopbacks;
76783 #endif
76784 @@ -3223,10 +3223,10 @@ static int patch_vt1702(struct hda_codec
76785 spec->num_mixers++;
76786 }
76787
76788 - codec->patch_ops = via_patch_ops;
76789 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
76790
76791 - codec->patch_ops.init = via_auto_init;
76792 - codec->patch_ops.unsol_event = via_unsol_event;
76793 + *(void **)&codec->patch_ops.init = via_auto_init;
76794 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
76795 #ifdef CONFIG_SND_HDA_POWER_SAVE
76796 spec->loopback.amplist = vt1702_loopbacks;
76797 #endif
76798 diff -urNp linux-2.6.32.45/sound/pci/ice1712/ice1712.h linux-2.6.32.45/sound/pci/ice1712/ice1712.h
76799 --- linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-03-27 14:31:47.000000000 -0400
76800 +++ linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-08-05 20:33:55.000000000 -0400
76801 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
76802 unsigned int mask_flags; /* total mask bits */
76803 struct snd_akm4xxx_ops {
76804 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
76805 - } ops;
76806 + } __no_const ops;
76807 };
76808
76809 struct snd_ice1712_spdif {
76810 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
76811 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76812 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76813 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
76814 - } ops;
76815 + } __no_const ops;
76816 };
76817
76818
76819 diff -urNp linux-2.6.32.45/sound/pci/intel8x0m.c linux-2.6.32.45/sound/pci/intel8x0m.c
76820 --- linux-2.6.32.45/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
76821 +++ linux-2.6.32.45/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
76822 @@ -1264,7 +1264,7 @@ static struct shortname_table {
76823 { 0x5455, "ALi M5455" },
76824 { 0x746d, "AMD AMD8111" },
76825 #endif
76826 - { 0 },
76827 + { 0, },
76828 };
76829
76830 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
76831 diff -urNp linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c
76832 --- linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
76833 +++ linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
76834 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
76835 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
76836 break;
76837 }
76838 - if (atomic_read(&chip->interrupt_sleep_count)) {
76839 - atomic_set(&chip->interrupt_sleep_count, 0);
76840 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76841 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76842 wake_up(&chip->interrupt_sleep);
76843 }
76844 __end:
76845 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
76846 continue;
76847 init_waitqueue_entry(&wait, current);
76848 add_wait_queue(&chip->interrupt_sleep, &wait);
76849 - atomic_inc(&chip->interrupt_sleep_count);
76850 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
76851 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
76852 remove_wait_queue(&chip->interrupt_sleep, &wait);
76853 }
76854 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
76855 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
76856 spin_unlock(&chip->reg_lock);
76857
76858 - if (atomic_read(&chip->interrupt_sleep_count)) {
76859 - atomic_set(&chip->interrupt_sleep_count, 0);
76860 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
76861 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76862 wake_up(&chip->interrupt_sleep);
76863 }
76864 }
76865 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
76866 spin_lock_init(&chip->reg_lock);
76867 spin_lock_init(&chip->voice_lock);
76868 init_waitqueue_head(&chip->interrupt_sleep);
76869 - atomic_set(&chip->interrupt_sleep_count, 0);
76870 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76871 chip->card = card;
76872 chip->pci = pci;
76873 chip->irq = -1;
76874 diff -urNp linux-2.6.32.45/sound/soc/soc-core.c linux-2.6.32.45/sound/soc/soc-core.c
76875 --- linux-2.6.32.45/sound/soc/soc-core.c 2011-03-27 14:31:47.000000000 -0400
76876 +++ linux-2.6.32.45/sound/soc/soc-core.c 2011-08-05 20:33:55.000000000 -0400
76877 @@ -1107,13 +1107,13 @@ static int soc_new_pcm(struct snd_soc_de
76878
76879 dai_link->pcm = pcm;
76880 pcm->private_data = rtd;
76881 - soc_pcm_ops.mmap = platform->pcm_ops->mmap;
76882 - soc_pcm_ops.pointer = platform->pcm_ops->pointer;
76883 - soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
76884 - soc_pcm_ops.copy = platform->pcm_ops->copy;
76885 - soc_pcm_ops.silence = platform->pcm_ops->silence;
76886 - soc_pcm_ops.ack = platform->pcm_ops->ack;
76887 - soc_pcm_ops.page = platform->pcm_ops->page;
76888 + *(void **)&soc_pcm_ops.mmap = platform->pcm_ops->mmap;
76889 + *(void **)&soc_pcm_ops.pointer = platform->pcm_ops->pointer;
76890 + *(void **)&soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
76891 + *(void **)&soc_pcm_ops.copy = platform->pcm_ops->copy;
76892 + *(void **)&soc_pcm_ops.silence = platform->pcm_ops->silence;
76893 + *(void **)&soc_pcm_ops.ack = platform->pcm_ops->ack;
76894 + *(void **)&soc_pcm_ops.page = platform->pcm_ops->page;
76895
76896 if (playback)
76897 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);
76898 diff -urNp linux-2.6.32.45/sound/usb/usbaudio.c linux-2.6.32.45/sound/usb/usbaudio.c
76899 --- linux-2.6.32.45/sound/usb/usbaudio.c 2011-03-27 14:31:47.000000000 -0400
76900 +++ linux-2.6.32.45/sound/usb/usbaudio.c 2011-08-05 20:33:55.000000000 -0400
76901 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(
76902 switch (cmd) {
76903 case SNDRV_PCM_TRIGGER_START:
76904 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76905 - subs->ops.prepare = prepare_playback_urb;
76906 + *(void **)&subs->ops.prepare = prepare_playback_urb;
76907 return 0;
76908 case SNDRV_PCM_TRIGGER_STOP:
76909 return deactivate_urbs(subs, 0, 0);
76910 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76911 - subs->ops.prepare = prepare_nodata_playback_urb;
76912 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76913 return 0;
76914 default:
76915 return -EINVAL;
76916 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(s
76917
76918 switch (cmd) {
76919 case SNDRV_PCM_TRIGGER_START:
76920 - subs->ops.retire = retire_capture_urb;
76921 + *(void **)&subs->ops.retire = retire_capture_urb;
76922 return start_urbs(subs, substream->runtime);
76923 case SNDRV_PCM_TRIGGER_STOP:
76924 return deactivate_urbs(subs, 0, 0);
76925 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76926 - subs->ops.retire = retire_paused_capture_urb;
76927 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
76928 return 0;
76929 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76930 - subs->ops.retire = retire_capture_urb;
76931 + *(void **)&subs->ops.retire = retire_capture_urb;
76932 return 0;
76933 default:
76934 return -EINVAL;
76935 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct sn
76936 /* for playback, submit the URBs now; otherwise, the first hwptr_done
76937 * updates for all URBs would happen at the same time when starting */
76938 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
76939 - subs->ops.prepare = prepare_nodata_playback_urb;
76940 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76941 return start_urbs(subs, runtime);
76942 } else
76943 return 0;
76944 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_us
76945 subs->direction = stream;
76946 subs->dev = as->chip->dev;
76947 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
76948 - subs->ops = audio_urb_ops[stream];
76949 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
76950 } else {
76951 - subs->ops = audio_urb_ops_high_speed[stream];
76952 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
76953 switch (as->chip->usb_id) {
76954 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
76955 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
76956 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
76957 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76958 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76959 break;
76960 }
76961 }
76962 diff -urNp linux-2.6.32.45/tools/gcc/constify_plugin.c linux-2.6.32.45/tools/gcc/constify_plugin.c
76963 --- linux-2.6.32.45/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
76964 +++ linux-2.6.32.45/tools/gcc/constify_plugin.c 2011-08-11 19:12:51.000000000 -0400
76965 @@ -0,0 +1,189 @@
76966 +/*
76967 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
76968 + * Licensed under the GPL v2, or (at your option) v3
76969 + *
76970 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
76971 + *
76972 + * Usage:
76973 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
76974 + * $ gcc -fplugin=constify_plugin.so test.c -O2
76975 + */
76976 +
76977 +#include "gcc-plugin.h"
76978 +#include "config.h"
76979 +#include "system.h"
76980 +#include "coretypes.h"
76981 +#include "tree.h"
76982 +#include "tree-pass.h"
76983 +#include "intl.h"
76984 +#include "plugin-version.h"
76985 +#include "tm.h"
76986 +#include "toplev.h"
76987 +#include "function.h"
76988 +#include "tree-flow.h"
76989 +#include "plugin.h"
76990 +
76991 +int plugin_is_GPL_compatible;
76992 +
76993 +static struct plugin_info const_plugin_info = {
76994 + .version = "20110721",
76995 + .help = "no-constify\tturn off constification\n",
76996 +};
76997 +
76998 +static bool walk_struct(tree node);
76999 +
77000 +static void deconstify_node(tree node)
77001 +{
77002 + tree field;
77003 +
77004 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77005 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
77006 + if (code == RECORD_TYPE || code == UNION_TYPE)
77007 + deconstify_node(TREE_TYPE(field));
77008 + TREE_READONLY(field) = 0;
77009 + TREE_READONLY(TREE_TYPE(field)) = 0;
77010 + }
77011 +}
77012 +
77013 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77014 +{
77015 + if (TREE_CODE(*node) == FUNCTION_DECL) {
77016 + error("%qE attribute does not apply to functions", name);
77017 + *no_add_attrs = true;
77018 + return NULL_TREE;
77019 + }
77020 +
77021 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
77022 + error("%qE attribute is already applied to the type" , name);
77023 + *no_add_attrs = true;
77024 + return NULL_TREE;
77025 + }
77026 +
77027 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
77028 + error("%qE attribute used on type that is not constified" , name);
77029 + *no_add_attrs = true;
77030 + return NULL_TREE;
77031 + }
77032 +
77033 + if (TREE_CODE(*node) == TYPE_DECL) {
77034 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
77035 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
77036 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
77037 + TREE_READONLY(TREE_TYPE(*node)) = 0;
77038 + deconstify_node(TREE_TYPE(*node));
77039 + return NULL_TREE;
77040 + }
77041 +
77042 + return NULL_TREE;
77043 +}
77044 +
77045 +static struct attribute_spec no_const_attr = {
77046 + .name = "no_const",
77047 + .min_length = 0,
77048 + .max_length = 0,
77049 + .decl_required = false,
77050 + .type_required = false,
77051 + .function_type_required = false,
77052 + .handler = handle_no_const_attribute
77053 +};
77054 +
77055 +static void register_attributes(void *event_data, void *data)
77056 +{
77057 + register_attribute(&no_const_attr);
77058 +}
77059 +
77060 +/*
77061 +static void printnode(char *prefix, tree node)
77062 +{
77063 + enum tree_code code;
77064 + enum tree_code_class tclass;
77065 +
77066 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
77067 +
77068 + code = TREE_CODE(node);
77069 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
77070 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
77071 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
77072 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
77073 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
77074 +}
77075 +*/
77076 +
77077 +static void constify_node(tree node)
77078 +{
77079 + TREE_READONLY(node) = 1;
77080 +}
77081 +
77082 +static bool is_fptr(tree field)
77083 +{
77084 + tree ptr = TREE_TYPE(field);
77085 +
77086 + if (TREE_CODE(ptr) != POINTER_TYPE)
77087 + return false;
77088 +
77089 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77090 +}
77091 +
77092 +static bool walk_struct(tree node)
77093 +{
77094 + tree field;
77095 +
77096 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77097 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
77098 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77099 + if (!(walk_struct(TREE_TYPE(field))))
77100 + return false;
77101 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
77102 + return false;
77103 + }
77104 + return true;
77105 +}
77106 +
77107 +static void finish_type(void *event_data, void *data)
77108 +{
77109 + tree node = (tree)event_data;
77110 +
77111 + if (node == NULL_TREE)
77112 + return;
77113 +
77114 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77115 + return;
77116 +
77117 + if (TREE_READONLY(node))
77118 + return;
77119 +
77120 + if (TYPE_FIELDS(node) == NULL_TREE)
77121 + return;
77122 +
77123 + if (walk_struct(node))
77124 + constify_node(node);
77125 +}
77126 +
77127 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77128 +{
77129 + const char * const plugin_name = plugin_info->base_name;
77130 + const int argc = plugin_info->argc;
77131 + const struct plugin_argument * const argv = plugin_info->argv;
77132 + int i;
77133 + bool constify = true;
77134 +
77135 + if (!plugin_default_version_check(version, &gcc_version)) {
77136 + error(G_("incompatible gcc/plugin versions"));
77137 + return 1;
77138 + }
77139 +
77140 + for (i = 0; i < argc; ++i) {
77141 + if (!(strcmp(argv[i].key, "no-constify"))) {
77142 + constify = false;
77143 + continue;
77144 + }
77145 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77146 + }
77147 +
77148 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77149 + if (constify)
77150 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77151 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77152 +
77153 + return 0;
77154 +}
77155 Binary files linux-2.6.32.45/tools/gcc/constify_plugin.so and linux-2.6.32.45/tools/gcc/constify_plugin.so differ
77156 diff -urNp linux-2.6.32.45/tools/gcc/Makefile linux-2.6.32.45/tools/gcc/Makefile
77157 --- linux-2.6.32.45/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
77158 +++ linux-2.6.32.45/tools/gcc/Makefile 2011-08-05 20:33:55.000000000 -0400
77159 @@ -0,0 +1,12 @@
77160 +#CC := gcc
77161 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77162 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77163 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
77164 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77165 +
77166 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77167 +
77168 +hostlibs-y := stackleak_plugin.so constify_plugin.so
77169 +always := $(hostlibs-y)
77170 +stackleak_plugin-objs := stackleak_plugin.o
77171 +constify_plugin-objs := constify_plugin.o
77172 Binary files linux-2.6.32.45/tools/gcc/pax_plugin.so and linux-2.6.32.45/tools/gcc/pax_plugin.so differ
77173 diff -urNp linux-2.6.32.45/tools/gcc/stackleak_plugin.c linux-2.6.32.45/tools/gcc/stackleak_plugin.c
77174 --- linux-2.6.32.45/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
77175 +++ linux-2.6.32.45/tools/gcc/stackleak_plugin.c 2011-08-05 20:33:55.000000000 -0400
77176 @@ -0,0 +1,243 @@
77177 +/*
77178 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77179 + * Licensed under the GPL v2
77180 + *
77181 + * Note: the choice of the license means that the compilation process is
77182 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77183 + * but for the kernel it doesn't matter since it doesn't link against
77184 + * any of the gcc libraries
77185 + *
77186 + * gcc plugin to help implement various PaX features
77187 + *
77188 + * - track lowest stack pointer
77189 + *
77190 + * TODO:
77191 + * - initialize all local variables
77192 + *
77193 + * BUGS:
77194 + * - cloned functions are instrumented twice
77195 + */
77196 +#include "gcc-plugin.h"
77197 +#include "plugin-version.h"
77198 +#include "config.h"
77199 +#include "system.h"
77200 +#include "coretypes.h"
77201 +#include "tm.h"
77202 +#include "toplev.h"
77203 +#include "basic-block.h"
77204 +#include "gimple.h"
77205 +//#include "expr.h" where are you...
77206 +#include "diagnostic.h"
77207 +#include "rtl.h"
77208 +#include "emit-rtl.h"
77209 +#include "function.h"
77210 +#include "tree.h"
77211 +#include "tree-pass.h"
77212 +#include "intl.h"
77213 +
77214 +int plugin_is_GPL_compatible;
77215 +
77216 +static int track_frame_size = -1;
77217 +static const char track_function[] = "pax_track_stack";
77218 +static bool init_locals;
77219 +
77220 +static struct plugin_info stackleak_plugin_info = {
77221 + .version = "201106030000",
77222 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
77223 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
77224 +};
77225 +
77226 +static bool gate_stackleak_track_stack(void);
77227 +static unsigned int execute_stackleak_tree_instrument(void);
77228 +static unsigned int execute_stackleak_final(void);
77229 +
77230 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
77231 + .pass = {
77232 + .type = GIMPLE_PASS,
77233 + .name = "stackleak_tree_instrument",
77234 + .gate = gate_stackleak_track_stack,
77235 + .execute = execute_stackleak_tree_instrument,
77236 + .sub = NULL,
77237 + .next = NULL,
77238 + .static_pass_number = 0,
77239 + .tv_id = TV_NONE,
77240 + .properties_required = PROP_gimple_leh | PROP_cfg,
77241 + .properties_provided = 0,
77242 + .properties_destroyed = 0,
77243 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
77244 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
77245 + }
77246 +};
77247 +
77248 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
77249 + .pass = {
77250 + .type = RTL_PASS,
77251 + .name = "stackleak_final",
77252 + .gate = gate_stackleak_track_stack,
77253 + .execute = execute_stackleak_final,
77254 + .sub = NULL,
77255 + .next = NULL,
77256 + .static_pass_number = 0,
77257 + .tv_id = TV_NONE,
77258 + .properties_required = 0,
77259 + .properties_provided = 0,
77260 + .properties_destroyed = 0,
77261 + .todo_flags_start = 0,
77262 + .todo_flags_finish = 0
77263 + }
77264 +};
77265 +
77266 +static bool gate_stackleak_track_stack(void)
77267 +{
77268 + return track_frame_size >= 0;
77269 +}
77270 +
77271 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
77272 +{
77273 + gimple call;
77274 + tree decl, type;
77275 +
77276 + // insert call to void pax_track_stack(void)
77277 + type = build_function_type_list(void_type_node, NULL_TREE);
77278 + decl = build_fn_decl(track_function, type);
77279 + DECL_ASSEMBLER_NAME(decl); // for LTO
77280 + call = gimple_build_call(decl, 0);
77281 + if (before)
77282 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
77283 + else
77284 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
77285 +}
77286 +
77287 +static unsigned int execute_stackleak_tree_instrument(void)
77288 +{
77289 + basic_block bb;
77290 + gimple_stmt_iterator gsi;
77291 +
77292 + // 1. loop through BBs and GIMPLE statements
77293 + FOR_EACH_BB(bb) {
77294 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77295 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
77296 + tree decl;
77297 + gimple stmt = gsi_stmt(gsi);
77298 +
77299 + if (!is_gimple_call(stmt))
77300 + continue;
77301 + decl = gimple_call_fndecl(stmt);
77302 + if (!decl)
77303 + continue;
77304 + if (TREE_CODE(decl) != FUNCTION_DECL)
77305 + continue;
77306 + if (!DECL_BUILT_IN(decl))
77307 + continue;
77308 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
77309 + continue;
77310 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
77311 + continue;
77312 +
77313 + // 2. insert track call after each __builtin_alloca call
77314 + stackleak_add_instrumentation(&gsi, false);
77315 +// print_node(stderr, "pax", decl, 4);
77316 + }
77317 + }
77318 +
77319 + // 3. insert track call at the beginning
77320 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
77321 + gsi = gsi_start_bb(bb);
77322 + stackleak_add_instrumentation(&gsi, true);
77323 +
77324 + return 0;
77325 +}
77326 +
77327 +static unsigned int execute_stackleak_final(void)
77328 +{
77329 + rtx insn;
77330 +
77331 + if (cfun->calls_alloca)
77332 + return 0;
77333 +
77334 + // 1. find pax_track_stack calls
77335 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
77336 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
77337 + rtx body;
77338 +
77339 + if (!CALL_P(insn))
77340 + continue;
77341 + body = PATTERN(insn);
77342 + if (GET_CODE(body) != CALL)
77343 + continue;
77344 + body = XEXP(body, 0);
77345 + if (GET_CODE(body) != MEM)
77346 + continue;
77347 + body = XEXP(body, 0);
77348 + if (GET_CODE(body) != SYMBOL_REF)
77349 + continue;
77350 + if (strcmp(XSTR(body, 0), track_function))
77351 + continue;
77352 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77353 + // 2. delete call if function frame is not big enough
77354 + if (get_frame_size() >= track_frame_size)
77355 + continue;
77356 + delete_insn_and_edges(insn);
77357 + }
77358 +
77359 +// print_simple_rtl(stderr, get_insns());
77360 +// print_rtl(stderr, get_insns());
77361 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
77362 +
77363 + return 0;
77364 +}
77365 +
77366 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77367 +{
77368 + const char * const plugin_name = plugin_info->base_name;
77369 + const int argc = plugin_info->argc;
77370 + const struct plugin_argument * const argv = plugin_info->argv;
77371 + int i;
77372 + struct register_pass_info stackleak_tree_instrument_pass_info = {
77373 + .pass = &stackleak_tree_instrument_pass.pass,
77374 +// .reference_pass_name = "tree_profile",
77375 + .reference_pass_name = "optimized",
77376 + .ref_pass_instance_number = 0,
77377 + .pos_op = PASS_POS_INSERT_AFTER
77378 + };
77379 + struct register_pass_info stackleak_final_pass_info = {
77380 + .pass = &stackleak_final_rtl_opt_pass.pass,
77381 + .reference_pass_name = "final",
77382 + .ref_pass_instance_number = 0,
77383 + .pos_op = PASS_POS_INSERT_BEFORE
77384 + };
77385 +
77386 + if (!plugin_default_version_check(version, &gcc_version)) {
77387 + error(G_("incompatible gcc/plugin versions"));
77388 + return 1;
77389 + }
77390 +
77391 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
77392 +
77393 + for (i = 0; i < argc; ++i) {
77394 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
77395 + if (!argv[i].value) {
77396 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77397 + continue;
77398 + }
77399 + track_frame_size = atoi(argv[i].value);
77400 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
77401 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77402 + continue;
77403 + }
77404 + if (!strcmp(argv[i].key, "initialize-locals")) {
77405 + if (argv[i].value) {
77406 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
77407 + continue;
77408 + }
77409 + init_locals = true;
77410 + continue;
77411 + }
77412 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77413 + }
77414 +
77415 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
77416 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
77417 +
77418 + return 0;
77419 +}
77420 Binary files linux-2.6.32.45/tools/gcc/stackleak_plugin.so and linux-2.6.32.45/tools/gcc/stackleak_plugin.so differ
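For orientation, a hedged sketch of what the two stackleak passes above effectively do to an instrumented function; every name except pax_track_stack() is hypothetical. The GIMPLE pass inserts a call at the start of the first basic block and after every __builtin_alloca() call, and the later RTL pass deletes those calls again in functions that do not use alloca and whose final frame size stays below the track-lowest-sp threshold:

/* Sketch of the instrumentation result (hypothetical function, not from the patch). */
void pax_track_stack(void);     /* the tracker itself is provided by the kernel, not by the plugin */

void sample_handler(unsigned long n)
{
        char buf[512];
        char *p;

        pax_track_stack();      /* inserted at the start of the first basic block */
        p = __builtin_alloca(n);
        pax_track_stack();      /* inserted after each __builtin_alloca() call */

        __builtin_memset(buf, 0, sizeof buf);
        __builtin_memset(p, 0, n);
}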
77421 diff -urNp linux-2.6.32.45/usr/gen_init_cpio.c linux-2.6.32.45/usr/gen_init_cpio.c
77422 --- linux-2.6.32.45/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
77423 +++ linux-2.6.32.45/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
77424 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
77425 int retval;
77426 int rc = -1;
77427 int namesize;
77428 - int i;
77429 + unsigned int i;
77430
77431 mode |= S_IFREG;
77432
77433 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
77434 *env_var = *expanded = '\0';
77435 strncat(env_var, start + 2, end - start - 2);
77436 strncat(expanded, new_location, start - new_location);
77437 - strncat(expanded, getenv(env_var), PATH_MAX);
77438 - strncat(expanded, end + 1, PATH_MAX);
77439 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
77440 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
77441 strncpy(new_location, expanded, PATH_MAX);
77442 + new_location[PATH_MAX] = 0;
77443 } else
77444 break;
77445 }
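The gen_init_cpio.c hunk above relies on the fact that strncat()'s size argument limits how many characters are appended, not the total size of the destination, so the remaining room has to be computed explicitly; the added new_location[PATH_MAX] = 0 likewise guards against strncpy() leaving the buffer unterminated. A small stand-alone illustration of the same bounded-append pattern, with hypothetical values not taken from the patch:

/* Stand-alone illustration of the bounded strncat() pattern used above. */
#include <stdio.h>
#include <string.h>
#include <limits.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

int main(void)
{
        char expanded[PATH_MAX + 1] = "/usr/src/";
        const char *tail = "linux/usr/gen_init_cpio.c";

        /* the third argument is the space left in the buffer, not sizeof(expanded),
           so subtracting strlen(expanded) keeps the append in bounds */
        strncat(expanded, tail, PATH_MAX - strlen(expanded));
        expanded[PATH_MAX] = 0; /* same belt-and-braces terminator as new_location[PATH_MAX] = 0 */

        printf("%s\n", expanded);
        return 0;
}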
77446 diff -urNp linux-2.6.32.45/virt/kvm/kvm_main.c linux-2.6.32.45/virt/kvm/kvm_main.c
77447 --- linux-2.6.32.45/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
77448 +++ linux-2.6.32.45/virt/kvm/kvm_main.c 2011-08-05 20:33:55.000000000 -0400
77449 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_rebo
77450 if (kvm_rebooting)
77451 /* spin while reset goes on */
77452 while (true)
77453 - ;
77454 + cpu_relax();
77455 /* Fault while not rebooting. We want the trace. */
77456 BUG();
77457 }
77458 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt
77459 kvm_arch_vcpu_put(vcpu);
77460 }
77461
77462 -int kvm_init(void *opaque, unsigned int vcpu_size,
77463 +int kvm_init(const void *opaque, unsigned int vcpu_size,
77464 struct module *module)
77465 {
77466 int r;
77467 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int
77468 /* A kmem cache lets us meet the alignment requirements of fx_save. */
77469 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
77470 __alignof__(struct kvm_vcpu),
77471 - 0, NULL);
77472 + SLAB_USERCOPY, NULL);
77473 if (!kvm_vcpu_cache) {
77474 r = -ENOMEM;
77475 goto out_free_5;
77476 }
77477
77478 - kvm_chardev_ops.owner = module;
77479 - kvm_vm_fops.owner = module;
77480 - kvm_vcpu_fops.owner = module;
77481 + pax_open_kernel();
77482 + *(void **)&kvm_chardev_ops.owner = module;
77483 + *(void **)&kvm_vm_fops.owner = module;
77484 + *(void **)&kvm_vcpu_fops.owner = module;
77485 + pax_close_kernel();
77486
77487 r = misc_register(&kvm_dev);
77488 if (r) {